Index: kernel/generic/src/proc/scheduler.c
===================================================================
--- kernel/generic/src/proc/scheduler.c	(revision cfffb29045b608bdb22354635488347597622ae3)
+++ kernel/generic/src/proc/scheduler.c	(revision e11ae91fc3a4227bcea74717abb36d585ceb5627)
@@ -142,6 +142,5 @@
 			spinlock_unlock(&THREAD->lock);
 			spinlock_unlock(&CPU->lock);
-			THREAD->saved_fpu_context = slab_alloc(fpu_context_slab,
-							       0);
+			THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
 			/* We may have switched CPUs during slab_alloc */
 			goto restart; 
@@ -236,7 +235,8 @@
 
 		/*
-		 * Clear the X_STOLEN flag so that t can be migrated when load balancing needs emerge.
+		 * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
+		 * when load balancing needs emerge.
 		 */
-		t->flags &= ~X_STOLEN;
+		t->flags &= ~THREAD_FLAG_STOLEN;
 		spinlock_unlock(&t->lock);
 
@@ -350,5 +350,6 @@
 	 */
 	context_save(&CPU->saved_context);
-	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (uintptr_t) CPU->stack, CPU_STACK_SIZE);
+	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
+		(uintptr_t) CPU->stack, CPU_STACK_SIZE);
 	context_restore(&CPU->saved_context);
 	/* not reached */
@@ -484,5 +485,6 @@
 
 #ifdef SCHEDULER_VERBOSE
-	printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
+	printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n",
+		CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
 #endif	
 
@@ -557,5 +559,5 @@
 			/*
 			 * Not interested in ourselves.
-			 * Doesn't require interrupt disabling for kcpulb is X_WIRED.
+			 * Doesn't require interrupt disabling because kcpulb has THREAD_FLAG_WIRED.
 			 */
 			if (CPU == cpu)
@@ -578,10 +580,12 @@
 				t = list_get_instance(l, thread_t, rq_link);
 				/*
-				 * We don't want to steal CPU-wired threads neither threads already stolen.
-				 * The latter prevents threads from migrating between CPU's without ever being run.
-				 * We don't want to steal threads whose FPU context is still in CPU.
+				 * We don't want to steal CPU-wired threads nor threads already
+				 * stolen. The latter prevents threads from migrating between CPU's
+				 * without ever being run. We don't want to steal threads whose FPU
+				 * context is still in CPU.
 				 */
 				spinlock_lock(&t->lock);
-				if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) {
+				if ((!(t->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN))) &&
+					(!(t->fpu_context_engaged)) ) {
 					/*
 					 * Remove t from r.
@@ -609,7 +613,9 @@
 				spinlock_lock(&t->lock);
 #ifdef KCPULB_VERBOSE
-				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
+				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n",
+					CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy),
+					atomic_get(&nrdy) / config.cpu_active);
 #endif
-				t->flags |= X_STOLEN;
+				t->flags |= THREAD_FLAG_STOLEN;
 				t->state = Entering;
 				spinlock_unlock(&t->lock);
Index: kernel/generic/src/proc/thread.c
===================================================================
--- kernel/generic/src/proc/thread.c	(revision cfffb29045b608bdb22354635488347597622ae3)
+++ kernel/generic/src/proc/thread.c	(revision e11ae91fc3a4227bcea74717abb36d585ceb5627)
@@ -130,4 +130,7 @@
 	link_initialize(&t->wq_link);
 	link_initialize(&t->th_link);
+
+	/* call the architecture-specific part of the constructor */
+	thr_constructor_arch(t);
 	
 #ifdef ARCH_HAS_FPU
@@ -157,4 +160,7 @@
 {
 	thread_t *t = (thread_t *) obj;
+
+	/* call the architecture-specific part of the destructor */
+	thr_destructor_arch(t);
 
 	frame_free(KA2PA(t->kstack));
@@ -211,5 +217,5 @@
 	
 	cpu = CPU;
-	if (t->flags & X_WIRED) {
+	if (t->flags & THREAD_FLAG_WIRED) {
 		cpu = t->cpu;
 	}
@@ -296,6 +302,4 @@
 	if (!t)
 		return NULL;
-
-	thread_create_arch(t);
 	
 	/* Not needed, but good for debugging */
@@ -324,5 +328,5 @@
 	t->priority = -1;		/* start in rq[0] */
 	t->cpu = NULL;
-	t->flags = 0;
+	t->flags = flags;
 	t->state = Entering;
 	t->call_me = NULL;
@@ -348,4 +352,6 @@
 	t->fpu_context_exists = 0;
 	t->fpu_context_engaged = 0;
+
+	thread_create_arch(t);		/* might depend on previous initialization */
 	
 	/*
@@ -590,5 +596,5 @@
 	}
 
-	if ((t = thread_create(uinit, kernel_uarg, TASK, 0, namebuf))) {
+	if ((t = thread_create(uinit, kernel_uarg, TASK, THREAD_FLAG_USPACE, namebuf))) {
 		tid = t->tid;
 		thread_ready(t);
