Index: src/proc/scheduler.c
===================================================================
--- src/proc/scheduler.c	(revision d89652555244d155a50d0893a409004587e7c475)
+++ src/proc/scheduler.c	(revision 87cd61fecb29f65d0cba5f71159c3e0bccdc2010)
@@ -132,5 +132,5 @@
 		}
 	
-		/* avoid deadlock with relink_rq */
+		/* avoid deadlock with relink_rq() */
 		if (!spinlock_trylock(&CPU->lock)) {
 			/*
@@ -447,5 +447,4 @@
 
 			cpu = &cpus[(i + k) % config.cpu_active];
-			r = &cpu->rq[j];
 
 			/*
@@ -454,7 +453,8 @@
 			 */
 			if (CPU == cpu)
-				continue;
+				continue;
 
 restart:		pri = cpu_priority_high();
+			r = &cpu->rq[j];
 			spinlock_lock(&r->lock);
 			if (r->n == 0) {
@@ -471,8 +471,9 @@
 		    		 * We don't want to steal CPU-wired threads neither threads already stolen.
 				 * The latter prevents threads from migrating between CPU's without ever being run.
-		        	 * We don't want to steal threads whose FPU context is still in CPU
+		        	 * We don't want to steal threads whose FPU context is still in CPU.
 				 */
 				spinlock_lock(&t->lock);
 				if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) {
+
 					/*
 					 * Remove t from r.
Index: src/proc/thread.c
===================================================================
--- src/proc/thread.c	(revision d89652555244d155a50d0893a409004587e7c475)
+++ src/proc/thread.c	(revision 87cd61fecb29f65d0cba5f71159c3e0bccdc2010)
@@ -177,5 +177,5 @@
 		frame_ks = frame_alloc(FRAME_KA);
 		if (THREAD_USER_STACK & flags) {
-			frame_us = frame_alloc(0);
+			frame_us = frame_alloc(FRAME_KA);
 		}
 
