Index: generic/src/proc/scheduler.c
===================================================================
--- generic/src/proc/scheduler.c	(revision 97f1691963436356ca38316e0966ffeb57cd5639)
+++ generic/src/proc/scheduler.c	(revision 03427d078778ca1fdd95dc67f157523be0328ae8)
@@ -48,5 +48,7 @@
 #include <debug.h>
 
-atomic_t nrdy;
+static void scheduler_separated_stack(void);
+
+atomic_t nrdy;	/**< Number of ready threads in the system. */
 
 /** Take actions before new thread runs.
@@ -62,10 +64,10 @@
 {
 	before_thread_runs_arch();
-#ifdef CONFIG_FPU_LAZY
+	#ifdef CONFIG_FPU_LAZY
 	if(THREAD==CPU->fpu_owner) 
 		fpu_enable();
 	else
 		fpu_disable(); 
-#else
+	#else
 	fpu_enable();
 	if (THREAD->fpu_context_exists)
@@ -75,12 +77,12 @@
 		THREAD->fpu_context_exists=1;
 	}
-#endif
-}
-
-/** Take actions after old thread ran.
+	#endif
+}
+
+/** Take actions after THREAD had run.
  *
  * Perform actions that need to be
  * taken after the running thread
- * was preempted by the scheduler.
+ * had been preempted by the scheduler.
  *
  * THREAD->lock is locked on entry
@@ -108,7 +110,7 @@
 
 	spinlock_lock(&THREAD->lock);
-	if (THREAD->fpu_context_exists)
+	if (THREAD->fpu_context_exists) {
 		fpu_context_restore(&THREAD->saved_fpu_context);
-	else {
+	} else {
 		fpu_init(&(THREAD->saved_fpu_context));
 		THREAD->fpu_context_exists=1;
@@ -116,6 +118,6 @@
 	CPU->fpu_owner=THREAD;
 	THREAD->fpu_context_engaged = 1;
-
 	spinlock_unlock(&THREAD->lock);
+
 	spinlock_unlock(&CPU->lock);
 }
@@ -130,5 +132,4 @@
 {
 }
-
 
 /** Get thread to be scheduled
@@ -171,6 +172,5 @@
 	interrupts_disable();
 	
-	i = 0;
-	for (; i<RQ_COUNT; i++) {
+	for (i = 0; i<RQ_COUNT; i++) {
 		r = &CPU->rq[i];
 		spinlock_lock(&r->lock);
@@ -199,5 +199,5 @@
 
 		t->ticks = us2ticks((i+1)*10000);
-		t->priority = i;	/* eventually correct rq index */
+		t->priority = i;	/* correct rq index */
 
 		/*
@@ -212,5 +212,4 @@
 
 }
-
 
 /** Prevent rq starvation
@@ -256,137 +255,4 @@
 }
 
-
-/** Scheduler stack switch wrapper
- *
- * Second part of the scheduler() function
- * using new stack. Handling the actual context
- * switch to a new thread.
- *
- * Assume THREAD->lock is held.
- */
-static void scheduler_separated_stack(void)
-{
-	int priority;
-
-	ASSERT(CPU != NULL);
-
-	if (THREAD) {
-		/* must be run after switch to scheduler stack */
-		after_thread_ran();
-
-		switch (THREAD->state) {
-		    case Running:
-			THREAD->state = Ready;
-			spinlock_unlock(&THREAD->lock);
-			thread_ready(THREAD);
-			break;
-
-		    case Exiting:
-			thread_destroy(THREAD);
-			break;
-			
-		    case Sleeping:
-			/*
-			 * Prefer the thread after it's woken up.
-			 */
-			THREAD->priority = -1;
-
-			/*
-			 * We need to release wq->lock which we locked in waitq_sleep().
-			 * Address of wq->lock is kept in THREAD->sleep_queue.
-			 */
-			spinlock_unlock(&THREAD->sleep_queue->lock);
-
-			/*
-			 * Check for possible requests for out-of-context invocation.
-			 */
-			if (THREAD->call_me) {
-				THREAD->call_me(THREAD->call_me_with);
-				THREAD->call_me = NULL;
-				THREAD->call_me_with = NULL;
-			}
-
-			spinlock_unlock(&THREAD->lock);
-
-			break;
-
-		    default:
-			/*
-			 * Entering state is unexpected.
-			 */
-			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
-			break;
-		}
-
-		THREAD = NULL;
-	}
-
-
-	THREAD = find_best_thread();
-	
-	spinlock_lock(&THREAD->lock);
-	priority = THREAD->priority;
-	spinlock_unlock(&THREAD->lock);	
-
-	relink_rq(priority);		
-
-	spinlock_lock(&THREAD->lock);	
-
-	/*
-	 * If both the old and the new task are the same, lots of work is avoided.
-	 */
-	if (TASK != THREAD->task) {
-		as_t *as1 = NULL;
-		as_t *as2;
-
-		if (TASK) {
-			spinlock_lock(&TASK->lock);
-			as1 = TASK->as;
-			spinlock_unlock(&TASK->lock);
-		}
-
-		spinlock_lock(&THREAD->task->lock);
-		as2 = THREAD->task->as;
-		spinlock_unlock(&THREAD->task->lock);
-		
-		/*
-		 * Note that it is possible for two tasks to share one address space.
-		 */
-		if (as1 != as2) {
-			/*
-			 * Both tasks and address spaces are different.
-			 * Replace the old one with the new one.
-			 */
-			as_switch(as1, as2);
-		}
-		TASK = THREAD->task;	
-	}
-
-	THREAD->state = Running;
-
-	#ifdef SCHEDULER_VERBOSE
-	printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
-	#endif	
-
-	/*
-	 * Some architectures provide late kernel PA2KA(identity)
-	 * mapping in a page fault handler. However, the page fault
-	 * handler uses the kernel stack of the running thread and
-	 * therefore cannot be used to map it. The kernel stack, if
-	 * necessary, is to be mapped in before_thread_runs(). This
-	 * function must be executed before the switch to the new stack.
-	 */
-	before_thread_runs();
-
-	/*
-	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
-	 */
-	the_copy(THE, (the_t *) THREAD->kstack);
-	
-	context_restore(&THREAD->saved_context);
-	/* not reached */
-}
-
-
 /** The scheduler
  *
@@ -409,7 +275,7 @@
 	if (THREAD) {
 		spinlock_lock(&THREAD->lock);
-#ifndef CONFIG_FPU_LAZY
+		#ifndef CONFIG_FPU_LAZY
 		fpu_context_save(&(THREAD->saved_fpu_context));
-#endif
+		#endif
 		if (!context_save(&THREAD->saved_context)) {
 			/*
@@ -454,7 +320,133 @@
 }
 
-
-
-
+/** Scheduler stack switch wrapper
+ *
+ * Second part of the scheduler() function
+ * using new stack. Handling the actual context
+ * switch to a new thread.
+ *
+ * Assume THREAD->lock is held.
+ */
+static void scheduler_separated_stack(void)
+{
+	int priority;
+
+	ASSERT(CPU != NULL);
+
+	if (THREAD) {
+		/* must be run after the switch to scheduler stack */
+		after_thread_ran();
+
+		switch (THREAD->state) {
+		    case Running:
+			THREAD->state = Ready;
+			spinlock_unlock(&THREAD->lock);
+			thread_ready(THREAD);
+			break;
+
+		    case Exiting:
+			thread_destroy(THREAD);
+			break;
+			
+		    case Sleeping:
+			/*
+			 * Prefer the thread after it's woken up.
+			 */
+			THREAD->priority = -1;
+
+			/*
+			 * We need to release wq->lock which we locked in waitq_sleep().
+			 * Address of wq->lock is kept in THREAD->sleep_queue.
+			 */
+			spinlock_unlock(&THREAD->sleep_queue->lock);
+
+			/*
+			 * Check for possible requests for out-of-context invocation.
+			 */
+			if (THREAD->call_me) {
+				THREAD->call_me(THREAD->call_me_with);
+				THREAD->call_me = NULL;
+				THREAD->call_me_with = NULL;
+			}
+
+			spinlock_unlock(&THREAD->lock);
+
+			break;
+
+		    default:
+			/*
+			 * Entering state is unexpected.
+			 */
+			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
+			break;
+		}
+
+		THREAD = NULL;
+	}
+
+	THREAD = find_best_thread();
+	
+	spinlock_lock(&THREAD->lock);
+	priority = THREAD->priority;
+	spinlock_unlock(&THREAD->lock);	
+
+	relink_rq(priority);		
+
+	spinlock_lock(&THREAD->lock);	
+
+	/*
+	 * If both the old and the new task are the same, lots of work is avoided.
+	 */
+	if (TASK != THREAD->task) {
+		as_t *as1 = NULL;
+		as_t *as2;
+
+		if (TASK) {
+			spinlock_lock(&TASK->lock);
+			as1 = TASK->as;
+			spinlock_unlock(&TASK->lock);
+		}
+
+		spinlock_lock(&THREAD->task->lock);
+		as2 = THREAD->task->as;
+		spinlock_unlock(&THREAD->task->lock);
+		
+		/*
+		 * Note that it is possible for two tasks to share one address space.
+		 */
+		if (as1 != as2) {
+			/*
+			 * Both tasks and address spaces are different.
+			 * Replace the old one with the new one.
+			 */
+			as_switch(as1, as2);
+		}
+		TASK = THREAD->task;	
+	}
+
+	THREAD->state = Running;
+
+	#ifdef SCHEDULER_VERBOSE
+	printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
+	#endif	
+
+	/*
+	 * Some architectures provide late kernel PA2KA(identity)
+	 * mapping in a page fault handler. However, the page fault
+	 * handler uses the kernel stack of the running thread and
+	 * therefore cannot be used to map it. The kernel stack, if
+	 * necessary, is to be mapped in before_thread_runs(). This
+	 * function must be executed before the switch to the new stack.
+	 */
+	before_thread_runs();
+
+	/*
+	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
+	 */
+	the_copy(THE, (the_t *) THREAD->kstack);
+	
+	context_restore(&THREAD->saved_context);
+	/* not reached */
+}
 
 #ifdef CONFIG_SMP
@@ -613,10 +605,12 @@
 	 * let's not be interrupted */
 	ipl = interrupts_disable();
-	printf("*********** Scheduler dump ***********\n");
+	printf("Scheduler dump:\n");
 	for (cpu=0;cpu < config.cpu_count; cpu++) {
+
 		if (!cpus[cpu].active)
 			continue;
+
 		spinlock_lock(&cpus[cpu].lock);
-		printf("cpu%d: nrdy: %d needs_relink: %d\n",
+		printf("cpu%d: nrdy: %d, needs_relink: %d\n",
 		       cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);
 		
@@ -628,5 +622,5 @@
 				continue;
 			}
-			printf("\tRq %d: ", i);
+			printf("\trq[%d]: ", i);
 			for (cur=r->rq_head.next; cur!=&r->rq_head; cur=cur->next) {
 				t = list_get_instance(cur, thread_t, rq_link);
