Index: kernel/generic/src/proc/scheduler.c
===================================================================
--- kernel/generic/src/proc/scheduler.c	(revision 481d47513438cdc33d95733bb4dadfbb80b196ef)
+++ kernel/generic/src/proc/scheduler.c	(revision c8e99bb315a5bcdd7baf9ad996e0c913004c1016)
@@ -33,5 +33,5 @@
 /**
  * @file
- * @brief	Scheduler and load balancing.
+ * @brief Scheduler and load balancing.
  *
  * This file contains the scheduler and kcpulb kernel thread which
@@ -68,5 +68,5 @@
 static void scheduler_separated_stack(void);
 
-atomic_t nrdy;	/**< Number of ready threads in the system. */
+atomic_t nrdy;  /**< Number of ready threads in the system. */
 
 /** Carry out actions before new task runs. */
@@ -89,8 +89,8 @@
 	before_thread_runs_arch();
 #ifdef CONFIG_FPU_LAZY
-	if(THREAD == CPU->fpu_owner) 
+	if(THREAD == CPU->fpu_owner)
 		fpu_enable();
 	else
-		fpu_disable(); 
+		fpu_disable();
 #else
 	fpu_enable();
@@ -123,17 +123,18 @@
 restart:
 	fpu_enable();
-	spinlock_lock(&CPU->lock);
-
+	irq_spinlock_lock(&CPU->lock, false);
+	
 	/* Save old context */
-	if (CPU->fpu_owner != NULL) {  
-		spinlock_lock(&CPU->fpu_owner->lock);
+	if (CPU->fpu_owner != NULL) {
+		irq_spinlock_lock(&CPU->fpu_owner->lock, false);
 		fpu_context_save(CPU->fpu_owner->saved_fpu_context);
-		/* don't prevent migration */
+		
+		/* Don't prevent migration */
 		CPU->fpu_owner->fpu_context_engaged = 0;
-		spinlock_unlock(&CPU->fpu_owner->lock);
+		irq_spinlock_unlock(&CPU->fpu_owner->lock, false);
 		CPU->fpu_owner = NULL;
 	}
-
-	spinlock_lock(&THREAD->lock);
+	
+	irq_spinlock_lock(&THREAD->lock, false);
 	if (THREAD->fpu_context_exists) {
 		fpu_context_restore(THREAD->saved_fpu_context);
@@ -142,21 +143,23 @@
 		if (!THREAD->saved_fpu_context) {
 			/* Might sleep */
-			spinlock_unlock(&THREAD->lock);
-			spinlock_unlock(&CPU->lock);
+			irq_spinlock_unlock(&THREAD->lock, false);
+			irq_spinlock_unlock(&CPU->lock, false);
 			THREAD->saved_fpu_context =
 			    (fpu_context_t *) slab_alloc(fpu_context_slab, 0);
+			
 			/* We may have switched CPUs during slab_alloc */
-			goto restart; 
+			goto restart;
 		}
 		fpu_init();
 		THREAD->fpu_context_exists = 1;
 	}
+	
 	CPU->fpu_owner = THREAD;
 	THREAD->fpu_context_engaged = 1;
-	spinlock_unlock(&THREAD->lock);
-
-	spinlock_unlock(&CPU->lock);
-}
-#endif
+	irq_spinlock_unlock(&THREAD->lock, false);
+	
+	irq_spinlock_unlock(&CPU->lock, false);
+}
+#endif /* CONFIG_FPU_LAZY */
 
 /** Initialize scheduler
@@ -180,10 +183,6 @@
 static thread_t *find_best_thread(void)
 {
-	thread_t *t;
-	runq_t *r;
-	int i;
-
 	ASSERT(CPU != NULL);
-
+	
 loop:
 	
@@ -194,61 +193,60 @@
 		 * This improves energy saving and hyperthreading.
 		 */
-
+		
 		 /* Mark CPU as it was idle this clock tick */
-		 spinlock_lock(&CPU->lock);
-		 CPU->idle = true;
-		 spinlock_unlock(&CPU->lock);
-
-		 interrupts_enable();
-		 /*
+		irq_spinlock_lock(&CPU->lock, false);
+		CPU->idle = true;
+		irq_spinlock_unlock(&CPU->lock, false);
+		
+		interrupts_enable();
+		/*
 		 * An interrupt might occur right now and wake up a thread.
 		 * In such case, the CPU will continue to go to sleep
 		 * even though there is a runnable thread.
 		 */
-		 cpu_sleep();
-		 interrupts_disable();
-		 goto loop;
-	}
-	
+		cpu_sleep();
+		interrupts_disable();
+		goto loop;
+	}
+	
+	unsigned int i;
 	for (i = 0; i < RQ_COUNT; i++) {
-		r = &CPU->rq[i];
-		spinlock_lock(&r->lock);
-		if (r->n == 0) {
+		irq_spinlock_lock(&(CPU->rq[i].lock), false);
+		if (CPU->rq[i].n == 0) {
 			/*
 			 * If this queue is empty, try a lower-priority queue.
 			 */
-			spinlock_unlock(&r->lock);
+			irq_spinlock_unlock(&(CPU->rq[i].lock), false);
 			continue;
 		}
-
+		
 		atomic_dec(&CPU->nrdy);
 		atomic_dec(&nrdy);
-		r->n--;
-
+		CPU->rq[i].n--;
+		
 		/*
 		 * Take the first thread from the queue.
 		 */
-		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
-		list_remove(&t->rq_link);
-
-		spinlock_unlock(&r->lock);
-
-		spinlock_lock(&t->lock);
-		t->cpu = CPU;
-
-		t->ticks = us2ticks((i + 1) * 10000);
-		t->priority = i;	/* correct rq index */
-
+		thread_t *thread =
+		    list_get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link);
+		list_remove(&thread->rq_link);
+		
+		irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);
+		
+		thread->cpu = CPU;
+		thread->ticks = us2ticks((i + 1) * 10000);
+		thread->priority = i;  /* Correct rq index */
+		
 		/*
 		 * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
 		 * when load balancing needs emerge.
 		 */
-		t->flags &= ~THREAD_FLAG_STOLEN;
-		spinlock_unlock(&t->lock);
-
-		return t;
-	}
+		thread->flags &= ~THREAD_FLAG_STOLEN;
+		irq_spinlock_unlock(&thread->lock, false);
+		
+		return thread;
+	}
+	
 	goto loop;
-
 }
 
@@ -267,30 +265,31 @@
 {
 	link_t head;
-	runq_t *r;
-	int i, n;
-
+	
 	list_initialize(&head);
-	spinlock_lock(&CPU->lock);
+	irq_spinlock_lock(&CPU->lock, false);
+	
 	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
+		int i;
 		for (i = start; i < RQ_COUNT - 1; i++) {
-			/* remember and empty rq[i + 1] */
-			r = &CPU->rq[i + 1];
-			spinlock_lock(&r->lock);
-			list_concat(&head, &r->rq_head);
-			n = r->n;
-			r->n = 0;
-			spinlock_unlock(&r->lock);
-		
-			/* append rq[i + 1] to rq[i] */
-			r = &CPU->rq[i];
-			spinlock_lock(&r->lock);
-			list_concat(&r->rq_head, &head);
-			r->n += n;
-			spinlock_unlock(&r->lock);
+			/* Remember and empty rq[i + 1] */
+			
+			irq_spinlock_lock(&CPU->rq[i + 1].lock, false);
+			list_concat(&head, &CPU->rq[i + 1].rq_head);
+			size_t n = CPU->rq[i + 1].n;
+			CPU->rq[i + 1].n = 0;
+			irq_spinlock_unlock(&CPU->rq[i + 1].lock, false);
+			
+			/* Append rq[i + 1] to rq[i] */
+			
+			irq_spinlock_lock(&CPU->rq[i].lock, false);
+			list_concat(&CPU->rq[i].rq_head, &head);
+			CPU->rq[i].n += n;
+			irq_spinlock_unlock(&CPU->rq[i].lock, false);
 		}
+		
 		CPU->needs_relink = 0;
 	}
-	spinlock_unlock(&CPU->lock);
-
+	
+	irq_spinlock_unlock(&CPU->lock, false);
 }
 
@@ -305,14 +304,14 @@
 {
 	volatile ipl_t ipl;
-
+	
 	ASSERT(CPU != NULL);
-
+	
 	ipl = interrupts_disable();
-
+	
 	if (atomic_get(&haltstate))
 		halt();
 	
 	if (THREAD) {
-		spinlock_lock(&THREAD->lock);
+		irq_spinlock_lock(&THREAD->lock, false);
 		
 		/* Update thread kernel accounting */
@@ -330,25 +329,27 @@
 			THREAD->last_cycle = get_cycle();
 			
-			spinlock_unlock(&THREAD->lock);
+			irq_spinlock_unlock(&THREAD->lock, false);
 			interrupts_restore(THREAD->saved_context.ipl);
 			
 			return;
 		}
-
+		
 		/*
 		 * Interrupt priority level of preempted thread is recorded
 		 * here to facilitate scheduler() invocations from
-		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()). 
+		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
+		 *
 		 */
 		THREAD->saved_context.ipl = ipl;
 	}
-
+	
 	/*
 	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
 	 * and preemption counter. At this point THE could be coming either
 	 * from THREAD's or CPU's stack.
+	 *
 	 */
 	the_copy(THE, (the_t *) CPU->stack);
-
+	
 	/*
 	 * We may not keep the old stack.
@@ -362,4 +363,5 @@
 	 * Therefore the scheduler() function continues in
 	 * scheduler_separated_stack().
+	 *
 	 */
 	context_save(&CPU->saved_context);
@@ -367,5 +369,6 @@
 	    (uintptr_t) CPU->stack, CPU_STACK_SIZE);
 	context_restore(&CPU->saved_context);
-	/* not reached */
+	
+	/* Not reached */
 }
 
@@ -377,12 +380,12 @@
  *
  * Assume THREAD->lock is held.
+ *
  */
 void scheduler_separated_stack(void)
 {
-	int priority;
 	DEADLOCK_PROBE_INIT(p_joinwq);
 	task_t *old_task = TASK;
 	as_t *old_as = AS;
-
+	
 	ASSERT(CPU != NULL);
 	
@@ -391,36 +394,40 @@
 	 * possible destruction should thread_destroy() be called on this or any
 	 * other processor while the scheduler is still using them.
+	 *
 	 */
 	if (old_task)
 		task_hold(old_task);
+	
 	if (old_as)
 		as_hold(old_as);
-
+	
 	if (THREAD) {
-		/* must be run after the switch to scheduler stack */
+		/* Must be run after the switch to scheduler stack */
 		after_thread_ran();
-
+		
 		switch (THREAD->state) {
 		case Running:
-			spinlock_unlock(&THREAD->lock);
+			irq_spinlock_unlock(&THREAD->lock, false);
 			thread_ready(THREAD);
 			break;
-
+		
 		case Exiting:
 repeat:
 			if (THREAD->detached) {
-				thread_destroy(THREAD);
+				thread_destroy(THREAD, false);
 			} else {
 				/*
 				 * The thread structure is kept allocated until
 				 * somebody calls thread_detach() on it.
+				 *
 				 */
-				if (!spinlock_trylock(&THREAD->join_wq.lock)) {
+				if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) {
 					/*
 					 * Avoid deadlock.
+					 *
 					 */
-					spinlock_unlock(&THREAD->lock);
+					irq_spinlock_unlock(&THREAD->lock, false);
 					delay(HZ);
-					spinlock_lock(&THREAD->lock);
+					irq_spinlock_lock(&THREAD->lock, false);
 					DEADLOCK_PROBE(p_joinwq,
 					    DEADLOCK_THRESHOLD);
@@ -429,8 +436,8 @@
 				_waitq_wakeup_unsafe(&THREAD->join_wq,
 				    WAKEUP_FIRST);
-				spinlock_unlock(&THREAD->join_wq.lock);
+				irq_spinlock_unlock(&THREAD->join_wq.lock, false);
 				
 				THREAD->state = Lingering;
-				spinlock_unlock(&THREAD->lock);
+				irq_spinlock_unlock(&THREAD->lock, false);
 			}
 			break;
@@ -439,17 +446,20 @@
 			/*
 			 * Prefer the thread after it's woken up.
+			 *
 			 */
 			THREAD->priority = -1;
-
+			
 			/*
 			 * We need to release wq->lock which we locked in
 			 * waitq_sleep(). Address of wq->lock is kept in
 			 * THREAD->sleep_queue.
+			 *
 			 */
-			spinlock_unlock(&THREAD->sleep_queue->lock);
-
+			irq_spinlock_unlock(&THREAD->sleep_queue->lock, false);
+			
 			/*
 			 * Check for possible requests for out-of-context
 			 * invocation.
+			 *
 			 */
 			if (THREAD->call_me) {
@@ -458,12 +468,13 @@
 				THREAD->call_me_with = NULL;
 			}
-
-			spinlock_unlock(&THREAD->lock);
-
+			
+			irq_spinlock_unlock(&THREAD->lock, false);
+			
 			break;
-
+		
 		default:
 			/*
 			 * Entering state is unexpected.
+			 *
 			 */
 			panic("tid%" PRIu64 ": unexpected state %s.",
@@ -471,19 +482,20 @@
 			break;
 		}
-
+		
 		THREAD = NULL;
 	}
-
+	
 	THREAD = find_best_thread();
 	
-	spinlock_lock(&THREAD->lock);
-	priority = THREAD->priority;
-	spinlock_unlock(&THREAD->lock);	
-
-	relink_rq(priority);		
-
+	irq_spinlock_lock(&THREAD->lock, false);
+	int priority = THREAD->priority;
+	irq_spinlock_unlock(&THREAD->lock, false);
+	
+	relink_rq(priority);
+	
 	/*
 	 * If both the old and the new task are the same, lots of work is
 	 * avoided.
+	 *
 	 */
 	if (TASK != THREAD->task) {
@@ -493,4 +505,5 @@
 		 * Note that it is possible for two tasks to share one address
 		 * space.
+		 *
 		 */
 		if (old_as != new_as) {
@@ -498,26 +511,28 @@
 			 * Both tasks and address spaces are different.
 			 * Replace the old one with the new one.
+			 *
 			 */
 			as_switch(old_as, new_as);
 		}
-
+		
 		TASK = THREAD->task;
 		before_task_runs();
 	}
-
+	
 	if (old_task)
 		task_release(old_task);
+	
 	if (old_as)
 		as_release(old_as);
 	
-	spinlock_lock(&THREAD->lock);	
+	irq_spinlock_lock(&THREAD->lock, false);
 	THREAD->state = Running;
-
+	
 #ifdef SCHEDULER_VERBOSE
 	printf("cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64 
 	    ", nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority,
 	    THREAD->ticks, atomic_get(&CPU->nrdy));
-#endif	
-
+#endif
+	
 	/*
 	 * Some architectures provide late kernel PA2KA(identity)
@@ -527,15 +542,18 @@
 	 * necessary, is to be mapped in before_thread_runs(). This
 	 * function must be executed before the switch to the new stack.
+	 *
 	 */
 	before_thread_runs();
-
+	
 	/*
 	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
 	 * thread's stack.
+	 *
 	 */
 	the_copy(THE, (the_t *) THREAD->kstack);
 	
 	context_restore(&THREAD->saved_context);
-	/* not reached */
+	
+	/* Not reached */
 }
 
@@ -551,12 +569,7 @@
 void kcpulb(void *arg)
 {
-	thread_t *t;
-	int count;
 	atomic_count_t average;
-	unsigned int i;
-	int j;
-	int k = 0;
-	ipl_t ipl;
-
+	atomic_count_t rdy;
+	
 	/*
 	 * Detach kcpulb as nobody will call thread_join_timeout() on it.
@@ -569,5 +582,5 @@
 	 */
 	thread_sleep(1);
-
+	
 not_satisfied:
 	/*
@@ -575,46 +588,53 @@
 	 * other CPU's. Note that situation can have changed between two
 	 * passes. Each time get the most up to date counts.
+	 *
 	 */
 	average = atomic_get(&nrdy) / config.cpu_active + 1;
-	count = average - atomic_get(&CPU->nrdy);
-
-	if (count <= 0)
+	rdy = atomic_get(&CPU->nrdy);
+	
+	if (average <= rdy)
 		goto satisfied;
-
+	
+	atomic_count_t count = average - rdy;
+	
 	/*
 	 * Searching least priority queues on all CPU's first and most priority
 	 * queues on all CPU's last.
-	 */
-	for (j = RQ_COUNT - 1; j >= 0; j--) {
-		for (i = 0; i < config.cpu_active; i++) {
-			link_t *l;
-			runq_t *r;
-			cpu_t *cpu;
-
-			cpu = &cpus[(i + k) % config.cpu_active];
-
+	 *
+	 */
+	size_t acpu;
+	size_t acpu_bias = 0;
+	int rq;
+	
+	for (rq = RQ_COUNT - 1; rq >= 0; rq--) {
+		for (acpu = 0; acpu < config.cpu_active; acpu++) {
+			cpu_t *cpu = &cpus[(acpu + acpu_bias) % config.cpu_active];
+			
 			/*
 			 * Not interested in ourselves.
 			 * Doesn't require interrupt disabling for kcpulb has
 			 * THREAD_FLAG_WIRED.
+			 *
 			 */
 			if (CPU == cpu)
 				continue;
+			
 			if (atomic_get(&cpu->nrdy) <= average)
 				continue;
-
-			ipl = interrupts_disable();
-			r = &cpu->rq[j];
-			spinlock_lock(&r->lock);
-			if (r->n == 0) {
-				spinlock_unlock(&r->lock);
-				interrupts_restore(ipl);
+			
+			irq_spinlock_lock(&(cpu->rq[rq].lock), true);
+			if (cpu->rq[rq].n == 0) {
+				irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
 				continue;
 			}
-		
-			t = NULL;
-			l = r->rq_head.prev;	/* search rq from the back */
-			while (l != &r->rq_head) {
-				t = list_get_instance(l, thread_t, rq_link);
+			
+			thread_t *thread = NULL;
+			
+			/* Search rq from the back */
+			link_t *link = cpu->rq[rq].rq_head.prev;
+			
+			while (link != &(cpu->rq[rq].rq_head)) {
+				thread = (thread_t *) list_get_instance(link, thread_t, rq_link);
+				
 				/*
 				 * We don't want to steal CPU-wired threads
@@ -624,33 +644,38 @@
 				 * steal threads whose FPU context is still in
 				 * CPU.
+				 *
 				 */
-				spinlock_lock(&t->lock);
-				if ((!(t->flags & (THREAD_FLAG_WIRED |
-				    THREAD_FLAG_STOLEN))) &&
-				    (!(t->fpu_context_engaged))) {
+				irq_spinlock_lock(&thread->lock, false);
+				
+				if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)))
+				    && (!(thread->fpu_context_engaged))) {
 					/*
-					 * Remove t from r.
+					 * Remove thread from ready queue.
 					 */
-					spinlock_unlock(&t->lock);
+					irq_spinlock_unlock(&thread->lock, false);
 					
 					atomic_dec(&cpu->nrdy);
 					atomic_dec(&nrdy);
-
-					r->n--;
-					list_remove(&t->rq_link);
-
+					
+					cpu->rq[rq].n--;
+					list_remove(&thread->rq_link);
+					
 					break;
 				}
-				spinlock_unlock(&t->lock);
-				l = l->prev;
-				t = NULL;
+				
+				irq_spinlock_unlock(&thread->lock, false);
+				
+				link = link->prev;
+				thread = NULL;
 			}
-			spinlock_unlock(&r->lock);
-
-			if (t) {
+			
+			if (thread) {
 				/*
-				 * Ready t on local CPU
+				 * Ready thread on local CPU
+				 *
 				 */
-				spinlock_lock(&t->lock);
+				
+				irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock);
+				
 #ifdef KCPULB_VERBOSE
 				printf("kcpulb%u: TID %" PRIu64 " -> cpu%u, "
@@ -659,30 +684,32 @@
 				    atomic_get(&nrdy) / config.cpu_active);
 #endif
-				t->flags |= THREAD_FLAG_STOLEN;
-				t->state = Entering;
-				spinlock_unlock(&t->lock);
-	
-				thread_ready(t);
-
-				interrupts_restore(ipl);
-	
+				
+				thread->flags |= THREAD_FLAG_STOLEN;
+				thread->state = Entering;
+				
+				irq_spinlock_unlock(&thread->lock, true);
+				thread_ready(thread);
+				
 				if (--count == 0)
 					goto satisfied;
-					
+				
 				/*
 				 * We are not satisfied yet, focus on another
 				 * CPU next time.
+				 *
 				 */
-				k++;
+				acpu_bias++;
 				
 				continue;
-			}
-			interrupts_restore(ipl);
+			} else
+				irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
+			
 		}
 	}
-
+	
 	if (atomic_get(&CPU->nrdy)) {
 		/*
 		 * Be a little bit light-weight and let migrated threads run.
+		 *
 		 */
 		scheduler();
@@ -691,60 +718,56 @@
 		 * We failed to migrate a single thread.
 		 * Give up this turn.
+		 *
 		 */
 		goto loop;
 	}
-		
+	
 	goto not_satisfied;
-
+	
 satisfied:
 	goto loop;
 }
-
 #endif /* CONFIG_SMP */
 
-
-/** Print information about threads & scheduler queues */
+/** Print information about threads & scheduler queues
+ *
+ */
 void sched_print_list(void)
 {
-	ipl_t ipl;
-	unsigned int cpu, i;
-	runq_t *r;
-	thread_t *t;
-	link_t *cur;
-
-	/* We are going to mess with scheduler structures,
-	 * let's not be interrupted */
-	ipl = interrupts_disable();
+	size_t cpu;
 	for (cpu = 0; cpu < config.cpu_count; cpu++) {
-
 		if (!cpus[cpu].active)
 			continue;
-
-		spinlock_lock(&cpus[cpu].lock);
+		
+		irq_spinlock_lock(&cpus[cpu].lock, true);
+		
 		printf("cpu%u: address=%p, nrdy=%ld, needs_relink=%" PRIs "\n",
 		    cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
 		    cpus[cpu].needs_relink);
 		
+		unsigned int i;
 		for (i = 0; i < RQ_COUNT; i++) {
-			r = &cpus[cpu].rq[i];
-			spinlock_lock(&r->lock);
-			if (!r->n) {
-				spinlock_unlock(&r->lock);
+			irq_spinlock_lock(&(cpus[cpu].rq[i].lock), false);
+			if (cpus[cpu].rq[i].n == 0) {
+				irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
 				continue;
 			}
+			
 			printf("\trq[%u]: ", i);
-			for (cur = r->rq_head.next; cur != &r->rq_head;
-				cur = cur->next) {
-				t = list_get_instance(cur, thread_t, rq_link);
-				printf("%" PRIu64 "(%s) ", t->tid,
-				    thread_states[t->state]);
+			link_t *cur;
+			for (cur = cpus[cpu].rq[i].rq_head.next;
+			    cur != &(cpus[cpu].rq[i].rq_head);
+			    cur = cur->next) {
+				thread_t *thread = list_get_instance(cur, thread_t, rq_link);
+				printf("%" PRIu64 "(%s) ", thread->tid,
+				    thread_states[thread->state]);
 			}
 			printf("\n");
-			spinlock_unlock(&r->lock);
+			
+			irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
 		}
-		spinlock_unlock(&cpus[cpu].lock);
-	}
-	
-	interrupts_restore(ipl);
+		
+		irq_spinlock_unlock(&cpus[cpu].lock, true);
+	}
 }
 
Index: kernel/generic/src/proc/task.c
===================================================================
--- kernel/generic/src/proc/task.c	(revision 481d47513438cdc33d95733bb4dadfbb80b196ef)
+++ kernel/generic/src/proc/task.c	(revision c8e99bb315a5bcdd7baf9ad996e0c913004c1016)
@@ -60,5 +60,5 @@
 
 /** Spinlock protecting the tasks_tree AVL tree. */
-SPINLOCK_INITIALIZE(tasks_lock);
+IRQ_SPINLOCK_INITIALIZE(tasks_lock);
 
 /** AVL tree of active tasks.
@@ -81,7 +81,9 @@
 /* Forward declarations. */
 static void task_kill_internal(task_t *);
-static int tsk_constructor(void *, int);
-
-/** Initialize kernel tasks support. */
+static int tsk_constructor(void *, unsigned int);
+
+/** Initialize kernel tasks support.
+ *
+ */
 void task_init(void)
 {
@@ -92,19 +94,23 @@
 }
 
-/*
+/** Task finish walker.
+ *
  * The idea behind this walker is to kill and count all tasks different from
  * TASK.
+ *
  */
 static bool task_done_walker(avltree_node_t *node, void *arg)
 {
-	task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
-	unsigned *cnt = (unsigned *) arg;
-	
-	if (t != TASK) {
+	task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);
+	size_t *cnt = (size_t *) arg;
+	
+	if (task != TASK) {
 		(*cnt)++;
+		
 #ifdef CONFIG_DEBUG
-		printf("[%"PRIu64"] ", t->taskid);
-#endif
-		task_kill_internal(t);
+		printf("[%"PRIu64"] ", task->taskid);
+#endif
+		
+		task_kill_internal(task);
 	}
 	
@@ -113,51 +119,55 @@
 }
 
-/** Kill all tasks except the current task. */
+/** Kill all tasks except the current task.
+ *
+ */
 void task_done(void)
 {
-	unsigned tasks_left;
-	
-	do { /* Repeat until there are any tasks except TASK */
-		/* Messing with task structures, avoid deadlock */
+	size_t tasks_left;
+	
+	/* Repeat until there are any tasks except TASK */
+	do {
 #ifdef CONFIG_DEBUG
 		printf("Killing tasks... ");
 #endif
-		ipl_t ipl = interrupts_disable();
-		spinlock_lock(&tasks_lock);
+		
+		irq_spinlock_lock(&tasks_lock, true);
 		tasks_left = 0;
 		avltree_walk(&tasks_tree, task_done_walker, &tasks_left);
-		spinlock_unlock(&tasks_lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&tasks_lock, true);
+		
 		thread_sleep(1);
+		
 #ifdef CONFIG_DEBUG
 		printf("\n");
 #endif
-	} while (tasks_left);
-}
-
-int tsk_constructor(void *obj, int kmflags)
-{
-	task_t *ta = obj;
-	int i;
-	
-	atomic_set(&ta->refcount, 0);
-	atomic_set(&ta->lifecount, 0);
-	atomic_set(&ta->active_calls, 0);
-	
-	spinlock_initialize(&ta->lock, "task_ta_lock");
-	mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE);
-	
-	list_initialize(&ta->th_head);
-	list_initialize(&ta->sync_box_head);
-	
-	ipc_answerbox_init(&ta->answerbox, ta);
+	} while (tasks_left > 0);
+}
+
+int tsk_constructor(void *obj, unsigned int kmflags)
+{
+	task_t *task = (task_t *) obj;
+	
+	atomic_set(&task->refcount, 0);
+	atomic_set(&task->lifecount, 0);
+	atomic_set(&task->active_calls, 0);
+	
+	irq_spinlock_initialize(&task->lock, "task_t_lock");
+	mutex_initialize(&task->futexes_lock, MUTEX_PASSIVE);
+	
+	list_initialize(&task->th_head);
+	list_initialize(&task->sync_box_head);
+	
+	ipc_answerbox_init(&task->answerbox, task);
+	
+	size_t i;
 	for (i = 0; i < IPC_MAX_PHONES; i++)
-		ipc_phone_init(&ta->phones[i]);
+		ipc_phone_init(&task->phones[i]);
 	
 #ifdef CONFIG_UDEBUG
 	/* Init kbox stuff */
-	ta->kb.thread = NULL;
-	ipc_answerbox_init(&ta->kb.box, ta);
-	mutex_initialize(&ta->kb.cleanup_lock, MUTEX_PASSIVE);
+	task->kb.thread = NULL;
+	ipc_answerbox_init(&task->kb.box, task);
+	mutex_initialize(&task->kb.cleanup_lock, MUTEX_PASSIVE);
 #endif
 	
@@ -175,86 +185,83 @@
 task_t *task_create(as_t *as, const char *name)
 {
-	ipl_t ipl;
-	task_t *ta;
-	
-	ta = (task_t *) slab_alloc(task_slab, 0);
-	task_create_arch(ta);
-	ta->as = as;
-	memcpy(ta->name, name, TASK_NAME_BUFLEN);
-	ta->name[TASK_NAME_BUFLEN - 1] = 0;
-	
-	ta->context = CONTEXT;
-	ta->capabilities = 0;
-	ta->ucycles = 0;
-	ta->kcycles = 0;
-
-	ta->ipc_info.call_sent = 0;
-	ta->ipc_info.call_recieved = 0;
-	ta->ipc_info.answer_sent = 0;
-	ta->ipc_info.answer_recieved = 0;
-	ta->ipc_info.irq_notif_recieved = 0;
-	ta->ipc_info.forwarded = 0;
-
+	task_t *task = (task_t *) slab_alloc(task_slab, 0);
+	task_create_arch(task);
+	
+	task->as = as;
+	str_cpy(task->name, TASK_NAME_BUFLEN, name);
+	
+	task->context = CONTEXT;
+	task->capabilities = 0;
+	task->ucycles = 0;
+	task->kcycles = 0;
+	
+	task->ipc_info.call_sent = 0;
+	task->ipc_info.call_recieved = 0;
+	task->ipc_info.answer_sent = 0;
+	task->ipc_info.answer_recieved = 0;
+	task->ipc_info.irq_notif_recieved = 0;
+	task->ipc_info.forwarded = 0;
+	
 #ifdef CONFIG_UDEBUG
 	/* Init debugging stuff */
-	udebug_task_init(&ta->udebug);
+	udebug_task_init(&task->udebug);
 	
 	/* Init kbox stuff */
-	ta->kb.finished = false;
+	task->kb.finished = false;
 #endif
 	
 	if ((ipc_phone_0) &&
-	    (context_check(ipc_phone_0->task->context, ta->context)))
-		ipc_phone_connect(&ta->phones[0], ipc_phone_0);
-	
-	btree_create(&ta->futexes);
+	    (context_check(ipc_phone_0->task->context, task->context)))
+		ipc_phone_connect(&task->phones[0], ipc_phone_0);
+	
+	btree_create(&task->futexes);
 	
 	/*
 	 * Get a reference to the address space.
 	 */
-	as_hold(ta->as);
-
-	ipl = interrupts_disable();
-	spinlock_lock(&tasks_lock);
-	ta->taskid = ++task_counter;
-	avltree_node_initialize(&ta->tasks_tree_node);
-	ta->tasks_tree_node.key = ta->taskid; 
-	avltree_insert(&tasks_tree, &ta->tasks_tree_node);
-	spinlock_unlock(&tasks_lock);
-	interrupts_restore(ipl);
-	
-	return ta;
+	as_hold(task->as);
+	
+	irq_spinlock_lock(&tasks_lock, true);
+	
+	task->taskid = ++task_counter;
+	avltree_node_initialize(&task->tasks_tree_node);
+	task->tasks_tree_node.key = task->taskid;
+	avltree_insert(&tasks_tree, &task->tasks_tree_node);
+	
+	irq_spinlock_unlock(&tasks_lock, true);
+	
+	return task;
 }
 
 /** Destroy task.
  *
- * @param t Task to be destroyed.
- *
- */
-void task_destroy(task_t *t)
+ * @param task Task to be destroyed.
+ *
+ */
+void task_destroy(task_t *task)
 {
 	/*
 	 * Remove the task from the task B+tree.
 	 */
-	spinlock_lock(&tasks_lock);
-	avltree_delete(&tasks_tree, &t->tasks_tree_node);
-	spinlock_unlock(&tasks_lock);
+	irq_spinlock_lock(&tasks_lock, true);
+	avltree_delete(&tasks_tree, &task->tasks_tree_node);
+	irq_spinlock_unlock(&tasks_lock, true);
 	
 	/*
 	 * Perform architecture specific task destruction.
 	 */
-	task_destroy_arch(t);
+	task_destroy_arch(task);
 	
 	/*
 	 * Free up dynamically allocated state.
 	 */
-	btree_destroy(&t->futexes);
+	btree_destroy(&task->futexes);
 	
 	/*
 	 * Drop our reference to the address space.
 	 */
-	as_release(t->as);
-	
-	slab_free(task_slab, t);
+	as_release(task->as);
+	
+	slab_free(task_slab, task);
 }
 
@@ -263,9 +270,10 @@
  * Holding a reference to a task prevents destruction of that task.
  *
- * @param t		Task to be held.
- */
-void task_hold(task_t *t)
-{
-	atomic_inc(&t->refcount);
+ * @param task Task to be held.
+ *
+ */
+void task_hold(task_t *task)
+{
+	atomic_inc(&task->refcount);
 }
 
@@ -274,10 +282,11 @@
  * The last one to release a reference to a task destroys the task.
  *
- * @param t		Task to be released.
- */
-void task_release(task_t *t)
-{
-	if ((atomic_predec(&t->refcount)) == 0)
-		task_destroy(t);
+ * @param task Task to be released.
+ *
+ */
+void task_release(task_t *task)
+{
+	if ((atomic_predec(&task->refcount)) == 0)
+		task_destroy(task);
 }
 
@@ -346,5 +355,5 @@
 	
 	if (node)
-		return avltree_get_instance(node, task_t, tasks_tree_node); 
+		return avltree_get_instance(node, task_t, tasks_tree_node);
 	
 	return NULL;
@@ -356,31 +365,34 @@
  * already disabled.
  *
- * @param t       Pointer to thread.
+ * @param task    Pointer to the task.
  * @param ucycles Out pointer to sum of all user cycles.
  * @param kcycles Out pointer to sum of all kernel cycles.
  *
  */
-void task_get_accounting(task_t *t, uint64_t *ucycles, uint64_t *kcycles)
+void task_get_accounting(task_t *task, uint64_t *ucycles, uint64_t *kcycles)
 {
 	/* Accumulated values of task */
-	uint64_t uret = t->ucycles;
-	uint64_t kret = t->kcycles;
+	uint64_t uret = task->ucycles;
+	uint64_t kret = task->kcycles;
 	
 	/* Current values of threads */
 	link_t *cur;
-	for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
-		thread_t *thr = list_get_instance(cur, thread_t, th_link);
-		
-		spinlock_lock(&thr->lock);
+	for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
+		thread_t *thread = list_get_instance(cur, thread_t, th_link);
+		
+		irq_spinlock_lock(&thread->lock, false);
+		
 		/* Process only counted threads */
-		if (!thr->uncounted) {
-			if (thr == THREAD) {
+		if (!thread->uncounted) {
+			if (thread == THREAD) {
 				/* Update accounting of current thread */
 				thread_update_accounting(false);
-			} 
-			uret += thr->ucycles;
-			kret += thr->kcycles;
+			}
+			
+			uret += thread->ucycles;
+			kret += thread->kcycles;
 		}
-		spinlock_unlock(&thr->lock);
+		
+		irq_spinlock_unlock(&thread->lock, false);
 	}
 	
@@ -389,5 +401,5 @@
 }
 
-static void task_kill_internal(task_t *ta)
+static void task_kill_internal(task_t *task)
 {
 	link_t *cur;
@@ -396,21 +408,22 @@
 	 * Interrupt all threads.
 	 */
-	spinlock_lock(&ta->lock);
-	for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
-		thread_t *thr;
+	irq_spinlock_lock(&task->lock, false);
+	for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
+		thread_t *thread = list_get_instance(cur, thread_t, th_link);
 		bool sleeping = false;
 		
-		thr = list_get_instance(cur, thread_t, th_link);
-		
-		spinlock_lock(&thr->lock);
-		thr->interrupted = true;
-		if (thr->state == Sleeping)
+		irq_spinlock_lock(&thread->lock, false);
+		
+		thread->interrupted = true;
+		if (thread->state == Sleeping)
 			sleeping = true;
-		spinlock_unlock(&thr->lock);
+		
+		irq_spinlock_unlock(&thread->lock, false);
 		
 		if (sleeping)
-			waitq_interrupt_sleep(thr);
+			waitq_interrupt_sleep(thread);
 	}
-	spinlock_unlock(&ta->lock);
+	
+	irq_spinlock_unlock(&task->lock, false);
 }
 
@@ -427,58 +440,55 @@
 int task_kill(task_id_t id)
 {
-	ipl_t ipl;
-	task_t *ta;
-
 	if (id == 1)
 		return EPERM;
 	
-	ipl = interrupts_disable();
-	spinlock_lock(&tasks_lock);
-	if (!(ta = task_find_by_id(id))) {
-		spinlock_unlock(&tasks_lock);
-		interrupts_restore(ipl);
+	irq_spinlock_lock(&tasks_lock, true);
+	
+	task_t *task = task_find_by_id(id);
+	if (!task) {
+		irq_spinlock_unlock(&tasks_lock, true);
 		return ENOENT;
 	}
-	task_kill_internal(ta);
-	spinlock_unlock(&tasks_lock);
-	interrupts_restore(ipl);
-	return 0;
+	
+	task_kill_internal(task);
+	irq_spinlock_unlock(&tasks_lock, true);
+	
+	return EOK;
 }
 
 static bool task_print_walker(avltree_node_t *node, void *arg)
 {
-	task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
-	int j;
-	
-	spinlock_lock(&t->lock);
+	task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);
+	irq_spinlock_lock(&task->lock, false);
 	
 	uint64_t ucycles;
 	uint64_t kcycles;
 	char usuffix, ksuffix;
-	task_get_accounting(t, &ucycles, &kcycles);
+	task_get_accounting(task, &ucycles, &kcycles);
 	order_suffix(ucycles, &ucycles, &usuffix);
 	order_suffix(kcycles, &kcycles, &ksuffix);
 	
-#ifdef __32_BITS__	
+#ifdef __32_BITS__
 	printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %10p %10p %9" PRIu64 "%c %9"
-		PRIu64 "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as,
-		ucycles, usuffix, kcycles, ksuffix, atomic_get(&t->refcount),
-		atomic_get(&t->active_calls));
+	    PRIu64 "%c %7ld %6ld", task->taskid, task->name, task->context,
+	    task, task->as, ucycles, usuffix, kcycles, ksuffix,
+	    atomic_get(&task->refcount), atomic_get(&task->active_calls));
 #endif
 	
 #ifdef __64_BITS__
 	printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %18p %18p %9" PRIu64 "%c %9"
-		PRIu64 "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as,
-		ucycles, usuffix, kcycles, ksuffix, atomic_get(&t->refcount),
-		atomic_get(&t->active_calls));
-#endif
-	
-	for (j = 0; j < IPC_MAX_PHONES; j++) {
-		if (t->phones[j].callee)
-			printf(" %d:%p", j, t->phones[j].callee);
+	    PRIu64 "%c %7ld %6ld", task->taskid, task->name, task->context,
+	    task, task->as, ucycles, usuffix, kcycles, ksuffix,
+	    atomic_get(&task->refcount), atomic_get(&task->active_calls));
+#endif
+	
+	size_t i;
+	for (i = 0; i < IPC_MAX_PHONES; i++) {
+		if (task->phones[i].callee)
+			printf(" %" PRIs ":%p", i, task->phones[i].callee);
 	}
 	printf("\n");
 	
-	spinlock_unlock(&t->lock);
+	irq_spinlock_unlock(&task->lock, false);
 	return true;
 }
@@ -487,9 +497,6 @@
 void task_print_list(void)
 {
-	ipl_t ipl;
-	
 	/* Messing with task structures, avoid deadlock */
-	ipl = interrupts_disable();
-	spinlock_lock(&tasks_lock);
+	irq_spinlock_lock(&tasks_lock, true);
 	
 #ifdef __32_BITS__
@@ -509,6 +516,5 @@
 	avltree_walk(&tasks_tree, task_print_walker, NULL);
 	
-	spinlock_unlock(&tasks_lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&tasks_lock, true);
 }
 
Index: kernel/generic/src/proc/thread.c
===================================================================
--- kernel/generic/src/proc/thread.c	(revision 481d47513438cdc33d95733bb4dadfbb80b196ef)
+++ kernel/generic/src/proc/thread.c	(revision c8e99bb315a5bcdd7baf9ad996e0c913004c1016)
@@ -33,5 +33,5 @@
 /**
  * @file
- * @brief	Thread management functions.
+ * @brief Thread management functions.
  */
 
@@ -94,6 +94,7 @@
  *
  * For locking rules, see declaration thereof.
- */
-SPINLOCK_INITIALIZE(threads_lock);
+ *
+ */
+IRQ_SPINLOCK_INITIALIZE(threads_lock);
 
 /** AVL tree of all threads.
@@ -101,11 +102,13 @@
  * When a thread is found in the threads_tree AVL tree, it is guaranteed to
  * exist as long as the threads_lock is held.
- */
-avltree_t threads_tree;		
-
-SPINLOCK_INITIALIZE(tidlock);
-thread_id_t last_tid = 0;
+ *
+ */
+avltree_t threads_tree;
+
+IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
+static thread_id_t last_tid = 0;
 
 static slab_cache_t *thread_slab;
+
 #ifdef CONFIG_FPU
 slab_cache_t *fpu_context_slab;
@@ -125,15 +128,13 @@
 	void *arg = THREAD->thread_arg;
 	THREAD->last_cycle = get_cycle();
-
+	
 	/* This is where each thread wakes up after its creation */
-	spinlock_unlock(&THREAD->lock);
+	irq_spinlock_unlock(&THREAD->lock, false);
 	interrupts_enable();
-
+	
 	f(arg);
 	
 	/* Accumulate accounting to the task */
-	ipl_t ipl = interrupts_disable();
-	
-	spinlock_lock(&THREAD->lock);
+	irq_spinlock_lock(&THREAD->lock, true);
 	if (!THREAD->uncounted) {
 		thread_update_accounting(true);
@@ -142,73 +143,74 @@
 		uint64_t kcycles = THREAD->kcycles;
 		THREAD->kcycles = 0;
-
-		spinlock_unlock(&THREAD->lock);
 		
-		spinlock_lock(&TASK->lock);
+		irq_spinlock_pass(&THREAD->lock, &TASK->lock);
 		TASK->ucycles += ucycles;
 		TASK->kcycles += kcycles;
-		spinlock_unlock(&TASK->lock);
+		irq_spinlock_unlock(&TASK->lock, true);
 	} else
-		spinlock_unlock(&THREAD->lock);
-	
-	interrupts_restore(ipl);
+		irq_spinlock_unlock(&THREAD->lock, true);
 	
 	thread_exit();
-	/* not reached */
-}
-
-/** Initialization and allocation for thread_t structure */
-static int thr_constructor(void *obj, int kmflags)
-{
-	thread_t *t = (thread_t *) obj;
-
-	spinlock_initialize(&t->lock, "thread_t_lock");
-	link_initialize(&t->rq_link);
-	link_initialize(&t->wq_link);
-	link_initialize(&t->th_link);
-
+	
+	/* Not reached */
+}
+
+/** Initialization and allocation for thread_t structure
+ *
+ */
+static int thr_constructor(void *obj, unsigned int kmflags)
+{
+	thread_t *thread = (thread_t *) obj;
+	
+	irq_spinlock_initialize(&thread->lock, "thread_t_lock");
+	link_initialize(&thread->rq_link);
+	link_initialize(&thread->wq_link);
+	link_initialize(&thread->th_link);
+	
 	/* call the architecture-specific part of the constructor */
-	thr_constructor_arch(t);
+	thr_constructor_arch(thread);
 	
 #ifdef CONFIG_FPU
 #ifdef CONFIG_FPU_LAZY
-	t->saved_fpu_context = NULL;
-#else
-	t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
-	if (!t->saved_fpu_context)
+	thread->saved_fpu_context = NULL;
+#else /* CONFIG_FPU_LAZY */
+	thread->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
+	if (!thread->saved_fpu_context)
 		return -1;
-#endif
-#endif
-
-	t->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
-	if (!t->kstack) {
+#endif /* CONFIG_FPU_LAZY */
+#endif /* CONFIG_FPU */
+	
+	thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
+	if (!thread->kstack) {
 #ifdef CONFIG_FPU
-		if (t->saved_fpu_context)
-			slab_free(fpu_context_slab, t->saved_fpu_context);
+		if (thread->saved_fpu_context)
+			slab_free(fpu_context_slab, thread->saved_fpu_context);
 #endif
 		return -1;
 	}
-
+	
 #ifdef CONFIG_UDEBUG
-	mutex_initialize(&t->udebug.lock, MUTEX_PASSIVE);
-#endif
-
+	mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
+#endif
+	
 	return 0;
 }
 
 /** Destruction of thread_t object */
-static int thr_destructor(void *obj)
-{
-	thread_t *t = (thread_t *) obj;
-
+static size_t thr_destructor(void *obj)
+{
+	thread_t *thread = (thread_t *) obj;
+	
 	/* call the architecture-specific part of the destructor */
-	thr_destructor_arch(t);
-
-	frame_free(KA2PA(t->kstack));
+	thr_destructor_arch(thread);
+	
+	frame_free(KA2PA(thread->kstack));
+	
 #ifdef CONFIG_FPU
-	if (t->saved_fpu_context)
-		slab_free(fpu_context_slab, t->saved_fpu_context);
-#endif
-	return 1; /* One page freed */
+	if (thread->saved_fpu_context)
+		slab_free(fpu_context_slab, thread->saved_fpu_context);
+#endif
+	
+	return 1;  /* One page freed */
 }
 
@@ -221,13 +223,14 @@
 {
 	THREAD = NULL;
+	
 	atomic_set(&nrdy, 0);
 	thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
 	    thr_constructor, thr_destructor, 0);
-
+	
 #ifdef CONFIG_FPU
 	fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
 	    FPU_CONTEXT_ALIGN, NULL, NULL, 0);
 #endif
-
+	
 	avltree_create(&threads_tree);
 }
@@ -235,145 +238,132 @@
 /** Make thread ready
  *
- * Switch thread t to the ready state.
+ * Switch thread to the ready state.
  *
  * @param t Thread to make ready.
  *
  */
-void thread_ready(thread_t *t)
-{
-	cpu_t *cpu;
-	runq_t *r;
-	ipl_t ipl;
-	int i, avg;
-
-	ipl = interrupts_disable();
-
-	spinlock_lock(&t->lock);
-
-	ASSERT(!(t->state == Ready));
-
-	i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;
-	
-	cpu = CPU;
-	if (t->flags & THREAD_FLAG_WIRED) {
-		ASSERT(t->cpu != NULL);
-		cpu = t->cpu;
+void thread_ready(thread_t *thread)
+{
+	irq_spinlock_lock(&thread->lock, true);
+	
+	ASSERT(!(thread->state == Ready));
+	
+	int i = (thread->priority < RQ_COUNT - 1)
+	    ? ++thread->priority : thread->priority;
+	
+	cpu_t *cpu = CPU;
+	if (thread->flags & THREAD_FLAG_WIRED) {
+		ASSERT(thread->cpu != NULL);
+		cpu = thread->cpu;
 	}
-	t->state = Ready;
-	spinlock_unlock(&t->lock);
+	thread->state = Ready;
+	
+	irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
 	
 	/*
-	 * Append t to respective ready queue on respective processor.
+	 * Append thread to respective ready queue
+	 * on respective processor.
 	 */
-	r = &cpu->rq[i];
-	spinlock_lock(&r->lock);
-	list_append(&t->rq_link, &r->rq_head);
-	r->n++;
-	spinlock_unlock(&r->lock);
-
+	
+	list_append(&thread->rq_link, &cpu->rq[i].rq_head);
+	cpu->rq[i].n++;
+	irq_spinlock_unlock(&(cpu->rq[i].lock), true);
+	
 	atomic_inc(&nrdy);
-	// FIXME: Why is the avg value never read?
-	avg = atomic_get(&nrdy) / config.cpu_active;
+	// FIXME: Why is the avg value not used?
+	// avg = atomic_get(&nrdy) / config.cpu_active;
 	atomic_inc(&cpu->nrdy);
-
+}
+
+/** Create new thread
+ *
+ * Create a new thread.
+ *
+ * @param func      Thread's implementing function.
+ * @param arg       Thread's implementing function argument.
+ * @param task      Task to which the thread belongs. The caller must
+ *                  guarantee that the task won't cease to exist during the
+ *                  call. The task's lock may not be held.
+ * @param flags     Thread flags.
+ * @param name      Symbolic name (a copy is made).
+ * @param uncounted Thread's accounting doesn't affect accumulated task
+ *                  accounting.
+ *
+ * @return New thread's structure on success, NULL on failure.
+ *
+ */
+thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
+    unsigned int flags, const char *name, bool uncounted)
+{
+	thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0);
+	if (!thread)
+		return NULL;
+	
+	/* Not needed, but good for debugging */
+	memsetb(thread->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
+	
+	irq_spinlock_lock(&tidlock, true);
+	thread->tid = ++last_tid;
+	irq_spinlock_unlock(&tidlock, true);
+	
+	context_save(&thread->saved_context);
+	context_set(&thread->saved_context, FADDR(cushion),
+	    (uintptr_t) thread->kstack, THREAD_STACK_SIZE);
+	
+	the_initialize((the_t *) thread->kstack);
+	
+	ipl_t ipl = interrupts_disable();
+	thread->saved_context.ipl = interrupts_read();
 	interrupts_restore(ipl);
-}
-
-/** Create new thread
- *
- * Create a new thread.
- *
- * @param func		Thread's implementing function.
- * @param arg		Thread's implementing function argument.
- * @param task		Task to which the thread belongs. The caller must
- * 			guarantee that the task won't cease to exist during the
- * 			call. The task's lock may not be held.
- * @param flags		Thread flags.
- * @param name		Symbolic name (a copy is made).
- * @param uncounted	Thread's accounting doesn't affect accumulated task
- * 			accounting.
- *
- * @return 		New thread's structure on success, NULL on failure.
- *
- */
-thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
-    int flags, const char *name, bool uncounted)
-{
-	thread_t *t;
-	ipl_t ipl;
-	
-	t = (thread_t *) slab_alloc(thread_slab, 0);
-	if (!t)
-		return NULL;
-	
-	/* Not needed, but good for debugging */
-	memsetb(t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
-	
-	ipl = interrupts_disable();
-	spinlock_lock(&tidlock);
-	t->tid = ++last_tid;
-	spinlock_unlock(&tidlock);
-	interrupts_restore(ipl);
-	
-	context_save(&t->saved_context);
-	context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack,
-	    THREAD_STACK_SIZE);
-	
-	the_initialize((the_t *) t->kstack);
-	
-	ipl = interrupts_disable();
-	t->saved_context.ipl = interrupts_read();
-	interrupts_restore(ipl);
-	
-	memcpy(t->name, name, THREAD_NAME_BUFLEN);
-	t->name[THREAD_NAME_BUFLEN - 1] = 0;
-	
-	t->thread_code = func;
-	t->thread_arg = arg;
-	t->ticks = -1;
-	t->ucycles = 0;
-	t->kcycles = 0;
-	t->uncounted = uncounted;
-	t->priority = -1;		/* start in rq[0] */
-	t->cpu = NULL;
-	t->flags = flags;
-	t->state = Entering;
-	t->call_me = NULL;
-	t->call_me_with = NULL;
-	
-	timeout_initialize(&t->sleep_timeout);
-	t->sleep_interruptible = false;
-	t->sleep_queue = NULL;
-	t->timeout_pending = 0;
-
-	t->in_copy_from_uspace = false;
-	t->in_copy_to_uspace = false;
-
-	t->interrupted = false;	
-	t->detached = false;
-	waitq_initialize(&t->join_wq);
-	
-	t->rwlock_holder_type = RWLOCK_NONE;
-		
-	t->task = task;
-	
-	t->fpu_context_exists = 0;
-	t->fpu_context_engaged = 0;
-
-	avltree_node_initialize(&t->threads_tree_node);
-	t->threads_tree_node.key = (uintptr_t) t;
-
+	
+	str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
+	
+	thread->thread_code = func;
+	thread->thread_arg = arg;
+	thread->ticks = -1;
+	thread->ucycles = 0;
+	thread->kcycles = 0;
+	thread->uncounted = uncounted;
+	thread->priority = -1;          /* Start in rq[0] */
+	thread->cpu = NULL;
+	thread->flags = flags;
+	thread->state = Entering;
+	thread->call_me = NULL;
+	thread->call_me_with = NULL;
+	
+	timeout_initialize(&thread->sleep_timeout);
+	thread->sleep_interruptible = false;
+	thread->sleep_queue = NULL;
+	thread->timeout_pending = false;
+	
+	thread->in_copy_from_uspace = false;
+	thread->in_copy_to_uspace = false;
+	
+	thread->interrupted = false;
+	thread->detached = false;
+	waitq_initialize(&thread->join_wq);
+	
+	thread->rwlock_holder_type = RWLOCK_NONE;
+	
+	thread->task = task;
+	
+	thread->fpu_context_exists = 0;
+	thread->fpu_context_engaged = 0;
+	
+	avltree_node_initialize(&thread->threads_tree_node);
+	thread->threads_tree_node.key = (uintptr_t) thread;
+	
 #ifdef CONFIG_UDEBUG
 	/* Init debugging stuff */
-	udebug_thread_initialize(&t->udebug);
-#endif
-
-	/* might depend on previous initialization */
-	thread_create_arch(t);	
-
+	udebug_thread_initialize(&thread->udebug);
+#endif
+	
+	/* Might depend on previous initialization */
+	thread_create_arch(thread);
+	
 	if (!(flags & THREAD_FLAG_NOATTACH))
-		thread_attach(t, task);
-
-	return t;
+		thread_attach(thread, task);
+	
+	return thread;
 }
 
@@ -381,37 +371,39 @@
  *
  * Detach thread from all queues, cpus etc. and destroy it.
- *
- * Assume thread->lock is held!!
- */
-void thread_destroy(thread_t *t)
-{
-	ASSERT(t->state == Exiting || t->state == Lingering);
-	ASSERT(t->task);
-	ASSERT(t->cpu);
-
-	spinlock_lock(&t->cpu->lock);
-	if (t->cpu->fpu_owner == t)
-		t->cpu->fpu_owner = NULL;
-	spinlock_unlock(&t->cpu->lock);
-
-	spinlock_unlock(&t->lock);
-
-	spinlock_lock(&threads_lock);
-	avltree_delete(&threads_tree, &t->threads_tree_node);
-	spinlock_unlock(&threads_lock);
-
+ * Assume thread->lock is held!
+ *
+ * @param thread  Thread to be destroyed.
+ * @param irq_res Indicate whether it should unlock thread->lock
+ *                in interrupts-restore mode.
+ *
+ */
+void thread_destroy(thread_t *thread, bool irq_res)
+{
+	ASSERT((thread->state == Exiting) || (thread->state == Lingering));
+	ASSERT(thread->task);
+	ASSERT(thread->cpu);
+	
+	irq_spinlock_lock(&thread->cpu->lock, false);
+	if (thread->cpu->fpu_owner == thread)
+		thread->cpu->fpu_owner = NULL;
+	irq_spinlock_unlock(&thread->cpu->lock, false);
+	
+	irq_spinlock_pass(&thread->lock, &threads_lock);
+	
+	avltree_delete(&threads_tree, &thread->threads_tree_node);
+	
+	irq_spinlock_pass(&threads_lock, &thread->task->lock);
+	
 	/*
 	 * Detach from the containing task.
 	 */
-	spinlock_lock(&t->task->lock);
-	list_remove(&t->th_link);
-	spinlock_unlock(&t->task->lock);	
-
+	list_remove(&thread->th_link);
+	irq_spinlock_unlock(&thread->task->lock, irq_res);
+	
 	/*
 	 * Drop the reference to the containing task.
 	 */
-	task_release(t->task);
-	
-	slab_free(thread_slab, t);
+	task_release(thread->task);
+	slab_free(thread_slab, thread);
 }
 
@@ -421,46 +413,41 @@
  * threads_tree.
  *
- * @param t	Thread to be attached to the task.
- * @param task	Task to which the thread is to be attached.
- */
-void thread_attach(thread_t *t, task_t *task)
-{
-	ipl_t ipl;
-
+ * @param thread Thread to be attached to the task.
+ * @param task   Task to which the thread is to be attached.
+ *
+ */
+void thread_attach(thread_t *thread, task_t *task)
+{
 	/*
 	 * Attach to the specified task.
 	 */
-	ipl = interrupts_disable();
-	spinlock_lock(&task->lock);
-
+	irq_spinlock_lock(&task->lock, true);
+	
 	/* Hold a reference to the task. */
 	task_hold(task);
-
+	
 	/* Must not count kbox thread into lifecount */
-	if (t->flags & THREAD_FLAG_USPACE)
+	if (thread->flags & THREAD_FLAG_USPACE)
 		atomic_inc(&task->lifecount);
-
-	list_append(&t->th_link, &task->th_head);
-	spinlock_unlock(&task->lock);
-
+	
+	list_append(&thread->th_link, &task->th_head);
+	
+	irq_spinlock_pass(&task->lock, &threads_lock);
+	
 	/*
 	 * Register this thread in the system-wide list.
 	 */
-	spinlock_lock(&threads_lock);
-	avltree_insert(&threads_tree, &t->threads_tree_node);
-	spinlock_unlock(&threads_lock);
-	
-	interrupts_restore(ipl);
+	avltree_insert(&threads_tree, &thread->threads_tree_node);
+	irq_spinlock_unlock(&threads_lock, true);
 }
 
 /** Terminate thread.
  *
- * End current thread execution and switch it to the exiting state. All pending
- * timeouts are executed.
+ * End current thread execution and switch it to the exiting state.
+ * All pending timeouts are executed.
+ *
  */
 void thread_exit(void)
 {
-	ipl_t ipl;
-
 	if (THREAD->flags & THREAD_FLAG_USPACE) {
 #ifdef CONFIG_UDEBUG
@@ -475,4 +462,5 @@
 			 * can only be created by threads of the same task.
 			 * We are safe to perform cleanup.
+			 *
 			 */
 			ipc_cleanup();
@@ -481,24 +469,21 @@
 		}
 	}
-
+	
 restart:
-	ipl = interrupts_disable();
-	spinlock_lock(&THREAD->lock);
-	if (THREAD->timeout_pending) { 
-		/* busy waiting for timeouts in progress */
-		spinlock_unlock(&THREAD->lock);
-		interrupts_restore(ipl);
+	irq_spinlock_lock(&THREAD->lock, true);
+	if (THREAD->timeout_pending) {
+		/* Busy waiting for timeouts in progress */
+		irq_spinlock_unlock(&THREAD->lock, true);
 		goto restart;
 	}
 	
 	THREAD->state = Exiting;
-	spinlock_unlock(&THREAD->lock);
+	irq_spinlock_unlock(&THREAD->lock, true);
+	
 	scheduler();
-
+	
 	/* Not reached */
-	while (1)
-		;
-}
-
+	while (true);
+}
 
 /** Thread sleep
@@ -515,5 +500,5 @@
 	while (sec > 0) {
 		uint32_t period = (sec > 1000) ? 1000 : sec;
-	
+		
 		thread_usleep(period * 1000000);
 		sec -= period;
@@ -523,18 +508,16 @@
 /** Wait for another thread to exit.
  *
- * @param t Thread to join on exit.
- * @param usec Timeout in microseconds.
- * @param flags Mode of operation.
+ * @param thread Thread to join on exit.
+ * @param usec   Timeout in microseconds.
+ * @param flags  Mode of operation.
  *
  * @return An error code from errno.h or an error code from synch.h.
- */
-int thread_join_timeout(thread_t *t, uint32_t usec, int flags)
-{
-	ipl_t ipl;
-	int rc;
-
-	if (t == THREAD)
+ *
+ */
+int thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
+{
+	if (thread == THREAD)
 		return EINVAL;
-
+	
 	/*
 	 * Since thread join can only be called once on an undetached thread,
@@ -542,13 +525,9 @@
 	 */
 	
-	ipl = interrupts_disable();
-	spinlock_lock(&t->lock);
-	ASSERT(!t->detached);
-	spinlock_unlock(&t->lock);
-	interrupts_restore(ipl);
-	
-	rc = waitq_sleep_timeout(&t->join_wq, usec, flags);
-	
-	return rc;	
+	irq_spinlock_lock(&thread->lock, true);
+	ASSERT(!thread->detached);
+	irq_spinlock_unlock(&thread->lock, true);
+	
+	return waitq_sleep_timeout(&thread->join_wq, usec, flags);
 }
 
@@ -558,26 +537,28 @@
  * state, deallocate its resources.
  *
- * @param t Thread to be detached.
- */
-void thread_detach(thread_t *t)
-{
-	ipl_t ipl;
-
+ * @param thread Thread to be detached.
+ *
+ */
+void thread_detach(thread_t *thread)
+{
 	/*
 	 * Since the thread is expected not to be already detached,
 	 * pointer to it must be still valid.
 	 */
-	ipl = interrupts_disable();
-	spinlock_lock(&t->lock);
-	ASSERT(!t->detached);
-	if (t->state == Lingering) {
-		thread_destroy(t);	/* unlocks &t->lock */
-		interrupts_restore(ipl);
+	irq_spinlock_lock(&thread->lock, true);
+	ASSERT(!thread->detached);
+	
+	if (thread->state == Lingering) {
+		/*
+		 * Unlock &thread->lock and restore
+		 * interrupts in thread_destroy().
+		 */
+		thread_destroy(thread, true);
 		return;
 	} else {
-		t->detached = true;
+		thread->detached = true;
 	}
-	spinlock_unlock(&t->lock);
-	interrupts_restore(ipl);
+	
+	irq_spinlock_unlock(&thread->lock, true);
 }
 
@@ -601,5 +582,6 @@
  *
  * Register a function and its argument to be executed
- * on next context switch to the current thread.
+ * on next context switch to the current thread. Must
+ * be called with interrupts disabled.
  *
  * @param call_me      Out-of-context function.
@@ -609,67 +591,62 @@
 void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
 {
-	ipl_t ipl;
-	
-	ipl = interrupts_disable();
-	spinlock_lock(&THREAD->lock);
+	irq_spinlock_lock(&THREAD->lock, false);
 	THREAD->call_me = call_me;
 	THREAD->call_me_with = call_me_with;
-	spinlock_unlock(&THREAD->lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&THREAD->lock, false);
 }
 
 static bool thread_walker(avltree_node_t *node, void *arg)
 {
-	thread_t *t = avltree_get_instance(node, thread_t, threads_tree_node);
+	thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);
 	
 	uint64_t ucycles, kcycles;
 	char usuffix, ksuffix;
-	order_suffix(t->ucycles, &ucycles, &usuffix);
-	order_suffix(t->kcycles, &kcycles, &ksuffix);
-
+	order_suffix(thread->ucycles, &ucycles, &usuffix);
+	order_suffix(thread->kcycles, &kcycles, &ksuffix);
+	
 #ifdef __32_BITS__
 	printf("%-6" PRIu64" %-10s %10p %-8s %10p %-3" PRIu32 " %10p %10p %9"
-		PRIu64 "%c %9" PRIu64 "%c ", t->tid, t->name, t,
-		thread_states[t->state], t->task, t->task->context, t->thread_code,
-		t->kstack, ucycles, usuffix, kcycles, ksuffix);
-#endif
-
+		PRIu64 "%c %9" PRIu64 "%c ", thread->tid, thread->name, thread,
+		thread_states[thread->state], thread->task, thread->task->context,
+		thread->thread_code, thread->kstack, ucycles, usuffix, kcycles, ksuffix);
+#endif
+	
 #ifdef __64_BITS__
 	printf("%-6" PRIu64" %-10s %18p %-8s %18p %-3" PRIu32 " %18p %18p %9"
-		PRIu64 "%c %9" PRIu64 "%c ", t->tid, t->name, t,
-		thread_states[t->state], t->task, t->task->context, t->thread_code,
-		t->kstack, ucycles, usuffix, kcycles, ksuffix);
-#endif
-			
-	if (t->cpu)
-		printf("%-4u", t->cpu->id);
+		PRIu64 "%c %9" PRIu64 "%c ", thread->tid, thread->name, thread,
+		thread_states[thread->state], thread->task, thread->task->context,
+		thread->thread_code, thread->kstack, ucycles, usuffix, kcycles, ksuffix);
+#endif
+	
+	if (thread->cpu)
+		printf("%-4u", thread->cpu->id);
 	else
 		printf("none");
-			
-	if (t->state == Sleeping) {
+	
+	if (thread->state == Sleeping) {
 #ifdef __32_BITS__
-		printf(" %10p", t->sleep_queue);
-#endif
-
+		printf(" %10p", thread->sleep_queue);
+#endif
+		
 #ifdef __64_BITS__
-		printf(" %18p", t->sleep_queue);
+		printf(" %18p", thread->sleep_queue);
 #endif
 	}
-			
+	
 	printf("\n");
-
+	
 	return true;
 }
 
-/** Print list of threads debug info */
+/** Print list of threads debug info
+ *
+ */
 void thread_print_list(void)
 {
-	ipl_t ipl;
-	
 	/* Messing with thread structures, avoid deadlock */
-	ipl = interrupts_disable();
-	spinlock_lock(&threads_lock);
-
-#ifdef __32_BITS__	
+	irq_spinlock_lock(&threads_lock, true);
+	
+#ifdef __32_BITS__
 	printf("tid    name       address    state    task       "
 		"ctx code       stack      ucycles    kcycles    cpu  "
@@ -679,5 +656,5 @@
 		"----------\n");
 #endif
-
+	
 #ifdef __64_BITS__
 	printf("tid    name       address            state    task               "
@@ -688,9 +665,8 @@
 		"------------------\n");
 #endif
-
+	
 	avltree_walk(&threads_tree, thread_walker, NULL);
-
-	spinlock_unlock(&threads_lock);
-	interrupts_restore(ipl);
+	
+	irq_spinlock_unlock(&threads_lock, true);
 }
 
@@ -700,13 +676,13 @@
  * interrupts must be already disabled.
  *
- * @param t Pointer to thread.
+ * @param thread Pointer to thread.
  *
  * @return True if thread t is known to the system, false otherwise.
- */
-bool thread_exists(thread_t *t)
-{
-	avltree_node_t *node;
-
-	node = avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) t));
+ *
+ */
+bool thread_exists(thread_t *thread)
+{
+	avltree_node_t *node =
+	    avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) thread));
 	
 	return node != NULL;
@@ -718,14 +694,16 @@
  * interrupts must be already disabled.
  *
- * @param user	True to update user accounting, false for kernel.
+ * @param user True to update user accounting, false for kernel.
+ *
  */
 void thread_update_accounting(bool user)
 {
 	uint64_t time = get_cycle();
-	if (user) {
+	
+	if (user)
 		THREAD->ucycles += time - THREAD->last_cycle;
-	} else {
+	else
 		THREAD->kcycles += time - THREAD->last_cycle;
-	}
+	
 	THREAD->last_cycle = time;
 }
@@ -774,23 +752,21 @@
     size_t name_len, thread_id_t *uspace_thread_id)
 {
-	thread_t *t;
-	char namebuf[THREAD_NAME_BUFLEN];
-	uspace_arg_t *kernel_uarg;
-	int rc;
-
 	if (name_len > THREAD_NAME_BUFLEN - 1)
 		name_len = THREAD_NAME_BUFLEN - 1;
-
-	rc = copy_from_uspace(namebuf, uspace_name, name_len);
+	
+	char namebuf[THREAD_NAME_BUFLEN];
+	int rc = copy_from_uspace(namebuf, uspace_name, name_len);
 	if (rc != 0)
 		return (unative_t) rc;
-
+	
 	namebuf[name_len] = 0;
-
+	
 	/*
 	 * In case of failure, kernel_uarg will be deallocated in this function.
 	 * In case of success, kernel_uarg will be freed in uinit().
+	 *
 	 */
-	kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
+	uspace_arg_t *kernel_uarg =
+	    (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
 	
 	rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
@@ -799,13 +775,11 @@
 		return (unative_t) rc;
 	}
-
-	t = thread_create(uinit, kernel_uarg, TASK,
+	
+	thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
 	    THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
-	if (t) {
+	if (thread) {
 		if (uspace_thread_id != NULL) {
-			int rc;
-
-			rc = copy_to_uspace(uspace_thread_id, &t->tid,
-			    sizeof(t->tid));
+			rc = copy_to_uspace(uspace_thread_id, &thread->tid,
+			    sizeof(thread->tid));
 			if (rc != 0) {
 				/*
@@ -813,6 +787,7 @@
 				 * has already been created. We need to undo its
 				 * creation now.
+				 *
 				 */
-
+				
 				/*
 				 * The new thread structure is initialized, but
@@ -820,10 +795,11 @@
 				 * We can safely deallocate it.
 				 */
-				slab_free(thread_slab, t);
-			 	free(kernel_uarg);
-
+				slab_free(thread_slab, thread);
+				free(kernel_uarg);
+				
 				return (unative_t) rc;
 			 }
 		}
+		
 #ifdef CONFIG_UDEBUG
 		/*
@@ -833,15 +809,16 @@
 		 * THREAD_B events for threads that already existed
 		 * and could be detected with THREAD_READ before.
+		 *
 		 */
-		udebug_thread_b_event_attach(t, TASK);
+		udebug_thread_b_event_attach(thread, TASK);
 #else
-		thread_attach(t, TASK);
-#endif
-		thread_ready(t);
-
+		thread_attach(thread, TASK);
+#endif
+		thread_ready(thread);
+		
 		return 0;
 	} else
 		free(kernel_uarg);
-
+	
 	return (unative_t) ENOMEM;
 }
@@ -853,4 +830,5 @@
 {
 	thread_exit();
+	
 	/* Unreachable */
 	return 0;
@@ -863,4 +841,5 @@
  *
  * @return 0 on success or an error code from @ref errno.h.
+ *
  */
 unative_t sys_thread_get_id(thread_id_t *uspace_thread_id)
@@ -869,4 +848,5 @@
 	 * No need to acquire lock on THREAD because tid
 	 * remains constant for the lifespan of the thread.
+	 *
 	 */
 	return (unative_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
