Index: kernel/generic/src/synch/mutex.c
===================================================================
--- kernel/generic/src/synch/mutex.c	(revision 2b4a9f26cedf64d7586d2e0b91cc522a8d4fc07b)
+++ kernel/generic/src/synch/mutex.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -67,5 +67,5 @@
  *
  */
-int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, int flags)
+int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, unsigned int flags)
 {
 	int rc;
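
For reference, a minimal caller-side sketch of the timed mutex API whose
flags parameter is widened above. The SYNCH_FLAGS_NONE constant, the
mutex_unlock() call and the 100 ms figure are illustrative assumptions;
MUTEX_PASSIVE and SYNCH_FAILED() appear elsewhere in this patch.

	mutex_t mtx;
	mutex_initialize(&mtx, MUTEX_PASSIVE);
	
	/* Attempt to take the mutex, giving up after 100 ms (100000 us). */
	int rc = _mutex_lock_timeout(&mtx, 100000, SYNCH_FLAGS_NONE);
	if (SYNCH_FAILED(rc)) {
		/* Timed out or was interrupted: the mutex is not held. */
	} else {
		/* The mutex is held and must eventually be released. */
		mutex_unlock(&mtx);
	}
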
Index: kernel/generic/src/synch/rwlock.c
===================================================================
--- kernel/generic/src/synch/rwlock.c	(revision 2b4a9f26cedf64d7586d2e0b91cc522a8d4fc07b)
+++ kernel/generic/src/synch/rwlock.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -33,5 +33,5 @@
 /**
  * @file
- * @brief	Reader/Writer locks.
+ * @brief Reader/Writer locks.
  *
  * A reader/writer lock can be held by multiple readers at a time.
@@ -57,5 +57,5 @@
  * each thread can block on only one rwlock at a time.
  */
- 
+
 #include <synch/rwlock.h>
 #include <synch/spinlock.h>
@@ -69,9 +69,6 @@
 #include <panic.h>
 
-#define ALLOW_ALL		0
-#define ALLOW_READERS_ONLY	1
-
-static void let_others_in(rwlock_t *rwl, int readers_only);
-static void release_spinlock(void *arg);
+#define ALLOW_ALL           0
+#define ALLOW_READERS_ONLY  1
 
 /** Initialize reader/writer lock
@@ -80,11 +77,101 @@
  *
  * @param rwl Reader/Writer lock.
+ *
  */
 void rwlock_initialize(rwlock_t *rwl) {
-	spinlock_initialize(&rwl->lock, "rwlock_t");
+	irq_spinlock_initialize(&rwl->lock, "rwl.lock");
 	mutex_initialize(&rwl->exclusive, MUTEX_PASSIVE);
 	rwl->readers_in = 0;
 }
 
+/** Direct handoff of reader/writer lock ownership.
+ *
+ * Direct handoff of reader/writer lock ownership
+ * to waiting readers or a writer.
+ *
+ * Must be called with rwl->lock locked.
+ * Must be called with interrupts disabled.
+ *
+ * @param rwl          Reader/Writer lock.
+ * @param readers_only See the description below.
+ *
+ * If readers_only is false: (unlock scenario)
+ * Let the first sleeper on 'exclusive' mutex in, no matter
+ * whether it is a reader or a writer. If there are more leading
+ * readers in line, let each of them in.
+ *
+ * Otherwise: (timeout scenario)
+ * Let all leading readers in.
+ *
+ */
+static void let_others_in(rwlock_t *rwl, int readers_only)
+{
+	rwlock_type_t type = RWLOCK_NONE;
+	thread_t *thread = NULL;
+	bool one_more = true;
+	
+	irq_spinlock_lock(&rwl->exclusive.sem.wq.lock, false);
+	
+	if (!list_empty(&rwl->exclusive.sem.wq.head))
+		thread = list_get_instance(rwl->exclusive.sem.wq.head.next,
+		    thread_t, wq_link);
+	
+	do {
+		if (thread) {
+			irq_spinlock_lock(&thread->lock, false);
+			type = thread->rwlock_holder_type;
+			irq_spinlock_unlock(&thread->lock, false);
+		}
+		
+		/*
+		 * If readers_only is true, we wake all leading readers
+		 * if and only if rwl is locked by another reader.
+		 * Assumption: readers_only ==> rwl->readers_in
+		 *
+		 */
+		if ((readers_only) && (type != RWLOCK_READER))
+			break;
+		
+		if (type == RWLOCK_READER) {
+			/*
+			 * Waking up a reader.
+			 * We are responsible for incrementing rwl->readers_in
+			 * for it.
+			 *
+			 */
+			rwl->readers_in++;
+		}
+		
+		/*
+		 * Only the last iteration through this loop can increment
+		 * rwl->exclusive.sem.wq.missed_wakeups. All preceding
+		 * iterations will wake up a thread.
+		 *
+		 */
+		
+		/*
+		 * We call the internal version of waitq_wakeup, which
+		 * relies on the fact that the waitq is already locked.
+		 *
+		 */
+		_waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);
+		
+		thread = NULL;
+		if (!list_empty(&rwl->exclusive.sem.wq.head)) {
+			thread = list_get_instance(rwl->exclusive.sem.wq.head.next,
+			    thread_t, wq_link);
+			
+			if (thread) {
+				irq_spinlock_lock(&thread->lock, false);
+				if (thread->rwlock_holder_type != RWLOCK_READER)
+					one_more = false;
+				irq_spinlock_unlock(&thread->lock, false);
+			}
+		}
+	} while ((type == RWLOCK_READER) && (thread) && (one_more));
+	
+	irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
+}
+
 /** Acquire reader/writer lock for reading
  *
@@ -92,6 +179,6 @@
  * Timeout and willingness to block may be specified.
  *
- * @param rwl Reader/Writer lock.
- * @param usec Timeout in microseconds.
+ * @param rwl   Reader/Writer lock.
+ * @param usec  Timeout in microseconds.
  * @param flags Specify mode of operation.
  *
@@ -100,43 +187,54 @@
  *
  * @return See comment for waitq_sleep_timeout().
- */
-int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags)
-{
-	ipl_t ipl;
-	int rc;
-	
-	ipl = interrupts_disable();
-	spinlock_lock(&THREAD->lock);
+ *
+ */
+int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, unsigned int flags)
+{
+	irq_spinlock_lock(&THREAD->lock, true);
 	THREAD->rwlock_holder_type = RWLOCK_WRITER;
-	spinlock_unlock(&THREAD->lock);	
-	interrupts_restore(ipl);
-
+	irq_spinlock_unlock(&THREAD->lock, true);
+	
 	/*
 	 * Writers take the easy part.
 	 * They just need to acquire the exclusive mutex.
+	 *
 	 */
-	rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
+	int rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
 	if (SYNCH_FAILED(rc)) {
-
 		/*
 		 * Lock operation timed out or was interrupted.
 		 * The state of rwl is UNKNOWN at this point.
 		 * No claims about its holder can be made.
-		 */
-		 
-		ipl = interrupts_disable();
-		spinlock_lock(&rwl->lock);
+		 *
+		 */
+		irq_spinlock_lock(&rwl->lock, true);
+		
 		/*
 		 * Now when rwl is locked, we can inspect it again.
 		 * If it is held by some readers already, we can let
 		 * readers from the head of the wait queue in.
+		 *
 		 */
 		if (rwl->readers_in)
 			let_others_in(rwl, ALLOW_READERS_ONLY);
-		spinlock_unlock(&rwl->lock);
-		interrupts_restore(ipl);
+		
+		irq_spinlock_unlock(&rwl->lock, true);
 	}
 	
 	return rc;
+}
+
+/** Release spinlock callback
+ *
+ * This is a callback function invoked from the scheduler.
+ * The callback is registered in _rwlock_read_lock_timeout().
+ *
+ * @param arg Spinlock.
+ *
+ */
+static void release_spinlock(void *arg)
+{
+	if (arg != NULL)
+		irq_spinlock_unlock((irq_spinlock_t *) arg, false);
 }
 
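
To make the timeout path above concrete, a hedged sketch of a writer that
tolerates failure; the SYNCH_FLAGS_NONE name and the one-second timeout are
assumptions, while _rwlock_write_lock_timeout(), SYNCH_FAILED() and
rwlock_write_unlock() are taken from this patch.

	/* Block for at most one second waiting for exclusive access. */
	int rc = _rwlock_write_lock_timeout(&rwl, 1000000, SYNCH_FLAGS_NONE);
	if (!SYNCH_FAILED(rc)) {
		/* ... write-side critical section ... */
		rwlock_write_unlock(&rwl);
	} else {
		/* The lock was not acquired; the timeout path above has
		   already let any leading readers in via let_others_in(). */
	}
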
@@ -146,6 +244,6 @@
  * Timeout and willingness to block may be specified.
  *
- * @param rwl Reader/Writer lock.
- * @param usec Timeout in microseconds.
+ * @param rwl   Reader/Writer lock.
+ * @param usec  Timeout in microseconds.
  * @param flags Select mode of operation.
  *
@@ -154,23 +252,27 @@
  *
  * @return See comment for waitq_sleep_timeout().
- */
-int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags)
-{
-	int rc;
-	ipl_t ipl;
-	
-	ipl = interrupts_disable();
-	spinlock_lock(&THREAD->lock);
+ *
+ */
+int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, unsigned int flags)
+{
+	/*
+	 * Since the locking scenarios here get a bit complicated,
+	 * we do not rely on the internal interrupt-disabling logic
+	 * of irq_spinlock_t and control interrupts manually.
+	 *
+	 */
+	ipl_t ipl = interrupts_disable();
+	
+	irq_spinlock_lock(&THREAD->lock, false);
 	THREAD->rwlock_holder_type = RWLOCK_READER;
-	spinlock_unlock(&THREAD->lock);	
-
-	spinlock_lock(&rwl->lock);
-
+	irq_spinlock_pass(&THREAD->lock, &rwl->lock);
+	
 	/*
 	 * Find out whether we can get what we want without blocking.
+	 *
 	 */
-	rc = mutex_trylock(&rwl->exclusive);
+	int rc = mutex_trylock(&rwl->exclusive);
 	if (SYNCH_FAILED(rc)) {
-
 		/*
 		 * 'exclusive' mutex is being held by someone else.
@@ -178,18 +280,19 @@
 		 * else waiting for it, we can enter the critical
 		 * section.
-		 */
-
+		 *
+		 */
+		
 		if (rwl->readers_in) {
-			spinlock_lock(&rwl->exclusive.sem.wq.lock);
+			irq_spinlock_lock(&rwl->exclusive.sem.wq.lock, false);
 			if (list_empty(&rwl->exclusive.sem.wq.head)) {
 				/*
 				 * We can enter.
 				 */
-				spinlock_unlock(&rwl->exclusive.sem.wq.lock);
+				irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
 				goto shortcut;
 			}
-			spinlock_unlock(&rwl->exclusive.sem.wq.lock);
+			irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
 		}
-
+		
 		/*
 		 * In order to prevent a race condition when a reader
@@ -197,11 +300,12 @@
 		 * we register a function to unlock rwl->lock
 		 * after this thread is put asleep.
-		 */
-		#ifdef CONFIG_SMP
+		 *
+		 */
+#ifdef CONFIG_SMP
 		thread_register_call_me(release_spinlock, &rwl->lock);
-		#else
+#else
 		thread_register_call_me(release_spinlock, NULL);
-		#endif
-				 
+#endif
+		
 		rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
 		switch (rc) {
@@ -209,7 +313,8 @@
 			/*
 			 * release_spinlock() wasn't called
+			 *
 			 */
 			thread_register_call_me(NULL, NULL);
-			spinlock_unlock(&rwl->lock);
+			irq_spinlock_unlock(&rwl->lock, false);
 		case ESYNCH_TIMEOUT:
 		case ESYNCH_INTERRUPTED:
@@ -217,6 +322,7 @@
 			 * The sleep timed out.
 			 * We just restore interrupt priority level.
+			 *
 			 */
-		case ESYNCH_OK_BLOCKED:		
+		case ESYNCH_OK_BLOCKED:
 			/*
 			 * We were woken with rwl->readers_in already
@@ -228,4 +334,5 @@
 			 * 'readers_in' is incremented. Same time means both
 			 * events happen atomically when rwl->lock is held.)
+			 *
 			 */
 			interrupts_restore(ipl);
@@ -240,16 +347,15 @@
 		return rc;
 	}
-
+	
 shortcut:
-
 	/*
 	 * We can increment readers_in only if we didn't go to sleep.
 	 * For sleepers, rwlock_let_others_in() will do the job.
+	 *
 	 */
 	rwl->readers_in++;
-	
-	spinlock_unlock(&rwl->lock);
+	irq_spinlock_unlock(&rwl->lock, false);
 	interrupts_restore(ipl);
-
+	
 	return ESYNCH_OK_ATOMIC;
 }
@@ -262,15 +368,11 @@
  *
  * @param rwl Reader/Writer lock.
+ *
  */
 void rwlock_write_unlock(rwlock_t *rwl)
 {
-	ipl_t ipl;
-	
-	ipl = interrupts_disable();
-	spinlock_lock(&rwl->lock);
+	irq_spinlock_lock(&rwl->lock, true);
 	let_others_in(rwl, ALLOW_ALL);
-	spinlock_unlock(&rwl->lock);
-	interrupts_restore(ipl);
-	
+	irq_spinlock_unlock(&rwl->lock, true);
 }
 
@@ -283,109 +385,14 @@
  *
  * @param rwl Reader/Writer lock.
+ *
  */
 void rwlock_read_unlock(rwlock_t *rwl)
 {
-	ipl_t ipl;
-
-	ipl = interrupts_disable();
-	spinlock_lock(&rwl->lock);
+	irq_spinlock_lock(&rwl->lock, true);
+	
 	if (!--rwl->readers_in)
 		let_others_in(rwl, ALLOW_ALL);
-	spinlock_unlock(&rwl->lock);
-	interrupts_restore(ipl);
-}
-
-
-/** Direct handoff of reader/writer lock ownership.
- *
- * Direct handoff of reader/writer lock ownership
- * to waiting readers or a writer.
- *
- * Must be called with rwl->lock locked.
- * Must be called with interrupts_disable()'d.
- *
- * @param rwl Reader/Writer lock.
- * @param readers_only See the description below.
- *
- * If readers_only is false: (unlock scenario)
- * Let the first sleeper on 'exclusive' mutex in, no matter
- * whether it is a reader or a writer. If there are more leading
- * readers in line, let each of them in.
- *
- * Otherwise: (timeout scenario)
- * Let all leading readers in.
- */
-void let_others_in(rwlock_t *rwl, int readers_only)
-{
-	rwlock_type_t type = RWLOCK_NONE;
-	thread_t *t = NULL;
-	bool one_more = true;
-	
-	spinlock_lock(&rwl->exclusive.sem.wq.lock);
-
-	if (!list_empty(&rwl->exclusive.sem.wq.head))
-		t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t,
-		    wq_link);
-	do {
-		if (t) {
-			spinlock_lock(&t->lock);
-			type = t->rwlock_holder_type;
-			spinlock_unlock(&t->lock);			
-		}
-	
-		/*
-		 * If readers_only is true, we wake all leading readers
-		 * if and only if rwl is locked by another reader.
-		 * Assumption: readers_only ==> rwl->readers_in
-		 */
-		if (readers_only && (type != RWLOCK_READER))
-			break;
-
-
-		if (type == RWLOCK_READER) {
-			/*
-			 * Waking up a reader.
-			 * We are responsible for incrementing rwl->readers_in
-			 * for it.
-			 */
-			 rwl->readers_in++;
-		}
-
-		/*
-		 * Only the last iteration through this loop can increment
-		 * rwl->exclusive.sem.wq.missed_wakeup's. All preceeding
-		 * iterations will wake up a thread.
-		 */
-		/* We call the internal version of waitq_wakeup, which
-		 * relies on the fact that the waitq is already locked.
-		 */
-		_waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);
-		
-		t = NULL;
-		if (!list_empty(&rwl->exclusive.sem.wq.head)) {
-			t = list_get_instance(rwl->exclusive.sem.wq.head.next,
-			    thread_t, wq_link);
-			if (t) {
-				spinlock_lock(&t->lock);
-				if (t->rwlock_holder_type != RWLOCK_READER)
-					one_more = false;
-				spinlock_unlock(&t->lock);	
-			}
-		}
-	} while ((type == RWLOCK_READER) && t && one_more);
-
-	spinlock_unlock(&rwl->exclusive.sem.wq.lock);
-}
-
-/** Release spinlock callback
- *
- * This is a callback function invoked from the scheduler.
- * The callback is registered in _rwlock_read_lock_timeout().
- *
- * @param arg Spinlock.
- */
-void release_spinlock(void *arg)
-{
-	spinlock_unlock((spinlock_t *) arg);
+	
+	irq_spinlock_unlock(&rwl->lock, true);
 }
 
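
Putting the read side together, a sketch of the usual pairing. Passing
usec == 0 without SYNCH_FLAGS_NON_BLOCKING requests an indefinite wait (see
the waitq_sleep_timeout() comment below); SYNCH_FLAGS_NONE is an assumed
flag name.

	rwlock_t rwl;
	rwlock_initialize(&rwl);
	
	/* Reader side: any number of readers may hold the lock at once. */
	int rc = _rwlock_read_lock_timeout(&rwl, 0, SYNCH_FLAGS_NONE);
	if (!SYNCH_FAILED(rc)) {
		/* ... read-side critical section ... */
		rwlock_read_unlock(&rwl);
	}
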
Index: kernel/generic/src/synch/semaphore.c
===================================================================
--- kernel/generic/src/synch/semaphore.c	(revision 2b4a9f26cedf64d7586d2e0b91cc522a8d4fc07b)
+++ kernel/generic/src/synch/semaphore.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -33,5 +33,5 @@
 /**
  * @file
- * @brief	Semaphores.
+ * @brief Semaphores.
  */
 
@@ -47,20 +47,15 @@
  * Initialize semaphore.
  *
- * @param s Semaphore.
+ * @param sem Semaphore.
  * @param val Maximal number of threads allowed to enter critical section.
+ *
  */
-void semaphore_initialize(semaphore_t *s, int val)
+void semaphore_initialize(semaphore_t *sem, int val)
 {
-	ipl_t ipl;
+	waitq_initialize(&sem->wq);
 	
-	waitq_initialize(&s->wq);
-	
-	ipl = interrupts_disable();
-
-	spinlock_lock(&s->wq.lock);
-	s->wq.missed_wakeups = val;
-	spinlock_unlock(&s->wq.lock);
-
-	interrupts_restore(ipl);
+	irq_spinlock_lock(&sem->wq.lock, true);
+	sem->wq.missed_wakeups = val;
+	irq_spinlock_unlock(&sem->wq.lock, true);
 }
 
@@ -70,6 +65,6 @@
  * Conditional mode and mode with timeout can be requested.
  *
- * @param s Semaphore.
- * @param usec Timeout in microseconds.
+ * @param sem   Semaphore.
+ * @param usec  Timeout in microseconds.
  * @param flags Select mode of operation.
  *
@@ -78,8 +73,9 @@
  *
  * @return See comment for waitq_sleep_timeout().
+ *
  */
-int _semaphore_down_timeout(semaphore_t *s, uint32_t usec, int flags)
+int _semaphore_down_timeout(semaphore_t *sem, uint32_t usec, unsigned int flags)
 {
-	return waitq_sleep_timeout(&s->wq, usec, flags); 
+	return waitq_sleep_timeout(&sem->wq, usec, flags);
 }
 
@@ -89,8 +85,9 @@
  *
  * @param s Semaphore.
+ *
  */
-void semaphore_up(semaphore_t *s)
+void semaphore_up(semaphore_t *sem)
 {
-	waitq_wakeup(&s->wq, WAKEUP_FIRST);
+	waitq_wakeup(&sem->wq, WAKEUP_FIRST);
 }
 
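
A brief usage sketch of the renamed semaphore API: a counting semaphore
that admits up to four holders at a time. The blocking call with usec == 0
and the SYNCH_FLAGS_NONE name are assumptions; the three functions are the
ones changed above.

	semaphore_t sem;
	semaphore_initialize(&sem, 4);  /* four threads may enter at once */
	
	if (!SYNCH_FAILED(_semaphore_down_timeout(&sem, 0, SYNCH_FLAGS_NONE))) {
		/* ... up to four holders execute here concurrently ... */
		semaphore_up(&sem);  /* wake one sleeper, if any */
	}
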
Index: kernel/generic/src/synch/waitq.c
===================================================================
--- kernel/generic/src/synch/waitq.c	(revision 2b4a9f26cedf64d7586d2e0b91cc522a8d4fc07b)
+++ kernel/generic/src/synch/waitq.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -33,5 +33,5 @@
 /**
  * @file
- * @brief	Wait queue.
+ * @brief Wait queue.
  *
  * Wait queue is the basic synchronization primitive upon which all
@@ -41,4 +41,5 @@
  * fashion. Conditional operation as well as timeouts and interruptions
  * are supported.
+ *
  */
 
@@ -56,5 +57,5 @@
 #include <arch/cycle.h>
 
-static void waitq_sleep_timed_out(void *data);
+static void waitq_sleep_timed_out(void *);
 
 /** Initialize wait queue
@@ -62,9 +63,10 @@
  * Initialize wait queue.
  *
- * @param wq		Pointer to wait queue to be initialized.
+ * @param wq Pointer to wait queue to be initialized.
+ *
  */
 void waitq_initialize(waitq_t *wq)
 {
-	spinlock_initialize(&wq->lock, "waitq_lock");
+	irq_spinlock_initialize(&wq->lock, "wq.lock");
 	list_initialize(&wq->head);
 	wq->missed_wakeups = 0;
@@ -81,41 +83,44 @@
  * timeout at all.
  *
- * @param data		Pointer to the thread that called waitq_sleep_timeout().
+ * @param data Pointer to the thread that called waitq_sleep_timeout().
+ *
  */
 void waitq_sleep_timed_out(void *data)
 {
-	thread_t *t = (thread_t *) data;
-	waitq_t *wq;
+	thread_t *thread = (thread_t *) data;
 	bool do_wakeup = false;
 	DEADLOCK_PROBE_INIT(p_wqlock);
-
-	spinlock_lock(&threads_lock);
-	if (!thread_exists(t))
+	
+	irq_spinlock_lock(&threads_lock, false);
+	if (!thread_exists(thread))
 		goto out;
-
+	
 grab_locks:
-	spinlock_lock(&t->lock);
-	if ((wq = t->sleep_queue)) {		/* assignment */
-		if (!spinlock_trylock(&wq->lock)) {
-			spinlock_unlock(&t->lock);
+	irq_spinlock_lock(&thread->lock, false);
+	
+	waitq_t *wq;
+	if ((wq = thread->sleep_queue)) {  /* Assignment */
+		if (!irq_spinlock_trylock(&wq->lock)) {
+			irq_spinlock_unlock(&thread->lock, false);
 			DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
-			goto grab_locks;	/* avoid deadlock */
-		}
-
-		list_remove(&t->wq_link);
-		t->saved_context = t->sleep_timeout_context;
+			/* Avoid deadlock */
+			goto grab_locks;
+		}
+		
+		list_remove(&thread->wq_link);
+		thread->saved_context = thread->sleep_timeout_context;
 		do_wakeup = true;
-		t->sleep_queue = NULL;
-		spinlock_unlock(&wq->lock);
-	}
-	
-	t->timeout_pending = false;
-	spinlock_unlock(&t->lock);
+		thread->sleep_queue = NULL;
+		irq_spinlock_unlock(&wq->lock, false);
+	}
+	
+	thread->timeout_pending = false;
+	irq_spinlock_unlock(&thread->lock, false);
 	
 	if (do_wakeup)
-		thread_ready(t);
-
+		thread_ready(thread);
+	
 out:
-	spinlock_unlock(&threads_lock);
+	irq_spinlock_unlock(&threads_lock, false);
 }
 
@@ -125,52 +130,54 @@
  * If the thread is not found sleeping, no action is taken.
  *
- * @param t		Thread to be interrupted.
- */
-void waitq_interrupt_sleep(thread_t *t)
-{
+ * @param thread Thread to be interrupted.
+ *
+ */
+void waitq_interrupt_sleep(thread_t *thread)
+{
+	bool do_wakeup = false;
+	DEADLOCK_PROBE_INIT(p_wqlock);
+	
+	irq_spinlock_lock(&threads_lock, true);
+	if (!thread_exists(thread))
+		goto out;
+	
+grab_locks:
+	irq_spinlock_lock(&thread->lock, false);
+	
 	waitq_t *wq;
-	bool do_wakeup = false;
-	ipl_t ipl;
-	DEADLOCK_PROBE_INIT(p_wqlock);
-
-	ipl = interrupts_disable();
-	spinlock_lock(&threads_lock);
-	if (!thread_exists(t))
-		goto out;
-
-grab_locks:
-	spinlock_lock(&t->lock);
-	if ((wq = t->sleep_queue)) {		/* assignment */
-		if (!(t->sleep_interruptible)) {
+	if ((wq = thread->sleep_queue)) {  /* Assignment */
+		if (!(thread->sleep_interruptible)) {
 			/*
 			 * The sleep cannot be interrupted.
+			 *
 			 */
-			spinlock_unlock(&t->lock);
+			irq_spinlock_unlock(&thread->lock, false);
 			goto out;
 		}
-			
-		if (!spinlock_trylock(&wq->lock)) {
-			spinlock_unlock(&t->lock);
+		
+		if (!irq_spinlock_trylock(&wq->lock)) {
+			irq_spinlock_unlock(&thread->lock, false);
 			DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
-			goto grab_locks;	/* avoid deadlock */
-		}
-
-		if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
-			t->timeout_pending = false;
-
-		list_remove(&t->wq_link);
-		t->saved_context = t->sleep_interruption_context;
+			/* Avoid deadlock */
+			goto grab_locks;
+		}
+		
+		if ((thread->timeout_pending) &&
+		    (timeout_unregister(&thread->sleep_timeout)))
+			thread->timeout_pending = false;
+		
+		list_remove(&thread->wq_link);
+		thread->saved_context = thread->sleep_interruption_context;
 		do_wakeup = true;
-		t->sleep_queue = NULL;
-		spinlock_unlock(&wq->lock);
-	}
-	spinlock_unlock(&t->lock);
-
+		thread->sleep_queue = NULL;
+		irq_spinlock_unlock(&wq->lock, false);
+	}
+	irq_spinlock_unlock(&thread->lock, false);
+	
 	if (do_wakeup)
-		thread_ready(t);
-
+		thread_ready(thread);
+	
 out:
-	spinlock_unlock(&threads_lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&threads_lock, true);
 }
 
@@ -180,30 +187,31 @@
  * is sleeping interruptibly.
  *
- * @param wq		Pointer to wait queue.
+ * @param wq Pointer to wait queue.
+ *
  */
 void waitq_unsleep(waitq_t *wq)
 {
-	ipl_t ipl;
-
-	ipl = interrupts_disable();
-	spinlock_lock(&wq->lock);
-
+	irq_spinlock_lock(&wq->lock, true);
+	
 	if (!list_empty(&wq->head)) {
-		thread_t *t;
-		
-		t = list_get_instance(wq->head.next, thread_t, wq_link);
-		spinlock_lock(&t->lock);
-		ASSERT(t->sleep_interruptible);
-		if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
-			t->timeout_pending = false;
-		list_remove(&t->wq_link);
-		t->saved_context = t->sleep_interruption_context;
-		t->sleep_queue = NULL;
-		spinlock_unlock(&t->lock);
-		thread_ready(t);
-	}
-
-	spinlock_unlock(&wq->lock);
-	interrupts_restore(ipl);
+		thread_t *thread = list_get_instance(wq->head.next, thread_t, wq_link);
+		
+		irq_spinlock_lock(&thread->lock, false);
+		
+		ASSERT(thread->sleep_interruptible);
+		
+		if ((thread->timeout_pending) &&
+		    (timeout_unregister(&thread->sleep_timeout)))
+			thread->timeout_pending = false;
+		
+		list_remove(&thread->wq_link);
+		thread->saved_context = thread->sleep_interruption_context;
+		thread->sleep_queue = NULL;
+		
+		irq_spinlock_unlock(&thread->lock, false);
+		thread_ready(thread);
+	}
+	
+	irq_spinlock_unlock(&wq->lock, true);
 }
 
@@ -221,14 +229,14 @@
  * and all the *_timeout() functions use it.
  *
- * @param wq		Pointer to wait queue.
- * @param usec		Timeout in microseconds.
- * @param flags		Specify mode of the sleep.
+ * @param wq    Pointer to wait queue.
+ * @param usec  Timeout in microseconds.
+ * @param flags Specify mode of the sleep.
  *
  * The sleep can be interrupted only if the
  * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
- * 
+ *
  * If usec is greater than zero, regardless of the value of the
  * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either
- * timeout, interruption or wakeup comes. 
+ * timeout, interruption or wakeup comes.
  *
  * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags,
@@ -238,31 +246,22 @@
  * call will immediately return, reporting either success or failure.
  *
- * @return		Returns one of ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT,
- * 			ESYNCH_INTERRUPTED, ESYNCH_OK_ATOMIC and
- * 			ESYNCH_OK_BLOCKED.
- *
- * @li	ESYNCH_WOULD_BLOCK means that the sleep failed because at the time of
- *	the call there was no pending wakeup.
- *
- * @li	ESYNCH_TIMEOUT means that the sleep timed out.
- *
- * @li	ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread.
- *
- * @li	ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was
- * 	a pending wakeup at the time of the call. The caller was not put
- * 	asleep at all.
- * 
- * @li	ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was 
- * 	attempted.
- */
-int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, int flags)
-{
-	ipl_t ipl;
-	int rc;
-
+ * @return ESYNCH_WOULD_BLOCK, meaning that the sleep failed because at the
+ *         time of the call there was no pending wakeup.
+ * @return ESYNCH_TIMEOUT, meaning that the sleep timed out.
+ * @return ESYNCH_INTERRUPTED, meaning that somebody interrupted the sleeping
+ *         thread.
+ * @return ESYNCH_OK_ATOMIC, meaning that the sleep succeeded and that there
+ *         was a pending wakeup at the time of the call. The caller was not put
+ *         asleep at all.
+ * @return ESYNCH_OK_BLOCKED, meaning that the sleep succeeded; the full sleep
+ *         was attempted.
+ *
+ */
+int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags)
+{
 	ASSERT((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));
 	
-	ipl = waitq_sleep_prepare(wq);
-	rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
+	ipl_t ipl = waitq_sleep_prepare(wq);
+	int rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
 	waitq_sleep_finish(wq, rc, ipl);
 	return rc;
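
The reformatted @return list maps onto a caller such as the following
sketch; the wait queue and the 10 ms figure are illustrative, the constants
are exactly those documented above.

	switch (waitq_sleep_timeout(&wq, 10000, SYNCH_FLAGS_INTERRUPTIBLE)) {
	case ESYNCH_OK_ATOMIC:    /* pending wakeup consumed, never slept */
	case ESYNCH_OK_BLOCKED:   /* slept and was woken up */
		break;
	case ESYNCH_TIMEOUT:      /* 10 ms elapsed without a wakeup */
		break;
	case ESYNCH_INTERRUPTED:  /* e.g. waitq_interrupt_sleep() was called */
		break;
	case ESYNCH_WOULD_BLOCK:  /* only with SYNCH_FLAGS_NON_BLOCKING set */
		break;
	}
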
@@ -274,7 +273,8 @@
  * and interrupts disabled.
  *
- * @param wq		Wait queue.
- *
- * @return		Interrupt level as it existed on entry to this function.
+ * @param wq Wait queue.
+ *
+ * @return Interrupt level as it existed on entry to this function.
+ *
  */
 ipl_t waitq_sleep_prepare(waitq_t *wq)
@@ -284,6 +284,6 @@
 restart:
 	ipl = interrupts_disable();
-
-	if (THREAD) {	/* needed during system initiailzation */
+	
+	if (THREAD) {  /* Needed during system initialization */
 		/*
 		 * Busy waiting for a delayed timeout.
@@ -292,15 +292,18 @@
 		 * Simply, the thread is not allowed to go to sleep if
 		 * there are timeouts in progress.
+		 *
 		 */
-		spinlock_lock(&THREAD->lock);
+		irq_spinlock_lock(&THREAD->lock, false);
+		
 		if (THREAD->timeout_pending) {
-			spinlock_unlock(&THREAD->lock);
+			irq_spinlock_unlock(&THREAD->lock, false);
 			interrupts_restore(ipl);
 			goto restart;
 		}
-		spinlock_unlock(&THREAD->lock);
-	}
-													
-	spinlock_lock(&wq->lock);
+		
+		irq_spinlock_unlock(&THREAD->lock, false);
+	}
+	
+	irq_spinlock_lock(&wq->lock, false);
 	return ipl;
 }
@@ -312,7 +315,8 @@
  * lock is released.
  *
- * @param wq		Wait queue.
- * @param rc		Return code of waitq_sleep_timeout_unsafe().
- * @param ipl		Interrupt level returned by waitq_sleep_prepare().
+ * @param wq  Wait queue.
+ * @param rc  Return code of waitq_sleep_timeout_unsafe().
+ * @param ipl Interrupt level returned by waitq_sleep_prepare().
+ *
  */
 void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
@@ -321,9 +325,10 @@
 	case ESYNCH_WOULD_BLOCK:
 	case ESYNCH_OK_ATOMIC:
-		spinlock_unlock(&wq->lock);
+		irq_spinlock_unlock(&wq->lock, false);
 		break;
 	default:
 		break;
 	}
+	
 	interrupts_restore(ipl);
 }
@@ -335,20 +340,20 @@
  * and followed by a call to waitq_sleep_finish().
  *
- * @param wq		See waitq_sleep_timeout().
- * @param usec		See waitq_sleep_timeout().
- * @param flags		See waitq_sleep_timeout().
- *
- * @return		See waitq_sleep_timeout().
- */
-int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, int flags)
-{
-	/* checks whether to go to sleep at all */
+ * @param wq    See waitq_sleep_timeout().
+ * @param usec  See waitq_sleep_timeout().
+ * @param flags See waitq_sleep_timeout().
+ *
+ * @return See waitq_sleep_timeout().
+ *
+ */
+int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags)
+{
+	/* Checks whether to go to sleep at all */
 	if (wq->missed_wakeups) {
 		wq->missed_wakeups--;
 		return ESYNCH_OK_ATOMIC;
-	}
-	else {
+	} else {
 		if (PARAM_NON_BLOCKING(flags, usec)) {
-			/* return immediatelly instead of going to sleep */
+			/* Return immediately instead of going to sleep */
 			return ESYNCH_WOULD_BLOCK;
 		}
@@ -357,22 +362,24 @@
 	/*
 	 * Now we are firmly decided to go to sleep.
+	 *
 	 */
-	spinlock_lock(&THREAD->lock);
-
+	irq_spinlock_lock(&THREAD->lock, false);
+	
 	if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {
-
 		/*
 		 * If the thread was already interrupted,
 		 * don't go to sleep at all.
+		 *
 		 */
 		if (THREAD->interrupted) {
-			spinlock_unlock(&THREAD->lock);
-			spinlock_unlock(&wq->lock);
+			irq_spinlock_unlock(&THREAD->lock, false);
+			irq_spinlock_unlock(&wq->lock, false);
 			return ESYNCH_INTERRUPTED;
 		}
-
+		
 		/*
 		 * Set context that will be restored if the sleep
 		 * of this thread is ever interrupted.
+		 *
 		 */
 		THREAD->sleep_interruptible = true;
@@ -380,12 +387,10 @@
 			/* Short emulation of scheduler() return code. */
 			THREAD->last_cycle = get_cycle();
-			spinlock_unlock(&THREAD->lock);
+			irq_spinlock_unlock(&THREAD->lock, false);
 			return ESYNCH_INTERRUPTED;
 		}
-
-	} else {
+	} else
 		THREAD->sleep_interruptible = false;
-	}
-
+	
 	if (usec) {
 		/* We use the timeout variant. */
@@ -393,28 +398,29 @@
 			/* Short emulation of scheduler() return code. */
 			THREAD->last_cycle = get_cycle();
-			spinlock_unlock(&THREAD->lock);
+			irq_spinlock_unlock(&THREAD->lock, false);
 			return ESYNCH_TIMEOUT;
 		}
+		
 		THREAD->timeout_pending = true;
 		timeout_register(&THREAD->sleep_timeout, (uint64_t) usec,
 		    waitq_sleep_timed_out, THREAD);
 	}
-
+	
 	list_append(&THREAD->wq_link, &wq->head);
-
+	
 	/*
 	 * Suspend execution.
+	 *
 	 */
 	THREAD->state = Sleeping;
 	THREAD->sleep_queue = wq;
-
-	spinlock_unlock(&THREAD->lock);
-
+	
+	irq_spinlock_unlock(&THREAD->lock, false);
+	
 	/* wq->lock is released in scheduler_separated_stack() */
-	scheduler(); 
+	scheduler();
 	
 	return ESYNCH_OK_BLOCKED;
 }
-
 
 /** Wake up first thread sleeping in a wait queue
@@ -426,18 +432,13 @@
  * timeout.
  *
- * @param wq		Pointer to wait queue.
- * @param mode		Wakeup mode.
+ * @param wq   Pointer to wait queue.
+ * @param mode Wakeup mode.
+ *
  */
 void waitq_wakeup(waitq_t *wq, wakeup_mode_t mode)
 {
-	ipl_t ipl;
-
-	ipl = interrupts_disable();
-	spinlock_lock(&wq->lock);
-
+	irq_spinlock_lock(&wq->lock, true);
 	_waitq_wakeup_unsafe(wq, mode);
-
-	spinlock_unlock(&wq->lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&wq->lock, true);
 }
 
@@ -447,26 +448,27 @@
  * assumes wq->lock is already locked and interrupts are already disabled.
  *
- * @param wq		Pointer to wait queue.
- * @param mode		If mode is WAKEUP_FIRST, then the longest waiting
- * 			thread, if any, is woken up. If mode is WAKEUP_ALL, then
- *			all waiting threads, if any, are woken up. If there are
- *			no waiting threads to be woken up, the missed wakeup is
- *			recorded in the wait queue.
+ * @param wq   Pointer to wait queue.
+ * @param mode If mode is WAKEUP_FIRST, then the longest waiting
+ *             thread, if any, is woken up. If mode is WAKEUP_ALL, then
+ *             all waiting threads, if any, are woken up. If there are
+ *             no waiting threads to be woken up, the missed wakeup is
+ *             recorded in the wait queue.
+ *
  */
 void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode)
 {
-	thread_t *t;
 	size_t count = 0;
-
-loop:	
+	
+loop:
 	if (list_empty(&wq->head)) {
 		wq->missed_wakeups++;
-		if (count && mode == WAKEUP_ALL)
+		if ((count) && (mode == WAKEUP_ALL))
 			wq->missed_wakeups--;
+		
 		return;
 	}
-
+	
 	count++;
-	t = list_get_instance(wq->head.next, thread_t, wq_link);
+	thread_t *thread = list_get_instance(wq->head.next, thread_t, wq_link);
 	
 	/*
@@ -480,20 +482,23 @@
 	 * invariant must hold:
 	 *
-	 * t->sleep_queue != NULL <=> t sleeps in a wait queue
+	 * thread->sleep_queue != NULL <=> thread sleeps in a wait queue
 	 *
 	 * For an observer who locks the thread, the invariant
 	 * holds only when the lock is held prior to removing
 	 * it from the wait queue.
+	 *
 	 */
-	spinlock_lock(&t->lock);
-	list_remove(&t->wq_link);
-	
-	if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
-		t->timeout_pending = false;
-	t->sleep_queue = NULL;
-	spinlock_unlock(&t->lock);
-
-	thread_ready(t);
-
+	irq_spinlock_lock(&thread->lock, false);
+	list_remove(&thread->wq_link);
+	
+	if ((thread->timeout_pending) &&
+	    (timeout_unregister(&thread->sleep_timeout)))
+		thread->timeout_pending = false;
+	
+	thread->sleep_queue = NULL;
+	irq_spinlock_unlock(&thread->lock, false);
+	
+	thread_ready(thread);
+	
 	if (mode == WAKEUP_ALL)
 		goto loop;
