Index: kernel/arch/ia32/src/smp/smp.c
===================================================================
--- kernel/arch/ia32/src/smp/smp.c	(revision 7f11dc6ba31c85d4b1daa9c4ebd4d6f99082a87c)
+++ kernel/arch/ia32/src/smp/smp.c	(revision 6ef9d9a9678ec103514bfbca545cf47e7f817419)
@@ -39,4 +39,5 @@
 #include <arch/boot/boot.h>
 #include <assert.h>
+#include <errno.h>
 #include <genarch/acpi/acpi.h>
 #include <genarch/acpi/madt.h>
@@ -178,5 +179,5 @@
 			 */
 			if (waitq_sleep_timeout(&ap_completion_wq, 1000000,
-			    SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT) {
+			    SYNCH_FLAGS_NONE, NULL) == ETIMEOUT) {
 				log(LF_ARCH, LVL_NOTE, "%s: waiting for cpu%u "
 				    "(APIC ID = %d) timed out", __FUNCTION__,
Index: kernel/arch/sparc64/src/smp/sun4u/smp.c
===================================================================
--- kernel/arch/sparc64/src/smp/sun4u/smp.c	(revision 7f11dc6ba31c85d4b1daa9c4ebd4d6f99082a87c)
+++ kernel/arch/sparc64/src/smp/sun4u/smp.c	(revision 6ef9d9a9678ec103514bfbca545cf47e7f817419)
@@ -106,6 +106,6 @@
 	waking_up_mid = mid;
 		
-	if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_FLAGS_NONE) ==
-	    ESYNCH_TIMEOUT)
+	if (waitq_sleep_timeout(&ap_completion_wq, 1000000,
+	    SYNCH_FLAGS_NONE, NULL) == ETIMEOUT)
 		log(LF_ARCH, LVL_NOTE, "%s: waiting for processor (mid = %" PRIu32
 		    ") timed out", __func__, mid);
Index: kernel/arch/sparc64/src/smp/sun4v/smp.c
===================================================================
--- kernel/arch/sparc64/src/smp/sun4v/smp.c	(revision 7f11dc6ba31c85d4b1daa9c4ebd4d6f99082a87c)
+++ kernel/arch/sparc64/src/smp/sun4v/smp.c	(revision 6ef9d9a9678ec103514bfbca545cf47e7f817419)
@@ -373,6 +373,6 @@
 #endif
 	
-	if (waitq_sleep_timeout(&ap_completion_wq, 10000000, SYNCH_FLAGS_NONE) ==
-	    ESYNCH_TIMEOUT)
+	if (waitq_sleep_timeout(&ap_completion_wq, 10000000,
+	    SYNCH_FLAGS_NONE, NULL) == ETIMEOUT)
 		printf("%s: waiting for processor (cpuid = %" PRIu64 ") timed out\n",
 		    __func__, cpuid);
Index: kernel/generic/include/synch/semaphore.h
===================================================================
--- kernel/generic/include/synch/semaphore.h	(revision 7f11dc6ba31c85d4b1daa9c4ebd4d6f99082a87c)
+++ kernel/generic/include/synch/semaphore.h	(revision 6ef9d9a9678ec103514bfbca545cf47e7f817419)
@@ -36,4 +36,5 @@
 #define KERN_SEMAPHORE_H_
 
+#include <errno.h>
 #include <stdint.h>
 #include <synch/waitq.h>
@@ -54,6 +55,6 @@
 
 #define semaphore_down_interruptable(s) \
-	(ESYNCH_INTERRUPTED != _semaphore_down_timeout((s), SYNCH_NO_TIMEOUT, \
-		SYNCH_FLAGS_INTERRUPTIBLE))
+	(_semaphore_down_timeout((s), SYNCH_NO_TIMEOUT, \
+		SYNCH_FLAGS_INTERRUPTIBLE) != EINTR)
 
 extern void semaphore_initialize(semaphore_t *, int);
Index: kernel/generic/include/synch/waitq.h
===================================================================
--- kernel/generic/include/synch/waitq.h	(revision 7f11dc6ba31c85d4b1daa9c4ebd4d6f99082a87c)
+++ kernel/generic/include/synch/waitq.h	(revision 6ef9d9a9678ec103514bfbca545cf47e7f817419)
@@ -67,13 +67,13 @@
 
 #define waitq_sleep(wq) \
-	waitq_sleep_timeout((wq), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
+	waitq_sleep_timeout((wq), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE, NULL)
 
 struct thread;
 
 extern void waitq_initialize(waitq_t *);
-extern int waitq_sleep_timeout(waitq_t *, uint32_t, unsigned int);
+extern int waitq_sleep_timeout(waitq_t *, uint32_t, unsigned int, bool *);
 extern ipl_t waitq_sleep_prepare(waitq_t *);
-extern int waitq_sleep_timeout_unsafe(waitq_t *, uint32_t, unsigned int);
-extern void waitq_sleep_finish(waitq_t *, int, ipl_t);
+extern int waitq_sleep_timeout_unsafe(waitq_t *, uint32_t, unsigned int, bool *);
+extern void waitq_sleep_finish(waitq_t *, bool, ipl_t);
 extern void waitq_wakeup(waitq_t *, wakeup_mode_t);
 extern void _waitq_wakeup_unsafe(waitq_t *, wakeup_mode_t);
Index: kernel/generic/src/ipc/ipc.c
===================================================================
--- kernel/generic/src/ipc/ipc.c	(revision 7f11dc6ba31c85d4b1daa9c4ebd4d6f99082a87c)
+++ kernel/generic/src/ipc/ipc.c	(revision 6ef9d9a9678ec103514bfbca545cf47e7f817419)
@@ -538,6 +538,6 @@
 	
 restart:
-	rc = waitq_sleep_timeout(&box->wq, usec, flags);
-	if (SYNCH_FAILED(rc))
+	rc = waitq_sleep_timeout(&box->wq, usec, flags, NULL);
+	if (rc != EOK)
 		return NULL;
 	
@@ -638,5 +638,5 @@
 		phone = list_get_instance(list_first(&box->connected_phones),
 		    phone_t, link);
-		if (SYNCH_FAILED(mutex_trylock(&phone->lock))) {
+		if (mutex_trylock(&phone->lock) != EOK) {
 			irq_spinlock_unlock(&box->lock, true);
 			DEADLOCK_PROBE(p_phonelck, DEADLOCK_THRESHOLD);
Index: kernel/generic/src/proc/thread.c
===================================================================
--- kernel/generic/src/proc/thread.c	(revision 7f11dc6ba31c85d4b1daa9c4ebd4d6f99082a87c)
+++ kernel/generic/src/proc/thread.c	(revision 6ef9d9a9678ec103514bfbca545cf47e7f817419)
@@ -548,5 +548,5 @@
  * 
  * Threads that are blocked waiting for a synchronization primitive 
- * are woken up with a return code of ESYNCH_INTERRUPTED if the
+ * are woken up with a return code of EINTR if the
  * blocking call was interruptable. See waitq_sleep_timeout().
  * 
@@ -653,5 +653,5 @@
 	irq_spinlock_unlock(&thread->lock, true);
 	
-	return waitq_sleep_timeout(&thread->join_wq, usec, flags);
+	return waitq_sleep_timeout(&thread->join_wq, usec, flags, NULL);
 }
 
@@ -700,5 +700,5 @@
 	waitq_initialize(&wq);
 	
-	(void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
+	(void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
 }
 
Index: kernel/generic/src/synch/condvar.c
===================================================================
--- kernel/generic/src/synch/condvar.c	(revision 7f11dc6ba31c85d4b1daa9c4ebd4d6f99082a87c)
+++ kernel/generic/src/synch/condvar.c	(revision 6ef9d9a9678ec103514bfbca545cf47e7f817419)
@@ -80,5 +80,5 @@
  * For exact description of meaning of possible combinations of usec and flags,
  * see comment for waitq_sleep_timeout().  Note that when
- * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
+ * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
  * returned.
  *
@@ -89,4 +89,5 @@
 	int rc;
 	ipl_t ipl;
+	bool blocked;
 
 	ipl = waitq_sleep_prepare(&cv->wq);
@@ -95,7 +96,8 @@
 
 	cv->wq.missed_wakeups = 0;	/* Enforce blocking. */
-	rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
+	rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, &blocked);
+	assert(blocked || rc != EOK);
 
-	waitq_sleep_finish(&cv->wq, rc, ipl);
+	waitq_sleep_finish(&cv->wq, blocked, ipl);
 	/* Lock only after releasing the waitq to avoid a possible deadlock. */
 	mutex_lock(mtx);
@@ -117,5 +119,5 @@
  * For exact description of meaning of possible combinations of usec and flags,
  * see comment for waitq_sleep_timeout().  Note that when
- * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
+ * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
  * returned.
  *
@@ -127,5 +129,6 @@
 	int rc;
 	ipl_t ipl;
-	
+	bool blocked;
+
 	ipl = waitq_sleep_prepare(&cv->wq);
 
@@ -134,7 +137,8 @@
 
 	cv->wq.missed_wakeups = 0;	/* Enforce blocking. */
-	rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
+	rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, &blocked);
+	assert(blocked || rc != EOK);
 
-	waitq_sleep_finish(&cv->wq, rc, ipl);
+	waitq_sleep_finish(&cv->wq, blocked, ipl);
 	/* Lock only after releasing the waitq to avoid a possible deadlock. */
 	spinlock_lock(lock);
@@ -152,5 +156,5 @@
  * For exact description of meaning of possible combinations of usec and flags,
  * see comment for waitq_sleep_timeout().  Note that when
- * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
+ * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
  * returned.
  *
Index: kernel/generic/src/synch/futex.c
===================================================================
--- kernel/generic/src/synch/futex.c	(revision 7f11dc6ba31c85d4b1daa9c4ebd4d6f99082a87c)
+++ kernel/generic/src/synch/futex.c	(revision 6ef9d9a9678ec103514bfbca545cf47e7f817419)
@@ -395,6 +395,6 @@
  *
  * @return		If there is no physical mapping for uaddr ENOENT is
- *			returned. Otherwise returns a wait result as defined in
- *			synch.h.
+ *			returned. Otherwise returns the return value of
+ *                      waitq_sleep_timeout().
  */
 sysarg_t sys_futex_sleep(uintptr_t uaddr)
@@ -409,5 +409,6 @@
 #endif
 
-	int rc = waitq_sleep_timeout(&futex->wq, 0, SYNCH_FLAGS_INTERRUPTIBLE); 
+	int rc = waitq_sleep_timeout(
+	    &futex->wq, 0, SYNCH_FLAGS_INTERRUPTIBLE, NULL);
 
 #ifdef CONFIG_UDEBUG
@@ -430,5 +431,5 @@
 	if (futex) {
 		waitq_wakeup(&futex->wq, WAKEUP_FIRST);
-		return 0;
+		return EOK;
 	} else {
 		return (sysarg_t) ENOENT;
Index: kernel/generic/src/synch/mutex.c
===================================================================
--- kernel/generic/src/synch/mutex.c	(revision 7f11dc6ba31c85d4b1daa9c4ebd4d6f99082a87c)
+++ kernel/generic/src/synch/mutex.c	(revision 6ef9d9a9678ec103514bfbca545cf47e7f817419)
@@ -37,4 +37,5 @@
 
 #include <assert.h>
+#include <errno.h>
 #include <synch/mutex.h>
 #include <synch/semaphore.h>
@@ -95,8 +96,8 @@
 		if (mtx->owner == THREAD) {
 			mtx->nesting++;
-			return ESYNCH_OK_ATOMIC;
+			return EOK;
 		} else {
 			rc = _semaphore_down_timeout(&mtx->sem, usec, flags);
-			if (SYNCH_OK(rc)) {
+			if (rc == EOK) {
 				mtx->owner = THREAD;
 				mtx->nesting = 1;
@@ -119,6 +120,5 @@
 			}
 			rc = semaphore_trydown(&mtx->sem);
-		} while (SYNCH_FAILED(rc) &&
-		    !(flags & SYNCH_FLAGS_NON_BLOCKING));
+		} while (rc != EOK && !(flags & SYNCH_FLAGS_NON_BLOCKING));
 		if (deadlock_reported)
 			printf("cpu%u: not deadlocked\n", CPU->id);
Index: kernel/generic/src/synch/rcu.c
===================================================================
--- kernel/generic/src/synch/rcu.c	(revision 7f11dc6ba31c85d4b1daa9c4ebd4d6f99082a87c)
+++ kernel/generic/src/synch/rcu.c	(revision 6ef9d9a9678ec103514bfbca545cf47e7f817419)
@@ -960,5 +960,5 @@
 				SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);
 			
-			if (ret == ESYNCH_INTERRUPTED) {
+			if (ret == EINTR) {
 				spinlock_unlock(&rcu.gp_lock);
 				return false;			
@@ -1018,5 +1018,5 @@
 
 		/* rcu.expedite_now was signaled. */
-		if (ret == ESYNCH_OK_BLOCKED) {
+		if (ret == EOK) {
 			*expedite = true;
 		}
@@ -1024,5 +1024,5 @@
 		spinlock_unlock(&rcu.gp_lock);
 
-		return (ret != ESYNCH_INTERRUPTED);
+		return (ret != EINTR);
 	}
 }
@@ -1271,5 +1271,5 @@
 		int ret = _condvar_wait_timeout_spinlock(&rcu.gp_ended, &rcu.gp_lock, 
 			SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);
-		interrupted = (ret == ESYNCH_INTERRUPTED);
+		interrupted = (ret == EINTR);
 	}
 	
@@ -1332,5 +1332,5 @@
 			&rcu.gp_lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);
 		
-		interrupted = (ret == ESYNCH_INTERRUPTED);
+		interrupted = (ret == EINTR);
 	}
 	
@@ -1406,5 +1406,5 @@
 	spinlock_unlock(&rcu.gp_lock);
 	
-	return (ret != ESYNCH_INTERRUPTED);
+	return (ret != EINTR);
 }
 
Index: kernel/generic/src/synch/semaphore.c
===================================================================
--- kernel/generic/src/synch/semaphore.c	(revision 7f11dc6ba31c85d4b1daa9c4ebd4d6f99082a87c)
+++ kernel/generic/src/synch/semaphore.c	(revision 6ef9d9a9678ec103514bfbca545cf47e7f817419)
@@ -73,5 +73,5 @@
 int _semaphore_down_timeout(semaphore_t *sem, uint32_t usec, unsigned int flags)
 {
-	return waitq_sleep_timeout(&sem->wq, usec, flags);
+	return waitq_sleep_timeout(&sem->wq, usec, flags, NULL);
 }
 
Index: kernel/generic/src/synch/waitq.c
===================================================================
--- kernel/generic/src/synch/waitq.c	(revision 7f11dc6ba31c85d4b1daa9c4ebd4d6f99082a87c)
+++ kernel/generic/src/synch/waitq.c	(revision 6ef9d9a9678ec103514bfbca545cf47e7f817419)
@@ -45,4 +45,5 @@
 
 #include <assert.h>
+#include <errno.h>
 #include <synch/waitq.h>
 #include <synch/spinlock.h>
@@ -238,4 +239,8 @@
  * @param flags Specify mode of the sleep.
  *
+ * @param[out] blocked  On return, regardless of the return code,
+ *                      `*blocked` is set to `true` iff the thread went to
+ *                      sleep.
+ *
  * The sleep can be interrupted only if the
  * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
@@ -251,23 +256,28 @@
  * call will immediately return, reporting either success or failure.
  *
- * @return ESYNCH_WOULD_BLOCK, meaning that the sleep failed because at the
- *         time of the call there was no pending wakeup
- * @return ESYNCH_TIMEOUT, meaning that the sleep timed out.
- * @return ESYNCH_INTERRUPTED, meaning that somebody interrupted the sleeping
- *         thread.
- * @return ESYNCH_OK_ATOMIC, meaning that the sleep succeeded and that there
- *         was a pending wakeup at the time of the call. The caller was not put
- *         asleep at all.
- * @return ESYNCH_OK_BLOCKED, meaning that the sleep succeeded; the full sleep
- *         was attempted.
- *
- */
-int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags)
+ * @return EAGAIN, meaning that the sleep failed because it was requested
+ *                 as SYNCH_FLAGS_NON_BLOCKING, but there was no pending wakeup.
+ * @return ETIMEOUT, meaning that the sleep timed out.
+ * @return EINTR, meaning that somebody interrupted the sleeping
+ *         thread. Check the value of `*blocked` to see if the thread slept,
+ *         or if a pending interrupt forced it to return immediately.
+ * @return EOK, meaning that none of the above conditions occurred, and the
+ *              thread was woken up successfully by `waitq_wakeup()`. Check
+ *              the value of `*blocked` to see if the thread slept or if
+ *              the wakeup was already pending.
+ *
+ */
+int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked)
 {
 	assert((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));
 	
 	ipl_t ipl = waitq_sleep_prepare(wq);
-	int rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
-	waitq_sleep_finish(wq, rc, ipl);
+	bool nblocked;
+	int rc = waitq_sleep_timeout_unsafe(wq, usec, flags, &nblocked);
+	waitq_sleep_finish(wq, nblocked, ipl);
+
+	if (blocked != NULL) {
+		*blocked = nblocked;
+	}
 	return rc;
 }
@@ -320,32 +330,27 @@
  * lock is released.
  *
- * @param wq  Wait queue.
- * @param rc  Return code of waitq_sleep_timeout_unsafe().
- * @param ipl Interrupt level returned by waitq_sleep_prepare().
- *
- */
-void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
-{
-	switch (rc) {
-	case ESYNCH_WOULD_BLOCK:
-	case ESYNCH_OK_ATOMIC:
-		irq_spinlock_unlock(&wq->lock, false);
-		break;
-	default:
-		/* 
+ * @param wq       Wait queue.
+ * @param blocked  Out parameter of waitq_sleep_timeout_unsafe().
+ * @param ipl      Interrupt level returned by waitq_sleep_prepare().
+ *
+ */
+void waitq_sleep_finish(waitq_t *wq, bool blocked, ipl_t ipl)
+{
+	if (blocked) {
+		/*
 		 * Wait for a waitq_wakeup() or waitq_unsleep() to complete
 		 * before returning from waitq_sleep() to the caller. Otherwise
 		 * the caller might expect that the wait queue is no longer used 
 		 * and deallocate it (although the wakeup on a another cpu has 
-		 * not yet completed and is using the wait queue). 
-		 * 
-		 * Note that we have to do this for ESYNCH_OK_BLOCKED and
-		 * ESYNCH_INTERRUPTED, but not necessarily for ESYNCH_TIMEOUT
-		 * where the timeout handler stops using the waitq before waking 
-		 * us up. To be on the safe side, ensure the waitq is not in use 
-		 * anymore in this case as well.
+		 * not yet completed and is using the wait queue).
+		 *
+		 * Note that we have to do this for EOK and EINTR, but not
+		 * necessarily for ETIMEOUT where the timeout handler stops
+		 * using the waitq before waking us up. To be on the safe side,
+		 * ensure the waitq is not in use anymore in this case as well.
 		 */
 		waitq_complete_wakeup(wq);
-		break;
+	} else {
+		irq_spinlock_unlock(&wq->lock, false);
 	}
 	
@@ -363,17 +368,21 @@
  * @param flags See waitq_sleep_timeout().
  *
+ * @param[out] blocked  See waitq_sleep_timeout().
+ *
  * @return See waitq_sleep_timeout().
  *
  */
-int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags)
-{
+int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked)
+{
+	*blocked = false;
+
 	/* Checks whether to go to sleep at all */
 	if (wq->missed_wakeups) {
 		wq->missed_wakeups--;
-		return ESYNCH_OK_ATOMIC;
+		return EOK;
 	} else {
 		if (PARAM_NON_BLOCKING(flags, usec)) {
 			/* Return immediately instead of going to sleep */
-			return ESYNCH_WOULD_BLOCK;
+			return EAGAIN;
 		}
 	}
@@ -392,6 +401,5 @@
 		if (THREAD->interrupted) {
 			irq_spinlock_unlock(&THREAD->lock, false);
-			irq_spinlock_unlock(&wq->lock, false);
-			return ESYNCH_INTERRUPTED;
+			return EINTR;
 		}
 		
@@ -405,5 +413,5 @@
 			THREAD->last_cycle = get_cycle();
 			irq_spinlock_unlock(&THREAD->lock, false);
-			return ESYNCH_INTERRUPTED;
+			return EINTR;
 		}
 	} else
@@ -416,5 +424,5 @@
 			THREAD->last_cycle = get_cycle();
 			irq_spinlock_unlock(&THREAD->lock, false);
-			return ESYNCH_TIMEOUT;
+			return ETIMEOUT;
 		}
 		
@@ -433,4 +441,9 @@
 	THREAD->sleep_queue = wq;
 	
+	/*
+	 * Must be before entry to scheduler, because there are multiple
+	 * return vectors.
+	 */
+	*blocked = true;
 	irq_spinlock_unlock(&THREAD->lock, false);
 	
@@ -438,5 +451,5 @@
 	scheduler();
 	
-	return ESYNCH_OK_BLOCKED;
+	return EOK;
 }
 
Index: kernel/generic/src/synch/workqueue.c
===================================================================
--- kernel/generic/src/synch/workqueue.c	(revision 7f11dc6ba31c85d4b1daa9c4ebd4d6f99082a87c)
+++ kernel/generic/src/synch/workqueue.c	(revision 6ef9d9a9678ec103514bfbca545cf47e7f817419)
@@ -38,4 +38,5 @@
 
 #include <assert.h>
+#include <errno.h>
 #include <synch/workqueue.h>
 #include <synch/spinlock.h>
@@ -897,5 +898,5 @@
 			&info->lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);
 		
-		stop = (ret == ESYNCH_INTERRUPTED);
+		stop = (ret == EINTR);
 	}
 	
Index: kernel/generic/src/sysinfo/stats.c
===================================================================
--- kernel/generic/src/sysinfo/stats.c	(revision 7f11dc6ba31c85d4b1daa9c4ebd4d6f99082a87c)
+++ kernel/generic/src/sysinfo/stats.c	(revision 6ef9d9a9678ec103514bfbca545cf47e7f817419)
@@ -157,5 +157,5 @@
 	 */
 	
-	if (SYNCH_FAILED(mutex_trylock(&as->lock)))
+	if (mutex_trylock(&as->lock) != EOK)
 		return 0;
 	
@@ -169,5 +169,5 @@
 			as_area_t *area = node->value[i];
 			
-			if (SYNCH_FAILED(mutex_trylock(&area->lock)))
+			if (mutex_trylock(&area->lock) != EOK)
 				continue;
 			
@@ -198,5 +198,5 @@
 	 */
 	
-	if (SYNCH_FAILED(mutex_trylock(&as->lock)))
+	if (mutex_trylock(&as->lock) != EOK)
 		return 0;
 	
@@ -209,5 +209,5 @@
 			as_area_t *area = node->value[i];
 			
-			if (SYNCH_FAILED(mutex_trylock(&area->lock)))
+			if (mutex_trylock(&area->lock) != EOK)
 				continue;
 			
Index: kernel/generic/src/udebug/udebug.c
===================================================================
--- kernel/generic/src/udebug/udebug.c	(revision 7f11dc6ba31c85d4b1daa9c4ebd4d6f99082a87c)
+++ kernel/generic/src/udebug/udebug.c	(revision 6ef9d9a9678ec103514bfbca545cf47e7f817419)
@@ -98,7 +98,7 @@
 	
 	wq->missed_wakeups = 0;  /* Enforce blocking. */
-	int rc = waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
-	
-	waitq_sleep_finish(wq, rc, ipl);
+	bool blocked;
+	(void) waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE, &blocked);
+	waitq_sleep_finish(wq, blocked, ipl);
 }
 
Index: kernel/test/synch/rcu1.c
===================================================================
--- kernel/test/synch/rcu1.c	(revision 7f11dc6ba31c85d4b1daa9c4ebd4d6f99082a87c)
+++ kernel/test/synch/rcu1.c	(revision 6ef9d9a9678ec103514bfbca545cf47e7f817419)
@@ -114,7 +114,7 @@
 			do {
 				int ret = thread_join_timeout(thread[i], 5 * 1000 * 1000, 0);
-				joined = (ret != ESYNCH_TIMEOUT);
+				joined = (ret != ETIMEOUT);
 				
-				if (ret == ESYNCH_OK_BLOCKED) {
+				if (ret == EOK) {
 					TPRINTF("%zu threads remain\n", thread_cnt - i - 1);
 				}
Index: kernel/test/synch/semaphore2.c
===================================================================
--- kernel/test/synch/semaphore2.c	(revision 7f11dc6ba31c85d4b1daa9c4ebd4d6f99082a87c)
+++ kernel/test/synch/semaphore2.c	(revision 6ef9d9a9678ec103514bfbca545cf47e7f817419)
@@ -70,5 +70,5 @@
 	TPRINTF("cpu%u, tid %" PRIu64 " down+ (%d)\n", CPU->id, THREAD->tid, to);
 	rc = semaphore_down_timeout(&sem, to);
-	if (SYNCH_FAILED(rc)) {
+	if (rc != EOK) {
 		TPRINTF("cpu%u, tid %" PRIu64 " down!\n", CPU->id, THREAD->tid);
 		return;
