Index: kernel/generic/src/synch/futex.c
===================================================================
--- kernel/generic/src/synch/futex.c	(revision 38dc82d20695b43a799be28d4fd2b2cd2c5bb785)
+++ kernel/generic/src/synch/futex.c	(revision 58fa3e668549d763497f0c6b6a9059794bb2ff4a)
@@ -61,4 +61,5 @@
  */
 
+#include <assert.h>
 #include <synch/futex.h>
 #include <synch/mutex.h>
@@ -241,6 +242,6 @@
 static void futex_add_ref(futex_t *futex)
 {
-	ASSERT(spinlock_locked(&futex_ht_lock));
-	ASSERT(0 < futex->refcount);
+	assert(spinlock_locked(&futex_ht_lock));
+	assert(0 < futex->refcount);
 	++futex->refcount;
 }
@@ -249,6 +250,6 @@
 static void futex_release_ref(futex_t *futex)
 {
-	ASSERT(spinlock_locked(&futex_ht_lock));
-	ASSERT(0 < futex->refcount);
+	assert(spinlock_locked(&futex_ht_lock));
+	assert(0 < futex->refcount);
 	
 	--futex->refcount;
@@ -459,5 +460,5 @@
 	futex_t *futex;
 
-	ASSERT(keys == 1);
+	assert(keys == 1);
 
 	futex = hash_table_get_instance(item, futex_t, ht_link);
Index: kernel/generic/src/synch/mutex.c
===================================================================
--- kernel/generic/src/synch/mutex.c	(revision 38dc82d20695b43a799be28d4fd2b2cd2c5bb785)
+++ kernel/generic/src/synch/mutex.c	(revision 58fa3e668549d763497f0c6b6a9059794bb2ff4a)
@@ -36,7 +36,7 @@
  */
 
+#include <assert.h>
 #include <synch/mutex.h>
 #include <synch/semaphore.h>
-#include <debug.h>
 #include <arch.h>
 #include <stacktrace.h>
@@ -88,7 +88,7 @@
 		rc = _semaphore_down_timeout(&mtx->sem, usec, flags);
 	} else {
-		ASSERT((mtx->type == MUTEX_ACTIVE) || (!THREAD));
-		ASSERT(usec == SYNCH_NO_TIMEOUT);
-		ASSERT(!(flags & SYNCH_FLAGS_INTERRUPTIBLE));
+		assert((mtx->type == MUTEX_ACTIVE) || (!THREAD));
+		assert(usec == SYNCH_NO_TIMEOUT);
+		assert(!(flags & SYNCH_FLAGS_INTERRUPTIBLE));
 		
 		unsigned int cnt = 0;
Index: kernel/generic/src/synch/rcu.c
===================================================================
--- kernel/generic/src/synch/rcu.c	(revision 38dc82d20695b43a799be28d4fd2b2cd2c5bb785)
+++ kernel/generic/src/synch/rcu.c	(revision 58fa3e668549d763497f0c6b6a9059794bb2ff4a)
@@ -123,5 +123,6 @@
  * 
  */
- 
+
+#include <assert.h>
 #include <synch/rcu.h>
 #include <synch/condvar.h>
@@ -404,5 +405,5 @@
 	/* Stop and wait for reclaimers. */
 	for (unsigned int cpu_id = 0; cpu_id < config.cpu_active; ++cpu_id) {
-		ASSERT(cpus[cpu_id].rcu.reclaimer_thr != NULL);
+		assert(cpus[cpu_id].rcu.reclaimer_thr != NULL);
 	
 		if (cpus[cpu_id].rcu.reclaimer_thr) {
@@ -487,5 +488,5 @@
 static void read_unlock_impl(size_t *pnesting_cnt)
 {
-	ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
+	assert(PREEMPTION_DISABLED || interrupts_disabled());
 	
 	if (0 == --(*pnesting_cnt)) {
@@ -509,5 +510,5 @@
 void _rcu_signal_read_unlock(void)
 {
-	ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
+	assert(PREEMPTION_DISABLED || interrupts_disabled());
 	
 	/*
@@ -531,5 +532,5 @@
 	 */
 	if (THREAD && local_atomic_exchange(&THREAD->rcu.was_preempted, false)) {
-		ASSERT(link_used(&THREAD->rcu.preempt_link));
+		assert(link_used(&THREAD->rcu.preempt_link));
 
 		rm_preempted_reader();
@@ -563,5 +564,5 @@
 {
 	/* Calling from a reader section will deadlock. */
-	ASSERT(!rcu_read_locked());
+	assert(!rcu_read_locked());
 	
 	synch_item_t completion; 
@@ -576,5 +577,5 @@
 {
 	synch_item_t *completion = member_to_inst(rcu_item, synch_item_t, rcu_item);
-	ASSERT(completion);
+	assert(completion);
 	waitq_wakeup(&completion->wq, WAKEUP_FIRST);
 }
@@ -615,5 +616,5 @@
 static void add_barrier_cb(void *arg)
 {
-	ASSERT(interrupts_disabled() || PREEMPTION_DISABLED);
+	assert(interrupts_disabled() || PREEMPTION_DISABLED);
 	atomic_inc(&rcu.barrier_wait_cnt);
 	rcu_call(&CPU->rcu.barrier_item, barrier_complete);
@@ -657,5 +658,5 @@
 	rcu_func_t func)
 {
-	ASSERT(rcu_item);
+	assert(rcu_item);
 	
 	rcu_item->func = func;
@@ -689,5 +690,5 @@
 static bool cur_cbs_empty(void)
 {
-	ASSERT(THREAD && THREAD->wired);
+	assert(THREAD && THREAD->wired);
 	return NULL == CPU->rcu.cur_cbs;
 }
@@ -695,5 +696,5 @@
 static bool next_cbs_empty(void)
 {
-	ASSERT(THREAD && THREAD->wired);
+	assert(THREAD && THREAD->wired);
 	return NULL == CPU->rcu.next_cbs;
 }
@@ -702,5 +703,5 @@
 static bool arriving_cbs_empty(void)
 {
-	ASSERT(THREAD && THREAD->wired);
+	assert(THREAD && THREAD->wired);
 	/* 
 	 * Accessing with interrupts enabled may at worst lead to 
@@ -719,6 +720,6 @@
 static void reclaimer(void *arg)
 {
-	ASSERT(THREAD && THREAD->wired);
-	ASSERT(THREAD == CPU->rcu.reclaimer_thr);
+	assert(THREAD && THREAD->wired);
+	assert(THREAD == CPU->rcu.reclaimer_thr);
 
 	rcu_gp_t last_compl_gp = 0;
@@ -726,5 +727,5 @@
 	
 	while (ok && wait_for_pending_cbs()) {
-		ASSERT(CPU->rcu.reclaimer_thr == THREAD);
+		assert(CPU->rcu.reclaimer_thr == THREAD);
 		
 		exec_completed_cbs(last_compl_gp);
@@ -765,5 +766,5 @@
 	/* Both next_cbs and cur_cbs GP elapsed. */
 	if (CPU->rcu.next_cbs_gp <= last_completed_gp) {
-		ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
+		assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
 		
 		size_t exec_cnt = CPU->rcu.cur_cbs_cnt + CPU->rcu.next_cbs_cnt;
@@ -864,5 +865,5 @@
 	 */
 	if (CPU->rcu.next_cbs) {
-		ASSERT(CPU->rcu.parriving_cbs_tail != &CPU->rcu.arriving_cbs);
+		assert(CPU->rcu.parriving_cbs_tail != &CPU->rcu.arriving_cbs);
 		
 		CPU->rcu.arriving_cbs = NULL;
@@ -913,5 +914,5 @@
 	}
 	
-	ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
+	assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
 	
 	return expedite;	
@@ -933,6 +934,6 @@
 	spinlock_lock(&rcu.gp_lock);
 
-	ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
-	ASSERT(CPU->rcu.cur_cbs_gp <= _rcu_cur_gp + 1);
+	assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
+	assert(CPU->rcu.cur_cbs_gp <= _rcu_cur_gp + 1);
 	
 	while (rcu.completed_gp < CPU->rcu.cur_cbs_gp) {
@@ -1029,5 +1030,5 @@
 static void sample_local_cpu(void *arg)
 {
-	ASSERT(interrupts_disabled());
+	assert(interrupts_disabled());
 	cpu_mask_t *reader_cpus = (cpu_mask_t *)arg;
 	
@@ -1054,5 +1055,5 @@
 void rcu_after_thread_ran(void)
 {
-	ASSERT(interrupts_disabled());
+	assert(interrupts_disabled());
 
 	/* 
@@ -1116,5 +1117,5 @@
 void rcu_before_thread_runs(void)
 {
-	ASSERT(!rcu_read_locked());
+	assert(!rcu_read_locked());
 	
 	/* Load the thread's saved nesting count from before it was preempted. */
@@ -1129,5 +1130,5 @@
 void rcu_thread_exiting(void)
 {
-	ASSERT(THE->rcu_nesting == 0);
+	assert(THE->rcu_nesting == 0);
 	
 	/* 
@@ -1157,5 +1158,5 @@
 void _rcu_preempted_unlock(void)
 {
-	ASSERT(0 == THE->rcu_nesting || RCU_WAS_PREEMPTED == THE->rcu_nesting);
+	assert(0 == THE->rcu_nesting || RCU_WAS_PREEMPTED == THE->rcu_nesting);
 	
 	size_t prev = local_atomic_exchange(&THE->rcu_nesting, 0);
@@ -1220,6 +1221,6 @@
 	}
 	
-	ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
-	ASSERT(_rcu_cur_gp <= CPU->rcu.cur_cbs_gp);
+	assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
+	assert(_rcu_cur_gp <= CPU->rcu.cur_cbs_gp);
 	
 	/* 
@@ -1262,5 +1263,5 @@
 static bool cv_wait_for_gp(rcu_gp_t wait_on_gp)
 {
-	ASSERT(spinlock_locked(&rcu.gp_lock));
+	assert(spinlock_locked(&rcu.gp_lock));
 	
 	bool interrupted = false;
@@ -1284,5 +1285,5 @@
 
 		if (detector_idle) {
-			ASSERT(_rcu_cur_gp == rcu.completed_gp);
+			assert(_rcu_cur_gp == rcu.completed_gp);
 			condvar_signal(&rcu.req_gp_changed);
 		}
@@ -1323,5 +1324,5 @@
 static bool wait_for_detect_req(void)
 {
-	ASSERT(spinlock_locked(&rcu.gp_lock));
+	assert(spinlock_locked(&rcu.gp_lock));
 	
 	bool interrupted = false;
@@ -1340,5 +1341,5 @@
 static void end_cur_gp(void)
 {
-	ASSERT(spinlock_locked(&rcu.gp_lock));
+	assert(spinlock_locked(&rcu.gp_lock));
 	
 	rcu.completed_gp = _rcu_cur_gp;
@@ -1423,6 +1424,6 @@
 static void sample_local_cpu(void *arg)
 {
-	ASSERT(interrupts_disabled());
-	ASSERT(!CPU->rcu.is_delaying_gp);
+	assert(interrupts_disabled());
+	assert(!CPU->rcu.is_delaying_gp);
 	
 	/* Cpu did not pass a quiescent state yet. */
@@ -1430,5 +1431,5 @@
 		/* Interrupted a reader in a reader critical section. */
 		if (0 < CPU->rcu.nesting_cnt) {
-			ASSERT(!CPU->idle);
+			assert(!CPU->idle);
 			/* 
 			 * Note to notify the detector from rcu_read_unlock(). 
@@ -1492,5 +1493,5 @@
 void rcu_after_thread_ran(void)
 {
-	ASSERT(interrupts_disabled());
+	assert(interrupts_disabled());
 
 	/* 
@@ -1559,6 +1560,6 @@
 void rcu_before_thread_runs(void)
 {
-	ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
-	ASSERT(0 == CPU->rcu.nesting_cnt);
+	assert(PREEMPTION_DISABLED || interrupts_disabled());
+	assert(0 == CPU->rcu.nesting_cnt);
 	
 	/* Load the thread's saved nesting count from before it was preempted. */
@@ -1590,7 +1591,7 @@
 void rcu_thread_exiting(void)
 {
-	ASSERT(THREAD != NULL);
-	ASSERT(THREAD->state == Exiting);
-	ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
+	assert(THREAD != NULL);
+	assert(THREAD->state == Exiting);
+	assert(PREEMPTION_DISABLED || interrupts_disabled());
 	
 	/* 
@@ -1615,5 +1616,5 @@
 static void start_new_gp(void)
 {
-	ASSERT(spinlock_locked(&rcu.gp_lock));
+	assert(spinlock_locked(&rcu.gp_lock));
 	
 	irq_spinlock_lock(&rcu.preempt_lock, true);
@@ -1734,5 +1735,5 @@
 static void upd_missed_gp_in_wait(rcu_gp_t completed_gp)
 {
-	ASSERT(CPU->rcu.cur_cbs_gp <= completed_gp);
+	assert(CPU->rcu.cur_cbs_gp <= completed_gp);
 	
 	size_t delta = (size_t)(completed_gp - CPU->rcu.cur_cbs_gp);
@@ -1764,5 +1765,5 @@
 	irq_spinlock_lock(&rcu.preempt_lock, true);
 	
-	ASSERT(link_used(&THREAD->rcu.preempt_link));
+	assert(link_used(&THREAD->rcu.preempt_link));
 
 	bool prev_empty = list_empty(&rcu.cur_preempted);
Index: kernel/generic/src/synch/waitq.c
===================================================================
--- kernel/generic/src/synch/waitq.c	(revision 38dc82d20695b43a799be28d4fd2b2cd2c5bb785)
+++ kernel/generic/src/synch/waitq.c	(revision 58fa3e668549d763497f0c6b6a9059794bb2ff4a)
@@ -44,4 +44,5 @@
  */
 
+#include <assert.h>
 #include <synch/waitq.h>
 #include <synch/spinlock.h>
@@ -203,5 +204,5 @@
 		irq_spinlock_lock(&thread->lock, false);
 		
-		ASSERT(thread->sleep_interruptible);
+		assert(thread->sleep_interruptible);
 		
 		if ((thread->timeout_pending) &&
@@ -264,5 +265,5 @@
 int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags)
 {
-	ASSERT((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));
+	assert((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));
 	
 	ipl_t ipl = waitq_sleep_prepare(wq);
@@ -496,5 +497,5 @@
 static void waitq_complete_wakeup(waitq_t *wq)
 {
-	ASSERT(interrupts_disabled());
+	assert(interrupts_disabled());
 	
 	irq_spinlock_lock(&wq->lock, false);
@@ -520,6 +521,6 @@
 	size_t count = 0;
 
-	ASSERT(interrupts_disabled());
-	ASSERT(irq_spinlock_locked(&wq->lock));
+	assert(interrupts_disabled());
+	assert(irq_spinlock_locked(&wq->lock));
 	
 loop:
Index: kernel/generic/src/synch/workqueue.c
===================================================================
--- kernel/generic/src/synch/workqueue.c	(revision 38dc82d20695b43a799be28d4fd2b2cd2c5bb785)
+++ kernel/generic/src/synch/workqueue.c	(revision 58fa3e668549d763497f0c6b6a9059794bb2ff4a)
@@ -37,4 +37,5 @@
  */
 
+#include <assert.h>
 #include <synch/workqueue.h>
 #include <synch/spinlock.h>
@@ -189,5 +190,5 @@
 	if (workq) {
 		if (workq_init(workq, name)) {
-			ASSERT(!workq_corrupted(workq));
+			assert(!workq_corrupted(workq));
 			return workq;
 		}
@@ -202,5 +203,5 @@
 void workq_destroy(struct work_queue *workq)
 {
-	ASSERT(!workq_corrupted(workq));
+	assert(!workq_corrupted(workq));
 	
 	irq_spinlock_lock(&workq->lock, true);
@@ -214,5 +215,5 @@
 		workq_stop(workq);
 	} else {
-		ASSERT(0 == running_workers);
+		assert(0 == running_workers);
 	}
 	
@@ -264,5 +265,5 @@
 static bool add_worker(struct work_queue *workq)
 {
-	ASSERT(!workq_corrupted(workq));
+	assert(!workq_corrupted(workq));
 
 	thread_t *thread = thread_create(worker_thread, workq, TASK, 
@@ -273,5 +274,5 @@
 		
 		/* cur_worker_cnt proactively increased in signal_worker_logic() .*/
-		ASSERT(0 < workq->cur_worker_cnt);
+		assert(0 < workq->cur_worker_cnt);
 		--workq->cur_worker_cnt;
 		
@@ -312,5 +313,5 @@
 		
 		/* cur_worker_cnt proactively increased in signal_worker() .*/
-		ASSERT(0 < workq->cur_worker_cnt);
+		assert(0 < workq->cur_worker_cnt);
 		--workq->cur_worker_cnt;
 	}
@@ -334,5 +335,5 @@
 void workq_stop(struct work_queue *workq)
 {
-	ASSERT(!workq_corrupted(workq));
+	assert(!workq_corrupted(workq));
 	
 	interrupt_workers(workq);
@@ -346,5 +347,5 @@
 
 	/* workq_stop() may only be called once. */
-	ASSERT(!workq->stopping);
+	assert(!workq->stopping);
 	workq->stopping = true;
 	
@@ -358,5 +359,5 @@
 static void wait_for_workers(struct work_queue *workq)
 {
-	ASSERT(!PREEMPTION_DISABLED);
+	assert(!PREEMPTION_DISABLED);
 	
 	irq_spinlock_lock(&workq->lock, true);
@@ -375,5 +376,5 @@
 	}
 	
-	ASSERT(list_empty(&workq->workers));
+	assert(list_empty(&workq->workers));
 	
 	/* Wait for deferred add_worker_op(), signal_worker_op() to finish. */
@@ -473,5 +474,5 @@
 	work_func_t func, bool can_block)
 {
-	ASSERT(!workq_corrupted(workq));
+	assert(!workq_corrupted(workq));
 	
 	bool success = true;
@@ -521,17 +522,17 @@
 static size_t active_workers_now(struct work_queue *workq)
 {
-	ASSERT(irq_spinlock_locked(&workq->lock));
+	assert(irq_spinlock_locked(&workq->lock));
 	
 	/* Workers blocked are sleeping in the work function (ie not idle). */
-	ASSERT(workq->blocked_worker_cnt <= workq->cur_worker_cnt);
+	assert(workq->blocked_worker_cnt <= workq->cur_worker_cnt);
 	/* Idle workers are waiting for more work to arrive in condvar_wait. */
-	ASSERT(workq->idle_worker_cnt <= workq->cur_worker_cnt);
+	assert(workq->idle_worker_cnt <= workq->cur_worker_cnt);
 	
 	/* Idle + blocked workers == sleeping worker threads. */
 	size_t sleeping_workers = workq->blocked_worker_cnt + workq->idle_worker_cnt;
 	
-	ASSERT(sleeping_workers	<= workq->cur_worker_cnt);
+	assert(sleeping_workers <= workq->cur_worker_cnt);
 	/* Workers pending activation are idle workers not yet given a time slice. */
-	ASSERT(workq->activate_pending <= workq->idle_worker_cnt);
+	assert(workq->activate_pending <= workq->idle_worker_cnt);
 	
 	/* 
@@ -550,5 +551,5 @@
 static size_t active_workers(struct work_queue *workq)
 {
-	ASSERT(irq_spinlock_locked(&workq->lock));
+	assert(irq_spinlock_locked(&workq->lock));
 	
 	/* 
@@ -573,10 +574,10 @@
 static void signal_worker_op(struct work_queue *workq)
 {
-	ASSERT(!workq_corrupted(workq));
+	assert(!workq_corrupted(workq));
 
 	condvar_signal(&workq->activate_worker);
 	
 	irq_spinlock_lock(&workq->lock, true);
-	ASSERT(0 < workq->pending_op_cnt);
+	assert(0 < workq->pending_op_cnt);
 	--workq->pending_op_cnt;
 	irq_spinlock_unlock(&workq->lock, true);
@@ -593,6 +594,6 @@
 static signal_op_t signal_worker_logic(struct work_queue *workq, bool can_block)
 {
-	ASSERT(!workq_corrupted(workq));
-	ASSERT(irq_spinlock_locked(&workq->lock));
+	assert(!workq_corrupted(workq));
+	assert(irq_spinlock_locked(&workq->lock));
 	
 	/* Only signal workers if really necessary. */
@@ -645,5 +646,5 @@
 			 */
 			if (need_worker && !can_block && 0 == active) {
-				ASSERT(0 == workq->idle_worker_cnt);
+				assert(0 == workq->idle_worker_cnt);
 				
 				irq_spinlock_lock(&nonblock_adder.lock, true);
@@ -681,5 +682,5 @@
 	}
 	
-	ASSERT(arg != NULL);
+	assert(arg != NULL);
 	
 	struct work_queue *workq = arg;
@@ -697,5 +698,5 @@
 static bool dequeue_work(struct work_queue *workq, work_t **pwork_item)
 {
-	ASSERT(!workq_corrupted(workq));
+	assert(!workq_corrupted(workq));
 	
 	irq_spinlock_lock(&workq->lock, true);
@@ -704,5 +705,5 @@
 	if (!workq->stopping && worker_unnecessary(workq)) {
 		/* There are too many workers for this load. Exit. */
-		ASSERT(0 < workq->cur_worker_cnt);
+		assert(0 < workq->cur_worker_cnt);
 		--workq->cur_worker_cnt;
 		list_remove(&THREAD->workq_link);
@@ -729,5 +730,5 @@
 		
 #ifdef CONFIG_DEBUG
-		ASSERT(!work_item_corrupted(*pwork_item));
+		assert(!work_item_corrupted(*pwork_item));
 		(*pwork_item)->cookie = 0;
 #endif
@@ -738,5 +739,5 @@
 	} else {
 		/* Requested to stop and no more work queued. */
-		ASSERT(workq->stopping);
+		assert(workq->stopping);
 		--workq->cur_worker_cnt;
 		stop = true;
@@ -751,5 +752,5 @@
 static bool worker_unnecessary(struct work_queue *workq)
 {
-	ASSERT(irq_spinlock_locked(&workq->lock));
+	assert(irq_spinlock_locked(&workq->lock));
 	
 	/* No work is pending. We don't need too many idle threads. */
@@ -775,11 +776,11 @@
 	
 	/* Ignore lock ordering just here. */
-	ASSERT(irq_spinlock_locked(&workq->lock));
+	assert(irq_spinlock_locked(&workq->lock));
 	
 	_condvar_wait_timeout_irq_spinlock(&workq->activate_worker,
 		&workq->lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
 
-	ASSERT(!workq_corrupted(workq));
-	ASSERT(irq_spinlock_locked(&workq->lock));
+	assert(!workq_corrupted(workq));
+	assert(irq_spinlock_locked(&workq->lock));
 	
 	THREAD->workq_idling = false;
@@ -791,14 +792,14 @@
 void workq_before_thread_is_ready(thread_t *thread)
 {
-	ASSERT(thread);
-	ASSERT(irq_spinlock_locked(&thread->lock));
+	assert(thread);
+	assert(irq_spinlock_locked(&thread->lock));
 
 	/* Worker's work func() is about to wake up from sleeping. */
 	if (thread->workq && thread->workq_blocked) {
 		/* Must be blocked in user work func() and not be waiting for work. */
-		ASSERT(!thread->workq_idling);
-		ASSERT(thread->state == Sleeping);
-		ASSERT(THREAD != thread);
-		ASSERT(!workq_corrupted(thread->workq));
+		assert(!thread->workq_idling);
+		assert(thread->state == Sleeping);
+		assert(THREAD != thread);
+		assert(!workq_corrupted(thread->workq));
 		
 		/* Protected by thread->lock */
@@ -814,11 +815,11 @@
 void workq_after_thread_ran(void)
 {
-	ASSERT(THREAD);
-	ASSERT(irq_spinlock_locked(&THREAD->lock));
+	assert(THREAD);
+	assert(irq_spinlock_locked(&THREAD->lock));
 
 	/* Worker's work func() is about to sleep/block. */
 	if (THREAD->workq && THREAD->state == Sleeping && !THREAD->workq_idling) {
-		ASSERT(!THREAD->workq_blocked);
-		ASSERT(!workq_corrupted(THREAD->workq));
+		assert(!THREAD->workq_blocked);
+		assert(!workq_corrupted(THREAD->workq));
 		
 		THREAD->workq_blocked = true;
@@ -834,5 +835,5 @@
 		
 		if (op) {
-			ASSERT(add_worker_noblock_op == op || signal_worker_op == op);
+			assert(add_worker_noblock_op == op || signal_worker_op == op);
 			op(THREAD->workq);
 		}
@@ -903,5 +904,5 @@
 			struct work_queue, nb_link);
 
-		ASSERT(!workq_corrupted(*pworkq));
+		assert(!workq_corrupted(*pworkq));
 		
 		list_remove(&(*pworkq)->nb_link);
