Index: kernel/generic/src/proc/scheduler.c
===================================================================
--- kernel/generic/src/proc/scheduler.c	(revision e190e640f73b310ae15f7fcc3a75485568c08978)
+++ kernel/generic/src/proc/scheduler.c	(revision 1dc5eb7c05ae8c567fc34b39080622ff311a681e)
@@ -52,4 +52,6 @@
 #include <atomic.h>
 #include <synch/spinlock.h>
+#include <synch/workqueue.h>
+#include <synch/rcu.h>
 #include <config.h>
 #include <context.h>
@@ -64,4 +66,5 @@
 #include <debug.h>
 #include <stacktrace.h>
+#include <cpu.h>
 
 static void scheduler_separated_stack(void);
@@ -87,4 +90,5 @@
 {
 	before_thread_runs_arch();
+	rcu_before_thread_runs();
 	
 #ifdef CONFIG_FPU_LAZY
@@ -127,4 +131,6 @@
 static void after_thread_ran(void)
 {
+	workq_after_thread_ran();
+	rcu_after_thread_ran();
 	after_thread_ran_arch();
 }
@@ -219,4 +225,6 @@
 		goto loop;
 	}
+
+	ASSERT(!CPU->idle);
 	
 	unsigned int i;
@@ -398,4 +406,5 @@
 	ASSERT((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
 	ASSERT(CPU != NULL);
+	ASSERT(interrupts_disabled());
 	
 	/*
@@ -421,4 +430,5 @@
 		
 		case Exiting:
+			rcu_thread_exiting();
 repeat:
 			if (THREAD->detached) {
Index: kernel/generic/src/proc/task.c
===================================================================
--- kernel/generic/src/proc/task.c	(revision e190e640f73b310ae15f7fcc3a75485568c08978)
+++ kernel/generic/src/proc/task.c	(revision 1dc5eb7c05ae8c567fc34b39080622ff311a681e)
@@ -41,4 +41,5 @@
 #include <mm/slab.h>
 #include <atomic.h>
+#include <synch/futex.h>
 #include <synch/spinlock.h>
 #include <synch/waitq.h>
@@ -163,5 +164,4 @@
 	
 	irq_spinlock_initialize(&task->lock, "task_t_lock");
-	mutex_initialize(&task->futexes_lock, MUTEX_PASSIVE);
 	
 	list_initialize(&task->threads);
@@ -175,5 +175,5 @@
 	spinlock_initialize(&task->active_calls_lock, "active_calls_lock");
 	list_initialize(&task->active_calls);
-	
+		
 #ifdef CONFIG_UDEBUG
 	/* Init kbox stuff */
@@ -231,5 +231,5 @@
 		(void) ipc_phone_connect(&task->phones[0], ipc_phone_0);
 	
-	btree_create(&task->futexes);
+	futex_task_init(task);
 	
 	/*
@@ -272,5 +272,5 @@
 	 * Free up dynamically allocated state.
 	 */
-	btree_destroy(&task->futexes);
+	futex_task_deinit(task);
 	
 	/*
Index: kernel/generic/src/proc/the.c
===================================================================
--- kernel/generic/src/proc/the.c	(revision e190e640f73b310ae15f7fcc3a75485568c08978)
+++ kernel/generic/src/proc/the.c	(revision 1dc5eb7c05ae8c567fc34b39080622ff311a681e)
@@ -43,4 +43,5 @@
 
 #include <arch.h>
+#include <debug.h>
 
 /** Initialize THE structure
@@ -53,5 +54,5 @@
 void the_initialize(the_t *the)
 {
-	the->preemption_disabled = 0;
+	the->preemption = 0;
 	the->cpu = NULL;
 	the->thread = NULL;
@@ -59,4 +60,7 @@
 	the->as = NULL;
 	the->magic = MAGIC;
+#ifdef RCU_PREEMPT_A	
+	the->rcu_nesting = 0;
+#endif
 }
 
Index: kernel/generic/src/proc/thread.c
===================================================================
--- kernel/generic/src/proc/thread.c	(revision e190e640f73b310ae15f7fcc3a75485568c08978)
+++ kernel/generic/src/proc/thread.c	(revision 1dc5eb7c05ae8c567fc34b39080622ff311a681e)
@@ -46,4 +46,6 @@
 #include <synch/spinlock.h>
 #include <synch/waitq.h>
+#include <synch/workqueue.h>
+#include <synch/rcu.h>
 #include <cpu.h>
 #include <str.h>
@@ -263,4 +265,11 @@
 }
 
+/** Invoked right before thread_ready() readies the thread. thread is locked. */
+static void before_thread_is_ready(thread_t *thread)
+{
+	ASSERT(irq_spinlock_locked(&thread->lock));
+	workq_before_thread_is_ready(thread);
+}
+
 /** Make thread ready
  *
@@ -275,13 +284,20 @@
 	
 	ASSERT(thread->state != Ready);
+
+	before_thread_is_ready(thread);
 	
 	int i = (thread->priority < RQ_COUNT - 1) ?
 	    ++thread->priority : thread->priority;
-	
-	cpu_t *cpu;
-	if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) {
-		ASSERT(thread->cpu != NULL);
-		cpu = thread->cpu;
-	} else
+
+	/* Check that thread->cpu is set whenever it needs to be. */
+	ASSERT(thread->cpu != NULL || 
+		(!thread->wired && !thread->nomigrate && !thread->fpu_context_engaged));
+
+	/* 
+	 * Prefer to run on the same cpu as the last time. Used by wired 
+	 * threads as well as threads with disabled migration.
+	 */
+	cpu_t *cpu = thread->cpu;
+	if (cpu == NULL) 
 		cpu = CPU;
 	
@@ -377,4 +393,6 @@
 	thread->task = task;
 	
+	thread->workq = NULL;
+	
 	thread->fpu_context_exists = false;
 	thread->fpu_context_engaged = false;
@@ -391,4 +409,6 @@
 	/* Might depend on previous initialization */
 	thread_create_arch(thread);
+	
+	rcu_thread_init(thread);
 	
 	if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
@@ -501,5 +521,5 @@
 			 */
 			ipc_cleanup();
-			futex_cleanup();
+			futex_task_cleanup();
 			LOG("Cleanup of task %" PRIu64" completed.", TASK->taskid);
 		}
@@ -521,4 +541,52 @@
 	/* Not reached */
 	while (true);
+}
+
+/** Interrupts an existing thread so that it may exit as soon as possible.
+ * 
+ * Threads that are blocked waiting for a synchronization primitive 
+ * are woken up with a return code of ESYNCH_INTERRUPTED if the
+ * blocking call was interruptable. See waitq_sleep_timeout().
+ * 
+ * The caller must guarantee the thread object is valid during the entire
+ * function, e.g., by holding the threads_lock lock.
+ * 
+ * Interrupted threads automatically exit when returning back to user space.
+ * 
+ * @param thread A valid thread object. The caller must guarantee it
+ *               will remain valid until thread_interrupt() exits.
+ */
+void thread_interrupt(thread_t *thread)
+{
+	ASSERT(thread != NULL);
+	
+	irq_spinlock_lock(&thread->lock, true);
+	
+	thread->interrupted = true;
+	bool sleeping = (thread->state == Sleeping);
+	
+	irq_spinlock_unlock(&thread->lock, true);
+	
+	if (sleeping)
+		waitq_interrupt_sleep(thread);
+}
+
+/** Returns true if the thread was interrupted.
+ * 
+ * @param thread A valid thread object. The caller must guarantee it will
+ *               be alive during the entire call.
+ * @return true if the thread was already interrupted via thread_interrupt().
+ */
+bool thread_interrupted(thread_t *thread)
+{
+	ASSERT(thread != NULL);
+	
+	bool interrupted;
+	
+	irq_spinlock_lock(&thread->lock, true);
+	interrupted = thread->interrupted;
+	irq_spinlock_unlock(&thread->lock, true);
+	
+	return interrupted;
 }
 
