Index: kernel/generic/include/synch/condvar.h
===================================================================
--- kernel/generic/include/synch/condvar.h	(revision b2fa1204c76e1eaec329888181d281aac04ed61e)
+++ kernel/generic/include/synch/condvar.h	(revision 5e3fa9dbfffc978cdcfdaaea9fc6a50dbb0bca1a)
@@ -39,4 +39,5 @@
 #include <synch/waitq.h>
 #include <synch/mutex.h>
+#include <synch/spinlock.h>
 #include <abi/synch.h>
 
@@ -50,4 +51,12 @@
 	_condvar_wait_timeout((cv), (mtx), (usec), SYNCH_FLAGS_NONE)
 
+#ifdef CONFIG_SMP
+#define _condvar_wait_timeout_spinlock(cv, lock, usec, flags) \
+	_condvar_wait_timeout_spinlock_impl((cv), (lock), (usec), (flags))
+#else
+#define _condvar_wait_timeout_spinlock(cv, lock, usec, flags) \
+	_condvar_wait_timeout_spinlock_impl((cv), NULL, (usec), (flags))
+#endif
+
 extern void condvar_initialize(condvar_t *cv);
 extern void condvar_signal(condvar_t *cv);
@@ -55,4 +64,9 @@
 extern int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec,
     int flags);
+extern int _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock, 
+	uint32_t usec, int flags);
+extern int _condvar_wait_timeout_irq_spinlock(condvar_t *cv, 
+	irq_spinlock_t *irq_lock, uint32_t usec, int flags);
+
 
 #endif
Index: kernel/generic/include/synch/futex.h
===================================================================
--- kernel/generic/include/synch/futex.h	(revision b2fa1204c76e1eaec329888181d281aac04ed61e)
+++ kernel/generic/include/synch/futex.h	(revision 5e3fa9dbfffc978cdcfdaaea9fc6a50dbb0bca1a)
@@ -55,5 +55,7 @@
 extern sysarg_t sys_futex_wakeup(uintptr_t);
 
-extern void futex_cleanup(void);
+extern void futex_task_cleanup(void);
+extern void futex_task_init(struct task *);
+extern void futex_task_deinit(struct task *);
 
 #endif
Index: kernel/generic/include/synch/rcu.h
===================================================================
--- kernel/generic/include/synch/rcu.h	(revision 5e3fa9dbfffc978cdcfdaaea9fc6a50dbb0bca1a)
+++ kernel/generic/include/synch/rcu.h	(revision 5e3fa9dbfffc978cdcfdaaea9fc6a50dbb0bca1a)
@@ -0,0 +1,247 @@
+/*
+ * Copyright (c) 2012 Adam Hraska
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sync
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_RCU_H_
+#define KERN_RCU_H_
+
+#include <synch/rcu_types.h>
+#include <compiler/barrier.h>
+
+
+/** Use to assign a pointer to newly initialized data to an RCU reader
+ * accessible pointer.
+ * 
+ * Example:
+ * @code
+ * typedef struct exam {
+ *     struct exam *next;
+ *     int grade;
+ * } exam_t;
+ * 
+ * exam_t *exam_list;
+ * // ..
+ * 
+ * // Insert at the beginning of the list.
+ * exam_t *my_exam = malloc(sizeof(exam_t), 0);
+ * my_exam->grade = 5;
+ * my_exam->next = exam_list;
+ * rcu_assign(exam_list, my_exam);
+ * 
+ * // Changes properly propagate. Every reader either sees
+ * // the old version of exam_list or the new version with
+ * // the fully initialized my_exam.
+ * rcu_synchronize();
+ * // Now we can be sure every reader sees my_exam.
+ * 
+ * @endcode
+ */
+#define rcu_assign(ptr, value) \
+	do { \
+		memory_barrier(); \
+		(ptr) = (value); \
+	} while (0)
+
+/** Use to access RCU protected data in a reader section.
+ * 
+ * Example:
+ * @code
+ * exam_t *exam_list;
+ * // ...
+ * 
+ * rcu_read_lock();
+ * exam_t *first_exam = rcu_access(exam_list);
+ * // We can now safely use first_exam, it won't change 
+ * // under us while we're using it.
+ *
+ * // ..
+ * rcu_read_unlock();
+ * @endcode
+ */
+#define rcu_access(ptr) ACCESS_ONCE(ptr)
+
+
+
+
+#include <debug.h>
+#include <preemption.h>
+#include <cpu.h>
+#include <proc/thread.h>
+
+
+extern bool rcu_read_locked(void);
+extern void rcu_synchronize(void);
+extern void rcu_synchronize_expedite(void);
+extern void rcu_call(rcu_item_t *rcu_item, rcu_func_t func);
+extern void rcu_barrier(void);
+
+extern void rcu_print_stat(void);
+
+extern void rcu_init(void);
+extern void rcu_stop(void);
+extern void rcu_cpu_init(void);
+extern void rcu_kinit_init(void);
+extern void rcu_thread_init(struct thread*);
+extern void rcu_thread_exiting(void);
+extern void rcu_after_thread_ran(void);
+extern void rcu_before_thread_runs(void);
+
+extern uint64_t rcu_completed_gps(void);
+extern void _rcu_call(bool expedite, rcu_item_t *rcu_item, rcu_func_t func);
+extern void _rcu_synchronize(bool expedite);
+
+
+#ifdef RCU_PREEMPT_A
+
+#define RCU_CNT_INC       (1 << 1)
+#define RCU_WAS_PREEMPTED (1 << 0)
+
+/* Fwd. decl. because of inlining. */
+void _rcu_preempted_unlock(void);
+
+/** Delimits the start of an RCU reader critical section. 
+ * 
+ * Reader sections may be nested and are preemptible. You must not
+ * however block/sleep within reader sections.
+ */
+static inline void rcu_read_lock(void)
+{
+	THE->rcu_nesting += RCU_CNT_INC;
+	compiler_barrier();
+}
+
+/** Delimits the end of an RCU reader critical section. */
+static inline void rcu_read_unlock(void)
+{
+	compiler_barrier();
+	THE->rcu_nesting -= RCU_CNT_INC;
+	
+	if (RCU_WAS_PREEMPTED == THE->rcu_nesting) {
+		_rcu_preempted_unlock();
+	}
+}
+
+#elif defined(RCU_PREEMPT_PODZIMEK)
+
+/* Fwd decl. required by the inlined implementation. Not part of public API. */
+extern rcu_gp_t _rcu_cur_gp;
+extern void _rcu_signal_read_unlock(void);
+
+
+/** Unconditionally records a quiescent state for the local cpu. */
+static inline void _rcu_record_qs(void)
+{
+	ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
+	
+	/* 
+	 * A new GP was started since the last time we passed a QS. 
+	 * Notify the detector we have reached a new QS.
+	 */
+	if (CPU->rcu.last_seen_gp != _rcu_cur_gp) {
+		rcu_gp_t cur_gp = ACCESS_ONCE(_rcu_cur_gp);
+		/* 
+		 * Contain memory accesses within a reader critical section. 
+		 * If we are in rcu_lock() it also makes changes prior to the
+		 * start of the GP visible in the reader section.
+		 */
+		memory_barrier();
+		/*
+		 * Acknowledge we passed a QS since the beginning of rcu.cur_gp.
+		 * Cache coherency will lazily transport the value to the
+		 * detector while it sleeps in gp_sleep(). 
+		 * 
+		 * Note that there is a theoretical possibility that we
+		 * overwrite a more recent/greater last_seen_gp here with 
+		 * an older/smaller value. If this cpu is interrupted here
+		 * while in rcu_lock() reader sections in the interrupt handler 
+		 * will update last_seen_gp to the same value as is currently 
+		 * in local cur_gp. However, if the cpu continues processing 
+		 * interrupts and the detector starts a new GP immediately, 
+		 * local interrupt handlers may update last_seen_gp again (ie 
+		 * properly ack the new GP) with a value greater than local cur_gp. 
+		 * Resetting last_seen_gp to a previous value here is however 
+		 * benign and we only have to remember that this reader may end up 
+		 * in cur_preempted even after the GP ends. That is why we
+		 * append next_preempted to cur_preempted rather than overwriting 
+		 * it as if cur_preempted were empty.
+		 */
+		CPU->rcu.last_seen_gp = cur_gp;
+	}
+}
+
+/** Delimits the start of an RCU reader critical section. 
+ * 
+ * Reader sections may be nested and are preemptible. You must not
+ * however block/sleep within reader sections.
+ */
+static inline void rcu_read_lock(void)
+{
+	ASSERT(CPU);
+	preemption_disable();
+
+	/* Record a QS if not in a reader critical section. */
+	if (0 == CPU->rcu.nesting_cnt)
+		_rcu_record_qs();
+
+	++CPU->rcu.nesting_cnt;
+
+	preemption_enable();
+}
+
+/** Delimits the end of an RCU reader critical section. */
+static inline void rcu_read_unlock(void)
+{
+	ASSERT(CPU);
+	preemption_disable();
+	
+	if (0 == --CPU->rcu.nesting_cnt) {
+		_rcu_record_qs();
+		
+		/* 
+		 * The thread was preempted while in a critical section or 
+		 * the detector is eagerly waiting for this cpu's reader to finish. 
+		 */
+		if (CPU->rcu.signal_unlock) {
+			/* Rechecks with disabled interrupts. */
+			_rcu_signal_read_unlock();
+		}
+	}
+	
+	preemption_enable();
+}
+#endif
+
+#endif
+
+/** @}
+ */
Index: kernel/generic/include/synch/rcu_types.h
===================================================================
--- kernel/generic/include/synch/rcu_types.h	(revision 5e3fa9dbfffc978cdcfdaaea9fc6a50dbb0bca1a)
+++ kernel/generic/include/synch/rcu_types.h	(revision 5e3fa9dbfffc978cdcfdaaea9fc6a50dbb0bca1a)
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2012 Adam Hraska
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sync
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_RCU_TYPES_H_
+#define KERN_RCU_TYPES_H_
+
+#include <adt/list.h>
+#include <synch/semaphore.h>
+
+#if !defined(RCU_PREEMPT_PODZIMEK) && !defined(RCU_PREEMPT_A)
+#error You must select an RCU algorithm.
+#endif
+
+
+/* Fwd decl. */
+struct thread;
+struct rcu_item;
+
+/** Grace period number typedef. */
+typedef uint64_t rcu_gp_t;
+
+/** RCU callback type. The passed rcu_item_t may be freed. */
+typedef void (*rcu_func_t)(struct rcu_item *rcu_item);
+
+typedef struct rcu_item {
+	rcu_func_t func;
+	struct rcu_item *next;
+} rcu_item_t;
+
+
+/** RCU related per-cpu data. */
+typedef struct rcu_cpu_data {
+	/** The cpu recorded a quiescent state last time during this grace period.*/
+	rcu_gp_t last_seen_gp;
+
+#ifdef RCU_PREEMPT_PODZIMEK
+	/** This cpu has not yet passed a quiescent state and it is delaying the
+	 * detector. Once it reaches a QS it must sema_up(rcu.remaining_readers).
+	 */
+	bool is_delaying_gp;
+	
+	/** True if we should signal the detector that we exited a reader section.
+	 * 
+	 * Equal to (THREAD->rcu.was_preempted || CPU->rcu.is_delaying_gp).
+	 */
+	bool signal_unlock;
+
+	/** The number of times an RCU reader section is nested on this cpu. 
+	 * 
+	 * If positive, it is definitely executing reader code. If zero, 
+	 * the thread might already be executing reader code thanks to
+	 * cpu instruction reordering.
+	 */
+	size_t nesting_cnt;
+#endif
+	
+	/** Callbacks to invoke once the current grace period ends, ie cur_cbs_gp.
+	 * Accessed by the local reclaimer only.
+	 */
+	rcu_item_t *cur_cbs;
+	/** Number of callbacks in cur_cbs. */
+	size_t cur_cbs_cnt;
+	/** Callbacks to invoke once the next grace period ends, ie next_cbs_gp. 
+	 * Accessed by the local reclaimer only.
+	 */
+	rcu_item_t *next_cbs;
+	/** Number of callbacks in next_cbs. */
+	size_t next_cbs_cnt;
+	/** New callbacks are placed at the end of this list. */
+	rcu_item_t *arriving_cbs;
+	/** Tail of arriving_cbs list. Disable interrupts to access. */
+	rcu_item_t **parriving_cbs_tail;
+	/** Number of callbacks currently in arriving_cbs. 
+	 * Disable interrupts to access.
+	 */
+	size_t arriving_cbs_cnt;
+
+	/** At the end of this grace period callbacks in cur_cbs will be invoked.*/
+	rcu_gp_t cur_cbs_gp;
+	/** At the end of this grace period callbacks in next_cbs will be invoked.
+	 * 
+	 * Should be the next grace period but it allows the reclaimer to 
+	 * notice if it missed a grace period end announcement. In that
+	 * case it can execute next_cbs without waiting for another GP.
+	 * 
+	 * Invariant: next_cbs_gp >= cur_cbs_gp
+	 */
+	rcu_gp_t next_cbs_gp;
+	
+	/** Positive if there are callbacks pending in arriving_cbs. */
+	semaphore_t arrived_flag;
+	
+	/** The reclaimer should expedite GPs for cbs in arriving_cbs. */
+	bool expedite_arriving;
+	
+	/** Protected by global rcu.barrier_mtx. */
+	rcu_item_t barrier_item;
+	
+	/** Interruptible attached reclaimer thread. */
+	struct thread *reclaimer_thr;
+	
+	/* Some statistics. */
+	size_t stat_max_cbs;
+	size_t stat_avg_cbs;
+	size_t stat_missed_gps;
+	size_t stat_missed_gp_in_wait;
+	size_t stat_max_slice_cbs;
+	size_t last_arriving_cnt;
+} rcu_cpu_data_t;
+
+
+/** RCU related per-thread data. */
+typedef struct rcu_thread_data {
+	/** 
+	 * Nesting count of the thread's RCU read sections when the thread 
+	 * is not running.
+	 */
+	size_t nesting_cnt;
+
+#ifdef RCU_PREEMPT_PODZIMEK
+	
+	/** True if the thread was preempted in a reader section. 
+	 *
+	 * The thread is placed into rcu.cur_preempted or rcu.next_preempted
+	 * and must remove itself in rcu_read_unlock(). 
+	 * 
+	 * Access with interrupts disabled.
+	 */
+	bool was_preempted;
+#endif
+	
+	/** Preempted threads link. Access with rcu.preempt_lock. */
+	link_t preempt_link;
+} rcu_thread_data_t;
+
+
+#endif
+
+/** @}
+ */
Index: kernel/generic/include/synch/semaphore.h
===================================================================
--- kernel/generic/include/synch/semaphore.h	(revision b2fa1204c76e1eaec329888181d281aac04ed61e)
+++ kernel/generic/include/synch/semaphore.h	(revision 5e3fa9dbfffc978cdcfdaaea9fc6a50dbb0bca1a)
@@ -53,4 +53,8 @@
 	_semaphore_down_timeout((s), (usec), SYNCH_FLAGS_NONE)
 
+#define semaphore_down_interruptable(s) \
+	(ESYNCH_INTERRUPTED != _semaphore_down_timeout((s), SYNCH_NO_TIMEOUT, \
+		SYNCH_FLAGS_INTERRUPTIBLE))
+
 extern void semaphore_initialize(semaphore_t *, int);
 extern int _semaphore_down_timeout(semaphore_t *, uint32_t, unsigned int);
Index: kernel/generic/include/synch/smp_memory_barrier.h
===================================================================
--- kernel/generic/include/synch/smp_memory_barrier.h	(revision 5e3fa9dbfffc978cdcfdaaea9fc6a50dbb0bca1a)
+++ kernel/generic/include/synch/smp_memory_barrier.h	(revision 5e3fa9dbfffc978cdcfdaaea9fc6a50dbb0bca1a)
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2012 Adam Hraska
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sync
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_SMP_MEM_BAR_H_
+#define KERN_SMP_MEM_BAR_H_
+
+#include <typedefs.h>
+
+extern sysarg_t sys_smp_memory_barrier(void);
+
+#endif
+
+/** @}
+ */
Index: kernel/generic/include/synch/spinlock.h
===================================================================
--- kernel/generic/include/synch/spinlock.h	(revision b2fa1204c76e1eaec329888181d281aac04ed61e)
+++ kernel/generic/include/synch/spinlock.h	(revision 5e3fa9dbfffc978cdcfdaaea9fc6a50dbb0bca1a)
@@ -45,5 +45,5 @@
 #ifdef CONFIG_SMP
 
-typedef struct {
+typedef struct spinlock {
 	atomic_t val;
 	
@@ -163,4 +163,7 @@
 /* On UP systems, spinlocks are effectively left out. */
 
+/* Allow the use of spinlock_t as an incomplete type. */
+typedef struct spinlock spinlock_t;
+
 #define SPINLOCK_DECLARE(name)
 #define SPINLOCK_EXTERN(name)
@@ -177,5 +180,5 @@
 
 #define spinlock_lock(lock)     preemption_disable()
-#define spinlock_trylock(lock)  (preemption_disable(), 1)
+#define spinlock_trylock(lock)  ({ preemption_disable(); 1; })
 #define spinlock_unlock(lock)   preemption_enable()
 #define spinlock_locked(lock)	1
Index: kernel/generic/include/synch/workqueue.h
===================================================================
--- kernel/generic/include/synch/workqueue.h	(revision 5e3fa9dbfffc978cdcfdaaea9fc6a50dbb0bca1a)
+++ kernel/generic/include/synch/workqueue.h	(revision 5e3fa9dbfffc978cdcfdaaea9fc6a50dbb0bca1a)
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2012 Adam Hraska
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup generic
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_WORKQUEUE_H_
+#define KERN_WORKQUEUE_H_
+
+#include <adt/list.h>
+
+/* Fwd decl. */
+struct thread;
+struct work_item;
+struct work_queue;
+typedef struct work_queue work_queue_t;
+
+typedef void (*work_func_t)(struct work_item *);
+
+typedef struct work_item {
+	link_t queue_link;
+	work_func_t func;
+	
+#ifdef CONFIG_DEBUG
+	/* Magic number for integrity checks. */
+	uint32_t cookie;
+#endif 
+} work_t;
+
+
+
+extern void workq_global_init(void);
+extern void workq_global_worker_init(void);
+extern void workq_global_stop(void);
+extern int workq_global_enqueue_noblock(work_t *, work_func_t);
+extern int workq_global_enqueue(work_t *, work_func_t);
+
+extern struct work_queue * workq_create(const char *);
+extern void workq_destroy(struct work_queue *);
+extern int workq_init(struct work_queue *, const char *);
+extern void workq_stop(struct work_queue *);
+extern int workq_enqueue_noblock(struct work_queue *, work_t *, work_func_t);
+extern int workq_enqueue(struct work_queue *, work_t *, work_func_t);
+
+extern void workq_print_info(struct work_queue *);
+extern void workq_global_print_info(void);
+
+
+extern void workq_after_thread_ran(void);
+extern void workq_before_thread_is_ready(struct thread *);
+
+#endif /* KERN_WORKQUEUE_H_ */
+
+/** @}
+ */
