Index: kernel/generic/include/proc/thread.h
===================================================================
--- kernel/generic/include/proc/thread.h	(revision 390d80d3311996c5f585280d3918fd3f577111a9)
+++ kernel/generic/include/proc/thread.h	(revision 854eddd6711fa7a5783a815c8d140d5414badf49)
@@ -156,4 +156,7 @@
 	int fpu_context_engaged;
 	
+	/* The thread will not be migrated if nomigrate is non-zero. */
+	int nomigrate;
+	
 	/** Thread's state. */
 	state_t state;
@@ -245,4 +248,7 @@
 extern bool thread_exists(thread_t *);
 
+extern void thread_migration_disable(void);
+extern void thread_migration_enable(void);
+
 #ifdef CONFIG_UDEBUG
 extern void thread_stack_trace(thread_id_t);
Index: kernel/generic/src/proc/scheduler.c
===================================================================
--- kernel/generic/src/proc/scheduler.c	(revision 390d80d3311996c5f585280d3918fd3f577111a9)
+++ kernel/generic/src/proc/scheduler.c	(revision 854eddd6711fa7a5783a815c8d140d5414badf49)
@@ -586,5 +586,4 @@
 	 * Searching least priority queues on all CPU's first and most priority
 	 * queues on all CPU's last.
-	 *
 	 */
 	size_t acpu;
@@ -620,23 +619,24 @@
 			
 			while (link != &(cpu->rq[rq].rq_head)) {
-				thread = (thread_t *) list_get_instance(link, thread_t, rq_link);
+				thread = (thread_t *) list_get_instance(link,
+				    thread_t, rq_link);
 				
 				/*
-				 * We don't want to steal CPU-wired threads
-				 * neither threads already stolen. The latter
-				 * prevents threads from migrating between CPU's
-				 * without ever being run. We don't want to
-				 * steal threads whose FPU context is still in
-				 * CPU.
-				 *
+				 * Do not steal CPU-wired threads, threads
+				 * already stolen, threads for which migration
+				 * was temporarily disabled or threads whose
+				 * FPU context is still in the CPU.
 				 */
 				irq_spinlock_lock(&thread->lock, false);
 				
-				if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)))
-				    && (!(thread->fpu_context_engaged))) {
+				if (!(thread->flags & THREAD_FLAG_WIRED) &&
+				    !(thread->flags & THREAD_FLAG_STOLEN) &&
+				    !thread->nomigrate &&
+				    !thread->fpu_context_engaged) {
 					/*
 					 * Remove thread from ready queue.
 					 */
-					irq_spinlock_unlock(&thread->lock, false);
+					irq_spinlock_unlock(&thread->lock,
+					    false);
 					
 					atomic_dec(&cpu->nrdy);
@@ -660,5 +660,6 @@
 				 */
 				
-				irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock);
+				irq_spinlock_pass(&(cpu->rq[rq].lock),
+				    &thread->lock);
 				
 #ifdef KCPULB_VERBOSE
Index: kernel/generic/src/proc/thread.c
===================================================================
--- kernel/generic/src/proc/thread.c	(revision 390d80d3311996c5f585280d3918fd3f577111a9)
+++ kernel/generic/src/proc/thread.c	(revision 854eddd6711fa7a5783a815c8d140d5414badf49)
@@ -322,4 +322,5 @@
 	thread->cpu = NULL;
 	thread->flags = flags;
+	thread->nomigrate = 0;
 	thread->state = Entering;
 	
@@ -482,4 +483,21 @@
 	/* Not reached */
 	while (true);
+}
+
+/** Prevent the current thread from being migrated to another processor. */
+void thread_migration_disable(void)
+{
+	ASSERT(THREAD);
+
+	THREAD->nomigrate++;
+}
+
+/** Allow the current thread to be migrated to another processor. */
+void thread_migration_enable(void)
+{
+	ASSERT(THREAD);
+	ASSERT(THREAD->nomigrate > 0);
+
+	THREAD->nomigrate--;
 }
 
Index: kernel/generic/src/time/delay.c
===================================================================
--- kernel/generic/src/time/delay.c	(revision 390d80d3311996c5f585280d3918fd3f577111a9)
+++ kernel/generic/src/time/delay.c	(revision 854eddd6711fa7a5783a815c8d140d5414badf49)
@@ -37,4 +37,5 @@
  
 #include <time/delay.h>
+#include <proc/thread.h>
 #include <typedefs.h>
 #include <cpu.h>
@@ -42,9 +43,7 @@
 #include <arch.h>
 
-/** Active delay
+/** Delay the execution for the given number of microseconds (or slightly more).
  *
- * Delay the execution for the given number
- * of microseconds (or slightly more). The delay
- * is implemented as CPU calibrated active loop.
+ * The delay is implemented as an active delay loop.
  *
  * @param usec Number of microseconds to sleep.
@@ -52,17 +51,16 @@
 void delay(uint32_t usec)
 {
-	ipl_t ipl;
-	
 	/* 
-	 * The delay loop is calibrated for each and every
-	 * CPU in the system. Therefore it is necessary to
-	 * call interrupts_disable() before calling the
-	 * asm_delay_loop().
+	 * The delay loop is calibrated for each and every CPU in the system.
+	 * If running in a thread context, it is therefore necessary to disable
+	 * thread migration. We want to do this in a lightweight manner.
 	 */
-	ipl = interrupts_disable();
+	if (THREAD)
+		thread_migration_disable();
 	asm_delay_loop(usec * CPU->delay_loop_const);
-	interrupts_restore(ipl);
+	if (THREAD)
+		thread_migration_enable();
 }
 
 /** @}
  */
