Index: kernel/arch/abs32le/include/arch/asm.h
===================================================================
--- kernel/arch/abs32le/include/arch/asm.h	(revision 6188fee70f91508871094638e99dcaf9d73efdc0)
+++ kernel/arch/abs32le/include/arch/asm.h	(revision e1f5fb191dfe5766d747cfd1c7466d29ff2d3e95)
@@ -197,4 +197,15 @@
 }
 
+/** Enables interrupts and blocks until an interrupt arrives,
+ * atomically if possible on target architecture.
+ * Disables interrupts again before returning to caller.
+ */
+_NO_TRACE static inline void cpu_interruptible_sleep(void)
+{
+	interrupts_enable();
+	cpu_sleep();
+	interrupts_disable();
+}
+
 #endif
 
Index: kernel/arch/amd64/include/arch/asm.h
===================================================================
--- kernel/arch/amd64/include/arch/asm.h	(revision 6188fee70f91508871094638e99dcaf9d73efdc0)
+++ kernel/arch/amd64/include/arch/asm.h	(revision e1f5fb191dfe5766d747cfd1c7466d29ff2d3e95)
@@ -43,8 +43,18 @@
 #define IO_SPACE_BOUNDARY	((void *) (64 * 1024))
 
-_NO_TRACE static inline void cpu_sleep(void)
-{
-	asm volatile (
+/** Enables interrupts and blocks until an interrupt arrives,
+ * atomically if possible on target architecture.
+ * Disables interrupts again before returning to caller.
+ */
+_NO_TRACE static inline void cpu_interruptible_sleep(void)
+{
+	/*
+	 * On x86, "sti" enables interrupts after the next instruction.
+	 * Therefore, this sequence is guaranteed to be atomic.
+	 */
+	asm volatile (
+	    "sti\n"
 	    "hlt\n"
+	    "cli\n"
 	);
 }
Index: kernel/arch/arm32/include/arch/asm.h
===================================================================
--- kernel/arch/arm32/include/arch/asm.h	(revision 6188fee70f91508871094638e99dcaf9d73efdc0)
+++ kernel/arch/arm32/include/arch/asm.h	(revision e1f5fb191dfe5766d747cfd1c7466d29ff2d3e95)
@@ -65,4 +65,16 @@
 }
 
+/** Enables interrupts and blocks until an interrupt arrives,
+ * atomically if possible on target architecture.
+ * Disables interrupts again before returning to caller.
+ */
+_NO_TRACE static inline void cpu_interruptible_sleep(void)
+{
+	// FIXME: do this atomically
+	interrupts_enable();
+	cpu_sleep();
+	interrupts_disable();
+}
+
 _NO_TRACE static inline void cpu_spin_hint(void)
 {
Index: kernel/arch/arm64/include/arch/asm.h
===================================================================
--- kernel/arch/arm64/include/arch/asm.h	(revision 6188fee70f91508871094638e99dcaf9d73efdc0)
+++ kernel/arch/arm64/include/arch/asm.h	(revision e1f5fb191dfe5766d747cfd1c7466d29ff2d3e95)
@@ -52,4 +52,16 @@
 {
 	asm volatile ("wfe");
+}
+
+/** Enables interrupts and blocks until an interrupt arrives,
+ * atomically if possible on target architecture.
+ * Disables interrupts again before returning to caller.
+ */
+_NO_TRACE static inline void cpu_interruptible_sleep(void)
+{
+	// FIXME: do this atomically
+	interrupts_enable();
+	cpu_sleep();
+	interrupts_disable();
 }
 
Index: kernel/arch/ia32/include/arch/asm.h
===================================================================
--- kernel/arch/ia32/include/arch/asm.h	(revision 6188fee70f91508871094638e99dcaf9d73efdc0)
+++ kernel/arch/ia32/include/arch/asm.h	(revision e1f5fb191dfe5766d747cfd1c7466d29ff2d3e95)
@@ -57,8 +57,14 @@
 }
 
-_NO_TRACE static inline void cpu_sleep(void)
-{
-	asm volatile (
+/** Enables interrupts and blocks until an interrupt arrives,
+ * atomically if possible on target architecture.
+ * Disables interrupts again before returning to caller.
+ */
+_NO_TRACE static inline void cpu_interruptible_sleep(void)
+{
+	asm volatile (
+	    "sti\n"
 	    "hlt\n"
+	    "cli\n"
 	);
 }
Index: kernel/arch/ia64/include/arch/asm.h
===================================================================
--- kernel/arch/ia64/include/arch/asm.h	(revision 6188fee70f91508871094638e99dcaf9d73efdc0)
+++ kernel/arch/ia64/include/arch/asm.h	(revision e1f5fb191dfe5766d747cfd1c7466d29ff2d3e95)
@@ -473,4 +473,16 @@
     uint64_t, uint64_t);
 
+/** Enables interrupts and blocks until an interrupt arrives,
+ * atomically if possible on target architecture.
+ * Disables interrupts again before returning to caller.
+ */
+_NO_TRACE static inline void cpu_interruptible_sleep(void)
+{
+	// FIXME: do this properly
+	interrupts_enable();
+	cpu_sleep();
+	interrupts_disable();
+}
+
 #endif
 
Index: kernel/arch/mips32/include/arch/asm.h
===================================================================
--- kernel/arch/mips32/include/arch/asm.h	(revision 6188fee70f91508871094638e99dcaf9d73efdc0)
+++ kernel/arch/mips32/include/arch/asm.h	(revision e1f5fb191dfe5766d747cfd1c7466d29ff2d3e95)
@@ -89,4 +89,16 @@
 extern bool interrupts_disabled(void);
 
+/** Enables interrupts and blocks until an interrupt arrives,
+ * atomically if possible on target architecture.
+ * Disables interrupts again before returning to caller.
+ */
+_NO_TRACE static inline void cpu_interruptible_sleep(void)
+{
+	// FIXME: do this atomically
+	interrupts_enable();
+	cpu_sleep();
+	interrupts_disable();
+}
+
 #endif
 
Index: kernel/arch/ppc32/include/arch/asm.h
===================================================================
--- kernel/arch/ppc32/include/arch/asm.h	(revision 6188fee70f91508871094638e99dcaf9d73efdc0)
+++ kernel/arch/ppc32/include/arch/asm.h	(revision e1f5fb191dfe5766d747cfd1c7466d29ff2d3e95)
@@ -167,6 +167,13 @@
 }
 
-_NO_TRACE static inline void cpu_sleep(void)
-{
+/** Enables interrupts and blocks until an interrupt arrives,
+ * atomically if possible on target architecture.
+ * Disables interrupts again before returning to caller.
+ */
+_NO_TRACE static inline void cpu_interruptible_sleep(void)
+{
+	// FIXME: do this properly
+	interrupts_enable();
+	interrupts_disable();
 }
 
Index: kernel/arch/riscv64/include/arch/asm.h
===================================================================
--- kernel/arch/riscv64/include/arch/asm.h	(revision 6188fee70f91508871094638e99dcaf9d73efdc0)
+++ kernel/arch/riscv64/include/arch/asm.h	(revision e1f5fb191dfe5766d747cfd1c7466d29ff2d3e95)
@@ -95,6 +95,13 @@
 }
 
-_NO_TRACE static inline void cpu_sleep(void)
+/** Enables interrupts and blocks until an interrupt arrives,
+ * atomically if possible on target architecture.
+ * Disables interrupts again before returning to caller.
+ */
+_NO_TRACE static inline void cpu_interruptible_sleep(void)
 {
+	// FIXME: do this properly
+	interrupts_enable();
+	interrupts_disable();
 }
 
Index: kernel/arch/sparc64/include/arch/asm.h
===================================================================
--- kernel/arch/sparc64/include/arch/asm.h	(revision 6188fee70f91508871094638e99dcaf9d73efdc0)
+++ kernel/arch/sparc64/include/arch/asm.h	(revision e1f5fb191dfe5766d747cfd1c7466d29ff2d3e95)
@@ -539,4 +539,16 @@
 extern void switch_to_userspace(uint64_t pc, uint64_t sp, uint64_t uarg);
 
+/** Enables interrupts and blocks until an interrupt arrives,
+ * atomically if possible on target architecture.
+ * Disables interrupts again before returning to caller.
+ */
+_NO_TRACE static inline void cpu_interruptible_sleep(void)
+{
+	// FIXME: do this atomically
+	interrupts_enable();
+	cpu_sleep();
+	interrupts_disable();
+}
+
 #endif
 
