Index: kernel/arch/amd64/include/debugger.h
===================================================================
--- kernel/arch/amd64/include/debugger.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/amd64/include/debugger.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -38,17 +38,17 @@
 #include <typedefs.h>
 
-#define BKPOINTS_MAX 4
+#define BKPOINTS_MAX  4
 
 /* Flags that are passed to breakpoint_add function */
-#define BKPOINT_INSTR        0x1
-#define BKPOINT_WRITE        0x2 
-#define BKPOINT_READ_WRITE   0x4
+#define BKPOINT_INSTR       0x1
+#define BKPOINT_WRITE       0x2
+#define BKPOINT_READ_WRITE  0x4
 
-#define BKPOINT_CHECK_ZERO   0x8
+#define BKPOINT_CHECK_ZERO  0x8
 
 
 extern void debugger_init(void);
-extern int breakpoint_add(const void *where, const int flags, int curidx);
-extern void breakpoint_del(int slot);
+extern int breakpoint_add(const void *, const unsigned int, int);
+extern void breakpoint_del(int);
 
 #endif
Index: kernel/arch/amd64/src/ddi/ddi.c
===================================================================
--- kernel/arch/amd64/src/ddi/ddi.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/amd64/src/ddi/ddi.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -49,30 +49,27 @@
  * Interrupts are disabled and task is locked.
  *
- * @param task Task.
+ * @param task   Task.
  * @param ioaddr Startign I/O space address.
- * @param size Size of the enabled I/O range.
+ * @param size   Size of the enabled I/O range.
  *
  * @return 0 on success or an error code from errno.h.
+ *
  */
 int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
 {
-	size_t bits;
-	
-	bits = ioaddr + size;
+	size_t bits = ioaddr + size;
 	if (bits > IO_PORTS)
 		return ENOENT;
 	
 	if (task->arch.iomap.bits < bits) {
-		bitmap_t oldiomap;
-		uint8_t *newmap;
-		
 		/*
 		 * The I/O permission bitmap is too small and needs to be grown.
 		 */
 		
-		newmap = (uint8_t *) malloc(BITS2BYTES(bits), FRAME_ATOMIC);
+		uint8_t *newmap = (uint8_t *) malloc(BITS2BYTES(bits), FRAME_ATOMIC);
 		if (!newmap)
 			return ENOMEM;
 		
+		bitmap_t oldiomap;
 		bitmap_initialize(&oldiomap, task->arch.iomap.map,
 		    task->arch.iomap.bits);
@@ -115,23 +112,20 @@
  *
  * Interrupts must be disabled prior this call.
+ *
  */
 void io_perm_bitmap_install(void)
 {
-	size_t bits;
-	ptr_16_64_t cpugdtr;
-	descriptor_t *gdt_p;
-	tss_descriptor_t *tss_desc;
-	size_t ver;
-	
 	/* First, copy the I/O Permission Bitmap. */
-	spinlock_lock(&TASK->lock);
-	ver = TASK->arch.iomapver;
-	if ((bits = TASK->arch.iomap.bits)) {
+	irq_spinlock_lock(&TASK->lock, false);
+	size_t ver = TASK->arch.iomapver;
+	size_t bits = TASK->arch.iomap.bits;
+	if (bits) {
+		ASSERT(TASK->arch.iomap.map);
+		
 		bitmap_t iomap;
-	
-		ASSERT(TASK->arch.iomap.map);
 		bitmap_initialize(&iomap, CPU->arch.tss->iomap,
 		    TSS_IOMAP_SIZE * 8);
 		bitmap_copy(&iomap, &TASK->arch.iomap, TASK->arch.iomap.bits);
+		
 		/*
 		 * It is safe to set the trailing eight bits because of the
@@ -140,5 +134,5 @@
 		bitmap_set_range(&iomap, ALIGN_UP(TASK->arch.iomap.bits, 8), 8);
 	}
-	spinlock_unlock(&TASK->lock);
+	irq_spinlock_unlock(&TASK->lock, false);
 	
 	/*
@@ -146,6 +140,8 @@
 	 * Take the extra ending byte will all bits set into account. 
 	 */
+	ptr_16_64_t cpugdtr;
 	gdtr_store(&cpugdtr);
-	gdt_p = (descriptor_t *) cpugdtr.base;
+	
+	descriptor_t *gdt_p = (descriptor_t *) cpugdtr.base;
 	gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + BITS2BYTES(bits));
 	gdtr_load(&cpugdtr);
@@ -155,5 +151,5 @@
 	 * type must be changed to describe inactive TSS.
 	 */
-	tss_desc = (tss_descriptor_t *) &gdt_p[TSS_DES];
+	tss_descriptor_t *tss_desc = (tss_descriptor_t *) &gdt_p[TSS_DES];
 	tss_desc->type = AR_TSS;
 	tr_load(gdtselector(TSS_DES));
Index: kernel/arch/amd64/src/debugger.c
===================================================================
--- kernel/arch/amd64/src/debugger.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/amd64/src/debugger.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -46,16 +46,27 @@
 #include <symtab.h>
 
+#ifdef __64_BITS__
+	#define getip(x)  ((x)->rip)
+#endif
+
+#ifdef __32_BITS__
+	#define getip(x)  ((x)->eip)
+#endif
+
 typedef struct  {
-	uintptr_t address;      /**< Breakpoint address */
-	int flags;              /**< Flags regarding breakpoint */
-	int counter;            /**< How many times the exception occured */
+	uintptr_t address;   /**< Breakpoint address */
+	unsigned int flags;  /**< Flags regarding breakpoint */
+	size_t counter;      /**< How many times the exception occurred */
 } bpinfo_t;
 
 static bpinfo_t breakpoints[BKPOINTS_MAX];
-SPINLOCK_INITIALIZE(bkpoint_lock);
+IRQ_SPINLOCK_STATIC_INITIALIZE(bkpoint_lock);
 
 #ifdef CONFIG_KCONSOLE
 
-static int cmd_print_breakpoints(cmd_arg_t *argv);
+static int cmd_print_breakpoints(cmd_arg_t *);
+static int cmd_del_breakpoint(cmd_arg_t *);
+static int cmd_add_breakpoint(cmd_arg_t *);
+
 static cmd_info_t bkpts_info = {
 	.name = "bkpts",
@@ -65,11 +76,11 @@
 };
 
-static int cmd_del_breakpoint(cmd_arg_t *argv);
 static cmd_arg_t del_argv = {
 	.type = ARG_TYPE_INT
 };
+
 static cmd_info_t delbkpt_info = {
 	.name = "delbkpt",
-	.description = "delbkpt <number> - Delete breakpoint.",
+	.description = "Delete breakpoint.",
 	.func = cmd_del_breakpoint,
 	.argc = 1,
@@ -77,11 +88,11 @@
 };
 
-static int cmd_add_breakpoint(cmd_arg_t *argv);
 static cmd_arg_t add_argv = {
 	.type = ARG_TYPE_INT
 };
+
 static cmd_info_t addbkpt_info = {
 	.name = "addbkpt",
-	.description = "addbkpt <&symbol> - new breakpoint.",
+	.description = "Add breakpoint.",
 	.func = cmd_add_breakpoint,
 	.argc = 1,
@@ -92,7 +103,8 @@
 	.type = ARG_TYPE_INT
 };
+
 static cmd_info_t addwatchp_info = {
 	.name = "addwatchp",
-	.description = "addbwatchp <&symbol> - new write watchpoint.",
+	.description = "Add write watchpoint.",
 	.func = cmd_add_breakpoint,
 	.argc = 1,
@@ -102,16 +114,20 @@
 #endif /* CONFIG_KCONSOLE */
 
-/* Setup DR register according to table */
+/** Setup DR register according to table
+ *
+ */
 static void setup_dr(int curidx)
 {
-	unative_t dr7;
+	ASSERT(curidx >= 0);
+	
 	bpinfo_t *cur = &breakpoints[curidx];
-	int flags = breakpoints[curidx].flags;
-
+	unsigned int flags = breakpoints[curidx].flags;
+	
 	/* Disable breakpoint in DR7 */
-	dr7 = read_dr7();
-	dr7 &= ~(0x2 << (curidx*2));
-
-	if (cur->address) { /* Setup DR register */
+	unative_t dr7 = read_dr7();
+	dr7 &= ~(0x2 << (curidx * 2));
+	
+	/* Setup DR register */
+	if (cur->address) {
 		/* Set breakpoint to debug registers */
 		switch (curidx) {
@@ -129,15 +145,14 @@
 			break;
 		}
+		
 		/* Set type to requested breakpoint & length*/
-		dr7 &= ~ (0x3 << (16 + 4*curidx));
-		dr7 &= ~ (0x3 << (18 + 4*curidx));
-		if ((flags & BKPOINT_INSTR)) {
-			;
-		} else {
+		dr7 &= ~(0x3 << (16 + 4 * curidx));
+		dr7 &= ~(0x3 << (18 + 4 * curidx));
 		
+		if (!(flags & BKPOINT_INSTR)) {
 #ifdef __32_BITS__
 			dr7 |= ((unative_t) 0x3) << (18 + 4 * curidx);
 #endif
-
+			
 #ifdef __64_BITS__
 			dr7 |= ((unative_t) 0x2) << (18 + 4 * curidx);
@@ -149,76 +164,70 @@
 				dr7 |= ((unative_t) 0x3) << (16 + 4 * curidx);
 		}
-
+		
 		/* Enable global breakpoint */
 		dr7 |= 0x2 << (curidx * 2);
-
+		
 		write_dr7(dr7);
-		
-	} 
-}
-	
+	}
+}
+
 /** Enable hardware breakpoint
  *
  * @param where Address of HW breakpoint
  * @param flags Type of breakpoint (EXECUTE, WRITE)
+ *
  * @return Debug slot on success, -1 - no available HW breakpoint
- */
-int breakpoint_add(const void *where, const int flags, int curidx)
-{
-	ipl_t ipl;
-	int i;
-	bpinfo_t *cur;
-
+ *
+ */
+int breakpoint_add(const void *where, const unsigned int flags, int curidx)
+{
 	ASSERT(flags & (BKPOINT_INSTR | BKPOINT_WRITE | BKPOINT_READ_WRITE));
-
-	ipl = interrupts_disable();
-	spinlock_lock(&bkpoint_lock);
+	
+	irq_spinlock_lock(&bkpoint_lock, true);
 	
 	if (curidx == -1) {
 		/* Find free space in slots */
-		for (i = 0; i < BKPOINTS_MAX; i++)
+		unsigned int i;
+		for (i = 0; i < BKPOINTS_MAX; i++) {
 			if (!breakpoints[i].address) {
 				curidx = i;
 				break;
 			}
+		}
+		
 		if (curidx == -1) {
 			/* Too many breakpoints */
-			spinlock_unlock(&bkpoint_lock);
-			interrupts_restore(ipl);
+			irq_spinlock_unlock(&bkpoint_lock, true);
 			return -1;
 		}
 	}
-	cur = &breakpoints[curidx];
-
+	
+	bpinfo_t *cur = &breakpoints[curidx];
+	
 	cur->address = (uintptr_t) where;
 	cur->flags = flags;
 	cur->counter = 0;
-
+	
 	setup_dr(curidx);
-
-	spinlock_unlock(&bkpoint_lock);
-	interrupts_restore(ipl);
-
+	
+	irq_spinlock_unlock(&bkpoint_lock, true);
+	
 	/* Send IPI */
 //	ipi_broadcast(VECTOR_DEBUG_IPI);
-
+	
 	return curidx;
 }
 
-#ifdef __64_BITS__
-	#define getip(x)  ((x)->rip)
-#else
-	#define getip(x)  ((x)->eip)
-#endif
-
 static void handle_exception(int slot, istate_t *istate)
 {
+	ASSERT(slot >= 0);
 	ASSERT(breakpoints[slot].address);
-
+	
 	/* Handle zero checker */
-	if (! (breakpoints[slot].flags & BKPOINT_INSTR)) {
+	if (!(breakpoints[slot].flags & BKPOINT_INSTR)) {
 		if ((breakpoints[slot].flags & BKPOINT_CHECK_ZERO)) {
 			if (*((unative_t *) breakpoints[slot].address) != 0)
 				return;
+			
 			printf("*** Found ZERO on address %lx (slot %d) ***\n",
 			    breakpoints[slot].address, slot);
@@ -228,8 +237,8 @@
 		}
 	}
-
+	
 	printf("Reached breakpoint %d:%lx (%s)\n", slot, getip(istate),
 	    symtab_fmt_name_lookup(getip(istate)));
-
+	
 #ifdef CONFIG_KCONSOLE
 	atomic_set(&haltstate, 1);
@@ -241,42 +250,37 @@
 void breakpoint_del(int slot)
 {
-	bpinfo_t *cur;
-	ipl_t ipl;
-
-	ipl = interrupts_disable();
-	spinlock_lock(&bkpoint_lock);
-
-	cur = &breakpoints[slot];
+	ASSERT(slot >= 0);
+	
+	irq_spinlock_lock(&bkpoint_lock, true);
+	
+	bpinfo_t *cur = &breakpoints[slot];
 	if (!cur->address) {
-		spinlock_unlock(&bkpoint_lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&bkpoint_lock, true);
 		return;
 	}
-
+	
 	cur->address = NULL;
-
+	
 	setup_dr(slot);
-
-	spinlock_unlock(&bkpoint_lock);
-	interrupts_restore(ipl);
+	
+	irq_spinlock_unlock(&bkpoint_lock, true);
 //	ipi_broadcast(VECTOR_DEBUG_IPI);
 }
 
-
-
 static void debug_exception(int n __attribute__((unused)), istate_t *istate)
 {
-	unative_t dr6;
-	int i;
-	
 	/* Set RF to restart the instruction  */
 #ifdef __64_BITS__
 	istate->rflags |= RFLAGS_RF;
-#else
+#endif
+	
+#ifdef __32_BITS__
 	istate->eflags |= EFLAGS_RF;
 #endif
-
-	dr6 = read_dr6();
-	for (i=0; i < BKPOINTS_MAX; i++) {
+	
+	unative_t dr6 = read_dr6();
+	
+	unsigned int i;
+	for (i = 0; i < BKPOINTS_MAX; i++) {
 		if (dr6 & (1 << i)) {
 			dr6 &= ~ (1 << i);
@@ -289,38 +293,39 @@
 
 #ifdef CONFIG_SMP
-static void
-debug_ipi(int n __attribute__((unused)),
+static void debug_ipi(int n __attribute__((unused)),
     istate_t *istate __attribute__((unused)))
 {
-	int i;
-
-	spinlock_lock(&bkpoint_lock);
+	irq_spinlock_lock(&bkpoint_lock, false);
+	
+	unsigned int i;
 	for (i = 0; i < BKPOINTS_MAX; i++)
 		setup_dr(i);
-	spinlock_unlock(&bkpoint_lock);
-}
-#endif
-
-/** Initialize debugger */
+	
+	irq_spinlock_unlock(&bkpoint_lock, false);
+}
+#endif /* CONFIG_SMP */
+
+/** Initialize debugger
+ *
+ */
 void debugger_init()
 {
-	int i;
-
+	unsigned int i;
 	for (i = 0; i < BKPOINTS_MAX; i++)
 		breakpoints[i].address = NULL;
-
+	
 #ifdef CONFIG_KCONSOLE
 	cmd_initialize(&bkpts_info);
 	if (!cmd_register(&bkpts_info))
 		printf("Cannot register command %s\n", bkpts_info.name);
-
+	
 	cmd_initialize(&delbkpt_info);
 	if (!cmd_register(&delbkpt_info))
 		printf("Cannot register command %s\n", delbkpt_info.name);
-
+	
 	cmd_initialize(&addbkpt_info);
 	if (!cmd_register(&addbkpt_info))
 		printf("Cannot register command %s\n", addbkpt_info.name);
-
+	
 	cmd_initialize(&addwatchp_info);
 	if (!cmd_register(&addwatchp_info))
@@ -331,18 +336,18 @@
 #ifdef CONFIG_SMP
 	exc_register(VECTOR_DEBUG_IPI, "debugger_smp", debug_ipi);
-#endif
+#endif /* CONFIG_SMP */
 }
 
 #ifdef CONFIG_KCONSOLE
-/** Print table of active breakpoints */
+/** Print table of active breakpoints
+ *
+ */
 int cmd_print_breakpoints(cmd_arg_t *argv __attribute__((unused)))
 {
-	unsigned int i;
-
 #ifdef __32_BITS__
 	printf("#  Count Address    In symbol\n");
 	printf("-- ----- ---------- ---------\n");
 #endif
-
+	
 #ifdef __64_BITS__
 	printf("#  Count Address            In symbol\n");
@@ -350,26 +355,30 @@
 #endif
 	
-	for (i = 0; i < BKPOINTS_MAX; i++)
+	unsigned int i;
+	for (i = 0; i < BKPOINTS_MAX; i++) {
 		if (breakpoints[i].address) {
 			const char *symbol = symtab_fmt_name_lookup(
 			    breakpoints[i].address);
-
+			
 #ifdef __32_BITS__
-			printf("%-2u %-5d %#10zx %s\n", i,
+			printf("%-2u %-5" PRIs " %p %s\n", i,
 			    breakpoints[i].counter, breakpoints[i].address,
 			    symbol);
 #endif
-
+			
 #ifdef __64_BITS__
-			printf("%-2u %-5d %#18zx %s\n", i,
+			printf("%-2u %-5" PRIs " %p %s\n", i,
 			    breakpoints[i].counter, breakpoints[i].address,
 			    symbol);
 #endif
-
-		}
+		}
+	}
+	
 	return 1;
 }
 
-/** Remove breakpoint from table */
+/** Remove breakpoint from table
+ *
+ */
 int cmd_del_breakpoint(cmd_arg_t *argv)
 {
@@ -379,21 +388,23 @@
 		return 0;
 	}
+	
 	breakpoint_del(argv->intval);
 	return 1;
 }
 
-/** Add new breakpoint to table */
+/** Add new breakpoint to table
+ *
+ */
 static int cmd_add_breakpoint(cmd_arg_t *argv)
 {
-	int flags;
-	int id;
-
-	if (argv == &add_argv) {
+	unsigned int flags;
+	if (argv == &add_argv)
 		flags = BKPOINT_INSTR;
-	} else { /* addwatchp */
+	else
 		flags = BKPOINT_WRITE;
-	}
+	
 	printf("Adding breakpoint on address: %p\n", argv->intval);
-	id = breakpoint_add((void *)argv->intval, flags, -1);
+	
+	int id = breakpoint_add((void *)argv->intval, flags, -1);
 	if (id < 0)
 		printf("Add breakpoint failed.\n");
Index: kernel/arch/amd64/src/interrupt.c
===================================================================
--- kernel/arch/amd64/src/interrupt.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/amd64/src/interrupt.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -106,14 +106,14 @@
 }
 
-/** General Protection Fault. */
+/** General Protection Fault.
+ *
+ */
 static void gp_fault(int n, istate_t *istate)
 {
 	if (TASK) {
-		size_t ver;
-
-		spinlock_lock(&TASK->lock);
-		ver = TASK->arch.iomapver;
-		spinlock_unlock(&TASK->lock);
-
+		irq_spinlock_lock(&TASK->lock, false);
+		size_t ver = TASK->arch.iomapver;
+		irq_spinlock_unlock(&TASK->lock, false);
+		
 		if (CPU->arch.iomapver_copy != ver) {
 			/*
@@ -129,5 +129,5 @@
 		fault_if_from_uspace(istate, "General protection fault.");
 	}
-
+	
 	decode_istate(n, istate);
 	panic("General protection fault.");
@@ -159,5 +159,7 @@
 #endif
 
-/** Handler of IRQ exceptions */
+/** Handler of IRQ exceptions.
+ *
+ */
 static void irq_interrupt(int n, istate_t *istate)
 {
@@ -174,5 +176,5 @@
 		 * The IRQ handler was found.
 		 */
-		 
+		
 		if (irq->preack) {
 			/* Send EOI before processing the interrupt */
@@ -181,5 +183,5 @@
 		}
 		irq->handler(irq);
-		spinlock_unlock(&irq->lock);
+		irq_spinlock_unlock(&irq->lock, false);
 	} else {
 		/*
Index: kernel/arch/ia32/include/smp/apic.h
===================================================================
--- kernel/arch/ia32/include/smp/apic.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/ia32/include/smp/apic.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -27,5 +27,5 @@
  */
 
-/** @addtogroup ia32	
+/** @addtogroup ia32
  * @{
  */
@@ -39,87 +39,88 @@
 #include <cpu.h>
 
-#define FIXED		(0<<0)
-#define LOPRI		(1<<0)
-
-#define APIC_ID_COUNT	16
+#define FIXED  (0 << 0)
+#define LOPRI  (1 << 0)
+
+#define APIC_ID_COUNT  16
 
 /* local APIC macros */
-#define IPI_INIT 	0
-#define IPI_STARTUP	0
+#define IPI_INIT     0
+#define IPI_STARTUP  0
 
 /** Delivery modes. */
-#define DELMOD_FIXED	0x0
-#define DELMOD_LOWPRI	0x1
-#define DELMOD_SMI	0x2
+#define DELMOD_FIXED    0x0
+#define DELMOD_LOWPRI   0x1
+#define DELMOD_SMI      0x2
 /* 0x3 reserved */
-#define DELMOD_NMI	0x4
-#define DELMOD_INIT	0x5
-#define DELMOD_STARTUP	0x6
-#define DELMOD_EXTINT	0x7
+#define DELMOD_NMI      0x4
+#define DELMOD_INIT     0x5
+#define DELMOD_STARTUP  0x6
+#define DELMOD_EXTINT   0x7
 
 /** Destination modes. */
-#define DESTMOD_PHYS	0x0
-#define DESTMOD_LOGIC	0x1
+#define DESTMOD_PHYS   0x0
+#define DESTMOD_LOGIC  0x1
 
 /** Trigger Modes. */
-#define TRIGMOD_EDGE	0x0
-#define TRIGMOD_LEVEL	0x1
+#define TRIGMOD_EDGE   0x0
+#define TRIGMOD_LEVEL  0x1
 
 /** Levels. */
-#define LEVEL_DEASSERT	0x0
-#define LEVEL_ASSERT	0x1
+#define LEVEL_DEASSERT  0x0
+#define LEVEL_ASSERT    0x1
 
 /** Destination Shorthands. */
-#define SHORTHAND_NONE		0x0
-#define SHORTHAND_SELF		0x1
-#define SHORTHAND_ALL_INCL	0x2
-#define SHORTHAND_ALL_EXCL	0x3
+#define SHORTHAND_NONE      0x0
+#define SHORTHAND_SELF      0x1
+#define SHORTHAND_ALL_INCL  0x2
+#define SHORTHAND_ALL_EXCL  0x3
 
 /** Interrupt Input Pin Polarities. */
-#define POLARITY_HIGH	0x0
-#define POLARITY_LOW	0x1
+#define POLARITY_HIGH  0x0
+#define POLARITY_LOW   0x1
 
 /** Divide Values. (Bit 2 is always 0) */
-#define DIVIDE_2	0x0
-#define DIVIDE_4	0x1
-#define DIVIDE_8	0x2
-#define DIVIDE_16	0x3
-#define DIVIDE_32	0x8
-#define DIVIDE_64	0x9
-#define DIVIDE_128	0xa
-#define DIVIDE_1	0xb
+#define DIVIDE_2    0x0
+#define DIVIDE_4    0x1
+#define DIVIDE_8    0x2
+#define DIVIDE_16   0x3
+#define DIVIDE_32   0x8
+#define DIVIDE_64   0x9
+#define DIVIDE_128  0xa
+#define DIVIDE_1    0xb
 
 /** Timer Modes. */
-#define TIMER_ONESHOT	0x0
-#define TIMER_PERIODIC	0x1
+#define TIMER_ONESHOT   0x0
+#define TIMER_PERIODIC  0x1
 
 /** Delivery status. */
-#define DELIVS_IDLE	0x0
-#define DELIVS_PENDING	0x1
+#define DELIVS_IDLE     0x0
+#define DELIVS_PENDING  0x1
 
 /** Destination masks. */
-#define DEST_ALL	0xff
+#define DEST_ALL  0xff
 
 /** Dest format models. */
-#define MODEL_FLAT	0xf
-#define MODEL_CLUSTER	0x0
+#define MODEL_FLAT     0xf
+#define MODEL_CLUSTER  0x0
 
 /** Interrupt Command Register. */
-#define ICRlo		(0x300 / sizeof(uint32_t))
-#define ICRhi		(0x310 / sizeof(uint32_t))
+#define ICRlo  (0x300 / sizeof(uint32_t))
+#define ICRhi  (0x310 / sizeof(uint32_t))
+
 typedef struct {
 	union {
 		uint32_t lo;
 		struct {
-			uint8_t vector;			/**< Interrupt Vector. */
-			unsigned delmod : 3;		/**< Delivery Mode. */
-			unsigned destmod : 1;		/**< Destination Mode. */
-			unsigned delivs : 1;		/**< Delivery status (RO). */
-			unsigned : 1;			/**< Reserved. */
-			unsigned level : 1;		/**< Level. */
-			unsigned trigger_mode : 1;	/**< Trigger Mode. */
-			unsigned : 2;			/**< Reserved. */
-			unsigned shorthand : 2;		/**< Destination Shorthand. */
-			unsigned : 12;			/**< Reserved. */
+			uint8_t vector;                 /**< Interrupt Vector. */
+			unsigned int delmod : 3;        /**< Delivery Mode. */
+			unsigned int destmod : 1;       /**< Destination Mode. */
+			unsigned int delivs : 1;        /**< Delivery status (RO). */
+			unsigned int : 1;               /**< Reserved. */
+			unsigned int level : 1;         /**< Level. */
+			unsigned int trigger_mode : 1;  /**< Trigger Mode. */
+			unsigned int : 2;               /**< Reserved. */
+			unsigned int shorthand : 2;     /**< Destination Shorthand. */
+			unsigned int : 12;              /**< Reserved. */
 		} __attribute__ ((packed));
 	};
@@ -127,6 +128,6 @@
 		uint32_t hi;
 		struct {
-			unsigned : 24;			/**< Reserved. */
-			uint8_t dest;			/**< Destination field. */
+			unsigned int : 24;  /**< Reserved. */
+			uint8_t dest;       /**< Destination field. */
 		} __attribute__ ((packed));
 	};
@@ -134,154 +135,165 @@
 
 /* End Of Interrupt. */
-#define EOI		(0x0b0 / sizeof(uint32_t))
+#define EOI  (0x0b0 / sizeof(uint32_t))
 
 /** Error Status Register. */
-#define ESR		(0x280 / sizeof(uint32_t))
+#define ESR  (0x280 / sizeof(uint32_t))
+
 typedef union {
 	uint32_t value;
 	uint8_t err_bitmap;
 	struct {
-		unsigned send_checksum_error : 1;
-		unsigned receive_checksum_error : 1;
-		unsigned send_accept_error : 1;
-		unsigned receive_accept_error : 1;
-		unsigned : 1;
-		unsigned send_illegal_vector : 1;
-		unsigned received_illegal_vector : 1;
-		unsigned illegal_register_address : 1;
-		unsigned : 24;
+		unsigned int send_checksum_error : 1;
+		unsigned int receive_checksum_error : 1;
+		unsigned int send_accept_error : 1;
+		unsigned int receive_accept_error : 1;
+		unsigned int : 1;
+		unsigned int send_illegal_vector : 1;
+		unsigned int received_illegal_vector : 1;
+		unsigned int illegal_register_address : 1;
+		unsigned int : 24;
 	} __attribute__ ((packed));
 } esr_t;
 
 /* Task Priority Register */
-#define TPR		(0x080 / sizeof(uint32_t))
-typedef union {
-	uint32_t value;
-	struct {
-		unsigned pri_sc : 4;		/**< Task Priority Sub-Class. */
-		unsigned pri : 4;		/**< Task Priority. */
+#define TPR  (0x080 / sizeof(uint32_t))
+
+typedef union {
+	uint32_t value;
+	struct {
+		unsigned int pri_sc : 4;  /**< Task Priority Sub-Class. */
+		unsigned int pri : 4;     /**< Task Priority. */
 	} __attribute__ ((packed));
 } tpr_t;
 
 /** Spurious-Interrupt Vector Register. */
-#define SVR		(0x0f0 / sizeof(uint32_t))
-typedef union {
-	uint32_t value;
-	struct {
-		uint8_t vector;			/**< Spurious Vector. */
-		unsigned lapic_enabled : 1;	/**< APIC Software Enable/Disable. */
-		unsigned focus_checking : 1;	/**< Focus Processor Checking. */
-		unsigned : 22;			/**< Reserved. */
+#define SVR  (0x0f0 / sizeof(uint32_t))
+
+typedef union {
+	uint32_t value;
+	struct {
+		uint8_t vector;                   /**< Spurious Vector. */
+		unsigned int lapic_enabled : 1;   /**< APIC Software Enable/Disable. */
+		unsigned int focus_checking : 1;  /**< Focus Processor Checking. */
+		unsigned int : 22;                /**< Reserved. */
 	} __attribute__ ((packed));
 } svr_t;
 
 /** Time Divide Configuration Register. */
-#define TDCR		(0x3e0 / sizeof(uint32_t))
-typedef union {
-	uint32_t value;
-	struct {
-		unsigned div_value : 4;		/**< Divide Value, bit 2 is always 0. */
-		unsigned : 28;			/**< Reserved. */
+#define TDCR  (0x3e0 / sizeof(uint32_t))
+
+typedef union {
+	uint32_t value;
+	struct {
+		unsigned int div_value : 4;  /**< Divide Value, bit 2 is always 0. */
+		unsigned int : 28;           /**< Reserved. */
 	} __attribute__ ((packed));
 } tdcr_t;
 
 /* Initial Count Register for Timer */
-#define ICRT		(0x380 / sizeof(uint32_t))
+#define ICRT  (0x380 / sizeof(uint32_t))
 
 /* Current Count Register for Timer */
-#define CCRT		(0x390 / sizeof(uint32_t))
+#define CCRT  (0x390 / sizeof(uint32_t))
 
 /** LVT Timer register. */
-#define LVT_Tm		(0x320 / sizeof(uint32_t))
-typedef union {
-	uint32_t value;
-	struct {
-		uint8_t vector;		/**< Local Timer Interrupt vector. */
-		unsigned : 4;		/**< Reserved. */
-		unsigned delivs : 1;	/**< Delivery status (RO). */
-		unsigned : 3;		/**< Reserved. */
-		unsigned masked : 1;	/**< Interrupt Mask. */
-		unsigned mode : 1;	/**< Timer Mode. */
-		unsigned : 14;		/**< Reserved. */
+#define LVT_Tm  (0x320 / sizeof(uint32_t))
+
+typedef union {
+	uint32_t value;
+	struct {
+		uint8_t vector;           /**< Local Timer Interrupt vector. */
+		unsigned int : 4;         /**< Reserved. */
+		unsigned int delivs : 1;  /**< Delivery status (RO). */
+		unsigned int : 3;         /**< Reserved. */
+		unsigned int masked : 1;  /**< Interrupt Mask. */
+		unsigned int mode : 1;    /**< Timer Mode. */
+		unsigned int : 14;        /**< Reserved. */
 	} __attribute__ ((packed));
 } lvt_tm_t;
 
 /** LVT LINT registers. */
-#define LVT_LINT0	(0x350 / sizeof(uint32_t))
-#define LVT_LINT1	(0x360 / sizeof(uint32_t))
-typedef union {
-	uint32_t value;
-	struct {
-		uint8_t vector;			/**< LINT Interrupt vector. */
-		unsigned delmod : 3;		/**< Delivery Mode. */
-		unsigned : 1;			/**< Reserved. */
-		unsigned delivs : 1;		/**< Delivery status (RO). */
-		unsigned intpol : 1;		/**< Interrupt Input Pin Polarity. */
-		unsigned irr : 1;		/**< Remote IRR (RO). */
-		unsigned trigger_mode : 1;	/**< Trigger Mode. */
-		unsigned masked : 1;		/**< Interrupt Mask. */
-		unsigned : 15;			/**< Reserved. */
+#define LVT_LINT0  (0x350 / sizeof(uint32_t))
+#define LVT_LINT1  (0x360 / sizeof(uint32_t))
+
+typedef union {
+	uint32_t value;
+	struct {
+		uint8_t vector;                 /**< LINT Interrupt vector. */
+		unsigned int delmod : 3;        /**< Delivery Mode. */
+		unsigned int : 1;               /**< Reserved. */
+		unsigned int delivs : 1;        /**< Delivery status (RO). */
+		unsigned int intpol : 1;        /**< Interrupt Input Pin Polarity. */
+		unsigned int irr : 1;           /**< Remote IRR (RO). */
+		unsigned int trigger_mode : 1;  /**< Trigger Mode. */
+		unsigned int masked : 1;        /**< Interrupt Mask. */
+		unsigned int : 15;              /**< Reserved. */
 	} __attribute__ ((packed));
 } lvt_lint_t;
 
 /** LVT Error register. */
-#define LVT_Err		(0x370 / sizeof(uint32_t))
-typedef union {
-	uint32_t value;
-	struct {
-		uint8_t vector;		/**< Local Timer Interrupt vector. */
-		unsigned : 4;		/**< Reserved. */
-		unsigned delivs : 1;	/**< Delivery status (RO). */
-		unsigned : 3;		/**< Reserved. */
-		unsigned masked : 1;	/**< Interrupt Mask. */
-		unsigned : 15;		/**< Reserved. */
+#define LVT_Err  (0x370 / sizeof(uint32_t))
+
+typedef union {
+	uint32_t value;
+	struct {
+		uint8_t vector;           /**< Local Timer Interrupt vector. */
+		unsigned int : 4;         /**< Reserved. */
+		unsigned int delivs : 1;  /**< Delivery status (RO). */
+		unsigned int : 3;         /**< Reserved. */
+		unsigned int masked : 1;  /**< Interrupt Mask. */
+		unsigned int : 15;        /**< Reserved. */
 	} __attribute__ ((packed));
 } lvt_error_t;
 
 /** Local APIC ID Register. */
-#define L_APIC_ID	(0x020 / sizeof(uint32_t))
-typedef union {
-	uint32_t value;
-	struct {
-		unsigned : 24;		/**< Reserved. */
-		uint8_t apic_id;		/**< Local APIC ID. */
+#define L_APIC_ID  (0x020 / sizeof(uint32_t))
+
+typedef union {
+	uint32_t value;
+	struct {
+		unsigned int : 24;  /**< Reserved. */
+		uint8_t apic_id;    /**< Local APIC ID. */
 	} __attribute__ ((packed));
 } l_apic_id_t;
 
 /** Local APIC Version Register */
-#define LAVR		(0x030 / sizeof(uint32_t))
-#define LAVR_Mask	0xff
-#define is_local_apic(x)	(((x) & LAVR_Mask & 0xf0) == 0x1)
-#define is_82489DX_apic(x)	((((x) & LAVR_Mask & 0xf0) == 0x0))
-#define is_local_xapic(x)	(((x) & LAVR_Mask) == 0x14)
+#define LAVR       (0x030 / sizeof(uint32_t))
+#define LAVR_Mask  0xff
+
+#define is_local_apic(x)    (((x) & LAVR_Mask & 0xf0) == 0x1)
+#define is_82489DX_apic(x)  ((((x) & LAVR_Mask & 0xf0) == 0x0))
+#define is_local_xapic(x)   (((x) & LAVR_Mask) == 0x14)
 
 /** Logical Destination Register. */
-#define  LDR		(0x0d0 / sizeof(uint32_t))
-typedef union {
-	uint32_t value;
-	struct {
-		unsigned : 24;		/**< Reserved. */
-		uint8_t id;		/**< Logical APIC ID. */
+#define  LDR  (0x0d0 / sizeof(uint32_t))
+
+typedef union {
+	uint32_t value;
+	struct {
+		unsigned int : 24;  /**< Reserved. */
+		uint8_t id;         /**< Logical APIC ID. */
 	} __attribute__ ((packed));
 } ldr_t;
 
 /** Destination Format Register. */
-#define DFR		(0x0e0 / sizeof(uint32_t))
-typedef union {
-	uint32_t value;
-	struct {
-		unsigned : 28;		/**< Reserved, all ones. */
-		unsigned model : 4;	/**< Model. */
+#define DFR  (0x0e0 / sizeof(uint32_t))
+
+typedef union {
+	uint32_t value;
+	struct {
+		unsigned int : 28;       /**< Reserved, all ones. */
+		unsigned int model : 4;  /**< Model. */
 	} __attribute__ ((packed));
 } dfr_t;
 
 /* IO APIC */
-#define IOREGSEL	(0x00 / sizeof(uint32_t))
-#define IOWIN		(0x10 / sizeof(uint32_t))
-
-#define IOAPICID	0x00
-#define IOAPICVER	0x01
-#define IOAPICARB	0x02
-#define IOREDTBL	0x10
+#define IOREGSEL  (0x00 / sizeof(uint32_t))
+#define IOWIN     (0x10 / sizeof(uint32_t))
+
+#define IOAPICID   0x00
+#define IOAPICVER  0x01
+#define IOAPICARB  0x02
+#define IOREDTBL   0x10
 
 /** I/O Register Select Register. */
@@ -289,6 +301,6 @@
 	uint32_t value;
 	struct {
-		uint8_t reg_addr;		/**< APIC Register Address. */
-		unsigned : 24;		/**< Reserved. */
+		uint8_t reg_addr;   /**< APIC Register Address. */
+		unsigned int : 24;  /**< Reserved. */
 	} __attribute__ ((packed));
 } io_regsel_t;
@@ -299,13 +311,13 @@
 		uint32_t lo;
 		struct {
-			uint8_t intvec;			/**< Interrupt Vector. */
-			unsigned delmod : 3;		/**< Delivery Mode. */
-			unsigned destmod : 1; 		/**< Destination mode. */
-			unsigned delivs : 1;		/**< Delivery status (RO). */
-			unsigned intpol : 1;		/**< Interrupt Input Pin Polarity. */
-			unsigned irr : 1;		/**< Remote IRR (RO). */
-			unsigned trigger_mode : 1;	/**< Trigger Mode. */
-			unsigned masked : 1;		/**< Interrupt Mask. */
-			unsigned : 15;			/**< Reserved. */
+			uint8_t intvec;                 /**< Interrupt Vector. */
+			unsigned int delmod : 3;        /**< Delivery Mode. */
+			unsigned int destmod : 1;       /**< Destination mode. */
+			unsigned int delivs : 1;        /**< Delivery status (RO). */
+			unsigned int intpol : 1;        /**< Interrupt Input Pin Polarity. */
+			unsigned int irr : 1;           /**< Remote IRR (RO). */
+			unsigned int trigger_mode : 1;  /**< Trigger Mode. */
+			unsigned int masked : 1;        /**< Interrupt Mask. */
+			unsigned int : 15;              /**< Reserved. */
 		} __attribute__ ((packed));
 	};
@@ -313,6 +325,6 @@
 		uint32_t hi;
 		struct {
-			unsigned : 24;			/**< Reserved. */
-			uint8_t dest : 8;			/**< Destination Field. */
+			unsigned int : 24;  /**< Reserved. */
+			uint8_t dest : 8;   /**< Destination Field. */
 		} __attribute__ ((packed));
 	};
@@ -325,7 +337,7 @@
 	uint32_t value;
 	struct {
-		unsigned : 24;		/**< Reserved. */
-		unsigned apic_id : 4;	/**< IO APIC ID. */
-		unsigned : 4;		/**< Reserved. */
+		unsigned int : 24;         /**< Reserved. */
+		unsigned int apic_id : 4;  /**< IO APIC ID. */
+		unsigned int : 4;          /**< Reserved. */
 	} __attribute__ ((packed));
 } io_apic_id_t;
@@ -340,14 +352,14 @@
 extern void l_apic_init(void);
 extern void l_apic_eoi(void);
-extern int l_apic_broadcast_custom_ipi(uint8_t vector);
-extern int l_apic_send_init_ipi(uint8_t apicid);
+extern int l_apic_broadcast_custom_ipi(uint8_t);
+extern int l_apic_send_init_ipi(uint8_t);
 extern void l_apic_debug(void);
 extern uint8_t l_apic_id(void);
 
-extern uint32_t io_apic_read(uint8_t address);
-extern void io_apic_write(uint8_t address , uint32_t x);
-extern void io_apic_change_ioredtbl(uint8_t pin, uint8_t dest, uint8_t v, int flags);
-extern void io_apic_disable_irqs(uint16_t irqmask);
-extern void io_apic_enable_irqs(uint16_t irqmask);
+extern uint32_t io_apic_read(uint8_t);
+extern void io_apic_write(uint8_t, uint32_t);
+extern void io_apic_change_ioredtbl(uint8_t pin, uint8_t dest, uint8_t v, unsigned int);
+extern void io_apic_disable_irqs(uint16_t);
+extern void io_apic_enable_irqs(uint16_t);
 
 #endif
Index: kernel/arch/ia32/src/ddi/ddi.c
===================================================================
--- kernel/arch/ia32/src/ddi/ddi.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/ia32/src/ddi/ddi.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -50,34 +50,31 @@
  * Interrupts are disabled and task is locked.
  *
- * @param task Task.
+ * @param task   Task.
  * @param ioaddr Startign I/O space address.
- * @param size Size of the enabled I/O range.
+ * @param size   Size of the enabled I/O range.
  *
  * @return 0 on success or an error code from errno.h.
+ *
  */
 int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
 {
-	size_t bits;
-
-	bits = ioaddr + size;
+	size_t bits = ioaddr + size;
 	if (bits > IO_PORTS)
 		return ENOENT;
-
+	
 	if (task->arch.iomap.bits < bits) {
-		bitmap_t oldiomap;
-		uint8_t *newmap;
-	
 		/*
 		 * The I/O permission bitmap is too small and needs to be grown.
 		 */
 		
-		newmap = (uint8_t *) malloc(BITS2BYTES(bits), FRAME_ATOMIC);
+		uint8_t *newmap = (uint8_t *) malloc(BITS2BYTES(bits), FRAME_ATOMIC);
 		if (!newmap)
 			return ENOMEM;
 		
+		bitmap_t oldiomap;
 		bitmap_initialize(&oldiomap, task->arch.iomap.map,
 		    task->arch.iomap.bits);
 		bitmap_initialize(&task->arch.iomap, newmap, bits);
-
+		
 		/*
 		 * Mark the new range inaccessible.
@@ -85,9 +82,9 @@
 		bitmap_set_range(&task->arch.iomap, oldiomap.bits,
 		    bits - oldiomap.bits);
-
+		
 		/*
 		 * In case there really existed smaller iomap,
 		 * copy its contents and deallocate it.
-		 */		
+		 */
 		if (oldiomap.bits) {
 			bitmap_copy(&task->arch.iomap, &oldiomap,
@@ -96,15 +93,15 @@
 		}
 	}
-
+	
 	/*
 	 * Enable the range and we are done.
 	 */
 	bitmap_clear_range(&task->arch.iomap, (size_t) ioaddr, (size_t) size);
-
+	
 	/*
 	 * Increment I/O Permission bitmap generation counter.
 	 */
 	task->arch.iomapver++;
-
+	
 	return 0;
 }
@@ -116,23 +113,20 @@
  *
  * Interrupts must be disabled prior this call.
+ *
  */
 void io_perm_bitmap_install(void)
 {
-	size_t bits;
-	ptr_16_32_t cpugdtr;
-	descriptor_t *gdt_p;
-	size_t ver;
-
 	/* First, copy the I/O Permission Bitmap. */
-	spinlock_lock(&TASK->lock);
-	ver = TASK->arch.iomapver;
-	if ((bits = TASK->arch.iomap.bits)) {
+	irq_spinlock_lock(&TASK->lock, false);
+	size_t ver = TASK->arch.iomapver;
+	size_t bits = TASK->arch.iomap.bits;
+	if (bits) {
+		ASSERT(TASK->arch.iomap.map);
+		
 		bitmap_t iomap;
-		task_t *task = TASK;
-	
-		ASSERT(TASK->arch.iomap.map);
 		bitmap_initialize(&iomap, CPU->arch.tss->iomap,
 		    TSS_IOMAP_SIZE * 8);
-		bitmap_copy(&iomap, &task->arch.iomap, task->arch.iomap.bits);
+		bitmap_copy(&iomap, &TASK->arch.iomap, TASK->arch.iomap.bits);
+		
 		/*
 		 * It is safe to set the trailing eight bits because of the
@@ -141,15 +135,17 @@
 		bitmap_set_range(&iomap, ALIGN_UP(TASK->arch.iomap.bits, 8), 8);
 	}
-	spinlock_unlock(&TASK->lock);
-
+	irq_spinlock_unlock(&TASK->lock, false);
+	
 	/*
 	 * Second, adjust TSS segment limit.
 	 * Take the extra ending byte with all bits set into account.
 	 */
+	ptr_16_32_t cpugdtr;
 	gdtr_store(&cpugdtr);
-	gdt_p = (descriptor_t *) cpugdtr.base;
+	
+	descriptor_t *gdt_p = (descriptor_t *) cpugdtr.base;
 	gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + BITS2BYTES(bits));
 	gdtr_load(&cpugdtr);
-
+	
 	/*
 	 * Before we load new TSS limit, the current TSS descriptor
Index: kernel/arch/ia32/src/drivers/i8254.c
===================================================================
--- kernel/arch/ia32/src/drivers/i8254.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/ia32/src/drivers/i8254.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -54,9 +54,12 @@
 #include <ddi/device.h>
 
-#define CLK_PORT1	((ioport8_t *)0x40)
-#define CLK_PORT4	((ioport8_t *)0x43)
+#define CLK_PORT1  ((ioport8_t *) 0x40)
+#define CLK_PORT4  ((ioport8_t *) 0x43)
 
-#define CLK_CONST	1193180
-#define MAGIC_NUMBER	1194
+#define CLK_CONST     1193180
+#define MAGIC_NUMBER  1194
+
+#define LOOPS  150000
+#define SHIFT  11
 
 static irq_t i8254_irq;
@@ -75,7 +78,7 @@
 	 * lock. We just release it, call clock() and then reacquire it again.
 	 */
-	spinlock_unlock(&irq->lock);
+	irq_spinlock_unlock(&irq->lock, false);
 	clock();
-	spinlock_lock(&irq->lock);
+	irq_spinlock_lock(&irq->lock, false);
 }
 
@@ -102,13 +105,6 @@
 }
 
-#define LOOPS 150000
-#define SHIFT 11
 void i8254_calibrate_delay_loop(void)
 {
-	uint64_t clk1, clk2;
-	uint32_t t1, t2, o1, o2;
-	uint8_t not_ok;
-
-
 	/*
 	 * One-shot timer. Count-down from 0xffff at 1193180Hz
@@ -118,5 +114,9 @@
 	pio_write_8(CLK_PORT1, 0xff);
 	pio_write_8(CLK_PORT1, 0xff);
-
+	
+	uint8_t not_ok;
+	uint32_t t1;
+	uint32_t t2;
+	
 	do {
 		/* will read both status and count */
@@ -126,34 +126,34 @@
 		t1 |= pio_read_8(CLK_PORT1) << 8;
 	} while (not_ok);
-
+	
 	asm_delay_loop(LOOPS);
-
+	
 	pio_write_8(CLK_PORT4, 0xd2);
 	t2 = pio_read_8(CLK_PORT1);
 	t2 |= pio_read_8(CLK_PORT1) << 8;
-
+	
 	/*
 	 * We want to determine the overhead of the calibrating mechanism.
 	 */
 	pio_write_8(CLK_PORT4, 0xd2);
-	o1 = pio_read_8(CLK_PORT1);
+	uint32_t o1 = pio_read_8(CLK_PORT1);
 	o1 |= pio_read_8(CLK_PORT1) << 8;
-
+	
 	asm_fake_loop(LOOPS);
-
+	
 	pio_write_8(CLK_PORT4, 0xd2);
-	o2 = pio_read_8(CLK_PORT1);
+	uint32_t o2 = pio_read_8(CLK_PORT1);
 	o2 |= pio_read_8(CLK_PORT1) << 8;
-
+	
 	CPU->delay_loop_const =
 	    ((MAGIC_NUMBER * LOOPS) / 1000) / ((t1 - t2) - (o1 - o2)) +
 	    (((MAGIC_NUMBER * LOOPS) / 1000) % ((t1 - t2) - (o1 - o2)) ? 1 : 0);
-
-	clk1 = get_cycle();
+	
+	uint64_t clk1 = get_cycle();
 	delay(1 << SHIFT);
-	clk2 = get_cycle();
+	uint64_t clk2 = get_cycle();
 	
 	CPU->frequency_mhz = (clk2 - clk1) >> SHIFT;
-
+	
 	return;
 }
Index: kernel/arch/ia32/src/interrupt.c
===================================================================
--- kernel/arch/ia32/src/interrupt.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/ia32/src/interrupt.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -94,5 +94,5 @@
 {
 	fault_if_from_uspace(istate, "Unserviced interrupt: %d.", n);
-
+	
 	decode_istate(istate);
 	panic("Unserviced interrupt: %d.", n);
@@ -102,5 +102,5 @@
 {
 	fault_if_from_uspace(istate, "Divide error.");
-
+	
 	decode_istate(istate);
 	panic("Divide error.");
@@ -111,10 +111,8 @@
 {
 	if (TASK) {
-		size_t ver;
+		irq_spinlock_lock(&TASK->lock, false);
+		size_t ver = TASK->arch.iomapver;
+		irq_spinlock_unlock(&TASK->lock, false);
 		
-		spinlock_lock(&TASK->lock);
-		ver = TASK->arch.iomapver;
-		spinlock_unlock(&TASK->lock);
-	
 		if (CPU->arch.iomapver_copy != ver) {
 			/*
@@ -130,5 +128,5 @@
 		fault_if_from_uspace(istate, "General protection fault.");
 	}
-
+	
 	decode_istate(istate);
 	panic("General protection fault.");
@@ -138,5 +136,5 @@
 {
 	fault_if_from_uspace(istate, "Stack fault.");
-
+	
 	decode_istate(istate);
 	panic("Stack fault.");
@@ -146,8 +144,9 @@
 {
 	uint32_t mxcsr;
-	asm (
+	asm volatile (
 		"stmxcsr %[mxcsr]\n"
 		: [mxcsr] "=m" (mxcsr)
 	);
+	
 	fault_if_from_uspace(istate, "SIMD FP exception(19), MXCSR: %#zx.",
 	    (unative_t) mxcsr);
@@ -158,7 +157,8 @@
 }
 
-static void nm_fault(int n __attribute__((unused)), istate_t *istate __attribute__((unused)))
-{
-#ifdef CONFIG_FPU_LAZY     
+static void nm_fault(int n __attribute__((unused)),
+    istate_t *istate __attribute__((unused)))
+{
+#ifdef CONFIG_FPU_LAZY
 	scheduler_fpu_lazy_request();
 #else
@@ -169,5 +169,6 @@
 
 #ifdef CONFIG_SMP
-static void tlb_shootdown_ipi(int n __attribute__((unused)), istate_t *istate __attribute__((unused)))
+static void tlb_shootdown_ipi(int n __attribute__((unused)),
+    istate_t *istate __attribute__((unused)))
 {
 	trap_virtual_eoi();
@@ -191,5 +192,5 @@
 		 * The IRQ handler was found.
 		 */
-		 
+		
 		if (irq->preack) {
 			/* Send EOI before processing the interrupt */
@@ -198,5 +199,5 @@
 		}
 		irq->handler(irq);
-		spinlock_unlock(&irq->lock);
+		irq_spinlock_unlock(&irq->lock, false);
 	} else {
 		/*
Index: kernel/arch/ia32/src/smp/apic.c
===================================================================
--- kernel/arch/ia32/src/smp/apic.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/ia32/src/smp/apic.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -53,11 +53,12 @@
  * Advanced Programmable Interrupt Controller for SMP systems.
  * Tested on:
- *	Bochs 2.0.2 - Bochs 2.2.6 with 2-8 CPUs
- *	Simics 2.0.28 - Simics 2.2.19 2-15 CPUs
- *	VMware Workstation 5.5 with 2 CPUs
- *	QEMU 0.8.0 with 2-15 CPUs
- *	ASUS P/I-P65UP5 + ASUS C-P55T2D REV. 1.41 with 2x 200Mhz Pentium CPUs
- *	ASUS PCH-DL with 2x 3000Mhz Pentium 4 Xeon (HT) CPUs
- *	MSI K7D Master-L with 2x 2100MHz Athlon MP CPUs
+ *    Bochs 2.0.2 - Bochs 2.2.6 with 2-8 CPUs
+ *    Simics 2.0.28 - Simics 2.2.19 2-15 CPUs
+ *    VMware Workstation 5.5 with 2 CPUs
+ *    QEMU 0.8.0 with 2-15 CPUs
+ *    ASUS P/I-P65UP5 + ASUS C-P55T2D REV. 1.41 with 2x 200Mhz Pentium CPUs
+ *    ASUS PCH-DL with 2x 3000Mhz Pentium 4 Xeon (HT) CPUs
+ *    MSI K7D Master-L with 2x 2100MHz Athlon MP CPUs
+ *
  */
 
@@ -69,4 +70,5 @@
  * optimize the code too much and accesses to l_apic and io_apic, that must
  * always be 32-bit, would use byte oriented instructions.
+ *
  */
 volatile uint32_t *l_apic = (uint32_t *) 0xfee00000;
@@ -79,5 +81,5 @@
 
 #ifdef LAPIC_VERBOSE
-static char *delmod_str[] = {
+static const char *delmod_str[] = {
 	"Fixed",
 	"Lowest Priority",
@@ -90,30 +92,30 @@
 };
 
-static char *destmod_str[] = {
+static const char *destmod_str[] = {
 	"Physical",
 	"Logical"
 };
 
-static char *trigmod_str[] = {
+static const char *trigmod_str[] = {
 	"Edge",
 	"Level"
 };
 
-static char *mask_str[] = {
+static const char *mask_str[] = {
 	"Unmasked",
 	"Masked"
 };
 
-static char *delivs_str[] = {
+static const char *delivs_str[] = {
 	"Idle",
 	"Send Pending"
 };
 
-static char *tm_mode_str[] = {
+static const char *tm_mode_str[] = {
 	"One-shot",
 	"Periodic"
 };
 
-static char *intpol_str[] = {
+static const char *intpol_str[] = {
 	"Polarity High",
 	"Polarity Low"
@@ -123,8 +125,10 @@
 /** APIC spurious interrupt handler.
  *
- * @param n Interrupt vector.
+ * @param n      Interrupt vector.
  * @param istate Interrupted state.
- */
-static void apic_spurious(int n __attribute__((unused)), istate_t *istate __attribute__((unused)))
+ *
+ */
+static void apic_spurious(int n __attribute__((unused)),
+    istate_t *istate __attribute__((unused)))
 {
 #ifdef CONFIG_DEBUG
@@ -145,7 +149,7 @@
 	 * irq->lock so we just unlock it and then lock it again.
 	 */
-	spinlock_unlock(&irq->lock);
+	irq_spinlock_unlock(&irq->lock, false);
 	clock();
-	spinlock_lock(&irq->lock);
+	irq_spinlock_lock(&irq->lock, false);
 }
 
@@ -153,8 +157,6 @@
 void apic_init(void)
 {
-	io_apic_id_t idreg;
-	
 	exc_register(VECTOR_APIC_SPUR, "apic_spurious", (iroutine) apic_spurious);
-
+	
 	enable_irqs_function = io_apic_enable_irqs;
 	disable_irqs_function = io_apic_disable_irqs;
@@ -179,5 +181,5 @@
 	for (i = 0; i < IRQ_COUNT; i++) {
 		int pin;
-	
+		
 		if ((pin = smp_irq_to_pin(i)) != -1)
 			io_apic_change_ioredtbl((uint8_t) pin, DEST_ALL, (uint8_t) (IVT_IRQBASE + i), LOPRI);
@@ -187,6 +189,8 @@
 	 * Ensure that io_apic has unique ID.
 	 */
+	io_apic_id_t idreg;
+	
 	idreg.value = io_apic_read(IOAPICID);
-	if ((1 << idreg.apic_id) & apic_id_mask) {	/* see if IO APIC ID is used already */
+	if ((1 << idreg.apic_id) & apic_id_mask) {  /* See if IO APIC ID is used already */
 		for (i = 0; i < APIC_ID_COUNT; i++) {
 			if (!((1 << i) & apic_id_mask)) {
@@ -197,11 +201,10 @@
 		}
 	}
-
+	
 	/*
 	 * Configure the BSP's lapic.
 	 */
 	l_apic_init();
-
-	l_apic_debug();	
+	l_apic_debug();
 }
 
@@ -211,4 +214,5 @@
  *
  * @return 0 on error, 1 on success.
+ *
  */
 int apic_poll_errors(void)
@@ -232,5 +236,5 @@
 	if (esr.illegal_register_address)
 		printf("Illegal Register Address\n");
-
+	
 	return !esr.err_bitmap;
 }
@@ -241,9 +245,10 @@
  *
  * @return 0 on failure, 1 on success.
+ *
  */
 int l_apic_broadcast_custom_ipi(uint8_t vector)
 {
 	icr_t icr;
-
+	
 	icr.lo = l_apic[ICRlo];
 	icr.delmod = DELMOD_FIXED;
@@ -253,7 +258,7 @@
 	icr.trigger_mode = TRIGMOD_LEVEL;
 	icr.vector = vector;
-
+	
 	l_apic[ICRlo] = icr.lo;
-
+	
 	icr.lo = l_apic[ICRlo];
 	if (icr.delivs == DELIVS_PENDING) {
@@ -262,5 +267,5 @@
 #endif
 	}
-
+	
 	return apic_poll_errors();
 }
@@ -271,13 +276,13 @@
  *
  * @return 0 on failure, 1 on success.
+ *
  */
 int l_apic_send_init_ipi(uint8_t apicid)
 {
+	/*
+	 * Read the ICR register in and zero all non-reserved fields.
+	 */
 	icr_t icr;
-	int i;
-
-	/*
-	 * Read the ICR register in and zero all non-reserved fields.
-	 */
+	
 	icr.lo = l_apic[ICRlo];
 	icr.hi = l_apic[ICRhi];
@@ -293,5 +298,5 @@
 	l_apic[ICRhi] = icr.hi;
 	l_apic[ICRlo] = icr.lo;
-
+	
 	/*
 	 * According to MP Specification, 20us should be enough to
@@ -299,8 +304,8 @@
 	 */
 	delay(20);
-
+	
 	if (!apic_poll_errors())
 		return 0;
-
+	
 	icr.lo = l_apic[ICRlo];
 	if (icr.delivs == DELIVS_PENDING) {
@@ -309,5 +314,5 @@
 #endif
 	}
-
+	
 	icr.delmod = DELMOD_INIT;
 	icr.destmod = DESTMOD_PHYS;
@@ -317,15 +322,16 @@
 	icr.vector = 0;
 	l_apic[ICRlo] = icr.lo;
-
+	
 	/*
 	 * Wait 10ms as MP Specification specifies.
 	 */
 	delay(10000);
-
+	
 	if (!is_82489DX_apic(l_apic[LAVR])) {
 		/*
 		 * If this is not 82489DX-based l_apic we must send two STARTUP IPI's.
 		 */
-		for (i = 0; i<2; i++) {
+		unsigned int i;
+		for (i = 0; i < 2; i++) {
 			icr.lo = l_apic[ICRlo];
 			icr.vector = (uint8_t) (((uintptr_t) ap_boot) >> 12); /* calculate the reset vector */
@@ -346,31 +352,26 @@
 void l_apic_init(void)
 {
+	/* Initialize LVT Error register. */
 	lvt_error_t error;
-	lvt_lint_t lint;
-	tpr_t tpr;
-	svr_t svr;
-	icr_t icr;
-	tdcr_t tdcr;
-	lvt_tm_t tm;
-	ldr_t ldr;
-	dfr_t dfr;
-	uint32_t t1, t2;
-
-	/* Initialize LVT Error register. */
+	
 	error.value = l_apic[LVT_Err];
 	error.masked = true;
 	l_apic[LVT_Err] = error.value;
-
+	
 	/* Initialize LVT LINT0 register. */
+	lvt_lint_t lint;
+	
 	lint.value = l_apic[LVT_LINT0];
 	lint.masked = true;
 	l_apic[LVT_LINT0] = lint.value;
-
+	
 	/* Initialize LVT LINT1 register. */
 	lint.value = l_apic[LVT_LINT1];
 	lint.masked = true;
 	l_apic[LVT_LINT1] = lint.value;
-
+	
 	/* Task Priority Register initialization. */
+	tpr_t tpr;
+	
 	tpr.value = l_apic[TPR];
 	tpr.pri_sc = 0;
@@ -379,4 +380,6 @@
 	
 	/* Spurious-Interrupt Vector Register initialization. */
+	svr_t svr;
+	
 	svr.value = l_apic[SVR];
 	svr.vector = VECTOR_APIC_SPUR;
@@ -384,9 +387,11 @@
 	svr.focus_checking = true;
 	l_apic[SVR] = svr.value;
-
+	
 	if (CPU->arch.family >= 6)
 		enable_l_apic_in_msr();
 	
 	/* Interrupt Command Register initialization. */
+	icr_t icr;
+	
 	icr.lo = l_apic[ICRlo];
 	icr.delmod = DELMOD_INIT;
@@ -398,9 +403,13 @@
 	
 	/* Timer Divide Configuration Register initialization. */
+	tdcr_t tdcr;
+	
 	tdcr.value = l_apic[TDCR];
 	tdcr.div_value = DIVIDE_1;
 	l_apic[TDCR] = tdcr.value;
-
+	
 	/* Program local timer. */
+	lvt_tm_t tm;
+	
 	tm.value = l_apic[LVT_Tm];
 	tm.vector = VECTOR_CLK;
@@ -408,23 +417,24 @@
 	tm.masked = false;
 	l_apic[LVT_Tm] = tm.value;
-
+	
 	/*
 	 * Measure and configure the timer to generate timer
 	 * interrupt with period 1s/HZ seconds.
 	 */
+	uint32_t t1 = l_apic[CCRT];
+	l_apic[ICRT] = 0xffffffff;
+	
+	while (l_apic[CCRT] == t1);
+	
 	t1 = l_apic[CCRT];
-	l_apic[ICRT] = 0xffffffff;
-
-	while (l_apic[CCRT] == t1)
-		;
-		
-	t1 = l_apic[CCRT];
-	delay(1000000/HZ);
-	t2 = l_apic[CCRT];
-	
-	l_apic[ICRT] = t1-t2;
+	delay(1000000 / HZ);
+	uint32_t t2 = l_apic[CCRT];
+	
+	l_apic[ICRT] = t1 - t2;
 	
 	/* Program Logical Destination Register. */
 	ASSERT(CPU->id < 8);
+	ldr_t ldr;
+	
 	ldr.value = l_apic[LDR];
 	ldr.id = (uint8_t) (1 << CPU->id);
@@ -432,4 +442,6 @@
 	
 	/* Program Destination Format Register for Flat mode. */
+	dfr_t dfr;
+	
 	dfr.value = l_apic[DFR];
 	dfr.model = MODEL_FLAT;
@@ -447,16 +459,17 @@
 {
 #ifdef LAPIC_VERBOSE
+	printf("LVT on cpu%" PRIs ", LAPIC ID: %" PRIu8 "\n", CPU->id, l_apic_id());
+	
 	lvt_tm_t tm;
-	lvt_lint_t lint;
-	lvt_error_t error;	
-	
-	printf("LVT on cpu%d, LAPIC ID: %d\n", CPU->id, l_apic_id());
-
 	tm.value = l_apic[LVT_Tm];
 	printf("LVT Tm: vector=%hhd, %s, %s, %s\n", tm.vector, delivs_str[tm.delivs], mask_str[tm.masked], tm_mode_str[tm.mode]);
+	
+	lvt_lint_t lint;
 	lint.value = l_apic[LVT_LINT0];
 	printf("LVT LINT0: vector=%hhd, %s, %s, %s, irr=%d, %s, %s\n", tm.vector, delmod_str[lint.delmod], delivs_str[lint.delivs], intpol_str[lint.intpol], lint.irr, trigmod_str[lint.trigger_mode], mask_str[lint.masked]);
 	lint.value = l_apic[LVT_LINT1];	
 	printf("LVT LINT1: vector=%hhd, %s, %s, %s, irr=%d, %s, %s\n", tm.vector, delmod_str[lint.delmod], delivs_str[lint.delivs], intpol_str[lint.intpol], lint.irr, trigmod_str[lint.trigger_mode], mask_str[lint.masked]);	
+	
+	lvt_error_t error;
 	error.value = l_apic[LVT_Err];
 	printf("LVT Err: vector=%hhd, %s, %s\n", error.vector, delivs_str[error.delivs], mask_str[error.masked]);
@@ -467,4 +480,5 @@
  *
  * @return Local APIC ID.
+ *
  */
 uint8_t l_apic_id(void)
@@ -481,4 +495,5 @@
  *
  * @return Content of the addressed IO APIC register.
+ *
  */
 uint32_t io_apic_read(uint8_t address)
@@ -495,7 +510,8 @@
  *
  * @param address IO APIC register address.
- * @param x Content to be written to the addressed IO APIC register.
- */
-void io_apic_write(uint8_t address, uint32_t x)
+ * @param val     Content to be written to the addressed IO APIC register.
+ *
+ */
+void io_apic_write(uint8_t address, uint32_t val)
 {
 	io_regsel_t regsel;
@@ -504,22 +520,26 @@
 	regsel.reg_addr = address;
 	io_apic[IOREGSEL] = regsel.value;
-	io_apic[IOWIN] = x;
+	io_apic[IOWIN] = val;
 }
 
 /** Change some attributes of one item in I/O Redirection Table.
  *
- * @param pin IO APIC pin number.
- * @param dest Interrupt destination address.
- * @param v Interrupt vector to trigger.
+ * @param pin   IO APIC pin number.
+ * @param dest  Interrupt destination address.
+ * @param vec   Interrupt vector to trigger.
  * @param flags Flags.
- */
-void io_apic_change_ioredtbl(uint8_t pin, uint8_t dest, uint8_t v, int flags)
-{
-	io_redirection_reg_t reg;
-	int dlvr = DELMOD_FIXED;
+ *
+ */
+void io_apic_change_ioredtbl(uint8_t pin, uint8_t dest, uint8_t vec,
+    unsigned int flags)
+{
+	unsigned int dlvr;
 	
 	if (flags & LOPRI)
 		dlvr = DELMOD_LOWPRI;
-
+	else
+		dlvr = DELMOD_FIXED;
+	
+	io_redirection_reg_t reg;
 	reg.lo = io_apic_read((uint8_t) (IOREDTBL + pin * 2));
 	reg.hi = io_apic_read((uint8_t) (IOREDTBL + pin * 2 + 1));
@@ -530,6 +550,6 @@
 	reg.intpol = POLARITY_HIGH;
 	reg.delmod = dlvr;
-	reg.intvec = v;
-
+	reg.intvec = vec;
+	
 	io_apic_write((uint8_t) (IOREDTBL + pin * 2), reg.lo);
 	io_apic_write((uint8_t) (IOREDTBL + pin * 2 + 1), reg.hi);
@@ -539,11 +559,9 @@
  *
  * @param irqmask Bitmask of IRQs to be masked (0 = do not mask, 1 = mask).
+ *
  */
 void io_apic_disable_irqs(uint16_t irqmask)
 {
-	io_redirection_reg_t reg;
 	unsigned int i;
-	int pin;
-	
 	for (i = 0; i < 16; i++) {
 		if (irqmask & (1 << i)) {
@@ -552,6 +570,8 @@
 			 * mapping for the respective IRQ number.
 			 */
-			pin = smp_irq_to_pin(i);
+			int pin = smp_irq_to_pin(i);
 			if (pin != -1) {
+				io_redirection_reg_t reg;
+				
 				reg.lo = io_apic_read((uint8_t) (IOREDTBL + pin * 2));
 				reg.masked = true;
@@ -566,11 +586,9 @@
  *
  * @param irqmask Bitmask of IRQs to be unmasked (0 = do not unmask, 1 = unmask).
+ *
  */
 void io_apic_enable_irqs(uint16_t irqmask)
 {
 	unsigned int i;
-	int pin;
-	io_redirection_reg_t reg;	
-	
 	for (i = 0; i < 16; i++) {
 		if (irqmask & (1 << i)) {
@@ -579,6 +597,8 @@
 			 * mapping for the respective IRQ number.
 			 */
-			pin = smp_irq_to_pin(i);
+			int pin = smp_irq_to_pin(i);
 			if (pin != -1) {
+				io_redirection_reg_t reg;
+				
 				reg.lo = io_apic_read((uint8_t) (IOREDTBL + pin * 2));
 				reg.masked = false;
Index: kernel/arch/ia64/src/drivers/it.c
===================================================================
--- kernel/arch/ia64/src/drivers/it.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/ia64/src/drivers/it.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -34,5 +34,5 @@
 
 /** Interval Timer driver. */
- 
+
 #include <arch/drivers/it.h>
 #include <arch/interrupt.h>
@@ -45,11 +45,11 @@
 #include <arch.h>
 
-#define IT_SERVICE_CLOCKS	64
+#define IT_SERVICE_CLOCKS  64
 
-#define FREQ_NUMERATOR_SHIFT	32
-#define FREQ_NUMERATOR_MASK	0xffffffff00000000ULL
+#define FREQ_NUMERATOR_SHIFT  32
+#define FREQ_NUMERATOR_MASK   0xffffffff00000000ULL
 
-#define FREQ_DENOMINATOR_SHIFT	0
-#define FREQ_DENOMINATOR_MASK	0xffffffffULL
+#define FREQ_DENOMINATOR_SHIFT  0
+#define FREQ_DENOMINATOR_MASK   0xffffffffULL
 
 uint64_t it_delta;
@@ -63,6 +63,4 @@
 void it_init(void)
 {
-	cr_itv_t itv;
-	
 	if (config.cpu_active == 1) {
 		irq_initialize(&it_irq);
@@ -83,17 +81,19 @@
 	}
 	
-	/* initialize Interval Timer external interrupt vector */
+	/* Initialize Interval Timer external interrupt vector */
+	cr_itv_t itv;
+	
 	itv.value = itv_read();
 	itv.vector = INTERRUPT_TIMER;
 	itv.m = 0;
 	itv_write(itv.value);
-
-	/* set Interval Timer Counter to zero */
+	
+	/* Set Interval Timer Counter to zero */
 	itc_write(0);
 	
-	/* generate first Interval Timer interrupt in IT_DELTA ticks */
+	/* Generate first Interval Timer interrupt in IT_DELTA ticks */
 	itm_write(IT_DELTA);
-
-	/* propagate changes */
+	
+	/* Propagate changes */
 	srlz_d();
 }
@@ -104,4 +104,5 @@
  *
  * @return Always IRQ_ACCEPT.
+ *
  */
 irq_ownership_t it_claim(irq_t *irq)
@@ -113,17 +114,14 @@
 void it_interrupt(irq_t *irq)
 {
-	int64_t c;
-	int64_t m;
-	
 	eoi_write(EOI);
 	
-	m = itm_read();
+	int64_t itm = itm_read();
 	
-	while (1) {
-		c = itc_read();
-		c += IT_SERVICE_CLOCKS;
-
-		m += IT_DELTA;
-		if (m - c < 0)
+	while (true) {
+		int64_t itc = itc_read();
+		itc += IT_SERVICE_CLOCKS;
+		
+		itm += IT_DELTA;
+		if (itm - itc < 0)
 			CPU->missed_clock_ticks++;
 		else
@@ -131,14 +129,14 @@
 	}
 	
-	itm_write(m);
-	srlz_d();				/* propagate changes */
-
+	itm_write(itm);
+	srlz_d();  /* Propagate changes */
+	
 	/*
 	 * We are holding a lock which prevents preemption.
 	 * Release the lock, call clock() and reacquire the lock again.
 	 */
-	spinlock_unlock(&irq->lock);	
+	irq_spinlock_unlock(&irq->lock, false);
 	clock();
-	spinlock_lock(&irq->lock);
+	irq_spinlock_lock(&irq->lock, false);
 }
 
Index: kernel/arch/ia64/src/interrupt.c
===================================================================
--- kernel/arch/ia64/src/interrupt.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/ia64/src/interrupt.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -57,10 +57,11 @@
 #include <putchar.h>
 
-#define VECTORS_64_BUNDLE	20
-#define VECTORS_16_BUNDLE	48
-#define VECTORS_16_BUNDLE_START	0x5000
-#define VECTOR_MAX		0x7f00
-
-#define BUNDLE_SIZE		16
+#define VECTORS_64_BUNDLE        20
+#define VECTORS_16_BUNDLE        48
+#define VECTORS_16_BUNDLE_START  0x5000
+
+#define VECTOR_MAX  0x7f00
+
+#define BUNDLE_SIZE  16
 
 static const char *vector_names_64_bundle[VECTORS_64_BUNDLE] = {
@@ -134,8 +135,4 @@
 static void dump_interrupted_context(istate_t *istate)
 {
-	const char *ifa = symtab_fmt_name_lookup(istate->cr_ifa);
-	const char *iipa = symtab_fmt_name_lookup(istate->cr_iipa);
-	const char *iip = symtab_fmt_name_lookup(istate->cr_iip);
-	
 	putchar('\n');
 	printf("Interrupted context dump:\n");
@@ -149,8 +146,10 @@
 	    istate->cr_ipsr);
 	
-	printf("cr.iip=%#018llx, #%d\t(%s)\n", istate->cr_iip,
-	    istate->cr_isr.ei, iip);
-	printf("cr.iipa=%#018llx\t(%s)\n", istate->cr_iipa, iipa);
-	printf("cr.ifa=%#018llx\t(%s)\n", istate->cr_ifa, ifa);
+	printf("cr.iip=%#018llx, #%d\t(%s)\n", istate->cr_iip, istate->cr_isr.ei,
+	    symtab_fmt_name_lookup(istate->cr_iip));
+	printf("cr.iipa=%#018llx\t(%s)\n", istate->cr_iipa,
+	    symtab_fmt_name_lookup(istate->cr_iipa));
+	printf("cr.ifa=%#018llx\t(%s)\n", istate->cr_ifa,
+	    symtab_fmt_name_lookup(istate->cr_ifa));
 }
 
@@ -218,5 +217,5 @@
 		istate->cr_ipsr.ri++;
 	}
-
+	
 	return syscall_handler(istate->in0, istate->in1, istate->in2,
 	    istate->in3, istate->in4, istate->in5, istate->in6);
@@ -234,16 +233,18 @@
 static void end_of_local_irq(void)
 {
-	asm volatile ("mov cr.eoi=r0;;");
-}
-
+	asm volatile (
+		"mov cr.eoi=r0;;"
+	);
+}
 
 void external_interrupt(uint64_t vector, istate_t *istate)
 {
 	cr_ivr_t ivr;
-	irq_t *irq;
 	
 	ivr.value = ivr_read();
 	srlz_d();
-
+	
+	irq_t *irq;
+	
 	switch (ivr.vector) {
 	case INTERRUPT_SPURIOUS:
@@ -252,5 +253,5 @@
 #endif
 		break;
-
+	
 #ifdef CONFIG_SMP
 	case VECTOR_TLB_SHOOTDOWN_IPI:
@@ -259,10 +260,10 @@
 		break;
 #endif
-
+	
 	case INTERRUPT_TIMER:
 		irq = irq_dispatch_and_lock(ivr.vector);
 		if (irq) {
 			irq->handler(irq);
-			spinlock_unlock(&irq->lock);
+			irq_spinlock_unlock(&irq->lock, false);
 		} else {
 			panic("Unhandled Internal Timer Interrupt (%d).",
@@ -283,5 +284,5 @@
 			if (!irq->preack)
 				end_of_local_irq();
-			spinlock_unlock(&irq->lock);
+			irq_spinlock_unlock(&irq->lock, false);
 		} else {
 			/*
Index: kernel/arch/mips32/include/asm.h
===================================================================
--- kernel/arch/mips32/include/asm.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/mips32/include/asm.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -39,5 +39,4 @@
 #include <config.h>
 
-
 static inline void cpu_sleep(void)
 {
@@ -47,20 +46,21 @@
 
 /** Return base address of current stack
- * 
+ *
  * Return the base address of the current stack.
  * The stack is assumed to be STACK_SIZE bytes long.
  * The stack must start on page boundary.
+ *
  */
 static inline uintptr_t get_stack_base(void)
 {
-	uintptr_t v;
+	uintptr_t base;
 	
 	asm volatile (
-		"and %0, $29, %1\n"
-		: "=r" (v)
-		: "r" (~(STACK_SIZE-1))
+		"and %[base], $29, %[mask]\n"
+		: [base] "=r" (base)
+		: [mask] "r" (~(STACK_SIZE - 1))
 	);
 	
-	return v;
+	return base;
 }
 
@@ -78,15 +78,15 @@
 static inline void pio_write_8(ioport8_t *port, uint8_t v)
 {
-	*port = v;	
+	*port = v;
 }
 
 static inline void pio_write_16(ioport16_t *port, uint16_t v)
 {
-	*port = v;	
+	*port = v;
 }
 
 static inline void pio_write_32(ioport32_t *port, uint32_t v)
 {
-	*port = v;	
+	*port = v;
 }
 
Index: kernel/arch/mips32/include/debugger.h
===================================================================
--- kernel/arch/mips32/include/debugger.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/mips32/include/debugger.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -41,25 +41,33 @@
 #define BKPOINTS_MAX 10
 
-#define BKPOINT_INPROG   (1 << 0)   /**< Breakpoint was shot */
-#define BKPOINT_ONESHOT  (1 << 1)   /**< One-time breakpoint,mandatory for j/b
-				         instructions */
-#define BKPOINT_REINST   (1 << 2)   /**< Breakpoint is set on the next 
-				         instruction, so that it could be
-					 reinstalled on the previous one */
-#define BKPOINT_FUNCCALL (1 << 3)   /**< Call a predefined function */
+/** Breakpoint was shot */
+#define BKPOINT_INPROG  (1 << 0)
+
+/** One-time breakpoint, mandatory for j/b instructions */
+#define BKPOINT_ONESHOT  (1 << 1)
+
+/**
+ * Breakpoint is set on the next instruction, so that it
+ * could be reinstalled on the previous one
+ */
+#define BKPOINT_REINST  (1 << 2)
+
+/** Call a predefined function */
+#define BKPOINT_FUNCCALL  (1 << 3)
+
 
 typedef struct  {
-	uintptr_t address;      /**< Breakpoint address */
-	unative_t instruction; /**< Original instruction */
+	uintptr_t address;          /**< Breakpoint address */
+	unative_t instruction;      /**< Original instruction */
 	unative_t nextinstruction;  /**< Original instruction following break */
-	int flags;        /**< Flags regarding breakpoint */
+	unsigned int flags;         /**< Flags regarding breakpoint */
 	size_t counter;
-	void (*bkfunc)(void *b, istate_t *istate);
+	void (*bkfunc)(void *, istate_t *);
 } bpinfo_t;
 
+extern bpinfo_t breakpoints[BKPOINTS_MAX];
+
 extern void debugger_init(void);
-void debugger_bpoint(istate_t *istate);
-
-extern bpinfo_t breakpoints[BKPOINTS_MAX];
+extern void debugger_bpoint(istate_t *);
 
 #endif
Index: kernel/arch/mips32/include/mm/as.h
===================================================================
--- kernel/arch/mips32/include/mm/as.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/mips32/include/mm/as.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -27,5 +27,5 @@
  */
 
-/** @addtogroup mips32mm	
+/** @addtogroup mips32mm
  * @{
  */
@@ -36,5 +36,5 @@
 #define KERN_mips32_AS_H_
 
-#define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH	0
+#define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH  0
 
 #define KERNEL_ADDRESS_SPACE_START_ARCH		(unsigned long) 0x80000000
Index: kernel/arch/mips32/src/debugger.c
===================================================================
--- kernel/arch/mips32/src/debugger.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/mips32/src/debugger.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -46,9 +46,12 @@
 
 bpinfo_t breakpoints[BKPOINTS_MAX];
-SPINLOCK_INITIALIZE(bkpoint_lock);
+IRQ_SPINLOCK_STATIC_INITIALIZE(bkpoint_lock);
 
 #ifdef CONFIG_KCONSOLE
 
-static int cmd_print_breakpoints(cmd_arg_t *argv);
+static int cmd_print_breakpoints(cmd_arg_t *);
+static int cmd_del_breakpoint(cmd_arg_t *);
+static int cmd_add_breakpoint(cmd_arg_t *);
+
 static cmd_info_t bkpts_info = {
 	.name = "bkpts",
@@ -58,11 +61,11 @@
 };
 
-static int cmd_del_breakpoint(cmd_arg_t *argv);
 static cmd_arg_t del_argv = {
 	.type = ARG_TYPE_INT
 };
+
 static cmd_info_t delbkpt_info = {
 	.name = "delbkpt",
-	.description = "delbkpt <number> - Delete breakpoint.",
+	.description = "Delete breakpoint.",
 	.func = cmd_del_breakpoint,
 	.argc = 1,
@@ -70,12 +73,11 @@
 };
 
-static int cmd_add_breakpoint(cmd_arg_t *argv);
 static cmd_arg_t add_argv = {
 	.type = ARG_TYPE_INT
 };
+
 static cmd_info_t addbkpt_info = {
 	.name = "addbkpt",
-	.description = "addbkpt <&symbol> - new bkpoint. Break on J/Branch "
-	    "insts unsupported.",
+	.description = "Add bkpoint (break on J/Branch insts unsupported).",
 	.func = cmd_add_breakpoint,
 	.argc = 1,
@@ -89,6 +91,5 @@
 static cmd_info_t addbkpte_info = {
 	.name = "addbkpte",
-	.description = "addebkpte <&symbol> <&func> - new bkpoint. Call "
-	    "func(or Nothing if 0).",
+	.description = "Add bkpoint with a trigger function.",
 	.func = cmd_add_breakpoint,
 	.argc = 2,
@@ -100,156 +101,159 @@
 	uint32_t value;
 } jmpinstr[] = {
-	{0xf3ff0000, 0x41000000}, /* BCzF */
-	{0xf3ff0000, 0x41020000}, /* BCzFL */
-	{0xf3ff0000, 0x41010000}, /* BCzT */
-	{0xf3ff0000, 0x41030000}, /* BCzTL */
-	{0xfc000000, 0x10000000}, /* BEQ */
-	{0xfc000000, 0x50000000}, /* BEQL */
-	{0xfc1f0000, 0x04010000}, /* BEQL */
-	{0xfc1f0000, 0x04110000}, /* BGEZAL */
-	{0xfc1f0000, 0x04130000}, /* BGEZALL */
-	{0xfc1f0000, 0x04030000}, /* BGEZL */
-	{0xfc1f0000, 0x1c000000}, /* BGTZ */
-	{0xfc1f0000, 0x5c000000}, /* BGTZL */
-	{0xfc1f0000, 0x18000000}, /* BLEZ */
-	{0xfc1f0000, 0x58000000}, /* BLEZL */
-	{0xfc1f0000, 0x04000000}, /* BLTZ */
-	{0xfc1f0000, 0x04100000}, /* BLTZAL */
-	{0xfc1f0000, 0x04120000}, /* BLTZALL */
-	{0xfc1f0000, 0x04020000}, /* BLTZL */
-	{0xfc000000, 0x14000000}, /* BNE */
-	{0xfc000000, 0x54000000}, /* BNEL */
-	{0xfc000000, 0x08000000}, /* J */
-	{0xfc000000, 0x0c000000}, /* JAL */
-	{0xfc1f07ff, 0x00000009}, /* JALR */
-	{0, 0} /* EndOfTable */
-};
-
+	{0xf3ff0000, 0x41000000},  /* BCzF */
+	{0xf3ff0000, 0x41020000},  /* BCzFL */
+	{0xf3ff0000, 0x41010000},  /* BCzT */
+	{0xf3ff0000, 0x41030000},  /* BCzTL */
+	{0xfc000000, 0x10000000},  /* BEQ */
+	{0xfc000000, 0x50000000},  /* BEQL */
+	{0xfc1f0000, 0x04010000},  /* BEQL */
+	{0xfc1f0000, 0x04110000},  /* BGEZAL */
+	{0xfc1f0000, 0x04130000},  /* BGEZALL */
+	{0xfc1f0000, 0x04030000},  /* BGEZL */
+	{0xfc1f0000, 0x1c000000},  /* BGTZ */
+	{0xfc1f0000, 0x5c000000},  /* BGTZL */
+	{0xfc1f0000, 0x18000000},  /* BLEZ */
+	{0xfc1f0000, 0x58000000},  /* BLEZL */
+	{0xfc1f0000, 0x04000000},  /* BLTZ */
+	{0xfc1f0000, 0x04100000},  /* BLTZAL */
+	{0xfc1f0000, 0x04120000},  /* BLTZALL */
+	{0xfc1f0000, 0x04020000},  /* BLTZL */
+	{0xfc000000, 0x14000000},  /* BNE */
+	{0xfc000000, 0x54000000},  /* BNEL */
+	{0xfc000000, 0x08000000},  /* J */
+	{0xfc000000, 0x0c000000},  /* JAL */
+	{0xfc1f07ff, 0x00000009},  /* JALR */
+	{0, 0}                     /* end of table */
+};
 
 /** Test, if the given instruction is a jump or branch instruction
  *
  * @param instr Instruction code
- * @return true - it is jump instruction, false otherwise
+ *
+ * @return true if it is jump instruction, false otherwise
  *
  */
 static bool is_jump(unative_t instr)
 {
-	int i;
-
+	unsigned int i;
+	
 	for (i = 0; jmpinstr[i].andmask; i++) {
 		if ((instr & jmpinstr[i].andmask) == jmpinstr[i].value)
 			return true;
 	}
-
+	
 	return false;
 }
 
-/** Add new breakpoint to table */
+/** Add new breakpoint to table
+ *
+ */
 int cmd_add_breakpoint(cmd_arg_t *argv)
 {
-	bpinfo_t *cur = NULL;
-	ipl_t ipl;
-	int i;
-
 	if (argv->intval & 0x3) {
 		printf("Not aligned instruction, forgot to use &symbol?\n");
 		return 1;
 	}
-	ipl = interrupts_disable();
-	spinlock_lock(&bkpoint_lock);
-
+	
+	irq_spinlock_lock(&bkpoint_lock, true);
+	
 	/* Check, that the breakpoints do not conflict */
+	unsigned int i;
 	for (i = 0; i < BKPOINTS_MAX; i++) {
-		if (breakpoints[i].address == (uintptr_t)argv->intval) {
+		if (breakpoints[i].address == (uintptr_t) argv->intval) {
 			printf("Duplicate breakpoint %d.\n", i);
-			spinlock_unlock(&bkpoint_lock);
-			interrupts_restore(ipl);
+			irq_spinlock_unlock(&bkpoint_lock, true);
 			return 0;
-		} else if (breakpoints[i].address == (uintptr_t)argv->intval +
-		    sizeof(unative_t) || breakpoints[i].address ==
-		    (uintptr_t)argv->intval - sizeof(unative_t)) {
+		} else if ((breakpoints[i].address == (uintptr_t) argv->intval +
+		    sizeof(unative_t)) || (breakpoints[i].address ==
+		    (uintptr_t) argv->intval - sizeof(unative_t))) {
 			printf("Adjacent breakpoints not supported, conflict "
 			    "with %d.\n", i);
-			spinlock_unlock(&bkpoint_lock);
-			interrupts_restore(ipl);
+			irq_spinlock_unlock(&bkpoint_lock, true);
 			return 0;
 		}
 		
 	}
-
-	for (i = 0; i < BKPOINTS_MAX; i++)
+	
+	bpinfo_t *cur = NULL;
+	
+	for (i = 0; i < BKPOINTS_MAX; i++) {
 		if (!breakpoints[i].address) {
 			cur = &breakpoints[i];
 			break;
 		}
+	}
+	
 	if (!cur) {
 		printf("Too many breakpoints.\n");
-		spinlock_unlock(&bkpoint_lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&bkpoint_lock, true);
 		return 0;
 	}
+	
+	printf("Adding breakpoint on address %p\n", argv->intval);
+	
 	cur->address = (uintptr_t) argv->intval;
-	printf("Adding breakpoint on address: %p\n", argv->intval);
-	cur->instruction = ((unative_t *)cur->address)[0];
-	cur->nextinstruction = ((unative_t *)cur->address)[1];
+	cur->instruction = ((unative_t *) cur->address)[0];
+	cur->nextinstruction = ((unative_t *) cur->address)[1];
 	if (argv == &add_argv) {
 		cur->flags = 0;
-	} else { /* We are add extended */
+	} else {  /* We are add extended */
 		cur->flags = BKPOINT_FUNCCALL;
 		cur->bkfunc = (void (*)(void *, istate_t *)) argv[1].intval;
 	}
+	
 	if (is_jump(cur->instruction))
 		cur->flags |= BKPOINT_ONESHOT;
+	
 	cur->counter = 0;
-
+	
 	/* Set breakpoint */
-	*((unative_t *)cur->address) = 0x0d;
+	*((unative_t *) cur->address) = 0x0d;
 	smc_coherence(cur->address);
-
-	spinlock_unlock(&bkpoint_lock);
-	interrupts_restore(ipl);
-
+	
+	irq_spinlock_unlock(&bkpoint_lock, true);
+	
 	return 1;
 }
 
-/** Remove breakpoint from table */
+/** Remove breakpoint from table
+ *
+ */
 int cmd_del_breakpoint(cmd_arg_t *argv)
 {
-	bpinfo_t *cur;
-	ipl_t ipl;
-
 	if (argv->intval > BKPOINTS_MAX) {
 		printf("Invalid breakpoint number.\n");
 		return 0;
 	}
-	ipl = interrupts_disable();
-	spinlock_lock(&bkpoint_lock);
-
-	cur = &breakpoints[argv->intval];
+	
+	irq_spinlock_lock(&bkpoint_lock, true);
+	
+	bpinfo_t *cur = &breakpoints[argv->intval];
 	if (!cur->address) {
 		printf("Breakpoint does not exist.\n");
-		spinlock_unlock(&bkpoint_lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&bkpoint_lock, true);
 		return 0;
 	}
+	
 	if ((cur->flags & BKPOINT_INPROG) && (cur->flags & BKPOINT_ONESHOT)) {
 		printf("Cannot remove one-shot breakpoint in-progress\n");
-		spinlock_unlock(&bkpoint_lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&bkpoint_lock, true);
 		return 0;
 	}
-	((uint32_t *)cur->address)[0] = cur->instruction;
-	smc_coherence(((uint32_t *)cur->address)[0]);
-	((uint32_t *)cur->address)[1] = cur->nextinstruction;
-	smc_coherence(((uint32_t *)cur->address)[1]);
-
+	
+	((uint32_t *) cur->address)[0] = cur->instruction;
+	smc_coherence(((uint32_t *) cur->address)[0]);
+	((uint32_t *) cur->address)[1] = cur->nextinstruction;
+	smc_coherence(((uint32_t *) cur->address)[1]);
+	
 	cur->address = NULL;
-
-	spinlock_unlock(&bkpoint_lock);
-	interrupts_restore(ipl);
+	
+	irq_spinlock_unlock(&bkpoint_lock, true);
 	return 1;
 }
 
-/** Print table of active breakpoints */
+/** Print table of active breakpoints
+ *
+ */
 int cmd_print_breakpoints(cmd_arg_t *argv)
 {
@@ -276,59 +280,64 @@
 }
 
-#endif
-
-/** Initialize debugger */
+#endif /* CONFIG_KCONSOLE */
+
+/** Initialize debugger
+ *
+ */
 void debugger_init()
 {
-	int i;
-
+	unsigned int i;
+	
 	for (i = 0; i < BKPOINTS_MAX; i++)
 		breakpoints[i].address = NULL;
-
+	
 #ifdef CONFIG_KCONSOLE
 	cmd_initialize(&bkpts_info);
 	if (!cmd_register(&bkpts_info))
 		printf("Cannot register command %s\n", bkpts_info.name);
-
+	
 	cmd_initialize(&delbkpt_info);
 	if (!cmd_register(&delbkpt_info))
 		printf("Cannot register command %s\n", delbkpt_info.name);
-
+	
 	cmd_initialize(&addbkpt_info);
 	if (!cmd_register(&addbkpt_info))
 		printf("Cannot register command %s\n", addbkpt_info.name);
-
+	
 	cmd_initialize(&addbkpte_info);
 	if (!cmd_register(&addbkpte_info))
 		printf("Cannot register command %s\n", addbkpte_info.name);
-#endif
+#endif /* CONFIG_KCONSOLE */
 }
 
 /** Handle breakpoint
  *
- * Find breakpoint in breakpoint table. 
+ * Find breakpoint in breakpoint table.
  * If found, call kconsole, set break on next instruction and reexecute.
  * If we are on "next instruction", set it back on the first and reexecute.
  * If breakpoint not found in breakpoint table, call kconsole and start
  * next instruction.
+ *
  */
 void debugger_bpoint(istate_t *istate)
 {
-	bpinfo_t *cur = NULL;
-	uintptr_t fireaddr = istate->epc;
-	int i;
-
 	/* test branch delay slot */
 	if (cp0_cause_read() & 0x80000000)
 		panic("Breakpoint in branch delay slot not supported.");
-
-	spinlock_lock(&bkpoint_lock);
+	
+	irq_spinlock_lock(&bkpoint_lock, false);
+	
+	bpinfo_t *cur = NULL;
+	uintptr_t fireaddr = istate->epc;
+	unsigned int i;
+	
 	for (i = 0; i < BKPOINTS_MAX; i++) {
 		/* Normal breakpoint */
-		if (fireaddr == breakpoints[i].address &&
-		    !(breakpoints[i].flags & BKPOINT_REINST)) {
+		if ((fireaddr == breakpoints[i].address) &&
+		    (!(breakpoints[i].flags & BKPOINT_REINST))) {
 			cur = &breakpoints[i];
 			break;
 		}
+		
 		/* Reinst only breakpoint */
 		if ((breakpoints[i].flags & BKPOINT_REINST) &&
@@ -338,24 +347,28 @@
 		}
 	}
+	
 	if (cur) {
 		if (cur->flags & BKPOINT_REINST) {
 			/* Set breakpoint on first instruction */
-			((uint32_t *)cur->address)[0] = 0x0d;
+			((uint32_t *) cur->address)[0] = 0x0d;
 			smc_coherence(((uint32_t *)cur->address)[0]);
+			
 			/* Return back the second */
-			((uint32_t *)cur->address)[1] = cur->nextinstruction;
-			smc_coherence(((uint32_t *)cur->address)[1]);
+			((uint32_t *) cur->address)[1] = cur->nextinstruction;
+			smc_coherence(((uint32_t *) cur->address)[1]);
+			
 			cur->flags &= ~BKPOINT_REINST;
-			spinlock_unlock(&bkpoint_lock);
+			irq_spinlock_unlock(&bkpoint_lock, false);
 			return;
-		} 
+		}
+		
 		if (cur->flags & BKPOINT_INPROG)
 			printf("Warning: breakpoint recursion\n");
 		
 		if (!(cur->flags & BKPOINT_FUNCCALL)) {
-			printf("***Breakpoint %d: %p in %s.\n", i, fireaddr,
-			    symtab_fmt_name_lookup(istate->epc));
-		}
-
+			printf("***Breakpoint %u: %p in %s.\n", i, fireaddr,
+			    symtab_fmt_name_lookup(fireaddr));
+		}
+		
 		/* Return first instruction back */
 		((uint32_t *)cur->address)[0] = cur->instruction;
@@ -371,10 +384,12 @@
 		printf("***Breakpoint %d: %p in %s.\n", i, fireaddr,
 		    symtab_fmt_name_lookup(fireaddr));
-
+		
 		/* Move on to next instruction */
 		istate->epc += 4;
 	}
+	
 	if (cur)
 		cur->counter++;
+	
 	if (cur && (cur->flags & BKPOINT_FUNCCALL)) {
 		/* Allow zero bkfunc, just for counting */
@@ -383,26 +398,31 @@
 	} else {
 #ifdef CONFIG_KCONSOLE
-		/* This disables all other processors - we are not SMP,
+		/*
+		 * This disables all other processors - we are not SMP,
 		 * actually this gets us to cpu_halt, if scheduler() is run
 		 * - we generally do not want scheduler to be run from debug,
 		 *   so this is a good idea
-		 */	
+		 */
 		atomic_set(&haltstate, 1);
-		spinlock_unlock(&bkpoint_lock);
+		irq_spinlock_unlock(&bkpoint_lock, false);
 		
 		kconsole("debug", "Debug console ready.\n", false);
 		
-		spinlock_lock(&bkpoint_lock);
+		irq_spinlock_lock(&bkpoint_lock, false);
 		atomic_set(&haltstate, 0);
 #endif
 	}
-	if (cur && cur->address == fireaddr && (cur->flags & BKPOINT_INPROG)) {
+	
+	if ((cur) && (cur->address == fireaddr)
+	    && ((cur->flags & BKPOINT_INPROG))) {
 		/* Remove one-shot breakpoint */
 		if ((cur->flags & BKPOINT_ONESHOT))
 			cur->address = NULL;
+		
 		/* Remove in-progress flag */
 		cur->flags &= ~BKPOINT_INPROG;
-	} 
-	spinlock_unlock(&bkpoint_lock);
+	}
+	
+	irq_spinlock_unlock(&bkpoint_lock, false);
 }
 
Index: kernel/arch/mips32/src/exception.c
===================================================================
--- kernel/arch/mips32/src/exception.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/mips32/src/exception.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -27,5 +27,5 @@
  */
 
-/** @addtogroup mips32	
+/** @addtogroup mips32
  * @{
  */
@@ -67,5 +67,5 @@
 	"Floating Point",
 	NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-	"WatchHi/WatchLo", /* 23 */
+	"WatchHi/WatchLo",  /* 23 */
 	NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 	"Virtual Coherency - data",
@@ -74,9 +74,7 @@
 static void print_regdump(istate_t *istate)
 {
-	const char *pcsymbol = symtab_fmt_name_lookup(istate->epc);
-	const char *rasymbol = symtab_fmt_name_lookup(istate->ra);
-	
-	printf("PC: %#x(%s) RA: %#x(%s), SP(%p)\n", istate->epc, pcsymbol,
-	    istate->ra, rasymbol, istate->sp);
+	printf("PC: %#x(%s) RA: %#x(%s), SP(%p)\n", istate->epc,
+	    symtab_fmt_name_lookup(istate->epc), istate->ra,
+	    symtab_fmt_name_lookup(istate->ra), istate->sp);
 }
 
@@ -135,10 +133,8 @@
 static void interrupt_exception(int n, istate_t *istate)
 {
-	uint32_t cause;
-	int i;
+	/* Decode interrupt number and process the interrupt */
+	uint32_t cause = (cp0_cause_read() >> 8) & 0xff;
 	
-	/* decode interrupt number and process the interrupt */
-	cause = (cp0_cause_read() >> 8) & 0xff;
-	
+	unsigned int i;
 	for (i = 0; i < 8; i++) {
 		if (cause & (1 << i)) {
@@ -149,5 +145,5 @@
 				 */
 				irq->handler(irq);
-				spinlock_unlock(&irq->lock);
+				irq_spinlock_unlock(&irq->lock, false);
 			} else {
 				/*
@@ -172,5 +168,5 @@
 {
 	int i;
-
+	
 	/* Clear exception table */
 	for (i = 0; i < IVT_ITEMS; i++)
Index: kernel/arch/mips32/src/interrupt.c
===================================================================
--- kernel/arch/mips32/src/interrupt.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/mips32/src/interrupt.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -48,4 +48,10 @@
 function virtual_timer_fnc = NULL;
 static irq_t timer_irq;
+
+// TODO: This is SMP unsafe!!!
+
+uint32_t count_hi = 0;
+static unsigned long nextcount;
+static unsigned long lastcount;
 
 /** Disable interrupts.
@@ -99,10 +105,7 @@
 }
 
-/* TODO: This is SMP unsafe!!! */
-uint32_t count_hi = 0;
-static unsigned long nextcount;
-static unsigned long lastcount;
-
-/** Start hardware clock */
+/** Start hardware clock
+ *
+ */
 static void timer_start(void)
 {
@@ -119,16 +122,16 @@
 static void timer_irq_handler(irq_t *irq)
 {
-	unsigned long drift;
-	
 	if (cp0_count_read() < lastcount)
 		/* Count overflow detected */
 		count_hi++;
+	
 	lastcount = cp0_count_read();
 	
-	drift = cp0_count_read() - nextcount;
+	unsigned long drift = cp0_count_read() - nextcount;
 	while (drift > cp0_compare_value) {
 		drift -= cp0_compare_value;
 		CPU->missed_clock_ticks++;
 	}
+	
 	nextcount = cp0_count_read() + cp0_compare_value - drift;
 	cp0_compare_write(nextcount);
@@ -138,7 +141,7 @@
 	 * Release the lock, call clock() and reacquire the lock again.
 	 */
-	spinlock_unlock(&irq->lock);
+	irq_spinlock_unlock(&irq->lock, false);
 	clock();
-	spinlock_lock(&irq->lock);
+	irq_spinlock_lock(&irq->lock, false);
 	
 	if (virtual_timer_fnc != NULL)
Index: kernel/arch/ppc32/include/asm.h
===================================================================
--- kernel/arch/ppc32/include/asm.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/ppc32/include/asm.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -39,4 +39,5 @@
 #include <config.h>
 #include <arch/cpu.h>
+#include <arch/mm/asid.h>
 
 static inline uint32_t msr_read(void)
@@ -58,4 +59,38 @@
 		:: [msr] "r" (msr)
 	);
+}
+
+static inline void sr_set(uint32_t flags, asid_t asid, uint32_t sr)
+{
+	asm volatile (
+		"mtsrin %[value], %[sr]\n"
+		:: [value] "r" ((flags << 16) + (asid << 4) + sr),
+		   [sr] "r" (sr << 28)
+	);
+}
+
+static inline uint32_t sr_get(uint32_t vaddr)
+{
+	uint32_t vsid;
+	
+	asm volatile (
+		"mfsrin %[vsid], %[vaddr]\n"
+		: [vsid] "=r" (vsid)
+		: [vaddr] "r" (vaddr)
+	);
+	
+	return vsid;
+}
+
+static inline uint32_t sdr1_get(void)
+{
+	uint32_t sdr1;
+	
+	asm volatile (
+		"mfsdr1 %[sdr1]\n"
+		: [sdr1] "=r" (sdr1)
+	);
+	
+	return sdr1;
 }
 
Index: kernel/arch/ppc32/include/mm/frame.h
===================================================================
--- kernel/arch/ppc32/include/mm/frame.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/ppc32/include/mm/frame.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -46,4 +46,16 @@
 extern uintptr_t last_frame;
 
+static inline uint32_t physmem_top(void)
+{
+	uint32_t physmem;
+	
+	asm volatile (
+		"mfsprg3 %[physmem]\n"
+		: [physmem] "=r" (physmem)
+	);
+	
+	return physmem;
+}
+
 extern void frame_arch_init(void);
 extern void physmem_print(void);
Index: kernel/arch/ppc32/src/debug/stacktrace.c
===================================================================
--- kernel/arch/ppc32/src/debug/stacktrace.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/ppc32/src/debug/stacktrace.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -66,10 +66,12 @@
 bool uspace_frame_pointer_prev(uintptr_t fp, uintptr_t *prev)
 {
-	return false;
+	return !copy_from_uspace((void *) prev,
+	    (uint32_t *) fp + FRAME_OFFSET_FP_PREV, sizeof(*prev));
 }
 
 bool uspace_return_address_get(uintptr_t fp, uintptr_t *ra)
 {
-	return false;
+	return !copy_from_uspace((void *) ra, (uint32_t *) fp + FRAME_OFFSET_RA,
+	    sizeof(*ra));
 }
 
Index: kernel/arch/ppc32/src/mm/as.c
===================================================================
--- kernel/arch/ppc32/src/mm/as.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/ppc32/src/mm/as.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -55,26 +55,13 @@
 void as_install_arch(as_t *as)
 {
-	asid_t asid;
 	uint32_t sr;
-
-	asid = as->asid;
 	
 	/* Lower 2 GB, user and supervisor access */
-	for (sr = 0; sr < 8; sr++) {
-		asm volatile (
-			"mtsrin %0, %1\n"
-			:
-			: "r" ((0x6000 << 16) + (asid << 4) + sr), "r" (sr << 28)
-		);
-	}
+	for (sr = 0; sr < 8; sr++)
+		sr_set(0x6000, as->asid, sr);
 	
 	/* Upper 2 GB, only supervisor access */
-	for (sr = 8; sr < 16; sr++) {
-		asm volatile (
-			"mtsrin %0, %1\n"
-			:
-			: "r" ((0x4000 << 16) + (asid << 4) + sr), "r" (sr << 28)
-		);
-	}
+	for (sr = 8; sr < 16; sr++)
+		sr_set(0x4000, as->asid, sr);
 }
 
Index: kernel/arch/ppc32/src/mm/frame.c
===================================================================
--- kernel/arch/ppc32/src/mm/frame.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/ppc32/src/mm/frame.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -45,12 +45,11 @@
 void physmem_print(void)
 {
-	unsigned int i;
-	
 	printf("Base       Size\n");
 	printf("---------- ----------\n");
-		
+	
+	size_t i;
 	for (i = 0; i < memmap.cnt; i++) {
 		printf("%#10x %#10x\n", memmap.zones[i].start,
-			memmap.zones[i].size);
+		    memmap.zones[i].size);
 	}
 }
@@ -60,11 +59,11 @@
 	pfn_t minconf = 2;
 	size_t i;
-	pfn_t start, conf;
-	size_t size;
 	
 	for (i = 0; i < memmap.cnt; i++) {
-		start = ADDR2PFN(ALIGN_UP((uintptr_t) memmap.zones[i].start, FRAME_SIZE));
-		size = SIZE2FRAMES(ALIGN_DOWN(memmap.zones[i].size, FRAME_SIZE));
+		pfn_t start = ADDR2PFN(ALIGN_UP((uintptr_t) memmap.zones[i].start,
+		    FRAME_SIZE));
+		size_t size = SIZE2FRAMES(ALIGN_DOWN(memmap.zones[i].size, FRAME_SIZE));
 		
+		pfn_t conf;
 		if ((minconf < start) || (minconf >= start + size))
 			conf = start;
@@ -73,6 +72,8 @@
 		
 		zone_create(start, size, conf, 0);
-		if (last_frame < ALIGN_UP((uintptr_t) memmap.zones[i].start + memmap.zones[i].size, FRAME_SIZE))
-			last_frame = ALIGN_UP((uintptr_t) memmap.zones[i].start + memmap.zones[i].size, FRAME_SIZE);
+		if (last_frame < ALIGN_UP((uintptr_t) memmap.zones[i].start
+		    + memmap.zones[i].size, FRAME_SIZE))
+			last_frame = ALIGN_UP((uintptr_t) memmap.zones[i].start
+			    + memmap.zones[i].size, FRAME_SIZE);
 	}
 	
@@ -82,10 +83,8 @@
 	
 	/* Mark the Page Hash Table frames as unavailable */
-	uint32_t sdr1;
-	asm volatile (
-		"mfsdr1 %0\n"
-		: "=r" (sdr1)
-	);
-	frame_mark_unavailable(ADDR2PFN(sdr1 & 0xffff000), 16); // FIXME
+	uint32_t sdr1 = sdr1_get();
+	
+	// FIXME: compute size of PHT exactly
+	frame_mark_unavailable(ADDR2PFN(sdr1 & 0xffff000), 16);
 }
 
Index: kernel/arch/ppc32/src/mm/tlb.c
===================================================================
--- kernel/arch/ppc32/src/mm/tlb.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/ppc32/src/mm/tlb.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -45,6 +45,6 @@
 
 static unsigned int seed = 10;
-static unsigned int seed_real __attribute__ ((section("K_UNMAPPED_DATA_START"))) = 42;
-
+static unsigned int seed_real
+    __attribute__ ((section("K_UNMAPPED_DATA_START"))) = 42;
 
 /** Try to find PTE for faulting address
@@ -54,21 +54,21 @@
  * if lock is true.
  *
- * @param as		Address space.
- * @param lock		Lock/unlock the address space.
- * @param badvaddr	Faulting virtual address.
- * @param access	Access mode that caused the fault.
- * @param istate	Pointer to interrupted state.
- * @param pfrc		Pointer to variable where as_page_fault() return code
- * 			will be stored.
- * @return		PTE on success, NULL otherwise.
- *
- */
-static pte_t *
-find_mapping_and_check(as_t *as, bool lock, uintptr_t badvaddr, int access,
-    istate_t *istate, int *pfrc)
+ * @param as       Address space.
+ * @param lock     Lock/unlock the address space.
+ * @param badvaddr Faulting virtual address.
+ * @param access   Access mode that caused the fault.
+ * @param istate   Pointer to interrupted state.
+ * @param pfrc     Pointer to variable where as_page_fault() return code
+ *                 will be stored.
+ *
+ * @return PTE on success, NULL otherwise.
+ *
+ */
+static pte_t *find_mapping_and_check(as_t *as, bool lock, uintptr_t badvaddr,
+    int access, istate_t *istate, int *pfrc)
 {
 	/*
 	 * Check if the mapping exists in page tables.
-	 */	
+	 */
 	pte_t *pte = page_mapping_find(as, badvaddr);
 	if ((pte) && (pte->present)) {
@@ -79,6 +79,4 @@
 		return pte;
 	} else {
-		int rc;
-	
 		/*
 		 * Mapping not found in page tables.
@@ -86,5 +84,7 @@
 		 */
 		page_table_unlock(as, lock);
-		switch (rc = as_page_fault(badvaddr, access, istate)) {
+		
+		int rc = as_page_fault(badvaddr, access, istate);
+		switch (rc) {
 		case AS_PF_OK:
 			/*
@@ -107,8 +107,7 @@
 		default:
 			panic("Unexpected rc (%d).", rc);
-		}	
-	}
-}
-
+		}
+	}
+}
 
 static void pht_refill_fail(uintptr_t badvaddr, istate_t *istate)
@@ -123,5 +122,4 @@
 }
 
-
 static void pht_insert(const uintptr_t vaddr, const pte_t *pte)
 {
@@ -129,16 +127,8 @@
 	uint32_t api = (vaddr >> 22) & 0x3f;
 	
-	uint32_t vsid;
-	asm volatile (
-		"mfsrin %0, %1\n"
-		: "=r" (vsid)
-		: "r" (vaddr)
-	);
-	
-	uint32_t sdr1;
-	asm volatile (
-		"mfsdr1 %0\n"
-		: "=r" (sdr1)
-	);
+	uint32_t vsid = sr_get(vaddr);
+	uint32_t sdr1 = sdr1_get();
+	
+	// FIXME: compute size of PHT exactly
 	phte_t *phte = (phte_t *) PA2KA(sdr1 & 0xffff0000);
 	
@@ -215,5 +205,4 @@
 }
 
-
 /** Process Instruction/Data Storage Exception
  *
@@ -224,7 +213,4 @@
 void pht_refill(int n, istate_t *istate)
 {
-	uintptr_t badvaddr;
-	pte_t *pte;
-	int pfrc;
 	as_t *as;
 	bool lock;
@@ -238,13 +224,17 @@
 	}
 	
+	uintptr_t badvaddr;
+	
 	if (n == VECTOR_DATA_STORAGE)
 		badvaddr = istate->dar;
 	else
 		badvaddr = istate->pc;
-		
+	
 	page_table_lock(as, lock);
 	
-	pte = find_mapping_and_check(as, lock, badvaddr,
+	int pfrc;
+	pte_t *pte = find_mapping_and_check(as, lock, badvaddr,
 	    PF_ACCESS_READ /* FIXME */, istate, &pfrc);
+	
 	if (!pte) {
 		switch (pfrc) {
@@ -264,5 +254,6 @@
 	}
 	
-	pte->accessed = 1; /* Record access to PTE */
+	/* Record access to PTE */
+	pte->accessed = 1;
 	pht_insert(badvaddr, pte);
 	
@@ -274,5 +265,4 @@
 	pht_refill_fail(badvaddr, istate);
 }
-
 
 /** Process Instruction/Data Storage Exception in Real Mode
@@ -291,9 +281,5 @@
 		badvaddr = istate->pc;
 	
-	uint32_t physmem;
-	asm volatile (
-		"mfsprg3 %0\n"
-		: "=r" (physmem)
-	);
+	uint32_t physmem = physmem_top();
 	
 	if ((badvaddr < PA2KA(0)) || (badvaddr >= PA2KA(physmem)))
@@ -303,16 +289,8 @@
 	uint32_t api = (badvaddr >> 22) & 0x3f;
 	
-	uint32_t vsid;
-	asm volatile (
-		"mfsrin %0, %1\n"
-		: "=r" (vsid)
-		: "r" (badvaddr)
-	);
-	
-	uint32_t sdr1;
-	asm volatile (
-		"mfsdr1 %0\n"
-		: "=r" (sdr1)
-	);
+	uint32_t vsid = sr_get(badvaddr);
+	uint32_t sdr1 = sdr1_get();
+	
+	// FIXME: compute size of PHT exactly
 	phte_t *phte_real = (phte_t *) (sdr1 & 0xffff0000);
 	
@@ -396,5 +374,4 @@
 }
 
-
 /** Process ITLB/DTLB Miss Exception in Real Mode
  *
@@ -404,10 +381,5 @@
 {
 	uint32_t badvaddr = tlbmiss & 0xfffffffc;
-	
-	uint32_t physmem;
-	asm volatile (
-		"mfsprg3 %0\n"
-		: "=r" (physmem)
-	);
+	uint32_t physmem = physmem_top();
 	
 	if ((badvaddr < PA2KA(0)) || (badvaddr >= PA2KA(physmem)))
@@ -420,15 +392,14 @@
 	uint32_t index = 0;
 	asm volatile (
-		"mtspr 981, %0\n"
-		"mtspr 982, %1\n"
-		"tlbld %2\n"
-		"tlbli %2\n"
-		: "=r" (index)
-		: "r" (ptehi),
-		  "r" (ptelo)
+		"mtspr 981, %[ptehi]\n"
+		"mtspr 982, %[ptelo]\n"
+		"tlbld %[index]\n"
+		"tlbli %[index]\n"
+		: [index] "=r" (index)
+		: [ptehi] "r" (ptehi),
+		  [ptelo] "r" (ptelo)
 	);
 }
 
-
 void tlb_arch_init(void)
 {
@@ -436,15 +407,15 @@
 }
 
-
 void tlb_invalidate_all(void)
 {
 	uint32_t index;
+	
 	asm volatile (
-		"li %0, 0\n"
+		"li %[index], 0\n"
 		"sync\n"
 		
 		".rept 64\n"
-		"tlbie %0\n"
-		"addi %0, %0, 0x1000\n"
+		"	tlbie %[index]\n"
+		"	addi %[index], %[index], 0x1000\n"
 		".endr\n"
 		
@@ -452,19 +423,16 @@
 		"tlbsync\n"
 		"sync\n"
-		: "=r" (index)
+		: [index] "=r" (index)
 	);
 }
 
-
 void tlb_invalidate_asid(asid_t asid)
 {
-	uint32_t sdr1;
-	asm volatile (
-		"mfsdr1 %0\n"
-		: "=r" (sdr1)
-	);
+	uint32_t sdr1 = sdr1_get();
+	
+	// FIXME: compute size of PHT exactly
 	phte_t *phte = (phte_t *) PA2KA(sdr1 & 0xffff0000);
 	
-	uint32_t i;
+	size_t i;
 	for (i = 0; i < 8192; i++) {
 		if ((phte[i].v) && (phte[i].vsid >= (asid << 4)) &&
@@ -472,7 +440,7 @@
 			phte[i].v = 0;
 	}
+	
 	tlb_invalidate_all();
 }
-
 
 void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
@@ -482,15 +450,17 @@
 }
 
-
 #define PRINT_BAT(name, ureg, lreg) \
 	asm volatile ( \
-		"mfspr %0," #ureg "\n" \
-		"mfspr %1," #lreg "\n" \
-		: "=r" (upper), "=r" (lower) \
+		"mfspr %[upper], " #ureg "\n" \
+		"mfspr %[lower], " #lreg "\n" \
+		: [upper] "=r" (upper), \
+		  [lower] "=r" (lower) \
 	); \
+	\
 	mask = (upper & 0x1ffc) >> 2; \
 	if (upper & 3) { \
 		uint32_t tmp = mask; \
 		length = 128; \
+		\
 		while (tmp) { \
 			if ((tmp & 1) == 0) { \
@@ -503,4 +473,5 @@
 	} else \
 		length = 0; \
+	\
 	printf(name ": page=%.*p frame=%.*p length=%d KB (mask=%#x)%s%s\n", \
 	    sizeof(upper) * 2, upper & 0xffff0000, sizeof(lower) * 2, \
@@ -515,10 +486,6 @@
 	
 	for (sr = 0; sr < 16; sr++) {
-		uint32_t vsid;
-		asm volatile (
-			"mfsrin %0, %1\n"
-			: "=r" (vsid)
-			: "r" (sr << 28)
-		);
+		uint32_t vsid = sr_get(sr << 28);
+		
 		printf("sr[%02u]: vsid=%.*p (asid=%u)%s%s\n", sr,
 		    sizeof(vsid) * 2, vsid & 0xffffff, (vsid & 0xffffff) >> 4,
Index: kernel/arch/sparc64/src/mm/sun4u/as.c
===================================================================
--- kernel/arch/sparc64/src/mm/sun4u/as.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/sparc64/src/mm/sun4u/as.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -41,4 +41,5 @@
 
 #ifdef CONFIG_TSB
+
 #include <arch/mm/tsb.h>
 #include <arch/memstr.h>
@@ -47,4 +48,5 @@
 #include <bitops.h>
 #include <macros.h>
+
 #endif /* CONFIG_TSB */
 
@@ -58,5 +60,5 @@
 }
 
-int as_constructor_arch(as_t *as, int flags)
+int as_constructor_arch(as_t *as, unsigned int flags)
 {
 #ifdef CONFIG_TSB
@@ -64,20 +66,22 @@
 	 * The order must be calculated with respect to the emulated
 	 * 16K page size.
-	 */
-	int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
+	 *
+	 */
+	uint8_t order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
 	    sizeof(tsb_entry_t)) >> FRAME_WIDTH);
-
+	
 	uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);
-
+	
 	if (!tsb)
 		return -1;
-
+	
 	as->arch.itsb = (tsb_entry_t *) tsb;
 	as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
 	    sizeof(tsb_entry_t));
-
+	
 	memsetb(as->arch.itsb,
 	    (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0);
 #endif
+	
 	return 0;
 }
@@ -93,4 +97,5 @@
 	    sizeof(tsb_entry_t)) >> FRAME_WIDTH;
 	frame_free(KA2PA((uintptr_t) as->arch.itsb));
+	
 	return cnt;
 #else
@@ -99,9 +104,10 @@
 }
 
-int as_create_arch(as_t *as, int flags)
+int as_create_arch(as_t *as, unsigned int flags)
 {
 #ifdef CONFIG_TSB
 	tsb_invalidate(as, 0, (size_t) -1);
 #endif
+	
 	return 0;
 }
@@ -123,4 +129,5 @@
 	 *
 	 * Moreover, the as->asid is protected by asidlock, which is being held.
+	 *
 	 */
 	
@@ -130,16 +137,17 @@
 	 * secondary context register from the TL=1 code just before switch to
 	 * userspace.
+	 *
 	 */
 	ctx.v = 0;
 	ctx.context = as->asid;
 	mmu_secondary_context_write(ctx.v);
-
-#ifdef CONFIG_TSB	
+	
+#ifdef CONFIG_TSB
 	uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
-
+	
 	ASSERT(as->arch.itsb && as->arch.dtsb);
-
+	
 	uintptr_t tsb = (uintptr_t) as->arch.itsb;
-		
+	
 	if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
 		/*
@@ -147,18 +155,20 @@
 		 * by the locked 4M kernel DTLB entry. We need
 		 * to map both TSBs explicitly.
+		 *
 		 */
 		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
 		dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
 	}
-		
+	
 	/*
 	 * Setup TSB Base registers.
+	 *
 	 */
 	tsb_base_reg_t tsb_base;
-		
+	
 	tsb_base.value = 0;
 	tsb_base.size = TSB_SIZE;
 	tsb_base.split = 0;
-
+	
 	tsb_base.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH;
 	itsb_base_write(tsb_base.value);
@@ -175,5 +185,6 @@
 	 * Clearing the extension registers will ensure that the value of the
 	 * TSB Base register will be used as an address of TSB, making the code
-	 * compatible with the US port. 
+	 * compatible with the US port.
+	 *
 	 */
 	itsb_primary_extension_write(0);
@@ -195,5 +206,4 @@
 void as_deinstall_arch(as_t *as)
 {
-
 	/*
 	 * Note that we don't and may not lock the address space. That's ok
@@ -201,13 +211,14 @@
 	 *
 	 * Moreover, the as->asid is protected by asidlock, which is being held.
-	 */
-
+	 *
+	 */
+	
 #ifdef CONFIG_TSB
 	uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
-
+	
 	ASSERT(as->arch.itsb && as->arch.dtsb);
-
+	
 	uintptr_t tsb = (uintptr_t) as->arch.itsb;
-		
+	
 	if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
 		/*
Index: kernel/arch/sparc64/src/mm/sun4v/as.c
===================================================================
--- kernel/arch/sparc64/src/mm/sun4v/as.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/sparc64/src/mm/sun4v/as.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -44,4 +44,5 @@
 
 #ifdef CONFIG_TSB
+
 #include <arch/mm/tsb.h>
 #include <arch/memstr.h>
@@ -50,4 +51,5 @@
 #include <bitops.h>
 #include <macros.h>
+
 #endif /* CONFIG_TSB */
 
@@ -61,15 +63,15 @@
 }
 
-int as_constructor_arch(as_t *as, int flags)
+int as_constructor_arch(as_t *as, unsigned int flags)
 {
 #ifdef CONFIG_TSB
-	int order = fnzb32(
+	uint8_t order = fnzb32(
 		(TSB_ENTRY_COUNT * sizeof(tsb_entry_t)) >> FRAME_WIDTH);
-
+	
 	uintptr_t tsb = (uintptr_t) frame_alloc(order, flags);
-
+	
 	if (!tsb)
 		return -1;
-
+	
 	as->arch.tsb_description.page_size = PAGESIZE_8K;
 	as->arch.tsb_description.associativity = 1;
@@ -79,8 +81,9 @@
 	as->arch.tsb_description.reserved = 0;
 	as->arch.tsb_description.context = 0;
-
+	
 	memsetb((void *) PA2KA(as->arch.tsb_description.tsb_base),
 		TSB_ENTRY_COUNT * sizeof(tsb_entry_t), 0);
 #endif
+	
 	return 0;
 }
@@ -91,4 +94,5 @@
 	size_t cnt = (TSB_ENTRY_COUNT * sizeof(tsb_entry_t)) >> FRAME_WIDTH;
 	frame_free((uintptr_t) as->arch.tsb_description.tsb_base);
+	
 	return cnt;
 #else
@@ -97,9 +101,10 @@
 }
 
-int as_create_arch(as_t *as, int flags)
+int as_create_arch(as_t *as, unsigned int flags)
 {
 #ifdef CONFIG_TSB
 	tsb_invalidate(as, 0, (size_t) -1);
 #endif
+	
 	return 0;
 }
@@ -111,14 +116,16 @@
  *
  * @param as Address space.
+ *
  */
 void as_install_arch(as_t *as)
 {
 	mmu_secondary_context_write(as->asid);
-#ifdef CONFIG_TSB	
+	
+#ifdef CONFIG_TSB
 	uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
-
+	
 	ASSERT(as->arch.tsb_description.tsb_base);
 	uintptr_t tsb = PA2KA(as->arch.tsb_description.tsb_base);
-		
+	
 	if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
 		/*
@@ -126,11 +133,11 @@
 		 * by the locked 4M kernel DTLB entry. We need
 		 * to map both TSBs explicitly.
+		 *
 		 */
 		mmu_demap_page(tsb, 0, MMU_FLAG_DTLB);
 		dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
 	}
-
+	
 	__hypercall_fast2(MMU_TSB_CTXNON0, 1, KA2PA(&(as->arch.tsb_description)));
-	
 #endif
 }
@@ -142,8 +149,8 @@
  *
  * @param as Address space.
+ *
  */
 void as_deinstall_arch(as_t *as)
 {
-
 	/*
 	 * Note that we don't and may not lock the address space. That's ok
@@ -151,13 +158,14 @@
 	 *
 	 * Moreover, the as->asid is protected by asidlock, which is being held.
+	 *
 	 */
-
+	
 #ifdef CONFIG_TSB
 	uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
-
+	
 	ASSERT(as->arch.tsb_description.tsb_base);
-
+	
 	uintptr_t tsb = PA2KA(as->arch.tsb_description.tsb_base);
-		
+	
 	if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
 		/*
@@ -165,4 +173,5 @@
 		 * by the locked 4M kernel DTLB entry. We need
 		 * to demap the entry installed by as_install_arch().
+		 *
 		 */
 		__hypercall_fast3(MMU_UNMAP_PERM_ADDR, tsb, 0, MMU_FLAG_DTLB);
Index: kernel/arch/sparc64/src/trap/sun4u/interrupt.c
===================================================================
--- kernel/arch/sparc64/src/trap/sun4u/interrupt.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/arch/sparc64/src/trap/sun4u/interrupt.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -55,18 +55,15 @@
 void interrupt(int n, istate_t *istate)
 {
-	uint64_t status;
-	uint64_t intrcv;
-	uint64_t data0;
-	status = asi_u64_read(ASI_INTR_DISPATCH_STATUS, 0);
+	uint64_t status = asi_u64_read(ASI_INTR_DISPATCH_STATUS, 0);
 	if (status & (!INTR_DISPATCH_STATUS_BUSY))
 		panic("Interrupt Dispatch Status busy bit not set\n");
-
-	intrcv = asi_u64_read(ASI_INTR_RECEIVE, 0);
+	
+	uint64_t intrcv = asi_u64_read(ASI_INTR_RECEIVE, 0);
 #if defined (US)
-	data0 = asi_u64_read(ASI_INTR_R, ASI_UDB_INTR_R_DATA_0);
+	uint64_t data0 = asi_u64_read(ASI_INTR_R, ASI_UDB_INTR_R_DATA_0);
 #elif defined (US3)
-	data0 = asi_u64_read(ASI_INTR_R, VA_INTR_R_DATA_0);
+	uint64_t data0 = asi_u64_read(ASI_INTR_R, VA_INTR_R_DATA_0);
 #endif
-
+	
 	irq_t *irq = irq_dispatch_and_lock(data0);
 	if (irq) {
@@ -75,11 +72,12 @@
 		 */
 		irq->handler(irq);
+		
 		/*
 		 * See if there is a clear-interrupt-routine and call it.
 		 */
-		if (irq->cir) {
+		if (irq->cir)
 			irq->cir(irq->cir_arg, irq->inr);
-		}
-		spinlock_unlock(&irq->lock);
+		
+		irq_spinlock_unlock(&irq->lock, false);
 	} else if (data0 > config.base) {
 		/*
@@ -90,7 +88,6 @@
 		 */
 #ifdef CONFIG_SMP
-		if (data0 == (uintptr_t) tlb_shootdown_ipi_recv) {
+		if (data0 == (uintptr_t) tlb_shootdown_ipi_recv)
 			tlb_shootdown_ipi_recv();
-		}
 #endif
 	} else {
@@ -101,7 +98,9 @@
 		printf("cpu%u: spurious interrupt (intrcv=%#" PRIx64
 		    ", data0=%#" PRIx64 ")\n", CPU->id, intrcv, data0);
+#else
+		(void) intrcv;
 #endif
 	}
-
+	
 	membar();
 	asi_u64_write(ASI_INTR_RECEIVE, 0, 0);
Index: kernel/genarch/include/mm/page_ht.h
===================================================================
--- kernel/genarch/include/mm/page_ht.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/genarch/include/mm/page_ht.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -32,5 +32,5 @@
 /**
  * @file
- * @brief	This is the generic page hash table interface.
+ * @brief This is the generic page hash table interface.
  */
 
@@ -46,18 +46,18 @@
 #include <adt/hash_table.h>
 
-#define PAGE_HT_KEYS	2
-#define KEY_AS		0
-#define KEY_PAGE	1
+#define PAGE_HT_KEYS  2
+#define KEY_AS        0
+#define KEY_PAGE      1
 
-#define PAGE_HT_ENTRIES_BITS	13
-#define PAGE_HT_ENTRIES		(1 << PAGE_HT_ENTRIES_BITS)
+#define PAGE_HT_ENTRIES_BITS  13
+#define PAGE_HT_ENTRIES       (1 << PAGE_HT_ENTRIES_BITS)
 
 /* Macros for querying page hash table PTEs. */
-#define PTE_VALID(pte)		((pte) != NULL)
-#define PTE_PRESENT(pte)	((pte)->p != 0)
-#define PTE_GET_FRAME(pte)	((pte)->frame)
-#define PTE_READABLE(pte)	1
-#define PTE_WRITABLE(pte)	((pte)->w != 0)
-#define PTE_EXECUTABLE(pte)	((pte)->x != 0)
+#define PTE_VALID(pte)       ((pte) != NULL)
+#define PTE_PRESENT(pte)     ((pte)->p != 0)
+#define PTE_GET_FRAME(pte)   ((pte)->frame)
+#define PTE_READABLE(pte)    1
+#define PTE_WRITABLE(pte)    ((pte)->w != 0)
+#define PTE_EXECUTABLE(pte)  ((pte)->x != 0)
 
 extern as_operations_t as_ht_operations;
Index: kernel/genarch/include/mm/page_pt.h
===================================================================
--- kernel/genarch/include/mm/page_pt.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/genarch/include/mm/page_pt.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -37,4 +37,5 @@
  * Architectures that use hierarchical page tables
  * are supposed to implement *_ARCH macros.
+ *
  */
 
@@ -52,76 +53,81 @@
  * Number of entries in each level.
  */
-#define PTL0_ENTRIES			PTL0_ENTRIES_ARCH
-#define PTL1_ENTRIES			PTL1_ENTRIES_ARCH
-#define PTL2_ENTRIES			PTL2_ENTRIES_ARCH
-#define PTL3_ENTRIES			PTL3_ENTRIES_ARCH
+#define PTL0_ENTRIES  PTL0_ENTRIES_ARCH
+#define PTL1_ENTRIES  PTL1_ENTRIES_ARCH
+#define PTL2_ENTRIES  PTL2_ENTRIES_ARCH
+#define PTL3_ENTRIES  PTL3_ENTRIES_ARCH
 
 /* Table sizes in each level */
-#define PTL0_SIZE			PTL0_SIZE_ARCH
-#define PTL1_SIZE			PTL1_SIZE_ARCH
-#define PTL2_SIZE			PTL2_SIZE_ARCH
-#define PTL3_SIZE			PTL3_SIZE_ARCH
+#define PTL0_SIZE  PTL0_SIZE_ARCH
+#define PTL1_SIZE  PTL1_SIZE_ARCH
+#define PTL2_SIZE  PTL2_SIZE_ARCH
+#define PTL3_SIZE  PTL3_SIZE_ARCH
 
 /*
  * These macros process vaddr and extract those portions
  * of it that function as indices to respective page tables.
+ *
  */
-#define PTL0_INDEX(vaddr)		PTL0_INDEX_ARCH(vaddr)
-#define PTL1_INDEX(vaddr)		PTL1_INDEX_ARCH(vaddr)
-#define PTL2_INDEX(vaddr)		PTL2_INDEX_ARCH(vaddr)
-#define PTL3_INDEX(vaddr)		PTL3_INDEX_ARCH(vaddr)
+#define PTL0_INDEX(vaddr)  PTL0_INDEX_ARCH(vaddr)
+#define PTL1_INDEX(vaddr)  PTL1_INDEX_ARCH(vaddr)
+#define PTL2_INDEX(vaddr)  PTL2_INDEX_ARCH(vaddr)
+#define PTL3_INDEX(vaddr)  PTL3_INDEX_ARCH(vaddr)
 
-#define SET_PTL0_ADDRESS(ptl0)		SET_PTL0_ADDRESS_ARCH(ptl0)
+#define SET_PTL0_ADDRESS(ptl0)  SET_PTL0_ADDRESS_ARCH(ptl0)
 
 /*
  * These macros traverse the 4-level tree of page tables,
  * each descending by one level.
+ *
  */
-#define GET_PTL1_ADDRESS(ptl0, i)	GET_PTL1_ADDRESS_ARCH(ptl0, i)
-#define GET_PTL2_ADDRESS(ptl1, i)	GET_PTL2_ADDRESS_ARCH(ptl1, i)
-#define GET_PTL3_ADDRESS(ptl2, i)	GET_PTL3_ADDRESS_ARCH(ptl2, i)
-#define GET_FRAME_ADDRESS(ptl3, i)	GET_FRAME_ADDRESS_ARCH(ptl3, i)
+#define GET_PTL1_ADDRESS(ptl0, i)   GET_PTL1_ADDRESS_ARCH(ptl0, i)
+#define GET_PTL2_ADDRESS(ptl1, i)   GET_PTL2_ADDRESS_ARCH(ptl1, i)
+#define GET_PTL3_ADDRESS(ptl2, i)   GET_PTL3_ADDRESS_ARCH(ptl2, i)
+#define GET_FRAME_ADDRESS(ptl3, i)  GET_FRAME_ADDRESS_ARCH(ptl3, i)
 
 /*
  * These macros are provided to change the shape of the 4-level tree of page
  * tables on respective level.
+ *
  */
-#define SET_PTL1_ADDRESS(ptl0, i, a)	SET_PTL1_ADDRESS_ARCH(ptl0, i, a)
-#define SET_PTL2_ADDRESS(ptl1, i, a)	SET_PTL2_ADDRESS_ARCH(ptl1, i, a)
-#define SET_PTL3_ADDRESS(ptl2, i, a)	SET_PTL3_ADDRESS_ARCH(ptl2, i, a)
-#define SET_FRAME_ADDRESS(ptl3, i, a)	SET_FRAME_ADDRESS_ARCH(ptl3, i, a)
+#define SET_PTL1_ADDRESS(ptl0, i, a)   SET_PTL1_ADDRESS_ARCH(ptl0, i, a)
+#define SET_PTL2_ADDRESS(ptl1, i, a)   SET_PTL2_ADDRESS_ARCH(ptl1, i, a)
+#define SET_PTL3_ADDRESS(ptl2, i, a)   SET_PTL3_ADDRESS_ARCH(ptl2, i, a)
+#define SET_FRAME_ADDRESS(ptl3, i, a)  SET_FRAME_ADDRESS_ARCH(ptl3, i, a)
 
 /*
  * These macros are provided to query various flags within the page tables.
+ *
  */
-#define GET_PTL1_FLAGS(ptl0, i)		GET_PTL1_FLAGS_ARCH(ptl0, i)
-#define GET_PTL2_FLAGS(ptl1, i)		GET_PTL2_FLAGS_ARCH(ptl1, i)
-#define GET_PTL3_FLAGS(ptl2, i)		GET_PTL3_FLAGS_ARCH(ptl2, i)
-#define GET_FRAME_FLAGS(ptl3, i)	GET_FRAME_FLAGS_ARCH(ptl3, i)
+#define GET_PTL1_FLAGS(ptl0, i)   GET_PTL1_FLAGS_ARCH(ptl0, i)
+#define GET_PTL2_FLAGS(ptl1, i)   GET_PTL2_FLAGS_ARCH(ptl1, i)
+#define GET_PTL3_FLAGS(ptl2, i)   GET_PTL3_FLAGS_ARCH(ptl2, i)
+#define GET_FRAME_FLAGS(ptl3, i)  GET_FRAME_FLAGS_ARCH(ptl3, i)
 
 /*
  * These macros are provided to set/clear various flags within the page tables.
+ *
  */
-#define SET_PTL1_FLAGS(ptl0, i, x)	SET_PTL1_FLAGS_ARCH(ptl0, i, x)
-#define SET_PTL2_FLAGS(ptl1, i, x)	SET_PTL2_FLAGS_ARCH(ptl1, i, x)
-#define SET_PTL3_FLAGS(ptl2, i, x)	SET_PTL3_FLAGS_ARCH(ptl2, i, x)
-#define SET_FRAME_FLAGS(ptl3, i, x)	SET_FRAME_FLAGS_ARCH(ptl3, i, x)
+#define SET_PTL1_FLAGS(ptl0, i, x)   SET_PTL1_FLAGS_ARCH(ptl0, i, x)
+#define SET_PTL2_FLAGS(ptl1, i, x)   SET_PTL2_FLAGS_ARCH(ptl1, i, x)
+#define SET_PTL3_FLAGS(ptl2, i, x)   SET_PTL3_FLAGS_ARCH(ptl2, i, x)
+#define SET_FRAME_FLAGS(ptl3, i, x)  SET_FRAME_FLAGS_ARCH(ptl3, i, x)
 
 /*
  * Macros for querying the last-level PTEs.
+ *
  */
-#define PTE_VALID(p)		PTE_VALID_ARCH((p))
-#define PTE_PRESENT(p)		PTE_PRESENT_ARCH((p))
-#define PTE_GET_FRAME(p)	PTE_GET_FRAME_ARCH((p))
-#define PTE_READABLE(p)		1
-#define PTE_WRITABLE(p)		PTE_WRITABLE_ARCH((p))
-#define PTE_EXECUTABLE(p)	PTE_EXECUTABLE_ARCH((p))
+#define PTE_VALID(p)       PTE_VALID_ARCH((p))
+#define PTE_PRESENT(p)     PTE_PRESENT_ARCH((p))
+#define PTE_GET_FRAME(p)   PTE_GET_FRAME_ARCH((p))
+#define PTE_READABLE(p)    1
+#define PTE_WRITABLE(p)    PTE_WRITABLE_ARCH((p))
+#define PTE_EXECUTABLE(p)  PTE_EXECUTABLE_ARCH((p))
 
 extern as_operations_t as_pt_operations;
 extern page_mapping_operations_t pt_mapping_operations;
 
-extern void page_mapping_insert_pt(as_t *as, uintptr_t page, uintptr_t frame,
-    int flags);
-extern pte_t *page_mapping_find_pt(as_t *as, uintptr_t page);
+extern void page_mapping_insert_pt(as_t *, uintptr_t, uintptr_t, unsigned int);
+extern pte_t *page_mapping_find_pt(as_t *, uintptr_t);
 
 #endif
Index: kernel/genarch/src/drivers/ega/ega.c
===================================================================
--- kernel/genarch/src/drivers/ega/ega.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/genarch/src/drivers/ega/ega.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -63,5 +63,5 @@
 
 typedef struct {
-	SPINLOCK_DECLARE(lock);
+	IRQ_SPINLOCK_DECLARE(lock);
 	
 	uint32_t cursor;
@@ -71,6 +71,6 @@
 } ega_instance_t;
 
-static void ega_putchar(outdev_t *dev, wchar_t ch, bool silent);
-static void ega_redraw(outdev_t *dev);
+static void ega_putchar(outdev_t *, wchar_t, bool);
+static void ega_redraw(outdev_t *);
 
 static outdev_operations_t egadev_ops = {
@@ -540,6 +540,5 @@
 	ega_instance_t *instance = (ega_instance_t *) dev->data;
 	
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&instance->lock);
+	irq_spinlock_lock(&instance->lock, true);
 	
 	switch (ch) {
@@ -564,6 +563,5 @@
 	ega_move_cursor(instance, silent);
 	
-	spinlock_unlock(&instance->lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&instance->lock, true);
 }
 
@@ -572,6 +570,5 @@
 	ega_instance_t *instance = (ega_instance_t *) dev->data;
 	
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&instance->lock);
+	irq_spinlock_lock(&instance->lock, true);
 	
 	memcpy(instance->addr, instance->backbuf, EGA_VRAM_SIZE);
@@ -579,6 +576,5 @@
 	ega_show_cursor(instance, silent);
 	
-	spinlock_unlock(&instance->lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&instance->lock, true);
 }
 
@@ -598,5 +594,5 @@
 	egadev->data = instance;
 	
-	spinlock_initialize(&instance->lock, "*ega_lock");
+	irq_spinlock_initialize(&instance->lock, "*ega.instance.lock");
 	
 	instance->base = base;
Index: kernel/genarch/src/mm/as_ht.c
===================================================================
--- kernel/genarch/src/mm/as_ht.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/genarch/src/mm/as_ht.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -30,8 +30,8 @@
  * @{
  */
- 
+
 /**
  * @file
- * @brief	Address space functions for global page hash table.
+ * @brief Address space functions for global page hash table.
  */
 
@@ -46,9 +46,9 @@
 #include <synch/mutex.h>
 
-static pte_t *ht_create(int flags);
-static void ht_destroy(pte_t *page_table);
+static pte_t *ht_create(unsigned int);
+static void ht_destroy(pte_t *);
 
-static void ht_lock(as_t *as, bool lock);
-static void ht_unlock(as_t *as, bool unlock);
+static void ht_lock(as_t *, bool);
+static void ht_unlock(as_t *, bool);
 
 as_operations_t as_ht_operations = {
@@ -68,6 +68,7 @@
  *
  * @return Returns NULL.
+ *
  */
-pte_t *ht_create(int flags)
+pte_t *ht_create(unsigned int flags)
 {
 	if (flags & FLAG_AS_KERNEL) {
@@ -75,4 +76,5 @@
 		mutex_initialize(&page_ht_lock, MUTEX_PASSIVE);
 	}
+	
 	return NULL;
 }
@@ -83,4 +85,5 @@
  *
  * @param page_table This parameter is ignored.
+ *
  */
 void ht_destroy(pte_t *page_table)
@@ -94,6 +97,7 @@
  * Interrupts must be disabled.
  *
- * @param as Address space.
+ * @param as   Address space.
  * @param lock If false, do not attempt to lock the address space.
+ *
  */
 void ht_lock(as_t *as, bool lock)
@@ -101,4 +105,5 @@
 	if (lock)
 		mutex_lock(&as->lock);
+	
 	mutex_lock(&page_ht_lock);
 }
@@ -109,10 +114,12 @@
  * Interrupts must be disabled.
  *
- * @param as Address space.
+ * @param as     Address space.
  * @param unlock If false, do not attempt to lock the address space.
+ *
  */
 void ht_unlock(as_t *as, bool unlock)
 {
 	mutex_unlock(&page_ht_lock);
+	
 	if (unlock)
 		mutex_unlock(&as->lock);
Index: kernel/genarch/src/mm/as_pt.c
===================================================================
--- kernel/genarch/src/mm/as_pt.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/genarch/src/mm/as_pt.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -33,5 +33,5 @@
 /**
  * @file
- * @brief	Address space functions for 4-level hierarchical pagetables.
+ * @brief Address space functions for 4-level hierarchical pagetables.
  */
 
@@ -47,9 +47,9 @@
 #include <arch.h>
 
-static pte_t *ptl0_create(int flags);
-static void ptl0_destroy(pte_t *page_table);
+static pte_t *ptl0_create(unsigned int);
+static void ptl0_destroy(pte_t *);
 
-static void pt_lock(as_t *as, bool lock);
-static void pt_unlock(as_t *as, bool unlock);
+static void pt_lock(as_t *, bool);
+static void pt_unlock(as_t *, bool);
 
 as_operations_t as_pt_operations = {
@@ -67,36 +67,38 @@
  *
  * @return New PTL0.
+ *
  */
-pte_t *ptl0_create(int flags)
+pte_t *ptl0_create(unsigned int flags)
 {
-	pte_t *src_ptl0, *dst_ptl0;
-	ipl_t ipl;
-	int table_size;
-
-	dst_ptl0 = (pte_t *) frame_alloc(PTL0_SIZE, FRAME_KA);
-	table_size = FRAME_SIZE << PTL0_SIZE;
-
-	if (flags & FLAG_AS_KERNEL) {
+	pte_t *dst_ptl0 = (pte_t *) frame_alloc(PTL0_SIZE, FRAME_KA);
+	size_t table_size = FRAME_SIZE << PTL0_SIZE;
+	
+	if (flags & FLAG_AS_KERNEL)
 		memsetb(dst_ptl0, table_size, 0);
-	} else {
-		uintptr_t src, dst;
-	
+	else {
 		/*
 		 * Copy the kernel address space portion to new PTL0.
+		 *
 		 */
-		 
-		ipl = interrupts_disable();
-		mutex_lock(&AS_KERNEL->lock);		
-		src_ptl0 = (pte_t *) PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);
-
-		src = (uintptr_t) &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
-		dst = (uintptr_t) &dst_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
-
+		
+		ipl_t ipl = interrupts_disable();
+		mutex_lock(&AS_KERNEL->lock);
+		
+		pte_t *src_ptl0 =
+		    (pte_t *) PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);
+		
+		uintptr_t src =
+		    (uintptr_t) &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
+		uintptr_t dst =
+		    (uintptr_t) &dst_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
+		
 		memsetb(dst_ptl0, table_size, 0);
-		memcpy((void *) dst, (void *) src, table_size - (src - (uintptr_t) src_ptl0));
+		memcpy((void *) dst, (void *) src,
+		    table_size - (src - (uintptr_t) src_ptl0));
+		
 		mutex_unlock(&AS_KERNEL->lock);
 		interrupts_restore(ipl);
 	}
-
+	
 	return (pte_t *) KA2PA((uintptr_t) dst_ptl0);
 }
@@ -107,8 +109,9 @@
  *
  * @param page_table Physical address of PTL0.
+ *
  */
 void ptl0_destroy(pte_t *page_table)
 {
-	frame_free((uintptr_t)page_table);
+	frame_free((uintptr_t) page_table);
 }
 
@@ -118,6 +121,7 @@
  * Interrupts must be disabled.
  *
- * @param as Address space.
+ * @param as   Address space.
  * @param lock If false, do not attempt to lock the address space.
+ *
  */
 void pt_lock(as_t *as, bool lock)
@@ -132,6 +136,7 @@
  * Interrupts must be disabled.
  *
- * @param as Address space.
+ * @param as     Address space.
  * @param unlock If false, do not attempt to unlock the address space.
+ *
  */
 void pt_unlock(as_t *as, bool unlock)
Index: kernel/genarch/src/mm/page_ht.c
===================================================================
--- kernel/genarch/src/mm/page_ht.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/genarch/src/mm/page_ht.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -33,5 +33,5 @@
 /**
  * @file
- * @brief	Virtual Address Translation (VAT) for global page hash table.
+ * @brief Virtual Address Translation (VAT) for global page hash table.
  */
 
@@ -52,12 +52,11 @@
 #include <align.h>
 
-static size_t hash(unative_t key[]);
-static bool compare(unative_t key[], size_t keys, link_t *item);
-static void remove_callback(link_t *item);
-
-static void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
-    int flags);
-static void ht_mapping_remove(as_t *as, uintptr_t page);
-static pte_t *ht_mapping_find(as_t *as, uintptr_t page);
+static size_t hash(unative_t[]);
+static bool compare(unative_t[], size_t, link_t *);
+static void remove_callback(link_t *);
+
+static void ht_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
+static void ht_mapping_remove(as_t *, uintptr_t);
+static pte_t *ht_mapping_find(as_t *, uintptr_t);
 
 /**
@@ -65,10 +64,12 @@
  * after address space lock and after any address space area
  * locks.
+ *
  */
 mutex_t page_ht_lock;
 
-/**
- * Page hash table.
+/** Page hash table.
+ *
  * The page hash table may be accessed only when page_ht_lock is held.
+ *
  */
 hash_table_t page_ht;
@@ -93,4 +94,5 @@
  *
  * @return Index into page hash table.
+ *
  */
 size_t hash(unative_t key[])
@@ -98,5 +100,4 @@
 	as_t *as = (as_t *) key[KEY_AS];
 	uintptr_t page = (uintptr_t) key[KEY_PAGE];
-	size_t index;
 	
 	/*
@@ -104,6 +105,7 @@
 	 * of occurring. Least significant bits of VPN compose the
 	 * hash index.
-	 */
-	index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES - 1));
+	 *
+	 */
+	size_t index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES - 1));
 	
 	/*
@@ -111,4 +113,5 @@
 	 * similar addresses. Least significant bits compose the
 	 * hash index.
+	 *
 	 */
 	index |= ((unative_t) as) & (PAGE_HT_ENTRIES - 1);
@@ -119,28 +122,28 @@
 /** Compare page hash table item with page and/or address space.
  *
- * @param key Array of one or two keys (i.e. page and/or address space).
+ * @param key  Array of one or two keys (i.e. page and/or address space).
  * @param keys Number of keys passed.
  * @param item Item to compare the keys with.
  *
  * @return true on match, false otherwise.
+ *
  */
 bool compare(unative_t key[], size_t keys, link_t *item)
 {
-	pte_t *t;
-
 	ASSERT(item);
-	ASSERT((keys > 0) && (keys <= PAGE_HT_KEYS));
-
+	ASSERT(keys > 0);
+	ASSERT(keys <= PAGE_HT_KEYS);
+	
 	/*
 	 * Convert item to PTE.
-	 */
-	t = hash_table_get_instance(item, pte_t, link);
-
-	if (keys == PAGE_HT_KEYS) {
-		return (key[KEY_AS] == (uintptr_t) t->as) &&
-		    (key[KEY_PAGE] == t->page);
-	} else {
-		return (key[KEY_AS] == (uintptr_t) t->as);
-	}
+	 *
+	 */
+	pte_t *pte = hash_table_get_instance(item, pte_t, link);
+	
+	if (keys == PAGE_HT_KEYS)
+		return (key[KEY_AS] == (uintptr_t) pte->as) &&
+		    (key[KEY_PAGE] == pte->page);
+	
+	return (key[KEY_AS] == (uintptr_t) pte->as);
 }
 
@@ -148,17 +151,17 @@
  *
  * @param item Page hash table item being removed.
+ *
  */
 void remove_callback(link_t *item)
 {
-	pte_t *t;
-
 	ASSERT(item);
-
+	
 	/*
 	 * Convert item to PTE.
-	 */
-	t = hash_table_get_instance(item, pte_t, link);
-
-	free(t);
+	 *
+	 */
+	pte_t *pte = hash_table_get_instance(item, pte_t, link);
+	
+	free(pte);
 }
 
@@ -166,16 +169,17 @@
  *
  * Map virtual address page to physical address frame
- * using flags. 
+ * using flags.
  *
  * The page table must be locked and interrupts must be disabled.
  *
- * @param as Address space to which page belongs.
- * @param page Virtual address of the page to be mapped.
+ * @param as    Address space to which page belongs.
+ * @param page  Virtual address of the page to be mapped.
  * @param frame Physical address of memory frame to which the mapping is done.
  * @param flags Flags to be used for mapping.
- */
-void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags)
-{
-	pte_t *t;
+ *
+ */
+void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
+    unsigned int flags)
+{
 	unative_t key[2] = {
 		(uintptr_t) as,
@@ -184,21 +188,21 @@
 	
 	if (!hash_table_find(&page_ht, key)) {
-		t = (pte_t *) malloc(sizeof(pte_t), FRAME_ATOMIC);
-		ASSERT(t != NULL);
-
-		t->g = (flags & PAGE_GLOBAL) != 0;
-		t->x = (flags & PAGE_EXEC) != 0;
-		t->w = (flags & PAGE_WRITE) != 0;
-		t->k = !(flags & PAGE_USER);
-		t->c = (flags & PAGE_CACHEABLE) != 0;
-		t->p = !(flags & PAGE_NOT_PRESENT);
-		t->a = false;
-		t->d = false;
-
-		t->as = as;
-		t->page = ALIGN_DOWN(page, PAGE_SIZE);
-		t->frame = ALIGN_DOWN(frame, FRAME_SIZE);
-
-		hash_table_insert(&page_ht, key, &t->link);
+		pte_t *pte = (pte_t *) malloc(sizeof(pte_t), FRAME_ATOMIC);
+		ASSERT(pte != NULL);
+		
+		pte->g = (flags & PAGE_GLOBAL) != 0;
+		pte->x = (flags & PAGE_EXEC) != 0;
+		pte->w = (flags & PAGE_WRITE) != 0;
+		pte->k = !(flags & PAGE_USER);
+		pte->c = (flags & PAGE_CACHEABLE) != 0;
+		pte->p = !(flags & PAGE_NOT_PRESENT);
+		pte->a = false;
+		pte->d = false;
+		
+		pte->as = as;
+		pte->page = ALIGN_DOWN(page, PAGE_SIZE);
+		pte->frame = ALIGN_DOWN(frame, FRAME_SIZE);
+		
+		hash_table_insert(&page_ht, key, &pte->link);
 	}
 }
@@ -212,6 +216,7 @@
  * The page table must be locked and interrupts must be disabled.
  *
- * @param as Address space to wich page belongs.
+ * @param as   Address space to which page belongs.
  * @param page Virtual address of the page to be demapped.
+ *
  */
 void ht_mapping_remove(as_t *as, uintptr_t page)
@@ -236,13 +241,12 @@
  * The page table must be locked and interrupts must be disabled.
  *
- * @param as Address space to wich page belongs.
+ * @param as   Address space to which page belongs.
  * @param page Virtual page.
  *
  * @return NULL if there is no such mapping; requested mapping otherwise.
+ *
  */
 pte_t *ht_mapping_find(as_t *as, uintptr_t page)
 {
-	link_t *hlp;
-	pte_t *t = NULL;
 	unative_t key[2] = {
 		(uintptr_t) as,
@@ -250,9 +254,9 @@
 	};
 	
-	hlp = hash_table_find(&page_ht, key);
-	if (hlp)
-		t = hash_table_get_instance(hlp, pte_t, link);
-
-	return t;
+	link_t *cur = hash_table_find(&page_ht, key);
+	if (cur)
+		return hash_table_get_instance(cur, pte_t, link);
+	
+	return NULL;
 }
 
Index: kernel/genarch/src/mm/page_pt.c
===================================================================
--- kernel/genarch/src/mm/page_pt.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/genarch/src/mm/page_pt.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -33,5 +33,5 @@
 /**
  * @file
- * @brief	Virtual Address Translation for hierarchical 4-level page tables.
+ * @brief Virtual Address Translation for hierarchical 4-level page tables.
  */
 
@@ -46,7 +46,7 @@
 #include <memstr.h>
 
-static void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags);
-static void pt_mapping_remove(as_t *as, uintptr_t page);
-static pte_t *pt_mapping_find(as_t *as, uintptr_t page);
+static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
+static void pt_mapping_remove(as_t *, uintptr_t);
+static pte_t *pt_mapping_find(as_t *, uintptr_t);
 
 page_mapping_operations_t pt_mapping_operations = {
@@ -63,43 +63,42 @@
  * The page table must be locked and interrupts must be disabled.
  *
- * @param as Address space to wich page belongs.
- * @param page Virtual address of the page to be mapped.
+ * @param as    Address space to which page belongs.
+ * @param page  Virtual address of the page to be mapped.
  * @param frame Physical address of memory frame to which the mapping is done.
  * @param flags Flags to be used for mapping.
- */
-void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags)
+ *
+ */
+void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
+    unsigned int flags)
 {
-	pte_t *ptl0, *ptl1, *ptl2, *ptl3;
-	pte_t *newpt;
-
-	ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
-
+	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
+	
 	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
-		newpt = (pte_t *)frame_alloc(PTL1_SIZE, FRAME_KA);
+		pte_t *newpt = (pte_t *) frame_alloc(PTL1_SIZE, FRAME_KA);
 		memsetb(newpt, FRAME_SIZE << PTL1_SIZE, 0);
 		SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
 		SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
 	}
-
-	ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
-
+	
+	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
+	
 	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
-		newpt = (pte_t *)frame_alloc(PTL2_SIZE, FRAME_KA);
+		pte_t *newpt = (pte_t *) frame_alloc(PTL2_SIZE, FRAME_KA);
 		memsetb(newpt, FRAME_SIZE << PTL2_SIZE, 0);
 		SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
 		SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
 	}
-
-	ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
-
+	
+	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
+	
 	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
-		newpt = (pte_t *)frame_alloc(PTL3_SIZE, FRAME_KA);
+		pte_t *newpt = (pte_t *) frame_alloc(PTL3_SIZE, FRAME_KA);
 		memsetb(newpt, FRAME_SIZE << PTL3_SIZE, 0);
 		SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
 		SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
 	}
-
-	ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));
-
+	
+	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));
+	
 	SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame);
 	SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags);
@@ -116,42 +115,41 @@
  * The page table must be locked and interrupts must be disabled.
  *
- * @param as Address space to wich page belongs.
+ * @param as   Address space to which page belongs.
  * @param page Virtual address of the page to be demapped.
+ *
  */
 void pt_mapping_remove(as_t *as, uintptr_t page)
 {
-	pte_t *ptl0, *ptl1, *ptl2, *ptl3;
-	bool empty = true;
-	int i;
-
 	/*
 	 * First, remove the mapping, if it exists.
+	 *
 	 */
-
-	ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
-
+	
+	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
 	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
 		return;
-
-	ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
-
+	
+	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
 	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
 		return;
-
-	ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
-
+	
+	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
 	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
 		return;
-
-	ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));
-
+	
+	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));
+	
 	/* Destroy the mapping. Setting to PAGE_NOT_PRESENT is not sufficient. */
 	memsetb(&ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0);
-
+	
 	/*
 	 * Second, free all empty tables along the way from PTL3 down to PTL0.
+	 *
 	 */
 	
-	/* check PTL3 */
+	/* Check PTL3 */
+	bool empty = true;
+	
+	unsigned int i;
 	for (i = 0; i < PTL3_ENTRIES; i++) {
 		if (PTE_VALID(&ptl3[i])) {
@@ -160,16 +158,19 @@
 		}
 	}
+	
 	if (empty) {
 		/*
 		 * PTL3 is empty.
 		 * Release the frame and remove PTL3 pointer from preceding table.
+		 *
 		 */
 		frame_free(KA2PA((uintptr_t) ptl3));
-		if (PTL2_ENTRIES)
-			memsetb(&ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0);
-		else if (PTL1_ENTRIES)
-			memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
-		else
-			memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
+#if (PTL2_ENTRIES != 0)
+		memsetb(&ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0);
+#elif (PTL1_ENTRIES != 0)
+		memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
+#else
+		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
+#endif
 	} else {
 		/*
@@ -177,55 +178,60 @@
 		 * Therefore, there must be a path from PTL0 to PTL3 and
 		 * thus nothing to free in higher levels.
-		 */
-		return;
-	}
-	
-	/* check PTL2, empty is still true */
-	if (PTL2_ENTRIES) {
-		for (i = 0; i < PTL2_ENTRIES; i++) {
-			if (PTE_VALID(&ptl2[i])) {
-				empty = false;
-				break;
-			}
+		 *
+		 */
+		return;
+	}
+	
+	/* Check PTL2, empty is still true */
+#if (PTL2_ENTRIES != 0)
+	for (i = 0; i < PTL2_ENTRIES; i++) {
+		if (PTE_VALID(&ptl2[i])) {
+			empty = false;
+			break;
 		}
-		if (empty) {
-			/*
-			 * PTL2 is empty.
-			 * Release the frame and remove PTL2 pointer from preceding table.
-			 */
-			frame_free(KA2PA((uintptr_t) ptl2));
-			if (PTL1_ENTRIES)
-				memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
-			else
-				memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
+	}
+	
+	if (empty) {
+		/*
+		 * PTL2 is empty.
+		 * Release the frame and remove PTL2 pointer from preceding table.
+		 *
+		 */
+		frame_free(KA2PA((uintptr_t) ptl2));
+#if (PTL1_ENTRIES != 0)
+		memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
+#else
+		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
+#endif
+	} else {
+		/*
+		 * PTL2 is not empty.
+		 * Therefore, there must be a path from PTL0 to PTL2 and
+		 * thus nothing to free in higher levels.
+		 *
+		 */
+		return;
+	}
+#endif /* PTL2_ENTRIES != 0 */
+	
+	/* Check PTL1, empty is still true */
+#if (PTL1_ENTRIES != 0)
+	for (i = 0; i < PTL1_ENTRIES; i++) {
+		if (PTE_VALID(&ptl1[i])) {
+			empty = false;
+			break;
 		}
-		else {
-			/*
-			 * PTL2 is not empty.
-			 * Therefore, there must be a path from PTL0 to PTL2 and
-			 * thus nothing to free in higher levels.
-			 */
-			return;
-		}
-	}
-
-	/* check PTL1, empty is still true */
-	if (PTL1_ENTRIES) {
-		for (i = 0; i < PTL1_ENTRIES; i++) {
-			if (PTE_VALID(&ptl1[i])) {
-				empty = false;
-				break;
-			}
-		}
-		if (empty) {
-			/*
-			 * PTL1 is empty.
-			 * Release the frame and remove PTL1 pointer from preceding table.
-			 */
-			frame_free(KA2PA((uintptr_t) ptl1));
-			memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
-		}
-	}
-
+	}
+	
+	if (empty) {
+		/*
+		 * PTL1 is empty.
+		 * Release the frame and remove PTL1 pointer from preceding table.
+		 *
+		 */
+		frame_free(KA2PA((uintptr_t) ptl1));
+		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
+	}
+#endif /* PTL1_ENTRIES != 0 */
 }
 
@@ -236,30 +242,27 @@
  * The page table must be locked and interrupts must be disabled.
  *
- * @param as Address space to which page belongs.
+ * @param as   Address space to which page belongs.
  * @param page Virtual page.
  *
- * @return NULL if there is no such mapping; entry from PTL3 describing the mapping otherwise.
+ * @return NULL if there is no such mapping; entry from PTL3 describing
+ *         the mapping otherwise.
+ *
  */
 pte_t *pt_mapping_find(as_t *as, uintptr_t page)
 {
-	pte_t *ptl0, *ptl1, *ptl2, *ptl3;
-
-	ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
-
+	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
 	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
 		return NULL;
-
-	ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
-
+	
+	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
 	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
 		return NULL;
-
-	ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
-
+	
+	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
 	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
 		return NULL;
-
-	ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));
-
+	
+	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));
+	
 	return &ptl3[PTL3_INDEX(page)];
 }
Index: kernel/generic/include/console/chardev.h
===================================================================
--- kernel/generic/include/console/chardev.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/console/chardev.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -57,5 +57,5 @@
 	
 	/** Protects everything below. */
-	SPINLOCK_DECLARE(lock);
+	IRQ_SPINLOCK_DECLARE(lock);
 	wchar_t buffer[INDEV_BUFLEN];
 	size_t counter;
@@ -95,13 +95,13 @@
 } outdev_t;
 
-extern void indev_initialize(const char *name, indev_t *indev,
-    indev_operations_t *op);
-extern void indev_push_character(indev_t *indev, wchar_t ch);
-extern wchar_t indev_pop_character(indev_t *indev);
+extern void indev_initialize(const char *, indev_t *,
+    indev_operations_t *);
+extern void indev_push_character(indev_t *, wchar_t);
+extern wchar_t indev_pop_character(indev_t *);
 
-extern void outdev_initialize(const char *name, outdev_t *outdev,
-    outdev_operations_t *op);
+extern void outdev_initialize(const char *, outdev_t *,
+    outdev_operations_t *);
 
-extern bool check_poll(indev_t *indev);
+extern bool check_poll(indev_t *);
 
 #endif /* KERN_CHARDEV_H_ */
Index: kernel/generic/include/cpu.h
===================================================================
--- kernel/generic/include/cpu.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/cpu.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -42,5 +42,5 @@
 #include <arch/context.h>
 
-#define CPU_STACK_SIZE	STACK_SIZE
+#define CPU_STACK_SIZE  STACK_SIZE
 
 /** CPU structure.
@@ -49,40 +49,44 @@
  */
 typedef struct cpu {
-	SPINLOCK_DECLARE(lock);
-
+	IRQ_SPINLOCK_DECLARE(lock);
+	
 	tlb_shootdown_msg_t tlb_messages[TLB_MESSAGE_QUEUE_LEN];
 	size_t tlb_messages_count;
 	
 	context_t saved_context;
-
+	
 	atomic_t nrdy;
 	runq_t rq[RQ_COUNT];
 	volatile size_t needs_relink;
-
-	SPINLOCK_DECLARE(timeoutlock);
+	
+	IRQ_SPINLOCK_DECLARE(timeoutlock);
 	link_t timeout_active_head;
-
-	size_t missed_clock_ticks;	/**< When system clock loses a tick, it is recorded here
-					     so that clock() can react. This variable is
-					     CPU-local and can be only accessed when interrupts
-					     are disabled. */
-
+	
+	/**
+	 * When system clock loses a tick, it is
+	 * recorded here so that clock() can react.
+	 * This variable is CPU-local and can be
+	 * only accessed when interrupts are
+	 * disabled.
+	 */
+	size_t missed_clock_ticks;
+	
 	bool idle;
 	uint64_t idle_ticks;
 	uint64_t busy_ticks;
-
+	
 	/**
 	 * Processor ID assigned by kernel.
 	 */
-	unsigned int id;
+	size_t id;
 	
 	bool active;
-	int tlb_active;
-
+	bool tlb_active;
+	
 	uint16_t frequency_mhz;
 	uint32_t delay_loop_const;
-
+	
 	cpu_arch_t arch;
-
+	
 	struct thread *fpu_owner;
 	
@@ -100,5 +104,5 @@
 extern void cpu_arch_init(void);
 extern void cpu_identify(void);
-extern void cpu_print_report(cpu_t *m);
+extern void cpu_print_report(cpu_t *);
 
 #endif
Index: kernel/generic/include/ddi/ddi.h
===================================================================
--- kernel/generic/include/ddi/ddi.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/ddi/ddi.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -50,15 +50,14 @@
 
 extern void ddi_init(void);
-extern void ddi_parea_register(parea_t *parea);
+extern void ddi_parea_register(parea_t *);
 
-extern unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base,
-	unative_t pages, unative_t flags);
-extern unative_t sys_iospace_enable(ddi_ioarg_t *uspace_io_arg);
-extern unative_t sys_preempt_control(int enable);
+extern unative_t sys_physmem_map(unative_t, unative_t, unative_t, unative_t);
+extern unative_t sys_iospace_enable(ddi_ioarg_t *);
+extern unative_t sys_preempt_control(int);
 
 /*
  * Interface to be implemented by all architectures.
  */
-extern int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size);
+extern int ddi_iospace_enable_arch(task_t *, uintptr_t, size_t);
 
 #endif
Index: kernel/generic/include/ddi/ddi_arg.h
===================================================================
--- kernel/generic/include/ddi/ddi_arg.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/ddi/ddi_arg.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -36,8 +36,14 @@
 #define KERN_DDI_ARG_H_
 
+#ifdef KERNEL
+
+#include <typedefs.h>
+
+#endif /* KERNEL */
+
 /** Structure encapsulating arguments for SYS_PHYSMEM_MAP syscall. */
 typedef struct {
 	/** ID of the destination task. */
-	unsigned long long task_id;
+	uint64_t task_id;
 	/** Physical address of starting frame. */
 	void *phys_base;
@@ -45,14 +51,14 @@
 	void *virt_base;
 	/** Number of pages to map. */
-	unsigned long pages;
+	size_t pages;
 	/** Address space area flags for the mapping. */
-	int flags;
+	unsigned int flags;
 } ddi_memarg_t;
 
 /** Structure encapsulating arguments for SYS_ENABLE_IOSPACE syscall. */
 typedef struct {
-	unsigned long long task_id;	/**< ID of the destination task. */
-	void *ioaddr;			/**< Starting I/O space address. */
-	unsigned long size;		/**< Number of bytes. */
+	uint64_t task_id;  /**< ID of the destination task. */
+	void *ioaddr;      /**< Starting I/O space address. */
+	size_t size;       /**< Number of bytes. */
 } ddi_ioarg_t;
 
Index: kernel/generic/include/ddi/irq.h
===================================================================
--- kernel/generic/include/ddi/irq.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/ddi/irq.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -35,4 +35,15 @@
 #ifndef KERN_IRQ_H_
 #define KERN_IRQ_H_
+
+#ifdef KERNEL
+
+#include <typedefs.h>
+#include <adt/list.h>
+#include <adt/hash_table.h>
+#include <synch/spinlock.h>
+#include <proc/task.h>
+#include <ipc/ipc.h>
+
+#endif /* KERNEL */
 
 typedef enum {
@@ -49,4 +60,5 @@
 	/** Write 4 bytes to the I/O space. */
 	CMD_PIO_WRITE_32,
+	
 	/**
 	 * Perform a bit test on the source argument and store the result into
@@ -54,4 +66,5 @@
 	 */
 	CMD_BTEST,
+	
 	/**
 	 * Predicate the execution of the following N commands by the boolean
@@ -59,4 +72,5 @@
 	 */
 	CMD_PREDICATE,
+	
 	/** Accept the interrupt. */
 	CMD_ACCEPT,
@@ -69,11 +83,11 @@
 	irq_cmd_type cmd;
 	void *addr;
-	unsigned long long value;
-	unsigned int srcarg;
-	unsigned int dstarg;
+	uint32_t value;
+	uintptr_t srcarg;
+	uintptr_t dstarg;
 } irq_cmd_t;
 
 typedef struct {
-	unsigned int cmdcount;
+	size_t cmdcount;
 	irq_cmd_t *cmds;
 } irq_code_t;
@@ -81,14 +95,7 @@
 #ifdef KERNEL
 
-#include <typedefs.h>
-#include <adt/list.h>
-#include <adt/hash_table.h>
-#include <synch/spinlock.h>
-#include <proc/task.h>
-#include <ipc/ipc.h>
-
 typedef enum {
-	IRQ_DECLINE,		/**< Decline to service. */
-	IRQ_ACCEPT		/**< Accept to service. */
+	IRQ_DECLINE,  /**< Decline to service. */
+	IRQ_ACCEPT    /**< Accept to service. */
 } irq_ownership_t;
 
@@ -108,4 +115,5 @@
  * Primarily, this structure is encapsulated in the irq_t structure.
  * It is protected by irq_t::lock.
+ *
  */
 typedef struct {
@@ -117,9 +125,10 @@
 	unative_t method;
 	/** Arguments that will be sent if the IRQ is claimed. */
-	unative_t scratch[IPC_CALL_LEN];
+	uint32_t scratch[IPC_CALL_LEN];
 	/** Top-half pseudocode. */
 	irq_code_t *code;
 	/** Counter. */
 	size_t counter;
+	
 	/**
 	 * Link between IRQs that are notifying the same answerbox. The list is
@@ -133,9 +142,10 @@
  * If one device has multiple interrupts, there will be multiple irq_t
  * instantions with the same devno.
+ *
  */
 typedef struct irq {
 	/** Hash table link. */
 	link_t link;
-
+	
 	/** Lock protecting everything in this structure
 	 *  except the link member. When both the IRQ
@@ -143,5 +153,5 @@
 	 *  this lock must not be taken first.
 	 */
-	SPINLOCK_DECLARE(lock);
+	IRQ_SPINLOCK_DECLARE(lock);
 	
 	/** Send EOI before processing the interrupt.
@@ -152,8 +162,8 @@
 	 */
 	bool preack;
-
+	
 	/** Unique device number. -1 if not yet assigned. */
 	devno_t devno;
-
+	
 	/** Actual IRQ number. -1 if not yet assigned. */
 	inr_t inr;
@@ -166,15 +176,15 @@
 	/** Instance argument for the handler and the claim function. */
 	void *instance;
-
+	
 	/** Clear interrupt routine. */
 	cir_t cir;
 	/** First argument to the clear interrupt routine. */
 	void *cir_arg;
-
+	
 	/** Notification configuration structure. */
 	ipc_notif_cfg_t notif_cfg; 
 } irq_t;
 
-SPINLOCK_EXTERN(irq_uspace_hash_table_lock);
+IRQ_SPINLOCK_EXTERN(irq_uspace_hash_table_lock);
 extern hash_table_t irq_uspace_hash_table;
 
@@ -184,8 +194,8 @@
 extern irq_t *irq_dispatch_and_lock(inr_t);
 
+#endif /* KERNEL */
+
 #endif
 
-#endif
-
 /** @}
  */
Index: kernel/generic/include/interrupt.h
===================================================================
--- kernel/generic/include/interrupt.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/interrupt.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -44,12 +44,12 @@
 #include <stacktrace.h>
 
-typedef void (* iroutine)(int n, istate_t *istate);
+typedef void (* iroutine)(int, istate_t *);
 
-extern void fault_if_from_uspace(istate_t *istate, const char *fmt, ...);
-extern iroutine exc_register(int n, const char *name, iroutine f);
-extern void exc_dispatch(int n, istate_t *t);
-void exc_init(void);
+extern void fault_if_from_uspace(istate_t *, const char *, ...);
+extern iroutine exc_register(int, const char *, iroutine);
+extern void exc_dispatch(int, istate_t *);
+extern void exc_init(void);
 
-extern void irq_initialize_arch(irq_t *irq);
+extern void irq_initialize_arch(irq_t *);
 
 #endif
Index: kernel/generic/include/ipc/ipc.h
===================================================================
--- kernel/generic/include/ipc/ipc.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/ipc/ipc.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -36,13 +36,16 @@
 #define KERN_IPC_H_
 
-/* Length of data being transfered with IPC call */
-/* - the uspace may not be able to utilize full length */
-#define IPC_CALL_LEN		6	
+/** Length of data being transferred with IPC call
+ *
+ * The uspace may not be able to utilize full length
+ *
+ */
+#define IPC_CALL_LEN  6
 
 /** Maximum active async calls per thread */
 #ifdef CONFIG_DEBUG
-#define IPC_MAX_ASYNC_CALLS	4
+	#define IPC_MAX_ASYNC_CALLS  4
 #else
-#define IPC_MAX_ASYNC_CALLS	4000
+	#define IPC_MAX_ASYNC_CALLS  4000
 #endif
 
@@ -50,52 +53,61 @@
 
 /** This is answer to a call */
-#define IPC_CALL_ANSWERED	(1 << 0)
+#define IPC_CALL_ANSWERED  (1 << 0)
+
 /** Answer will not be passed to userspace, will be discarded */
-#define IPC_CALL_DISCARD_ANSWER (1 << 1)
+#define IPC_CALL_DISCARD_ANSWER  (1 << 1)
+
 /** Call was forwarded */
-#define IPC_CALL_FORWARDED	(1 << 2)
+#define IPC_CALL_FORWARDED  (1 << 2)
+
 /** Identify connect_me_to answer */
-#define IPC_CALL_CONN_ME_TO	(1 << 3)
+#define IPC_CALL_CONN_ME_TO  (1 << 3)
+
 /** Interrupt notification */
-#define IPC_CALL_NOTIF		(1 << 4)
-
-/*
- * Bits used in call hashes.
+#define IPC_CALL_NOTIF  (1 << 4)
+
+
+/** Bits used in call hashes.
+ *
  * The addresses are aligned at least to 4 that is why we can use the 2 least
  * significant bits of the call address.
- */
+ *
+ */
+
 /** Type of this call is 'answer' */
-#define IPC_CALLID_ANSWERED	1
+#define IPC_CALLID_ANSWERED  1
+
 /** Type of this call is 'notification' */
-#define IPC_CALLID_NOTIFICATION	2
+#define IPC_CALLID_NOTIFICATION  2
 
 /* Return values from sys_ipc_call_async(). */
-#define IPC_CALLRET_FATAL	-1
-#define IPC_CALLRET_TEMPORARY	-2
+#define IPC_CALLRET_FATAL      -1
+#define IPC_CALLRET_TEMPORARY  -2
 
 
 /* Macros for manipulating calling data */
-#define IPC_SET_RETVAL(data, retval)	((data).args[0] = (retval))
-#define IPC_SET_METHOD(data, val)	((data).args[0] = (val))
-#define IPC_SET_ARG1(data, val)		((data).args[1] = (val))
-#define IPC_SET_ARG2(data, val)		((data).args[2] = (val))
-#define IPC_SET_ARG3(data, val)		((data).args[3] = (val))
-#define IPC_SET_ARG4(data, val)		((data).args[4] = (val))
-#define IPC_SET_ARG5(data, val)		((data).args[5] = (val))
-
-#define IPC_GET_METHOD(data)		((data).args[0])
-#define IPC_GET_RETVAL(data)		((data).args[0])
-
-#define IPC_GET_ARG1(data)		((data).args[1])
-#define IPC_GET_ARG2(data)		((data).args[2])
-#define IPC_GET_ARG3(data)		((data).args[3])
-#define IPC_GET_ARG4(data)		((data).args[4])
-#define IPC_GET_ARG5(data)		((data).args[5])
+#define IPC_SET_RETVAL(data, retval)  ((data).args[0] = (retval))
+#define IPC_SET_METHOD(data, val)     ((data).args[0] = (val))
+#define IPC_SET_ARG1(data, val)       ((data).args[1] = (val))
+#define IPC_SET_ARG2(data, val)       ((data).args[2] = (val))
+#define IPC_SET_ARG3(data, val)       ((data).args[3] = (val))
+#define IPC_SET_ARG4(data, val)       ((data).args[4] = (val))
+#define IPC_SET_ARG5(data, val)       ((data).args[5] = (val))
+
+#define IPC_GET_METHOD(data)  ((data).args[0])
+#define IPC_GET_RETVAL(data)  ((data).args[0])
+
+#define IPC_GET_ARG1(data)  ((data).args[1])
+#define IPC_GET_ARG2(data)  ((data).args[2])
+#define IPC_GET_ARG3(data)  ((data).args[3])
+#define IPC_GET_ARG4(data)  ((data).args[4])
+#define IPC_GET_ARG5(data)  ((data).args[5])
 
 /* Well known phone descriptors */
-#define PHONE_NS	0
+#define PHONE_NS  0
 
 /* Forwarding flags. */
-#define IPC_FF_NONE		0
+#define IPC_FF_NONE  0
+
 /**
  * The call will be routed as though it was initially sent via the phone used to
@@ -104,10 +116,13 @@
  * calls that were initially sent by the forwarder to the same destination. This
  * flag has no imapct on routing replies.
- */
-#define IPC_FF_ROUTE_FROM_ME	(1 << 0)
-
-/* System-specific methods - only through special syscalls
+ *
+ */
+#define IPC_FF_ROUTE_FROM_ME  (1 << 0)
+
+/** System-specific methods - only through special syscalls
  * These methods have special behaviour
- */
+ *
+ */
+
 /** Clone connection.
  *
@@ -115,19 +130,24 @@
  *
  * - ARG1 - The caller sets ARG1 to the phone of the cloned connection.
- *	  - The callee gets the new phone from ARG1.
+ *        - The callee gets the new phone from ARG1.
+ *
  * - on answer, the callee acknowledges the new connection by sending EOK back
  *   or the kernel closes it
- */
-#define IPC_M_CONNECTION_CLONE	1
+ *
+ */
+#define IPC_M_CONNECTION_CLONE  1
+
 /** Protocol for CONNECT - ME
  *
  * Through this call, the recipient learns about the new cloned connection. 
- * 
+ *
  * - ARG5 - the kernel sets ARG5 to contain the hash of the used phone
  * - on asnwer, the callee acknowledges the new connection by sending EOK back
  *   or the kernel closes it
- */
-#define IPC_M_CONNECT_ME	2
-/** Protocol for CONNECT - TO - ME 
+ *
+ */
+#define IPC_M_CONNECT_ME  2
+
+/** Protocol for CONNECT - TO - ME
  *
  * Calling process asks the callee to create a callback connection,
@@ -144,6 +164,8 @@
  *                     - the allocated phoneid is passed to userspace 
  *                       (on the receiving side) as ARG5 of the call.
- */
-#define IPC_M_CONNECT_TO_ME	3	
+ *
+ */
+#define IPC_M_CONNECT_TO_ME  3
+
 /** Protocol for CONNECT - ME - TO
  *
@@ -163,9 +185,10 @@
  *
  */
-#define IPC_M_CONNECT_ME_TO	4	
-/** This message is sent to answerbox when the phone
- * is hung up
- */
-#define IPC_M_PHONE_HUNGUP	5
+#define IPC_M_CONNECT_ME_TO  4
+
+/** This message is sent to answerbox when the phone is hung up
+ *
+ */
+#define IPC_M_PHONE_HUNGUP  5
 
 /** Send as_area over IPC.
@@ -173,9 +196,10 @@
  * - ARG2 - size of source as_area (filled automatically by kernel)
  * - ARG3 - flags of the as_area being sent
- * 
+ *
  * on answer, the recipient must set:
  * - ARG1 - dst as_area base adress
- */
-#define IPC_M_SHARE_OUT		6	
+ *
+ */
+#define IPC_M_SHARE_OUT  6
 
 /** Receive as_area over IPC.
@@ -183,11 +207,12 @@
  * - ARG2 - destination as_area size
  * - ARG3 - user defined argument
- * 
+ *
  * on answer, the recipient must set:
  *
  * - ARG1 - source as_area base address
  * - ARG2 - flags that will be used for sharing
- */
-#define IPC_M_SHARE_IN		7	
+ *
+ */
+#define IPC_M_SHARE_IN  7
 
 /** Send data to another address space over IPC.
@@ -199,6 +224,7 @@
  * - ARG1 - final destination address space virtual address
  * - ARG2 - final size of data to be copied
- */
-#define IPC_M_DATA_WRITE	8
+ *
+ */
+#define IPC_M_DATA_WRITE  8
 
 /** Receive data from another address space over IPC.
@@ -210,18 +236,21 @@
  * - ARG1 - source virtual address in the destination address space
  * - ARG2 - final size of data to be copied
- */
-#define IPC_M_DATA_READ		9
+ *
+ */
+#define IPC_M_DATA_READ  9
 
 /** Debug the recipient.
  * - ARG1 - specifies the debug method (from udebug_method_t)
  * - other arguments are specific to the debug method
- */
-#define IPC_M_DEBUG_ALL		10
+ *
+ */
+#define IPC_M_DEBUG_ALL  10
 
 /* Well-known methods */
-#define IPC_M_LAST_SYSTEM	511
-#define IPC_M_PING		512
+#define IPC_M_LAST_SYSTEM  511
+#define IPC_M_PING         512
+
 /* User methods */
-#define IPC_FIRST_USER_METHOD	1024
+#define IPC_FIRST_USER_METHOD  1024
 
 #ifdef KERNEL
@@ -259,23 +288,24 @@
 
 typedef struct answerbox {
-	SPINLOCK_DECLARE(lock);
-
+	IRQ_SPINLOCK_DECLARE(lock);
+	
 	struct task *task;
-
+	
 	waitq_t wq;
-
+	
 	/** Linkage for the list of task's synchronous answerboxes. */
 	link_t sync_box_link;
-
+	
 	/** Phones connected to this answerbox. */
 	link_t connected_phones;
 	/** Received calls. */
-	link_t calls;			
-	link_t dispatched_calls;	/* Should be hash table in the future */
-
+	link_t calls;
+	link_t dispatched_calls;  /* Should be hash table in the future */
+	
 	/** Answered calls. */
 	link_t answers;
-
-	SPINLOCK_DECLARE(irq_lock);
+	
+	IRQ_SPINLOCK_DECLARE(irq_lock);
+	
 	/** Notifications from IRQ handlers. */
 	link_t irq_notifs;
@@ -291,43 +321,46 @@
 typedef struct {
 	link_t link;
-
-	int flags;
-
+	
+	unsigned int flags;
+	
 	/** Identification of the caller. */
 	struct task *sender;
-	/** The caller box is different from sender->answerbox for synchronous
-	 *  calls. */
+	
+	/*
+	 * The caller box is different from sender->answerbox
+	 * for synchronous calls.
+	 *
+	 */
 	answerbox_t *callerbox;
-
+	
 	/** Private data to internal IPC. */
 	unative_t priv;
-
+	
 	/** Data passed from/to userspace. */
 	ipc_data_t data;
-
+	
 	/** Buffer for IPC_M_DATA_WRITE and IPC_M_DATA_READ. */
 	uint8_t *buffer;
-
+	
 	/*
 	 * The forward operation can masquerade the caller phone. For those
 	 * cases, we must keep it aside so that the answer is processed
 	 * correctly.
+	 *
 	 */
 	phone_t *caller_phone;
 } call_t;
 
-
 extern answerbox_t *ipc_phone_0;
 
-
 extern void ipc_init(void);
 
-extern call_t * ipc_call_alloc(int);
+extern call_t *ipc_call_alloc(unsigned int);
 extern void ipc_call_free(call_t *);
 
 extern int ipc_call(phone_t *, call_t *);
 extern int ipc_call_sync(phone_t *, call_t *);
-extern call_t * ipc_wait_for_call(answerbox_t *, uint32_t, int);
-extern int ipc_forward(call_t *, phone_t *, answerbox_t *, int);
+extern call_t * ipc_wait_for_call(answerbox_t *, uint32_t, unsigned int);
+extern int ipc_forward(call_t *, phone_t *, answerbox_t *, unsigned int);
 extern void ipc_answer(answerbox_t *, call_t *);
 
@@ -345,8 +378,8 @@
 extern void ipc_print_task(task_id_t);
 
+#endif /* KERNEL */
+
 #endif
 
-#endif
-
 /** @}
  */
Index: kernel/generic/include/ipc/irq.h
===================================================================
--- kernel/generic/include/ipc/irq.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/ipc/irq.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -37,5 +37,5 @@
 
 /** Maximum length of IPC IRQ program */
-#define IRQ_MAX_PROG_SIZE	20
+#define IRQ_MAX_PROG_SIZE  20
 
 #include <ipc/ipc.h>
@@ -58,15 +58,20 @@
  */
 #define ipc_irq_send_msg_0(irq) \
-    ipc_irq_send_msg((irq), 0, 0, 0, 0, 0)
+	ipc_irq_send_msg((irq), 0, 0, 0, 0, 0)
+
 #define ipc_irq_send_msg_1(irq, a1) \
-    ipc_irq_send_msg((irq), (a1), 0, 0, 0, 0)
+	ipc_irq_send_msg((irq), (a1), 0, 0, 0, 0)
+
 #define ipc_irq_send_msg_2(irq, a1, a2) \
-    ipc_irq_send_msg((irq), (a1), (a2), 0, 0, 0)
+	ipc_irq_send_msg((irq), (a1), (a2), 0, 0, 0)
+
 #define ipc_irq_send_msg_3(irq, a1, a2, a3) \
-    ipc_irq_send_msg((irq), (a1), (a2), (a3), 0, 0)
+	ipc_irq_send_msg((irq), (a1), (a2), (a3), 0, 0)
+
 #define ipc_irq_send_msg_4(irq, a1, a2, a3, a4) \
-    ipc_irq_send_msg((irq), (a1), (a2), (a3), (a4), 0)
+	ipc_irq_send_msg((irq), (a1), (a2), (a3), (a4), 0)
+
 #define ipc_irq_send_msg_5(irq, a1, a2, a3, a4, a5) \
-    ipc_irq_send_msg((irq), (a1), (a2), (a3), (a4), (a5))
+	ipc_irq_send_msg((irq), (a1), (a2), (a3), (a4), (a5))
 
 extern void ipc_irq_send_msg(irq_t *, unative_t, unative_t, unative_t, unative_t,
Index: kernel/generic/include/ipc/sysipc.h
===================================================================
--- kernel/generic/include/ipc/sysipc.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/ipc/sysipc.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -40,26 +40,26 @@
 #include <typedefs.h>
 
-unative_t sys_ipc_call_sync_fast(unative_t phoneid, unative_t method, 
+extern unative_t sys_ipc_call_sync_fast(unative_t phoneid, unative_t method,
     unative_t arg1, unative_t arg2, unative_t arg3, ipc_data_t *data);
-unative_t sys_ipc_call_sync_slow(unative_t phoneid, ipc_data_t *question,
+extern unative_t sys_ipc_call_sync_slow(unative_t phoneid, ipc_data_t *question,
     ipc_data_t *reply);
-unative_t sys_ipc_call_async_fast(unative_t phoneid, unative_t method, 
+extern unative_t sys_ipc_call_async_fast(unative_t phoneid, unative_t method,
     unative_t arg1, unative_t arg2, unative_t arg3, unative_t arg4);
-unative_t sys_ipc_call_async_slow(unative_t phoneid, ipc_data_t *data);
-unative_t sys_ipc_answer_fast(unative_t callid, unative_t retval, 
+extern unative_t sys_ipc_call_async_slow(unative_t phoneid, ipc_data_t *data);
+extern unative_t sys_ipc_answer_fast(unative_t callid, unative_t retval,
     unative_t arg1, unative_t arg2, unative_t arg3, unative_t arg4);
-unative_t sys_ipc_answer_slow(unative_t callid, ipc_data_t *data);
-unative_t sys_ipc_wait_for_call(ipc_data_t *calldata, uint32_t usec,
-    int nonblocking);
-unative_t sys_ipc_poke(void);
-unative_t sys_ipc_forward_fast(unative_t callid, unative_t phoneid,
-    unative_t method, unative_t arg1, unative_t arg2, int mode);
-unative_t sys_ipc_forward_slow(unative_t callid, unative_t phoneid,
-    ipc_data_t *data, int mode);
-unative_t sys_ipc_hangup(unative_t phoneid);
-unative_t sys_ipc_register_irq(inr_t inr, devno_t devno, unative_t method,
+extern unative_t sys_ipc_answer_slow(unative_t callid, ipc_data_t *data);
+extern unative_t sys_ipc_wait_for_call(ipc_data_t *calldata, uint32_t usec,
+    unsigned int nonblocking);
+extern unative_t sys_ipc_poke(void);
+extern unative_t sys_ipc_forward_fast(unative_t callid, unative_t phoneid,
+    unative_t method, unative_t arg1, unative_t arg2, unsigned int mode);
+extern unative_t sys_ipc_forward_slow(unative_t callid, unative_t phoneid,
+    ipc_data_t *data, unsigned int mode);
+extern unative_t sys_ipc_hangup(unative_t phoneid);
+extern unative_t sys_ipc_register_irq(inr_t inr, devno_t devno, unative_t method,
     irq_code_t *ucode);
-unative_t sys_ipc_unregister_irq(inr_t inr, devno_t devno);
-unative_t sys_ipc_connect_kbox(sysarg64_t *task_id);
+extern unative_t sys_ipc_unregister_irq(inr_t inr, devno_t devno);
+extern unative_t sys_ipc_connect_kbox(sysarg64_t *task_id);
 
 #endif
Index: kernel/generic/include/mm/as.h
===================================================================
--- kernel/generic/include/mm/as.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/mm/as.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -43,8 +43,8 @@
 
 /** Address space area flags. */
-#define AS_AREA_READ		1
-#define AS_AREA_WRITE		2
-#define AS_AREA_EXEC		4
-#define AS_AREA_CACHEABLE	8
+#define AS_AREA_READ       1
+#define AS_AREA_WRITE      2
+#define AS_AREA_EXEC       4
+#define AS_AREA_CACHEABLE  8
 
 /** Address space area info exported to userspace. */
@@ -52,10 +52,10 @@
 	/** Starting address */
 	uintptr_t start_addr;
-
+	
 	/** Area size */
 	size_t size;
-
+	
 	/** Area flags */
-	int flags;
+	unsigned int flags;
 } as_area_info_t;
 
@@ -75,27 +75,30 @@
  * Defined to be true if user address space and kernel address space shadow each
  * other.
- */
-#define KERNEL_ADDRESS_SPACE_SHADOWED	KERNEL_ADDRESS_SPACE_SHADOWED_ARCH
-
-#define KERNEL_ADDRESS_SPACE_START	KERNEL_ADDRESS_SPACE_START_ARCH
-#define KERNEL_ADDRESS_SPACE_END	KERNEL_ADDRESS_SPACE_END_ARCH
-#define USER_ADDRESS_SPACE_START	USER_ADDRESS_SPACE_START_ARCH
-#define USER_ADDRESS_SPACE_END		USER_ADDRESS_SPACE_END_ARCH
-
-#define USTACK_ADDRESS			USTACK_ADDRESS_ARCH
+ *
+ */
+#define KERNEL_ADDRESS_SPACE_SHADOWED  KERNEL_ADDRESS_SPACE_SHADOWED_ARCH
+
+#define KERNEL_ADDRESS_SPACE_START  KERNEL_ADDRESS_SPACE_START_ARCH
+#define KERNEL_ADDRESS_SPACE_END    KERNEL_ADDRESS_SPACE_END_ARCH
+#define USER_ADDRESS_SPACE_START    USER_ADDRESS_SPACE_START_ARCH
+#define USER_ADDRESS_SPACE_END      USER_ADDRESS_SPACE_END_ARCH
+
+#define USTACK_ADDRESS  USTACK_ADDRESS_ARCH
 
 /** Kernel address space. */
-#define FLAG_AS_KERNEL			(1 << 0)	
+#define FLAG_AS_KERNEL  (1 << 0)
 
 /* Address space area attributes. */
-#define AS_AREA_ATTR_NONE	0
-#define AS_AREA_ATTR_PARTIAL	1	/**< Not fully initialized area. */
+#define AS_AREA_ATTR_NONE     0
+#define AS_AREA_ATTR_PARTIAL  1  /**< Not fully initialized area. */
 
 /** The page fault was not resolved by as_page_fault(). */
-#define AS_PF_FAULT		0
+#define AS_PF_FAULT  0
+
 /** The page fault was resolved by as_page_fault(). */
-#define AS_PF_OK		1
+#define AS_PF_OK  1
+
 /** The page fault was caused by memcpy_from_uspace() or memcpy_to_uspace(). */
-#define AS_PF_DEFER		2
+#define AS_PF_DEFER  2
 
 /** Address space structure.
@@ -105,8 +108,10 @@
  * supposed to figure in the list as they are shared by all tasks and
  * set up during system initialization.
+ *
  */
 typedef struct as {
 	/** Protected by asidlock. */
 	link_t inactive_as_with_asid_link;
+	
 	/**
 	 * Number of processors on wich is this address space active.
@@ -114,4 +119,5 @@
 	 */
 	size_t cpu_refcount;
+	
 	/**
 	 * Address space identifier.
@@ -120,10 +126,10 @@
 	 */
 	asid_t asid;
-
+	
 	/** Number of references (i.e tasks that reference this as). */
 	atomic_t refcount;
-
+	
 	mutex_t lock;
-
+	
 	/** B+tree of address space areas. */
 	btree_t as_area_btree;
@@ -131,5 +137,5 @@
 	/** Non-generic content. */
 	as_genarch_t genarch;
-
+	
 	/** Architecture specific content. */
 	as_arch_t arch;
@@ -137,8 +143,8 @@
 
 typedef struct {
-	pte_t *(* page_table_create)(int flags);
-	void (* page_table_destroy)(pte_t *page_table);
-	void (* page_table_lock)(as_t *as, bool lock);
-	void (* page_table_unlock)(as_t *as, bool unlock);
+	pte_t *(* page_table_create)(unsigned int);
+	void (* page_table_destroy)(pte_t *);
+	void (* page_table_lock)(as_t *, bool);
+	void (* page_table_unlock)(as_t *, bool);
 } as_operations_t;
 
@@ -146,10 +152,12 @@
  * This structure contains information associated with the shared address space
  * area.
+ *
  */
 typedef struct {
 	/** This lock must be acquired only when the as_area lock is held. */
-	mutex_t lock;		
+	mutex_t lock;
 	/** This structure can be deallocated if refcount drops to 0. */
 	size_t refcount;
+	
 	/**
 	 * B+tree containing complete map of anonymous pages of the shared area.
@@ -169,9 +177,12 @@
 /** Backend data stored in address space area. */
 typedef union mem_backend_data {
-	struct {	/**< elf_backend members */
+	/** elf_backend members */
+	struct {
 		elf_header_t *elf;
 		elf_segment_header_t *segment;
 	};
-	struct {	/**< phys_backend members */
+	
+	/** phys_backend members */
+	struct {
 		uintptr_t base;
 		size_t frames;
@@ -182,15 +193,18 @@
  *
  * Each as_area_t structure describes one contiguous area of virtual memory.
+ *
  */
 typedef struct {
 	mutex_t lock;
 	/** Containing address space. */
-	as_t *as;		
+	as_t *as;
+	
 	/**
 	 * Flags related to the memory represented by the address space area.
 	 */
-	int flags;
+	unsigned int flags;
+	
 	/** Attributes related to the address space area itself. */
-	int attributes;
+	unsigned int attributes;
 	/** Size of this area in multiples of PAGE_SIZE. */
 	size_t pages;
@@ -199,5 +213,5 @@
 	/** Map of used space. */
 	btree_t used_space;
-
+	
 	/**
 	 * If the address space area has been shared, this pointer will
@@ -205,8 +219,8 @@
 	 */
 	share_info_t *sh_info;
-
+	
 	/** Memory backend backing this address space area. */
 	struct mem_backend *backend;
-
+	
 	/** Data to be used by the backend. */
 	mem_backend_data_t backend_data;
@@ -215,7 +229,7 @@
 /** Address space area backend structure. */
 typedef struct mem_backend {
-	int (* page_fault)(as_area_t *area, uintptr_t addr, pf_access_t access);
-	void (* frame_free)(as_area_t *area, uintptr_t page, uintptr_t frame);
-	void (* share)(as_area_t *area);
+	int (* page_fault)(as_area_t *, uintptr_t, pf_access_t);
+	void (* frame_free)(as_area_t *, uintptr_t, uintptr_t);
+	void (* share)(as_area_t *);
 } mem_backend_t;
 
@@ -227,5 +241,5 @@
 extern void as_init(void);
 
-extern as_t *as_create(int);
+extern as_t *as_create(unsigned int);
 extern void as_destroy(as_t *);
 extern void as_hold(as_t *);
@@ -234,12 +248,13 @@
 extern int as_page_fault(uintptr_t, pf_access_t, istate_t *);
 
-extern as_area_t *as_area_create(as_t *, int, size_t, uintptr_t, int,
-    mem_backend_t *, mem_backend_data_t *);
+extern as_area_t *as_area_create(as_t *, unsigned int, size_t, uintptr_t,
+    unsigned int, mem_backend_t *, mem_backend_data_t *);
 extern int as_area_destroy(as_t *, uintptr_t);
-extern int as_area_resize(as_t *, uintptr_t, size_t, int);
-extern int as_area_share(as_t *, uintptr_t, size_t, as_t *, uintptr_t, int);
-extern int as_area_change_flags(as_t *, int, uintptr_t);
-
-extern int as_area_get_flags(as_area_t *);
+extern int as_area_resize(as_t *, uintptr_t, size_t, unsigned int);
+extern int as_area_share(as_t *, uintptr_t, size_t, as_t *, uintptr_t,
+    unsigned int);
+extern int as_area_change_flags(as_t *, unsigned int, uintptr_t);
+
+extern unsigned int as_area_get_flags(as_area_t *);
 extern bool as_area_check_access(as_area_t *, pf_access_t);
 extern size_t as_area_get_size(uintptr_t);
@@ -249,16 +264,21 @@
 
 /* Interface to be implemented by architectures. */
+
 #ifndef as_constructor_arch
-extern int as_constructor_arch(as_t *, int);
+extern int as_constructor_arch(as_t *, unsigned int);
 #endif /* !def as_constructor_arch */
+
 #ifndef as_destructor_arch
 extern int as_destructor_arch(as_t *);
 #endif /* !def as_destructor_arch */
+
 #ifndef as_create_arch
-extern int as_create_arch(as_t *, int);
+extern int as_create_arch(as_t *, unsigned int);
 #endif /* !def as_create_arch */
+
 #ifndef as_install_arch
 extern void as_install_arch(as_t *);
 #endif /* !def as_install_arch */
+
 #ifndef as_deinstall_arch
 extern void as_deinstall_arch(as_t *);
@@ -270,17 +290,18 @@
 extern mem_backend_t phys_backend;
 
-/** 
+/**
  * This flags is passed when running the loader, otherwise elf_load()
  * would return with a EE_LOADER error code.
- */
-#define ELD_F_NONE	0
-#define ELD_F_LOADER	1
-
-extern unsigned int elf_load(elf_header_t *, as_t *, int);
+ *
+ */
+#define ELD_F_NONE    0
+#define ELD_F_LOADER  1
+
+extern unsigned int elf_load(elf_header_t *, as_t *, unsigned int);
 
 /* Address space area related syscalls. */
-extern unative_t sys_as_area_create(uintptr_t, size_t, int);
-extern unative_t sys_as_area_resize(uintptr_t, size_t, int);
-extern unative_t sys_as_area_change_flags(uintptr_t, int);
+extern unative_t sys_as_area_create(uintptr_t, size_t, unsigned int);
+extern unative_t sys_as_area_resize(uintptr_t, size_t, unsigned int);
+extern unative_t sys_as_area_change_flags(uintptr_t, unsigned int);
 extern unative_t sys_as_area_destroy(uintptr_t);
 
Index: kernel/generic/include/mm/frame.h
===================================================================
--- kernel/generic/include/mm/frame.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/mm/frame.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -81,5 +81,5 @@
 
 typedef struct {
-	size_t refcount;     /**< Tracking of shared frames */
+	size_t refcount;      /**< Tracking of shared frames */
 	uint8_t buddy_order;  /**< Buddy system block order */
 	link_t buddy_link;    /**< Link to the next free block inside
@@ -91,8 +91,8 @@
 	pfn_t base;                    /**< Frame_no of the first frame
                                         in the frames array */
-	size_t count;                 /**< Size of zone */
-	size_t free_count;            /**< Number of free frame_t
+	size_t count;                  /**< Size of zone */
+	size_t free_count;             /**< Number of free frame_t
                                         structures */
-	size_t busy_count;            /**< Number of busy frame_t
+	size_t busy_count;             /**< Number of busy frame_t
                                         structures */
 	zone_flags_t flags;            /**< Type of the zone */
@@ -108,5 +108,5 @@
  */
 typedef struct {
-	SPINLOCK_DECLARE(lock);
+	IRQ_SPINLOCK_DECLARE(lock);
 	size_t count;
 	zone_t info[ZONES_MAX];
Index: kernel/generic/include/mm/page.h
===================================================================
--- kernel/generic/include/mm/page.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/mm/page.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -42,8 +42,7 @@
 /** Operations to manipulate page mappings. */
 typedef struct {
-	void (* mapping_insert)(as_t *as, uintptr_t page, uintptr_t frame,
-	    int flags);
-	void (* mapping_remove)(as_t *as, uintptr_t page);
-	pte_t *(* mapping_find)(as_t *as, uintptr_t page);
+	void (* mapping_insert)(as_t *, uintptr_t, uintptr_t, unsigned int);
+	void (* mapping_remove)(as_t *, uintptr_t);
+	pte_t *(* mapping_find)(as_t *, uintptr_t);
 } page_mapping_operations_t;
 
@@ -51,15 +50,14 @@
 
 extern void page_init(void);
-extern void page_table_lock(as_t *as, bool lock);
-extern void page_table_unlock(as_t *as, bool unlock);
-extern void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
-    int flags);
-extern void page_mapping_remove(as_t *as, uintptr_t page);
-extern pte_t *page_mapping_find(as_t *as, uintptr_t page);
-extern pte_t *page_table_create(int flags);
-extern void page_table_destroy(pte_t *page_table);
-extern void map_structure(uintptr_t s, size_t size);
+extern void page_table_lock(as_t *, bool);
+extern void page_table_unlock(as_t *, bool);
+extern void page_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
+extern void page_mapping_remove(as_t *, uintptr_t);
+extern pte_t *page_mapping_find(as_t *, uintptr_t);
+extern pte_t *page_table_create(unsigned int);
+extern void page_table_destroy(pte_t *);
+extern void map_structure(uintptr_t, size_t);
 
-extern uintptr_t hw_map(uintptr_t physaddr, size_t size);
+extern uintptr_t hw_map(uintptr_t, size_t);
 
 #endif
Index: kernel/generic/include/mm/slab.h
===================================================================
--- kernel/generic/include/mm/slab.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/mm/slab.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -84,5 +84,4 @@
 } slab_mag_cache_t;
 
-
 typedef struct {
 	const char *name;
@@ -94,13 +93,13 @@
 	size_t size;
 	
-	int (*constructor)(void *obj, int kmflag);
-	int (*destructor)(void *obj);
+	int (*constructor)(void *obj, unsigned int kmflag);
+	size_t (*destructor)(void *obj);
 	
 	/** Flags changing behaviour of cache */
-	int flags;
+	unsigned int flags;
 	
 	/* Computed values */
-	uint8_t order;         /**< Order of frames to be allocated */
-	unsigned int objects;  /**< Number of objects that fit in */
+	uint8_t order;   /**< Order of frames to be allocated */
+	size_t objects;  /**< Number of objects that fit in */
 	
 	/* Statistics */
@@ -109,5 +108,5 @@
 	atomic_t cached_objs;
 	/** How many magazines in magazines list */
-	atomic_t magazine_counter; 
+	atomic_t magazine_counter;
 	
 	/* Slabs */
@@ -124,10 +123,10 @@
 
 extern slab_cache_t *slab_cache_create(const char *, size_t, size_t,
-    int (*)(void *, int), int (*)(void *), int);
+    int (*)(void *, unsigned int), size_t (*)(void *), unsigned int);
 extern void slab_cache_destroy(slab_cache_t *);
 
-extern void * slab_alloc(slab_cache_t *, int);
+extern void * slab_alloc(slab_cache_t *, unsigned int);
 extern void slab_free(slab_cache_t *, void *);
-extern size_t slab_reclaim(int);
+extern size_t slab_reclaim(unsigned int);
 
 /* slab subsytem initialization */
@@ -139,6 +138,6 @@
 
 /* malloc support */
-extern void *malloc(unsigned int, int);
-extern void *realloc(void *, unsigned int, int);
+extern void *malloc(size_t, unsigned int);
+extern void *realloc(void *, size_t, unsigned int);
 extern void free(void *);
 
Index: kernel/generic/include/proc/scheduler.h
===================================================================
--- kernel/generic/include/proc/scheduler.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/proc/scheduler.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -47,7 +47,7 @@
 /** Scheduler run queue structure. */
 typedef struct {
-	SPINLOCK_DECLARE(lock);
-	link_t rq_head;          /**< List of ready threads. */
-	size_t n;                /**< Number of threads in rq_ready. */
+	IRQ_SPINLOCK_DECLARE(lock);
+	link_t rq_head;              /**< List of ready threads. */
+	size_t n;                    /**< Number of threads in rq_ready. */
 } runq_t;
 
Index: kernel/generic/include/proc/task.h
===================================================================
--- kernel/generic/include/proc/task.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/proc/task.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -70,6 +70,6 @@
 	 * threads.
 	 */
-	SPINLOCK_DECLARE(lock);
-
+	IRQ_SPINLOCK_DECLARE(lock);
+	
 	char name[TASK_NAME_BUFLEN];
 	/** List of threads contained in this task. */
@@ -81,13 +81,13 @@
 	/** Task security context. */
 	context_id_t context;
-
+	
 	/** Number of references (i.e. threads). */
 	atomic_t refcount;
 	/** Number of threads that haven't exited yet. */
 	atomic_t lifecount;
-
+	
 	/** Task capabilities. */
 	cap_t capabilities;
-
+	
 	/* IPC stuff */
 	answerbox_t answerbox;  /**< Communication endpoint */
@@ -101,13 +101,13 @@
 	/** List of synchronous answerboxes. */
 	link_t sync_box_head;
-
+	
 #ifdef CONFIG_UDEBUG
 	/** Debugging stuff. */
 	udebug_task_t udebug;
-
+	
 	/** Kernel answerbox. */
 	kbox_t kb;
-#endif
-
+#endif /* CONFIG_UDEBUG */
+	
 	/** Architecture specific task data. */
 	task_arch_t arch;
@@ -126,5 +126,5 @@
 } task_t;
 
-SPINLOCK_EXTERN(tasks_lock);
+IRQ_SPINLOCK_EXTERN(tasks_lock);
 extern avltree_t tasks_tree;
 
Index: kernel/generic/include/proc/thread.h
===================================================================
--- kernel/generic/include/proc/thread.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/proc/thread.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -50,6 +50,6 @@
 #include <sysinfo/abi.h>
 
-#define THREAD_STACK_SIZE	STACK_SIZE
-#define THREAD_NAME_BUFLEN	20
+#define THREAD_STACK_SIZE   STACK_SIZE
+#define THREAD_NAME_BUFLEN  20
 
 extern const char *thread_states[];
@@ -61,19 +61,23 @@
  * When using this flag, the caller must set cpu in the thread_t
  * structure manually before calling thread_ready (even on uniprocessor).
- */ 
-#define THREAD_FLAG_WIRED	(1 << 0)
+ *
+ */
+#define THREAD_FLAG_WIRED  (1 << 0)
+
 /** Thread was migrated to another CPU and has not run yet. */
-#define THREAD_FLAG_STOLEN	(1 << 1)
+#define THREAD_FLAG_STOLEN  (1 << 1)
+
 /** Thread executes in userspace. */
-#define THREAD_FLAG_USPACE	(1 << 2)
+#define THREAD_FLAG_USPACE  (1 << 2)
+
 /** Thread will be attached by the caller. */
-#define THREAD_FLAG_NOATTACH	(1 << 3)
+#define THREAD_FLAG_NOATTACH  (1 << 3)
 
 /** Thread structure. There is one per thread. */
 typedef struct thread {
-	link_t rq_link;		/**< Run queue link. */
-	link_t wq_link;		/**< Wait queue link. */
-	link_t th_link;		/**< Links to threads within containing task. */
-
+	link_t rq_link;  /**< Run queue link. */
+	link_t wq_link;  /**< Wait queue link. */
+	link_t th_link;  /**< Links to threads within containing task. */
+	
 	/** Threads linkage to the threads_tree. */
 	avltree_node_t threads_tree_node;
@@ -83,13 +87,13 @@
 	 * Protects the whole thread structure except list links above.
 	 */
-	SPINLOCK_DECLARE(lock);
-
+	IRQ_SPINLOCK_DECLARE(lock);
+	
 	char name[THREAD_NAME_BUFLEN];
-
+	
 	/** Function implementing the thread. */
 	void (* thread_code)(void *);
 	/** Argument passed to thread_code() function. */
 	void *thread_arg;
-
+	
 	/**
 	 * From here, the stored context is restored when the thread is
@@ -107,5 +111,5 @@
 	 */
 	context_t sleep_interruption_context;
-
+	
 	/** If true, the thread can be interrupted from sleep. */
 	bool sleep_interruptible;
@@ -115,6 +119,6 @@
 	timeout_t sleep_timeout;
 	/** Flag signalling sleep timeout in progress. */
-	volatile int timeout_pending;
-
+	volatile bool timeout_pending;
+	
 	/**
 	 * True if this thread is executing copy_from_uspace().
@@ -132,5 +136,5 @@
 	 * thread_exit() before returning to userspace.
 	 */
-	bool interrupted;			
+	bool interrupted;
 	
 	/** If true, thread_join_timeout() cannot be used on this thread. */
@@ -140,8 +144,8 @@
 	/** Link used in the joiner_head list. */
 	link_t joiner_link;
-
+	
 	fpu_context_t *saved_fpu_context;
 	int fpu_context_exists;
-
+	
 	/*
 	 * Defined only if thread doesn't run.
@@ -150,16 +154,16 @@
 	 */
 	int fpu_context_engaged;
-
+	
 	rwlock_type_t rwlock_holder_type;
-
+	
 	/** Callback fired in scheduler before the thread is put asleep. */
 	void (* call_me)(void *);
 	/** Argument passed to call_me(). */
 	void *call_me_with;
-
+	
 	/** Thread's state. */
 	state_t state;
 	/** Thread's flags. */
-	int flags;
+	unsigned int flags;
 	
 	/** Thread's CPU. */
@@ -167,5 +171,5 @@
 	/** Containing task. */
 	task_t *task;
-
+	
 	/** Ticks before preemption. */
 	uint64_t ticks;
@@ -176,7 +180,7 @@
 	/** Last sampled cycle. */
 	uint64_t last_cycle;
-	/** Thread doesn't affect accumulated accounting. */	
+	/** Thread doesn't affect accumulated accounting. */
 	bool uncounted;
-
+	
 	/** Thread's priority. Implemented as index to CPU->rq */
 	int priority;
@@ -186,13 +190,12 @@
 	/** Architecture-specific data. */
 	thread_arch_t arch;
-
+	
 	/** Thread's kernel stack. */
 	uint8_t *kstack;
-
+	
 #ifdef CONFIG_UDEBUG
 	/** Debugging stuff */
 	udebug_thread_t udebug;
-#endif
-
+#endif /* CONFIG_UDEBUG */
 } thread_t;
 
@@ -203,5 +206,5 @@
  *
  */
-SPINLOCK_EXTERN(threads_lock);
+IRQ_SPINLOCK_EXTERN(threads_lock);
 
 /** AVL tree containing all threads. */
@@ -209,6 +212,6 @@
 
 extern void thread_init(void);
-extern thread_t *thread_create(void (*)(void *), void *, task_t *, int,
-    const char *, bool);
+extern thread_t *thread_create(void (*)(void *), void *, task_t *,
+    unsigned int, const char *, bool);
 extern void thread_attach(thread_t *, task_t *);
 extern void thread_ready(thread_t *);
@@ -218,7 +221,9 @@
 extern void thread_create_arch(thread_t *);
 #endif
+
 #ifndef thr_constructor_arch
 extern void thr_constructor_arch(thread_t *);
 #endif
+
 #ifndef thr_destructor_arch
 extern void thr_destructor_arch(thread_t *);
@@ -230,10 +235,11 @@
 #define thread_join(t) \
 	thread_join_timeout((t), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
-extern int thread_join_timeout(thread_t *, uint32_t, int);
+
+extern int thread_join_timeout(thread_t *, uint32_t, unsigned int);
 extern void thread_detach(thread_t *);
 
 extern void thread_register_call_me(void (*)(void *), void *);
 extern void thread_print_list(void);
-extern void thread_destroy(thread_t *);
+extern void thread_destroy(thread_t *, bool);
 extern thread_t *thread_find_by_id(thread_id_t);
 extern void thread_update_accounting(bool);
Index: kernel/generic/include/stacktrace.h
===================================================================
--- kernel/generic/include/stacktrace.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/stacktrace.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -38,5 +38,5 @@
 #include <typedefs.h>
 
-/* Forward declarations. */
+/* Forward declaration. */
 struct istate;
 
Index: kernel/generic/include/symtab.h
===================================================================
--- kernel/generic/include/symtab.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/symtab.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -38,5 +38,5 @@
 #include <typedefs.h>
 
-#define MAX_SYMBOL_NAME 64
+#define MAX_SYMBOL_NAME  64
 
 struct symtab_entry {
@@ -53,8 +53,10 @@
 #ifdef CONFIG_SYMTAB
 
-/* Symtable linked together by build process */
+/** Symbol table linked together by the build process
+ *
+ */
 extern struct symtab_entry symbol_table[];
 
-#endif
+#endif /* CONFIG_SYMTAB */
 
 #endif
Index: kernel/generic/include/synch/mutex.h
===================================================================
--- kernel/generic/include/synch/mutex.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/synch/mutex.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -50,13 +50,15 @@
 } mutex_t;
 
-#define mutex_lock(mtx)			\
+#define mutex_lock(mtx) \
 	_mutex_lock_timeout((mtx), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
-#define mutex_trylock(mtx)		\
+
+#define mutex_trylock(mtx) \
 	_mutex_lock_timeout((mtx), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING)
-#define mutex_lock_timeout(mtx, usec)	\
+
+#define mutex_lock_timeout(mtx, usec) \
 	_mutex_lock_timeout((mtx), (usec), SYNCH_FLAGS_NON_BLOCKING)
 
 extern void mutex_initialize(mutex_t *, mutex_type_t);
-extern int _mutex_lock_timeout(mutex_t *, uint32_t, int);
+extern int _mutex_lock_timeout(mutex_t *, uint32_t, unsigned int);
 extern void mutex_unlock(mutex_t *);
 
Index: kernel/generic/include/synch/rwlock.h
===================================================================
--- kernel/generic/include/synch/rwlock.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/synch/rwlock.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -48,9 +48,12 @@
 
 typedef struct {
-	SPINLOCK_DECLARE(lock);
+	IRQ_SPINLOCK_DECLARE(lock);
+	
 	/**
 	 * Mutex for writers, readers can bypass it if readers_in is positive.
+	 *
 	 */
 	mutex_t exclusive;
+	
 	/** Number of readers in critical section. */
 	size_t readers_in;
@@ -59,22 +62,27 @@
 #define rwlock_write_lock(rwl) \
 	_rwlock_write_lock_timeout((rwl), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
+
 #define rwlock_read_lock(rwl) \
 	_rwlock_read_lock_timeout((rwl), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
+
 #define rwlock_write_trylock(rwl) \
 	_rwlock_write_lock_timeout((rwl), SYNCH_NO_TIMEOUT, \
 	    SYNCH_FLAGS_NON_BLOCKING)
+
 #define rwlock_read_trylock(rwl) \
 	_rwlock_read_lock_timeout((rwl), SYNCH_NO_TIMEOUT, \
 	    SYNCH_FLAGS_NON_BLOCKING)
+
 #define rwlock_write_lock_timeout(rwl, usec) \
 	_rwlock_write_lock_timeout((rwl), (usec), SYNCH_FLAGS_NONE)
+
 #define rwlock_read_lock_timeout(rwl, usec) \
 	_rwlock_read_lock_timeout((rwl), (usec), SYNCH_FLAGS_NONE)
 
-extern void rwlock_initialize(rwlock_t *rwl);
-extern void rwlock_read_unlock(rwlock_t *rwl);
-extern void rwlock_write_unlock(rwlock_t *rwl);
-extern int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags);
-extern int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags);
+extern void rwlock_initialize(rwlock_t *);
+extern void rwlock_read_unlock(rwlock_t *);
+extern void rwlock_write_unlock(rwlock_t *);
+extern int _rwlock_read_lock_timeout(rwlock_t *, uint32_t, unsigned int);
+extern int _rwlock_write_lock_timeout(rwlock_t *, uint32_t, unsigned int);
 
 #endif
Index: kernel/generic/include/synch/semaphore.h
===================================================================
--- kernel/generic/include/synch/semaphore.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/synch/semaphore.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -46,12 +46,14 @@
 #define semaphore_down(s) \
 	_semaphore_down_timeout((s), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
+
 #define semaphore_trydown(s) \
 	_semaphore_down_timeout((s), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING)
+
 #define semaphore_down_timeout(s, usec) \
 	_semaphore_down_timeout((s), (usec), SYNCH_FLAGS_NONE)
 
-extern void semaphore_initialize(semaphore_t *s, int val);
-extern int _semaphore_down_timeout(semaphore_t *s, uint32_t usec, int flags);
-extern void semaphore_up(semaphore_t *s);
+extern void semaphore_initialize(semaphore_t *, int);
+extern int _semaphore_down_timeout(semaphore_t *, uint32_t, unsigned int);
+extern void semaphore_up(semaphore_t *);
 
 #endif
Index: kernel/generic/include/synch/spinlock.h
===================================================================
--- kernel/generic/include/synch/spinlock.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/synch/spinlock.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -170,5 +170,5 @@
 #define SPINLOCK_STATIC_INITIALIZE_NAME(name, desc_name)
 
-#define ASSERT_SPINLOCK(expr, lock)
+#define ASSERT_SPINLOCK(expr, lock)  ASSERT(expr)
 
 #define spinlock_initialize(lock, name)
Index: kernel/generic/include/synch/waitq.h
===================================================================
--- kernel/generic/include/synch/waitq.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/synch/waitq.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -46,17 +46,20 @@
 } wakeup_mode_t;
 
-/** Wait queue structure. */
+/** Wait queue structure.
+ *
+ */
 typedef struct {
-
 	/** Lock protecting wait queue structure.
 	 *
 	 * Must be acquired before T.lock for each T of type thread_t.
 	 */
-	SPINLOCK_DECLARE(lock);
-
+	IRQ_SPINLOCK_DECLARE(lock);
+	
 	/**
 	 * Number of waitq_wakeup() calls that didn't find a thread to wake up.
+	 *
 	 */
 	int missed_wakeups;
+	
 	/** List of sleeping threads for wich there was no missed_wakeup. */
 	link_t head;
@@ -69,7 +72,7 @@
 
 extern void waitq_initialize(waitq_t *);
-extern int waitq_sleep_timeout(waitq_t *, uint32_t, int);
+extern int waitq_sleep_timeout(waitq_t *, uint32_t, unsigned int);
 extern ipl_t waitq_sleep_prepare(waitq_t *);
-extern int waitq_sleep_timeout_unsafe(waitq_t *, uint32_t, int);
+extern int waitq_sleep_timeout_unsafe(waitq_t *, uint32_t, unsigned int);
 extern void waitq_sleep_finish(waitq_t *, int, ipl_t);
 extern void waitq_wakeup(waitq_t *, wakeup_mode_t);
Index: kernel/generic/include/sysinfo/abi.h
===================================================================
--- kernel/generic/include/sysinfo/abi.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/sysinfo/abi.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -38,5 +38,5 @@
 
 /** Number of load components */
-#define LOAD_STEPS        3
+#define LOAD_STEPS  3
 
 /** Maximum task name size */
@@ -65,5 +65,5 @@
  */
 typedef struct {
-	unsigned int id;         /**< CPU ID as stored by kernel */
+	size_t id;               /**< CPU ID as stored by kernel */
 	bool active;             /**< CPU is activate */
 	uint16_t frequency_mhz;  /**< Frequency in MHz */
Index: kernel/generic/include/time/timeout.h
===================================================================
--- kernel/generic/include/time/timeout.h	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/include/time/timeout.h	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -43,9 +43,9 @@
 
 typedef struct {
-	SPINLOCK_DECLARE(lock);
-
+	IRQ_SPINLOCK_DECLARE(lock);
+	
 	/** Link to the list of active timeouts on THE->cpu */
 	link_t link;
-	/** Timeout will be activated in this amount of clock() ticks. */	
+	/** Timeout will be activated in this amount of clock() ticks. */
 	uint64_t ticks;
 	/** Function that will be called on timeout activation. */
@@ -57,12 +57,11 @@
 } timeout_t;
 
-#define us2ticks(us)	((uint64_t) (((uint32_t) (us) / (1000000 / HZ))))
+#define us2ticks(us)  ((uint64_t) (((uint32_t) (us) / (1000000 / HZ))))
 
 extern void timeout_init(void);
-extern void timeout_initialize(timeout_t *t);
-extern void timeout_reinitialize(timeout_t *t);
-extern void timeout_register(timeout_t *t, uint64_t usec, timeout_handler_t f,
-    void *arg);
-extern bool timeout_unregister(timeout_t *t);
+extern void timeout_initialize(timeout_t *);
+extern void timeout_reinitialize(timeout_t *);
+extern void timeout_register(timeout_t *, uint64_t, timeout_handler_t, void *);
+extern bool timeout_unregister(timeout_t *);
 
 #endif
Index: kernel/generic/src/console/chardev.c
===================================================================
--- kernel/generic/src/console/chardev.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/console/chardev.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -52,5 +52,5 @@
 	indev->name = name;
 	waitq_initialize(&indev->wq);
-	spinlock_initialize(&indev->lock, "indev");
+	irq_spinlock_initialize(&indev->lock, "chardev.indev.lock");
 	indev->counter = 0;
 	indev->index = 0;
@@ -68,8 +68,8 @@
 	ASSERT(indev);
 	
-	spinlock_lock(&indev->lock);
+	irq_spinlock_lock(&indev->lock, true);
 	if (indev->counter == INDEV_BUFLEN - 1) {
 		/* Buffer full */
-		spinlock_unlock(&indev->lock);
+		irq_spinlock_unlock(&indev->lock, true);
 		return;
 	}
@@ -81,5 +81,5 @@
 	indev->index = indev->index % INDEV_BUFLEN;
 	waitq_wakeup(&indev->wq, WAKEUP_FIRST);
-	spinlock_unlock(&indev->lock);
+	irq_spinlock_unlock(&indev->lock, true);
 }
 
@@ -114,10 +114,8 @@
 	
 	waitq_sleep(&indev->wq);
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&indev->lock);
+	irq_spinlock_lock(&indev->lock, true);
 	wchar_t ch = indev->buffer[(indev->index - indev->counter) % INDEV_BUFLEN];
 	indev->counter--;
-	spinlock_unlock(&indev->lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&indev->lock, true);
 	
 	return ch;
@@ -134,5 +132,5 @@
 {
 	outdev->name = name;
-	spinlock_initialize(&outdev->lock, "outdev");
+	spinlock_initialize(&outdev->lock, "chardev.outdev.lock");
 	link_initialize(&outdev->link);
 	list_initialize(&outdev->list);
Index: kernel/generic/src/console/cmd.c
===================================================================
--- kernel/generic/src/console/cmd.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/console/cmd.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -510,5 +510,5 @@
 void cmd_initialize(cmd_info_t *cmd)
 {
-	spinlock_initialize(&cmd->lock, "cmd");
+	spinlock_initialize(&cmd->lock, "cmd.lock");
 	link_initialize(&cmd->link);
 }
@@ -681,15 +681,18 @@
 			continue;
 		
-		thread_t *t;
-		if ((t = thread_create((void (*)(void *)) cmd_call0, (void *) argv, TASK, THREAD_FLAG_WIRED, "call0", false))) {
-			spinlock_lock(&t->lock);
-			t->cpu = &cpus[i];
-			spinlock_unlock(&t->lock);
-			printf("cpu%u: ", i);
-			thread_ready(t);
-			thread_join(t);
-			thread_detach(t);
+		thread_t *thread;
+		if ((thread = thread_create((void (*)(void *)) cmd_call0,
+		    (void *) argv, TASK, THREAD_FLAG_WIRED, "call0", false))) {
+			irq_spinlock_lock(&thread->lock, true);
+			thread->cpu = &cpus[i];
+			irq_spinlock_unlock(&thread->lock, true);
+			
+			printf("cpu%" PRIs ": ", i);
+			
+			thread_ready(thread);
+			thread_join(thread);
+			thread_detach(thread);
 		} else
-			printf("Unable to create thread for cpu%u\n", i);
+			printf("Unable to create thread for cpu%" PRIs "\n", i);
 	}
 	
@@ -1049,10 +1052,8 @@
 	/* Update and read thread accounting
 	   for benchmarking */
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&TASK->lock);
+	irq_spinlock_lock(&TASK->lock, true);
 	uint64_t ucycles0, kcycles0;
 	task_get_accounting(TASK, &ucycles0, &kcycles0);
-	spinlock_unlock(&TASK->lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&TASK->lock, true);
 	
 	/* Execute the test */
@@ -1061,10 +1062,8 @@
 	
 	/* Update and read thread accounting */
-	uint64_t ucycles1, kcycles1; 
-	ipl = interrupts_disable();
-	spinlock_lock(&TASK->lock);
+	uint64_t ucycles1, kcycles1;
+	irq_spinlock_lock(&TASK->lock, true);
 	task_get_accounting(TASK, &ucycles1, &kcycles1);
-	spinlock_unlock(&TASK->lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&TASK->lock, true);
 	
 	uint64_t ucycles, kcycles;
@@ -1072,7 +1071,7 @@
 	order_suffix(ucycles1 - ucycles0, &ucycles, &usuffix);
 	order_suffix(kcycles1 - kcycles0, &kcycles, &ksuffix);
-		
+	
 	printf("Time: %" PRIu64 "%c user cycles, %" PRIu64 "%c kernel cycles\n",
-			ucycles, usuffix, kcycles, ksuffix);
+	    ucycles, usuffix, kcycles, ksuffix);
 	
 	if (ret == NULL) {
@@ -1080,5 +1079,5 @@
 		return true;
 	}
-
+	
 	printf("%s\n", ret);
 	return false;
@@ -1106,10 +1105,8 @@
 		/* Update and read thread accounting
 		   for benchmarking */
-		ipl_t ipl = interrupts_disable();
-		spinlock_lock(&TASK->lock);
+		irq_spinlock_lock(&TASK->lock, true);
 		uint64_t ucycles0, kcycles0;
 		task_get_accounting(TASK, &ucycles0, &kcycles0);
-		spinlock_unlock(&TASK->lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&TASK->lock, true);
 		
 		/* Execute the test */
@@ -1118,11 +1115,9 @@
 		
 		/* Update and read thread accounting */
-		ipl = interrupts_disable();
-		spinlock_lock(&TASK->lock);
+		irq_spinlock_lock(&TASK->lock, true);
 		uint64_t ucycles1, kcycles1;
 		task_get_accounting(TASK, &ucycles1, &kcycles1);
-		spinlock_unlock(&TASK->lock);
-		interrupts_restore(ipl);
-
+		irq_spinlock_unlock(&TASK->lock, true);
+		
 		if (ret != NULL) {
 			printf("%s\n", ret);
@@ -1135,5 +1130,5 @@
 		order_suffix(kcycles1 - kcycles0, &kcycles, &ksuffix);
 		printf("OK (%" PRIu64 "%c user cycles, %" PRIu64 "%c kernel cycles)\n",
-				ucycles, usuffix, kcycles, ksuffix);
+		    ucycles, usuffix, kcycles, ksuffix);
 	}
 	
Index: kernel/generic/src/console/console.c
===================================================================
--- kernel/generic/src/console/console.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/console/console.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -62,10 +62,14 @@
 /** Kernel log initialized */
 static bool klog_inited = false;
+
 /** First kernel log characters */
 static size_t klog_start = 0;
+
 /** Number of valid kernel log characters */
 static size_t klog_len = 0;
+
 /** Number of stored (not printed) kernel log characters */
 static size_t klog_stored = 0;
+
 /** Number of stored kernel log characters for uspace */
 static size_t klog_uspace = 0;
@@ -84,6 +88,6 @@
 };
 
-static void stdout_write(outdev_t *dev, wchar_t ch, bool silent);
-static void stdout_redraw(outdev_t *dev);
+static void stdout_write(outdev_t *, wchar_t, bool);
+static void stdout_redraw(outdev_t *);
 
 static outdev_operations_t stdout_ops = {
@@ -174,7 +178,10 @@
 		stdout->op->redraw(stdout);
 	
-	/* Force the console to print the prompt */
-	if ((stdin) && (prev))
+	if ((stdin) && (prev)) {
+		/*
+		 * Force the console to print the prompt.
+		 */
 		indev_push_character(stdin, '\n');
+	}
 }
 
Index: kernel/generic/src/cpu/cpu.c
===================================================================
--- kernel/generic/src/cpu/cpu.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/cpu/cpu.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -33,7 +33,7 @@
 /**
  * @file
- * @brief	CPU subsystem initialization and listing.
+ * @brief CPU subsystem initialization and listing.
  */
- 
+
 #include <cpu.h>
 #include <arch.h>
@@ -58,26 +58,27 @@
  */
 void cpu_init(void) {
-	unsigned int i, j;
-	
 #ifdef CONFIG_SMP
 	if (config.cpu_active == 1) {
 #endif /* CONFIG_SMP */
+		
 		cpus = (cpu_t *) malloc(sizeof(cpu_t) * config.cpu_count,
-					FRAME_ATOMIC);
+		    FRAME_ATOMIC);
 		if (!cpus)
 			panic("Cannot allocate CPU structures.");
-
-		/* initialize everything */
+		
+		/* Initialize everything */
 		memsetb(cpus, sizeof(cpu_t) * config.cpu_count, 0);
-
+		
+		size_t i;
 		for (i = 0; i < config.cpu_count; i++) {
-			cpus[i].stack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | FRAME_ATOMIC);
-			
+			cpus[i].stack = (uint8_t *) frame_alloc(STACK_FRAMES,
+			    FRAME_KA | FRAME_ATOMIC);
 			cpus[i].id = i;
 			
-			spinlock_initialize(&cpus[i].lock, "cpu_t.lock");
-
+			irq_spinlock_initialize(&cpus[i].lock, "cpus[].lock");
+			
+			unsigned int j;
 			for (j = 0; j < RQ_COUNT; j++) {
-				spinlock_initialize(&cpus[i].rq[j].lock, "rq_t.lock");
+				irq_spinlock_initialize(&cpus[i].rq[j].lock, "cpus[].rq[].lock");
 				list_initialize(&cpus[i].rq[j].rq_head);
 			}
@@ -87,9 +88,9 @@
 	}
 #endif /* CONFIG_SMP */
-
+	
 	CPU = &cpus[config.cpu_active - 1];
 	
-	CPU->active = 1;
-	CPU->tlb_active = 1;
+	CPU->active = true;
+	CPU->tlb_active = true;
 	
 	cpu_identify();
@@ -100,6 +101,6 @@
 void cpu_list(void)
 {
-	unsigned int i;
-
+	size_t i;
+	
 	for (i = 0; i < config.cpu_count; i++) {
 		if (cpus[i].active)
Index: kernel/generic/src/ddi/ddi.c
===================================================================
--- kernel/generic/src/ddi/ddi.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/ddi/ddi.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -59,5 +59,7 @@
 static btree_t parea_btree;
 
-/** Initialize DDI. */
+/** Initialize DDI.
+ *
+ */
 void ddi_init(void)
 {
@@ -97,5 +99,6 @@
  *
  */
-static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, size_t pages, int flags)
+static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, size_t pages,
+    unsigned int flags)
 {
 	ASSERT(TASK);
@@ -114,8 +117,6 @@
 	backend_data.frames = pages;
 	
-	ipl_t ipl = interrupts_disable();
-	
 	/* Find the zone of the physical memory */
-	spinlock_lock(&zones.lock);
+	irq_spinlock_lock(&zones.lock, true);
 	size_t znum = find_zone(ADDR2PFN(pf), pages, 0);
 	
@@ -124,5 +125,5 @@
 		 * -> assume it is hardware device and allow mapping
 		 */
-		spinlock_unlock(&zones.lock);
+		irq_spinlock_unlock(&zones.lock, true);
 		goto map;
 	}
@@ -130,13 +131,14 @@
 	if (zones.info[znum].flags & ZONE_FIRMWARE) {
 		/* Frames are part of firmware */
-		spinlock_unlock(&zones.lock);
+		irq_spinlock_unlock(&zones.lock, true);
 		goto map;
 	}
 	
 	if (zone_flags_available(zones.info[znum].flags)) {
-		/* Frames are part of physical memory, check if the memory
+		/*
+		 * Frames are part of physical memory, check if the memory
 		 * region is enabled for mapping.
 		 */
-		spinlock_unlock(&zones.lock);
+		irq_spinlock_unlock(&zones.lock, true);
 		
 		mutex_lock(&parea_lock);
@@ -154,12 +156,10 @@
 	}
 	
-	spinlock_unlock(&zones.lock);
+	irq_spinlock_unlock(&zones.lock, true);
+	
 err:
-	interrupts_restore(ipl);
 	return ENOENT;
 	
 map:
-	interrupts_restore(ipl);
-
 	if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp,
 	    AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
@@ -196,6 +196,5 @@
 		return EPERM;
 	
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&tasks_lock);
+	irq_spinlock_lock(&tasks_lock, true);
 	
 	task_t *task = task_find_by_id(id);
@@ -207,17 +206,14 @@
 		 * context.
 		 */
-		spinlock_unlock(&tasks_lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&tasks_lock, true);
 		return ENOENT;
 	}
 	
 	/* Lock the task and release the lock protecting tasks_btree. */
-	spinlock_lock(&task->lock);
-	spinlock_unlock(&tasks_lock);
+	irq_spinlock_exchange(&tasks_lock, &task->lock);
 	
 	int rc = ddi_iospace_enable_arch(task, ioaddr, size);
 	
-	spinlock_unlock(&task->lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&task->lock, true);
 	
 	return rc;
Index: kernel/generic/src/ddi/irq.c
===================================================================
--- kernel/generic/src/ddi/irq.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/ddi/irq.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -32,5 +32,5 @@
 /**
  * @file
- * @brief	IRQ dispatcher.
+ * @brief IRQ dispatcher.
  *
  * This file provides means of connecting IRQs with particular
@@ -78,20 +78,24 @@
 #include <arch.h>
 
-#define KEY_INR		0
-#define KEY_DEVNO	1
-
-/**
- * Spinlock protecting the kernel IRQ hash table.
+#define KEY_INR    0
+#define KEY_DEVNO  1
+
+/** Spinlock protecting the kernel IRQ hash table.
+ *
  * This lock must be taken only when interrupts are disabled.
- */
-SPINLOCK_INITIALIZE(irq_kernel_hash_table_lock);
+ *
+ */
+IRQ_SPINLOCK_STATIC_INITIALIZE(irq_kernel_hash_table_lock);
+
 /** The kernel IRQ hash table. */
 static hash_table_t irq_kernel_hash_table;
 
-/**
- * Spinlock protecting the uspace IRQ hash table.
+/** Spinlock protecting the uspace IRQ hash table.
+ *
  * This lock must be taken only when interrupts are disabled.
- */
-SPINLOCK_INITIALIZE(irq_uspace_hash_table_lock);
+ *
+ */
+IRQ_SPINLOCK_INITIALIZE(irq_uspace_hash_table_lock);
+
 /** The uspace IRQ hash table. */
 hash_table_t irq_uspace_hash_table;
@@ -100,4 +104,5 @@
  * Hash table operations for cases when we know that
  * there will be collisions between different keys.
+ *
  */
 static size_t irq_ht_hash(unative_t *key);
@@ -116,4 +121,5 @@
  * However, there might be still collisions among
  * elements with single key (sharing of one IRQ).
+ *
  */
 static size_t irq_lin_hash(unative_t *key);
@@ -132,6 +138,7 @@
 /** Initialize IRQ subsystem.
  *
- * @param inrs Numbers of unique IRQ numbers or INRs.
+ * @param inrs   Numbers of unique IRQ numbers or INRs.
  * @param chains Number of chains in the hash table.
+ *
  */
 void irq_init(size_t inrs, size_t chains)
@@ -166,9 +173,9 @@
 	memsetb(irq, sizeof(irq_t), 0);
 	link_initialize(&irq->link);
-	spinlock_initialize(&irq->lock, "irq.lock");
+	irq_spinlock_initialize(&irq->lock, "irq.lock");
 	link_initialize(&irq->notif_cfg.link);
 	irq->inr = -1;
 	irq->devno = -1;
-
+	
 	irq_initialize_arch(irq);
 }
@@ -180,10 +187,11 @@
  * function pointer and handler() function pointer.
  *
- * @param irq		IRQ structure belonging to a device.
- * @return		True on success, false on failure.
+ * @param irq IRQ structure belonging to a device.
+ *
+ * @return True on success, false on failure.
+ *
  */
 void irq_register(irq_t *irq)
 {
-	ipl_t ipl;
 	unative_t key[] = {
 		(unative_t) irq->inr,
@@ -191,11 +199,9 @@
 	};
 	
-	ipl = interrupts_disable();
-	spinlock_lock(&irq_kernel_hash_table_lock);
-	spinlock_lock(&irq->lock);
+	irq_spinlock_lock(&irq_kernel_hash_table_lock, true);
+	irq_spinlock_lock(&irq->lock, false);
 	hash_table_insert(&irq_kernel_hash_table, key, &irq->link);
-	spinlock_unlock(&irq->lock);	
-	spinlock_unlock(&irq_kernel_hash_table_lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&irq->lock, false);
+	irq_spinlock_unlock(&irq_kernel_hash_table_lock, true);
 }
 
@@ -208,17 +214,15 @@
 	unative_t key[] = {
 		(unative_t) inr,
-		(unative_t) -1    /* search will use claim() instead of devno */
+		(unative_t) -1    /* Search will use claim() instead of devno */
 	};
 	
-	spinlock_lock(&irq_uspace_hash_table_lock);
+	irq_spinlock_lock(&irq_uspace_hash_table_lock, false);
 	lnk = hash_table_find(&irq_uspace_hash_table, key);
 	if (lnk) {
-		irq_t *irq;
-		
-		irq = hash_table_get_instance(lnk, irq_t, link);
-		spinlock_unlock(&irq_uspace_hash_table_lock);
+		irq_t *irq = hash_table_get_instance(lnk, irq_t, link);
+		irq_spinlock_unlock(&irq_uspace_hash_table_lock, false);
 		return irq;
 	}
-	spinlock_unlock(&irq_uspace_hash_table_lock);
+	irq_spinlock_unlock(&irq_uspace_hash_table_lock, false);
 	
 	return NULL;
@@ -233,17 +237,15 @@
 	unative_t key[] = {
 		(unative_t) inr,
-		(unative_t) -1    /* search will use claim() instead of devno */
+		(unative_t) -1    /* Search will use claim() instead of devno */
 	};
 	
-	spinlock_lock(&irq_kernel_hash_table_lock);
+	irq_spinlock_lock(&irq_kernel_hash_table_lock, false);
 	lnk = hash_table_find(&irq_kernel_hash_table, key);
 	if (lnk) {
-		irq_t *irq;
-		
-		irq = hash_table_get_instance(lnk, irq_t, link);
-		spinlock_unlock(&irq_kernel_hash_table_lock);
+		irq_t *irq = hash_table_get_instance(lnk, irq_t, link);
+		irq_spinlock_unlock(&irq_kernel_hash_table_lock, false);
 		return irq;
 	}
-	spinlock_unlock(&irq_kernel_hash_table_lock);
+	irq_spinlock_unlock(&irq_kernel_hash_table_lock, false);
 	
 	return NULL;
@@ -263,9 +265,8 @@
  *
  * @return IRQ structure of the respective device or NULL.
+ *
  */
 irq_t *irq_dispatch_and_lock(inr_t inr)
 {
-	irq_t *irq;
-	
 	/*
 	 * If the kernel console is silenced,
@@ -277,13 +278,15 @@
 	 */
 	if (silent) {
-		irq = irq_dispatch_and_lock_uspace(inr);
+		irq_t *irq = irq_dispatch_and_lock_uspace(inr);
 		if (irq)
 			return irq;
+		
 		return irq_dispatch_and_lock_kernel(inr);
 	}
 	
-	irq = irq_dispatch_and_lock_kernel(inr);
+	irq_t *irq = irq_dispatch_and_lock_kernel(inr);
 	if (irq)
 		return irq;
+	
 	return irq_dispatch_and_lock_uspace(inr);
 }
@@ -301,4 +304,5 @@
  *
  * @return Index into the hash table.
+ *
  */
 size_t irq_ht_hash(unative_t key[])
@@ -322,9 +326,10 @@
  * This function assumes interrupts are already disabled.
  *
- * @param key Keys (i.e. inr and devno).
+ * @param key  Keys (i.e. inr and devno).
  * @param keys This is 2.
  * @param item The item to compare the key with.
  *
  * @return True on match or false otherwise.
+ *
  */
 bool irq_ht_compare(unative_t key[], size_t keys, link_t *item)
@@ -333,8 +338,8 @@
 	inr_t inr = (inr_t) key[KEY_INR];
 	devno_t devno = (devno_t) key[KEY_DEVNO];
-
+	
 	bool rv;
 	
-	spinlock_lock(&irq->lock);
+	irq_spinlock_lock(&irq->lock, false);
 	if (devno == -1) {
 		/* Invoked by irq_dispatch_and_lock(). */
@@ -348,6 +353,6 @@
 	/* unlock only on non-match */
 	if (!rv)
-		spinlock_unlock(&irq->lock);
-
+		irq_spinlock_unlock(&irq->lock, false);
+	
 	return rv;
 }
@@ -361,5 +366,5 @@
 	irq_t *irq __attribute__((unused))
 	    = hash_table_get_instance(lnk, irq_t, link);
-	spinlock_unlock(&irq->lock);
+	irq_spinlock_unlock(&irq->lock, false);
 }
 
@@ -374,4 +379,5 @@
  *
  * @return Index into the hash table.
+ *
  */
 size_t irq_lin_hash(unative_t key[])
@@ -395,9 +401,10 @@
  * This function assumes interrupts are already disabled.
  *
- * @param key Keys (i.e. inr and devno).
+ * @param key  Keys (i.e. inr and devno).
  * @param keys This is 2.
  * @param item The item to compare the key with.
  *
  * @return True on match or false otherwise.
+ *
  */
 bool irq_lin_compare(unative_t key[], size_t keys, link_t *item)
@@ -407,5 +414,5 @@
 	bool rv;
 	
-	spinlock_lock(&irq->lock);
+	irq_spinlock_lock(&irq->lock, false);
 	if (devno == -1) {
 		/* Invoked by irq_dispatch_and_lock() */
@@ -418,5 +425,5 @@
 	/* unlock only on non-match */
 	if (!rv)
-		spinlock_unlock(&irq->lock);
+		irq_spinlock_unlock(&irq->lock, false);
 	
 	return rv;
@@ -425,5 +432,6 @@
 /** Unlock IRQ structure after hash_table_remove().
  *
- * @param lnk		Link in the removed and locked IRQ structure.
+ * @param lnk Link in the removed and locked IRQ structure.
+ *
  */
 void irq_lin_remove(link_t *lnk)
@@ -431,5 +439,5 @@
 	irq_t *irq __attribute__((unused))
 	    = hash_table_get_instance(lnk, irq_t, link);
-	spinlock_unlock(&irq->lock);
+	irq_spinlock_unlock(&irq->lock, false);
 }
 
Index: kernel/generic/src/interrupt/interrupt.c
===================================================================
--- kernel/generic/src/interrupt/interrupt.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/interrupt/interrupt.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -32,9 +32,10 @@
 /**
  * @file
- * @brief	Interrupt redirector.
+ * @brief Interrupt redirector.
  *
  * This file provides means of registering interrupt handlers
  * by kernel functions and calling the handlers when interrupts
  * occur.
+ *
  */
 
@@ -61,19 +62,18 @@
 
 /** Register exception handler
- * 
- * @param n Exception number
- * @param name Description 
- * @param f Exception handler
- */
-iroutine exc_register(int n, const char *name, iroutine f)
+ *
+ * @param n       Exception number
+ * @param name    Description
+ * @param handler Exception handler
+ *
+ */
+iroutine exc_register(int n, const char *name, iroutine handler)
 {
 	ASSERT(n < IVT_ITEMS);
 	
-	iroutine old;
-	
 	spinlock_lock(&exctbl_lock);
 	
-	old = exc_table[n].f;
-	exc_table[n].f = f;
+	iroutine old = exc_table[n].f;
+	exc_table[n].f = handler;
 	exc_table[n].name = name;
 	
@@ -87,38 +87,43 @@
  * Called directly from the assembler code.
  * CPU is interrupts_disable()'d.
+ *
  */
 void exc_dispatch(int n, istate_t *istate)
 {
 	ASSERT(n < IVT_ITEMS);
-
+	
 	/* Account user cycles */
 	if (THREAD) {
-		spinlock_lock(&THREAD->lock);
+		irq_spinlock_lock(&THREAD->lock, false);
 		thread_update_accounting(true);
-		spinlock_unlock(&THREAD->lock);
-	}
-
+		irq_spinlock_unlock(&THREAD->lock, false);
+	}
+	
 #ifdef CONFIG_UDEBUG
-	if (THREAD) THREAD->udebug.uspace_state = istate;
+	if (THREAD)
+		THREAD->udebug.uspace_state = istate;
 #endif
 	
 	exc_table[n].f(n + IVT_FIRST, istate);
-
+	
 #ifdef CONFIG_UDEBUG
-	if (THREAD) THREAD->udebug.uspace_state = NULL;
-#endif
-
+	if (THREAD)
+		THREAD->udebug.uspace_state = NULL;
+#endif
+	
 	/* This is a safe place to exit exiting thread */
-	if (THREAD && THREAD->interrupted && istate_from_uspace(istate))
+	if ((THREAD) && (THREAD->interrupted) && (istate_from_uspace(istate)))
 		thread_exit();
-
+	
 	if (THREAD) {
-		spinlock_lock(&THREAD->lock);
+		irq_spinlock_lock(&THREAD->lock, false);
 		thread_update_accounting(false);
-		spinlock_unlock(&THREAD->lock);
-	}
-}
-
-/** Default 'null' exception handler */
+		irq_spinlock_unlock(&THREAD->lock, false);
+	}
+}
+
+/** Default 'null' exception handler
+ *
+ */
 static void exc_undef(int n, istate_t *istate)
 {
@@ -127,25 +132,26 @@
 }
 
-/** Terminate thread and task if exception came from userspace. */
+/** Terminate thread and task if exception came from userspace.
+ *
+ */
 void fault_if_from_uspace(istate_t *istate, const char *fmt, ...)
 {
-	task_t *task = TASK;
-	va_list args;
-
 	if (!istate_from_uspace(istate))
 		return;
-
+	
 	printf("Task %s (%" PRIu64 ") killed due to an exception at "
-	    "program counter %p.\n", task->name, task->taskid,
+	    "program counter %p.\n", TASK->name, TASK->taskid,
 	    istate_get_pc(istate));
-
+	
 	stack_trace_istate(istate);
-
+	
 	printf("Kill message: ");
+	
+	va_list args;
 	va_start(args, fmt);
 	vprintf(fmt, args);
 	va_end(args);
 	printf("\n");
-
+	
 	/*
 	 * Userspace can subscribe for FAULT events to take action
@@ -158,5 +164,5 @@
 		event_notify_3(EVENT_FAULT, LOWER32(TASK->taskid),
 		    UPPER32(TASK->taskid), (unative_t) THREAD);
-
+		
 #ifdef CONFIG_UDEBUG
 		/* Wait for a debugging session. */
@@ -164,6 +170,6 @@
 #endif
 	}
-
-	task_kill(task->taskid);
+	
+	task_kill(TASK->taskid);
 	thread_exit();
 }
@@ -171,17 +177,19 @@
 #ifdef CONFIG_KCONSOLE
 
-/** kconsole cmd - print all exceptions */
+/** Print all exceptions
+ *
+ */
 static int cmd_exc_print(cmd_arg_t *argv)
 {
 #if (IVT_ITEMS > 0)
 	unsigned int i;
-
+	
 	spinlock_lock(&exctbl_lock);
-
+	
 #ifdef __32_BITS__
 	printf("Exc Description          Handler    Symbol\n");
 	printf("--- -------------------- ---------- --------\n");
 #endif
-
+	
 #ifdef __64_BITS__
 	printf("Exc Description          Handler            Symbol\n");
@@ -191,10 +199,10 @@
 	for (i = 0; i < IVT_ITEMS; i++) {
 		const char *symbol = symtab_fmt_name_lookup((unative_t) exc_table[i].f);
-
+		
 #ifdef __32_BITS__
 		printf("%-3u %-20s %10p %s\n", i + IVT_FIRST, exc_table[i].name,
 			exc_table[i].f, symbol);
 #endif
-
+		
 #ifdef __64_BITS__
 		printf("%-3u %-20s %18p %s\n", i + IVT_FIRST, exc_table[i].name,
@@ -216,5 +224,4 @@
 	return 1;
 }
-
 
 static cmd_info_t exc_info = {
@@ -227,14 +234,20 @@
 };
 
-#endif
-
-/** Initialize generic exception handling support */
+#endif /* CONFIG_KCONSOLE */
+
+/** Initialize generic exception handling support
+ *
+ */
 void exc_init(void)
 {
-	int i;
-
+	(void) exc_undef;
+	
+#if (IVT_ITEMS > 0)
+	unsigned int i;
+	
 	for (i = 0; i < IVT_ITEMS; i++)
 		exc_register(i, "undef", (iroutine) exc_undef);
-
+#endif
+	
 #ifdef CONFIG_KCONSOLE
 	cmd_initialize(&exc_info);
Index: kernel/generic/src/ipc/event.c
===================================================================
--- kernel/generic/src/ipc/event.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/ipc/event.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -137,9 +137,7 @@
 			IPC_SET_ARG5(call->data, a5);
 			
-			ipl_t ipl = interrupts_disable();
-			spinlock_lock(&events[evno].answerbox->irq_lock);
+			irq_spinlock_lock(&events[evno].answerbox->irq_lock, true);
 			list_append(&call->link, &events[evno].answerbox->irq_notifs);
-			spinlock_unlock(&events[evno].answerbox->irq_lock);
-			interrupts_restore(ipl);
+			irq_spinlock_unlock(&events[evno].answerbox->irq_lock, true);
 			
 			waitq_wakeup(&events[evno].answerbox->wq, WAKEUP_FIRST);
Index: kernel/generic/src/ipc/ipc.c
===================================================================
--- kernel/generic/src/ipc/ipc.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/ipc/ipc.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -66,5 +66,6 @@
 /** Initialize a call structure.
  *
- * @param call		Call structure to be initialized.
+ * @param call Call structure to be initialized.
+ *
  */
 static void _ipc_call_init(call_t *call)
@@ -77,21 +78,20 @@
 
 /** Allocate and initialize a call structure.
- * 
+ *
  * The call is initialized, so that the reply will be directed to
  * TASK->answerbox.
  *
- * @param flags		Parameters for slab_alloc (e.g FRAME_ATOMIC).
- *
- * @return		If flags permit it, return NULL, or initialized kernel
- *			call structure.
- */
-call_t *ipc_call_alloc(int flags)
-{
-	call_t *call;
-
-	call = slab_alloc(ipc_call_slab, flags);
+ * @param flags Parameters for slab_alloc (e.g FRAME_ATOMIC).
+ *
+ * @return If flags permit it, return NULL, or initialized kernel
+ *         call structure.
+ *
+ */
+call_t *ipc_call_alloc(unsigned int flags)
+{
+	call_t *call = slab_alloc(ipc_call_slab, flags);
 	if (call)
 		_ipc_call_init(call);
-
+	
 	return call;
 }
@@ -99,5 +99,6 @@
 /** Deallocate a call structure.
  *
- * @param call		Call structure to be freed.
+ * @param call Call structure to be freed.
+ *
  */
 void ipc_call_free(call_t *call)
@@ -111,11 +112,12 @@
 /** Initialize an answerbox structure.
  *
- * @param box		Answerbox structure to be initialized.
- * @param task		Task to which the answerbox belongs.
+ * @param box  Answerbox structure to be initialized.
+ * @param task Task to which the answerbox belongs.
+ *
  */
 void ipc_answerbox_init(answerbox_t *box, task_t *task)
 {
-	spinlock_initialize(&box->lock, "ipc_box_lock");
-	spinlock_initialize(&box->irq_lock, "ipc_box_irqlock");
+	irq_spinlock_initialize(&box->lock, "ipc.box.lock");
+	irq_spinlock_initialize(&box->irq_lock, "ipc.box.irqlock");
 	waitq_initialize(&box->wq);
 	link_initialize(&box->sync_box_link);
@@ -131,18 +133,19 @@
 /** Connect a phone to an answerbox.
  *
- * @param phone		Initialized phone structure.
- * @param box		Initialized answerbox structure.
+ * @param phone Initialized phone structure.
+ * @param box   Initialized answerbox structure.
+ *
  */
 void ipc_phone_connect(phone_t *phone, answerbox_t *box)
 {
 	mutex_lock(&phone->lock);
-
+	
 	phone->state = IPC_PHONE_CONNECTED;
 	phone->callee = box;
-
-	spinlock_lock(&box->lock);
+	
+	irq_spinlock_lock(&box->lock, true);
 	list_append(&phone->link, &box->connected_phones);
-	spinlock_unlock(&box->lock);
-
+	irq_spinlock_unlock(&box->lock, true);
+	
 	mutex_unlock(&phone->lock);
 }
@@ -150,5 +153,6 @@
 /** Initialize a phone structure.
  *
- * @param phone		Phone structure to be initialized.
+ * @param phone Phone structure to be initialized.
+ *
  */
 void ipc_phone_init(phone_t *phone)
@@ -162,47 +166,41 @@
 /** Helper function to facilitate synchronous calls.
  *
- * @param phone		Destination kernel phone structure.
- * @param request	Call structure with request.
- *
- * @return		EOK on success or EINTR if the sleep was interrupted.
+ * @param phone   Destination kernel phone structure.
+ * @param request Call structure with request.
+ *
+ * @return EOK on success or EINTR if the sleep was interrupted.
+ *
  */
 int ipc_call_sync(phone_t *phone, call_t *request)
 {
-	answerbox_t *sync_box; 
-	ipl_t ipl;
-
-	sync_box = slab_alloc(ipc_answerbox_slab, 0);
+	answerbox_t *sync_box = slab_alloc(ipc_answerbox_slab, 0);
 	ipc_answerbox_init(sync_box, TASK);
-
+	
 	/*
 	 * Put the answerbox on the TASK's list of synchronous answerboxes so
 	 * that it can be cleaned up if the call is interrupted.
 	 */
-	ipl = interrupts_disable();
-	spinlock_lock(&TASK->lock);
+	irq_spinlock_lock(&TASK->lock, true);
 	list_append(&sync_box->sync_box_link, &TASK->sync_box_head);
-	spinlock_unlock(&TASK->lock);
-	interrupts_restore(ipl);
-
+	irq_spinlock_unlock(&TASK->lock, true);
+	
 	/* We will receive data in a special box. */
 	request->callerbox = sync_box;
-
+	
 	ipc_call(phone, request);
 	if (!ipc_wait_for_call(sync_box, SYNCH_NO_TIMEOUT,
 	    SYNCH_FLAGS_INTERRUPTIBLE)) {
-	    	/* The answerbox and the call will be freed by ipc_cleanup(). */
+		/* The answerbox and the call will be freed by ipc_cleanup(). */
 		return EINTR;
 	}
-
+	
 	/*
 	 * The answer arrived without interruption so we can remove the
 	 * answerbox from the TASK's list of synchronous answerboxes.
 	 */
-	(void) interrupts_disable();
-	spinlock_lock(&TASK->lock);
+	irq_spinlock_lock(&TASK->lock, true);
 	list_remove(&sync_box->sync_box_link);
-	spinlock_unlock(&TASK->lock);
-	interrupts_restore(ipl);
-
+	irq_spinlock_unlock(&TASK->lock, true);
+	
 	slab_free(ipc_answerbox_slab, sync_box);
 	return EOK;
@@ -211,6 +209,7 @@
 /** Answer a message which was not dispatched and is not listed in any queue.
  *
- * @param call		Call structure to be answered.
- * @param selflocked	If true, then TASK->answebox is locked.
+ * @param call       Call structure to be answered.
+ * @param selflocked If true, then TASK->answerbox is locked.
+ *
  */
 static void _ipc_answer_free_call(call_t *call, bool selflocked)
@@ -218,15 +217,12 @@
 	answerbox_t *callerbox = call->callerbox;
 	bool do_lock = ((!selflocked) || callerbox != (&TASK->answerbox));
-	ipl_t ipl;
-
+	
 	/* Count sent answer */
-	ipl = interrupts_disable();
-	spinlock_lock(&TASK->lock);
+	irq_spinlock_lock(&TASK->lock, true);
 	TASK->ipc_info.answer_sent++;
-	spinlock_unlock(&TASK->lock);
-	interrupts_restore(ipl);
-
+	irq_spinlock_unlock(&TASK->lock, true);
+	
 	call->flags |= IPC_CALL_ANSWERED;
-
+	
 	if (call->flags & IPC_CALL_FORWARDED) {
 		if (call->caller_phone) {
@@ -235,10 +231,13 @@
 		}
 	}
-
+	
 	if (do_lock)
-		spinlock_lock(&callerbox->lock);
+		irq_spinlock_lock(&callerbox->lock, true);
+	
 	list_append(&call->link, &callerbox->answers);
+	
 	if (do_lock)
-		spinlock_unlock(&callerbox->lock);
+		irq_spinlock_unlock(&callerbox->lock, true);
+	
 	waitq_wakeup(&callerbox->wq, WAKEUP_FIRST);
 }
@@ -246,13 +245,15 @@
 /** Answer a message which is in a callee queue.
  *
- * @param box		Answerbox that is answering the message.
- * @param call		Modified request that is being sent back.
+ * @param box  Answerbox that is answering the message.
+ * @param call Modified request that is being sent back.
+ *
  */
 void ipc_answer(answerbox_t *box, call_t *call)
 {
 	/* Remove from active box */
-	spinlock_lock(&box->lock);
+	irq_spinlock_lock(&box->lock, true);
 	list_remove(&call->link);
-	spinlock_unlock(&box->lock);
+	irq_spinlock_unlock(&box->lock, true);
+	
 	/* Send back answer */
 	_ipc_answer_free_call(call, false);
@@ -264,7 +265,8 @@
  * message and sending it as a normal answer.
  *
- * @param phone		Phone structure the call should appear to come from.
- * @param call		Call structure to be answered.
- * @param err		Return value to be used for the answer.
+ * @param phone Phone structure the call should appear to come from.
+ * @param call  Call structure to be answered.
+ * @param err   Return value to be used for the answer.
+ *
  */
 void ipc_backsend_err(phone_t *phone, call_t *call, unative_t err)
@@ -278,27 +280,25 @@
 /** Unsafe unchecking version of ipc_call.
  *
- * @param phone		Phone structure the call comes from.
- * @param box		Destination answerbox structure.
- * @param call		Call structure with request.
+ * @param phone Phone structure the call comes from.
+ * @param box   Destination answerbox structure.
+ * @param call  Call structure with request.
+ *
  */
 static void _ipc_call(phone_t *phone, answerbox_t *box, call_t *call)
 {
-	ipl_t ipl;
-
 	/* Count sent ipc call */
-	ipl = interrupts_disable();
-	spinlock_lock(&TASK->lock);
+	irq_spinlock_lock(&TASK->lock, true);
 	TASK->ipc_info.call_sent++;
-	spinlock_unlock(&TASK->lock);
-	interrupts_restore(ipl);
-
+	irq_spinlock_unlock(&TASK->lock, true);
+	
 	if (!(call->flags & IPC_CALL_FORWARDED)) {
 		atomic_inc(&phone->active_calls);
 		call->data.phone = phone;
 	}
-
-	spinlock_lock(&box->lock);
+	
+	irq_spinlock_lock(&box->lock, true);
 	list_append(&call->link, &box->calls);
-	spinlock_unlock(&box->lock);
+	irq_spinlock_unlock(&box->lock, true);
+	
 	waitq_wakeup(&box->wq, WAKEUP_FIRST);
 }
@@ -306,14 +306,13 @@
 /** Send an asynchronous request using a phone to an answerbox.
  *
- * @param phone		Phone structure the call comes from and which is
- *			connected to the destination answerbox.
- * @param call		Call structure with request.
- *
- * @return		Return 0 on success, ENOENT on error.
+ * @param phone Phone structure the call comes from and which is
+ *              connected to the destination answerbox.
+ * @param call  Call structure with request.
+ *
+ * @return Return 0 on success, ENOENT on error.
+ *
  */
 int ipc_call(phone_t *phone, call_t *call)
 {
-	answerbox_t *box;
-
 	mutex_lock(&phone->lock);
 	if (phone->state != IPC_PHONE_CONNECTED) {
@@ -328,7 +327,9 @@
 				ipc_backsend_err(phone, call, ENOENT);
 		}
+		
 		return ENOENT;
 	}
-	box = phone->callee;
+	
+	answerbox_t *box = phone->callee;
 	_ipc_call(phone, box, call);
 	
@@ -342,14 +343,12 @@
  * lazily later.
  *
- * @param phone		Phone structure to be hung up.
- *              
- * @return		Return 0 if the phone is disconnected.
- *			Return -1 if the phone was already disconnected.
+ * @param phone Phone structure to be hung up.
+ *
+ * @return 0 if the phone is disconnected.
+ * @return -1 if the phone was already disconnected.
+ *
  */
 int ipc_phone_hangup(phone_t *phone)
 {
-	answerbox_t *box;
-	call_t *call;
-	
 	mutex_lock(&phone->lock);
 	if (phone->state == IPC_PHONE_FREE ||
@@ -359,20 +358,21 @@
 		return -1;
 	}
-	box = phone->callee;
+	
+	answerbox_t *box = phone->callee;
 	if (phone->state != IPC_PHONE_SLAMMED) {
 		/* Remove myself from answerbox */
-		spinlock_lock(&box->lock);
+		irq_spinlock_lock(&box->lock, true);
 		list_remove(&phone->link);
-		spinlock_unlock(&box->lock);
-
-		call = ipc_call_alloc(0);
+		irq_spinlock_unlock(&box->lock, true);
+		
+		call_t *call = ipc_call_alloc(0);
 		IPC_SET_METHOD(call->data, IPC_M_PHONE_HUNGUP);
 		call->flags |= IPC_CALL_DISCARD_ANSWER;
 		_ipc_call(phone, box, call);
 	}
-
+	
 	phone->state = IPC_PHONE_HUNGUP;
 	mutex_unlock(&phone->lock);
-
+	
 	return 0;
 }
@@ -380,30 +380,26 @@
 /** Forwards call from one answerbox to another one.
  *
- * @param call		Call structure to be redirected.
- * @param newphone	Phone structure to target answerbox.
- * @param oldbox	Old answerbox structure.
- * @param mode		Flags that specify mode of the forward operation.
- *
- * @return		Return 0 if forwarding succeeded or an error code if
- *			there was error.
- * 
+ * @param call     Call structure to be redirected.
+ * @param newphone Phone structure to target answerbox.
+ * @param oldbox   Old answerbox structure.
+ * @param mode     Flags that specify mode of the forward operation.
+ *
+ * @return 0 if forwarding succeeded or an error code if
+ *         there was an error.
+ *
  * The return value serves only as an information for the forwarder,
  * the original caller is notified automatically with EFORWARD.
- */
-int ipc_forward(call_t *call, phone_t *newphone, answerbox_t *oldbox, int mode)
-{
-	ipl_t ipl;
-
+ *
+ */
+int ipc_forward(call_t *call, phone_t *newphone, answerbox_t *oldbox,
+    unsigned int mode)
+{
 	/* Count forwarded calls */
-	ipl = interrupts_disable();
-	spinlock_lock(&TASK->lock);
+	irq_spinlock_lock(&TASK->lock, true);
 	TASK->ipc_info.forwarded++;
-	spinlock_unlock(&TASK->lock);
-	interrupts_restore(ipl);
-
-	spinlock_lock(&oldbox->lock);
+	irq_spinlock_pass(&TASK->lock, &oldbox->lock);
 	list_remove(&call->link);
-	spinlock_unlock(&oldbox->lock);
-
+	irq_spinlock_unlock(&oldbox->lock, true);
+	
 	if (mode & IPC_FF_ROUTE_FROM_ME) {
 		if (!call->caller_phone)
@@ -411,5 +407,5 @@
 		call->data.phone = newphone;
 	}
-
+	
 	return ipc_call(newphone, call);
 }
@@ -418,24 +414,25 @@
 /** Wait for a phone call.
  *
- * @param box		Answerbox expecting the call.
- * @param usec		Timeout in microseconds. See documentation for
- *			waitq_sleep_timeout() for decription of its special
- *			meaning.
- * @param flags		Select mode of sleep operation. See documentation for
- *			waitq_sleep_timeout() for description of its special
- *			meaning.
- * @return 		Recived call structure or NULL.
- * 
+ * @param box   Answerbox expecting the call.
+ * @param usec  Timeout in microseconds. See documentation for
+ *              waitq_sleep_timeout() for description of its special
+ *              meaning.
+ * @param flags Select mode of sleep operation. See documentation for
+ *              waitq_sleep_timeout() for description of its special
+ *              meaning.
+ *
+ * @return Received call structure or NULL.
+ *
  * To distinguish between a call and an answer, have a look at call->flags.
- */
-call_t *ipc_wait_for_call(answerbox_t *box, uint32_t usec, int flags)
+ *
+ */
+call_t *ipc_wait_for_call(answerbox_t *box, uint32_t usec, unsigned int flags)
 {
 	call_t *request;
-	ipl_t ipl;
 	uint64_t irq_cnt = 0;
 	uint64_t answer_cnt = 0;
 	uint64_t call_cnt = 0;
 	int rc;
-
+	
 restart:
 	rc = waitq_sleep_timeout(&box->wq, usec, flags);
@@ -443,21 +440,19 @@
 		return NULL;
 	
-	spinlock_lock(&box->lock);
+	irq_spinlock_lock(&box->lock, true);
 	if (!list_empty(&box->irq_notifs)) {
 		/* Count recieved IRQ notification */
-		irq_cnt++;	
-
-		ipl = interrupts_disable();
-		spinlock_lock(&box->irq_lock);
-
+		irq_cnt++;
+		
+		irq_spinlock_lock(&box->irq_lock, false);
+		
 		request = list_get_instance(box->irq_notifs.next, call_t, link);
 		list_remove(&request->link);
-
-		spinlock_unlock(&box->irq_lock);
-		interrupts_restore(ipl);
+		
+		irq_spinlock_unlock(&box->irq_lock, false);
 	} else if (!list_empty(&box->answers)) {
 		/* Count recieved answer */
 		answer_cnt++;
-
+		
 		/* Handle asynchronous answers */
 		request = list_get_instance(box->answers.next, call_t, link);
@@ -467,25 +462,25 @@
 		/* Count recieved call */
 		call_cnt++;
-
+		
 		/* Handle requests */
 		request = list_get_instance(box->calls.next, call_t, link);
 		list_remove(&request->link);
+		
 		/* Append request to dispatch queue */
 		list_append(&request->link, &box->dispatched_calls);
 	} else {
 		/* This can happen regularly after ipc_cleanup */
-		spinlock_unlock(&box->lock);
+		irq_spinlock_unlock(&box->lock, true);
 		goto restart;
 	}
-	spinlock_unlock(&box->lock);
-	
-	ipl = interrupts_disable();
-	spinlock_lock(&TASK->lock);
+	
+	irq_spinlock_pass(&box->lock, &TASK->lock);
+	
 	TASK->ipc_info.irq_notif_recieved += irq_cnt;
 	TASK->ipc_info.answer_recieved += answer_cnt;
 	TASK->ipc_info.call_recieved += call_cnt;
-	spinlock_unlock(&TASK->lock);
-	interrupts_restore(ipl);
-
+	
+	irq_spinlock_unlock(&TASK->lock, true);
+	
 	return request;
 }
@@ -493,16 +488,16 @@
 /** Answer all calls from list with EHANGUP answer.
  *
- * @param lst		Head of the list to be cleaned up.
+ * @param lst Head of the list to be cleaned up.
+ *
  */
 void ipc_cleanup_call_list(link_t *lst)
 {
-	call_t *call;
-
 	while (!list_empty(lst)) {
-		call = list_get_instance(lst->next, call_t, link);
+		call_t *call = list_get_instance(lst->next, call_t, link);
 		if (call->buffer)
 			free(call->buffer);
+		
 		list_remove(&call->link);
-
+		
 		IPC_SET_RETVAL(call->data, EHANGUP);
 		_ipc_answer_free_call(call, true);
@@ -512,7 +507,8 @@
 /** Disconnects all phones connected to an answerbox.
  *
- * @param box		Answerbox to disconnect phones from.
- * @param notify_box	If true, the answerbox will get a hangup message for
- *			each disconnected phone.
+ * @param box        Answerbox to disconnect phones from.
+ * @param notify_box If true, the answerbox will get a hangup message for
+ *                   each disconnected phone.
+ *
  */
 void ipc_answerbox_slam_phones(answerbox_t *box, bool notify_box)
@@ -520,19 +516,15 @@
 	phone_t *phone;
 	DEADLOCK_PROBE_INIT(p_phonelck);
-	ipl_t ipl;
-	call_t *call;
-
-	call = notify_box ? ipc_call_alloc(0) : NULL;
-
+	
+	call_t *call = notify_box ? ipc_call_alloc(0) : NULL;
+	
 	/* Disconnect all phones connected to our answerbox */
 restart_phones:
-	ipl = interrupts_disable();
-	spinlock_lock(&box->lock);
+	irq_spinlock_lock(&box->lock, true);
 	while (!list_empty(&box->connected_phones)) {
 		phone = list_get_instance(box->connected_phones.next,
 		    phone_t, link);
 		if (SYNCH_FAILED(mutex_trylock(&phone->lock))) {
-			spinlock_unlock(&box->lock);
-			interrupts_restore(ipl);
+			irq_spinlock_unlock(&box->lock, true);
 			DEADLOCK_PROBE(p_phonelck, DEADLOCK_THRESHOLD);
 			goto restart_phones;
@@ -541,13 +533,12 @@
 		/* Disconnect phone */
 		ASSERT(phone->state == IPC_PHONE_CONNECTED);
-
+		
 		list_remove(&phone->link);
 		phone->state = IPC_PHONE_SLAMMED;
-
+		
 		if (notify_box) {
 			mutex_unlock(&phone->lock);
-			spinlock_unlock(&box->lock);
-			interrupts_restore(ipl);
-
+			irq_spinlock_unlock(&box->lock, true);
+			
 			/*
 			 * Send one message to the answerbox for each
@@ -559,18 +550,17 @@
 			call->flags |= IPC_CALL_DISCARD_ANSWER;
 			_ipc_call(phone, box, call);
-
+			
 			/* Allocate another call in advance */
 			call = ipc_call_alloc(0);
-
+			
 			/* Must start again */
 			goto restart_phones;
 		}
-
+		
 		mutex_unlock(&phone->lock);
 	}
-
-	spinlock_unlock(&box->lock);
-	interrupts_restore(ipl);
-
+	
+	irq_spinlock_unlock(&box->lock, true);
+	
 	/* Free unused call */
 	if (call)
@@ -578,47 +568,45 @@
 }
 
-/** Cleans up all IPC communication of the current task.
+/** Clean up all IPC communication of the current task.
  *
  * Note: ipc_hangup sets returning answerbox to TASK->answerbox, you
  * have to change it as well if you want to cleanup other tasks than TASK.
+ *
  */
 void ipc_cleanup(void)
 {
-	int i;
-	call_t *call;
-	ipl_t ipl;
-
 	/* Disconnect all our phones ('ipc_phone_hangup') */
+	size_t i;
 	for (i = 0; i < IPC_MAX_PHONES; i++)
 		ipc_phone_hangup(&TASK->phones[i]);
-
+	
 	/* Unsubscribe from any event notifications. */
 	event_cleanup_answerbox(&TASK->answerbox);
-
+	
 	/* Disconnect all connected irqs */
 	ipc_irq_cleanup(&TASK->answerbox);
-
+	
 	/* Disconnect all phones connected to our regular answerbox */
 	ipc_answerbox_slam_phones(&TASK->answerbox, false);
-
+	
 #ifdef CONFIG_UDEBUG
 	/* Clean up kbox thread and communications */
 	ipc_kbox_cleanup();
 #endif
-
+	
 	/* Answer all messages in 'calls' and 'dispatched_calls' queues */
-	spinlock_lock(&TASK->answerbox.lock);
+	irq_spinlock_lock(&TASK->answerbox.lock, true);
 	ipc_cleanup_call_list(&TASK->answerbox.dispatched_calls);
 	ipc_cleanup_call_list(&TASK->answerbox.calls);
-	spinlock_unlock(&TASK->answerbox.lock);
+	irq_spinlock_unlock(&TASK->answerbox.lock, true);
 	
 	/* Wait for all answers to interrupted synchronous calls to arrive */
-	ipl = interrupts_disable();
+	ipl_t ipl = interrupts_disable();
 	while (!list_empty(&TASK->sync_box_head)) {
 		answerbox_t *box = list_get_instance(TASK->sync_box_head.next,
 		    answerbox_t, sync_box_link);
-
+		
 		list_remove(&box->sync_box_link);
-		call = ipc_wait_for_call(box, SYNCH_NO_TIMEOUT,
+		call_t *call = ipc_wait_for_call(box, SYNCH_NO_TIMEOUT,
 		    SYNCH_FLAGS_NONE);
 		ipc_call_free(call);
@@ -626,10 +614,12 @@
 	}
 	interrupts_restore(ipl);
-
+	
 	/* Wait for all answers to asynchronous calls to arrive */
-	while (1) {
-		/* Go through all phones, until all are FREE... */
-		/* Locking not needed, no one else should modify
-		 * it, when we are in cleanup */
+	while (true) {
+		/*
+		 * Go through all phones, until they are all FREE
+		 * Locking is not needed, no one else should modify
+		 * it when we are in cleanup
+		 */
 		for (i = 0; i < IPC_MAX_PHONES; i++) {
 			if (TASK->phones[i].state == IPC_PHONE_HUNGUP &&
@@ -639,20 +629,26 @@
 			}
 			
-			/* Just for sure, we might have had some 
-			 * IPC_PHONE_CONNECTING phones */
+			/*
+			 * Just for sure, we might have had some
+			 * IPC_PHONE_CONNECTING phones
+			 */
 			if (TASK->phones[i].state == IPC_PHONE_CONNECTED)
 				ipc_phone_hangup(&TASK->phones[i]);
-			/* If the hangup succeeded, it has sent a HANGUP 
+			
+			/*
+			 * If the hangup succeeded, it has sent a HANGUP
 			 * message, the IPC is now in HUNGUP state, we
-			 * wait for the reply to come */
+			 * wait for the reply to come
+			 */
 			
 			if (TASK->phones[i].state != IPC_PHONE_FREE)
 				break;
 		}
-		/* Voila, got into cleanup */
+		
+		/* Got into cleanup */
 		if (i == IPC_MAX_PHONES)
 			break;
 		
-		call = ipc_wait_for_call(&TASK->answerbox, SYNCH_NO_TIMEOUT,
+		call_t *call = ipc_wait_for_call(&TASK->answerbox, SYNCH_NO_TIMEOUT,
 		    SYNCH_FLAGS_NONE);
 		ASSERT((call->flags & IPC_CALL_ANSWERED) ||
@@ -666,10 +662,12 @@
 		if (!(call->flags & IPC_CALL_DISCARD_ANSWER))
 			atomic_dec(&TASK->active_calls);
+		
 		ipc_call_free(call);
 	}
 }
 
-
-/** Initilize IPC subsystem */
+/** Initialize IPC subsystem
+ *
+ */
 void ipc_init(void)
 {
@@ -680,30 +678,26 @@
 }
 
-
 /** List answerbox contents.
  *
- * @param taskid	Task ID.
+ * @param taskid Task ID.
+ *
  */
 void ipc_print_task(task_id_t taskid)
 {
-	task_t *task;
-	int i;
-	call_t *call;
-	link_t *tmp;
-	ipl_t ipl;
-	
-	ipl = interrupts_disable();
-	spinlock_lock(&tasks_lock);
-	task = task_find_by_id(taskid);
-	if (task) 
-		spinlock_lock(&task->lock);
-	spinlock_unlock(&tasks_lock);
+	irq_spinlock_lock(&tasks_lock, true);
+	task_t *task = task_find_by_id(taskid);
+	
 	if (!task) {
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&tasks_lock, true);
 		return;
 	}
-
+	
+	/* Hand-over-hand locking */
+	irq_spinlock_exchange(&tasks_lock, &task->lock);
+	
 	/* Print opened phones & details */
 	printf("PHONE:\n");
+	
+	size_t i;
 	for (i = 0; i < IPC_MAX_PHONES; i++) {
 		if (SYNCH_FAILED(mutex_trylock(&task->phones[i].lock))) {
@@ -711,6 +705,8 @@
 			continue;
 		}
+		
 		if (task->phones[i].state != IPC_PHONE_FREE) {
-			printf("%d: ", i);
+			printf("%" PRIs ": ", i);
+			
 			switch (task->phones[i].state) {
 			case IPC_PHONE_CONNECTING:
@@ -718,31 +714,35 @@
 				break;
 			case IPC_PHONE_CONNECTED:
-				printf("connected to: %p ", 
-				       task->phones[i].callee);
+				printf("connected to: %p ",
+				    task->phones[i].callee);
 				break;
 			case IPC_PHONE_SLAMMED:
 				printf("slammed by: %p ", 
-				       task->phones[i].callee);
+				    task->phones[i].callee);
 				break;
 			case IPC_PHONE_HUNGUP:
 				printf("hung up - was: %p ", 
-				       task->phones[i].callee);
+				    task->phones[i].callee);
 				break;
 			default:
 				break;
 			}
-			printf("active: %ld\n",
+			
+			printf("active: %" PRIun "\n",
 			    atomic_get(&task->phones[i].active_calls));
 		}
+		
 		mutex_unlock(&task->phones[i].lock);
 	}
-
-
+	
+	irq_spinlock_lock(&task->answerbox.lock, false);
+	
+	link_t *cur;
+	
 	/* Print answerbox - calls */
-	spinlock_lock(&task->answerbox.lock);
 	printf("ABOX - CALLS:\n");
-	for (tmp = task->answerbox.calls.next; tmp != &task->answerbox.calls;
-	    tmp = tmp->next) {
-		call = list_get_instance(tmp, call_t, link);
+	for (cur = task->answerbox.calls.next; cur != &task->answerbox.calls;
+	    cur = cur->next) {
+		call_t *call = list_get_instance(cur, call_t, link);
 		printf("Callid: %p Srctask:%" PRIu64 " M:%" PRIun 
 		    " A1:%" PRIun " A2:%" PRIun " A3:%" PRIun
@@ -754,10 +754,11 @@
 		    call->flags);
 	}
-	/* Print answerbox - calls */
+	
+	/* Print answerbox - dispatched calls */
 	printf("ABOX - DISPATCHED CALLS:\n");
-	for (tmp = task->answerbox.dispatched_calls.next;
-	    tmp != &task->answerbox.dispatched_calls; 
-	    tmp = tmp->next) {
-		call = list_get_instance(tmp, call_t, link);
+	for (cur = task->answerbox.dispatched_calls.next;
+	    cur != &task->answerbox.dispatched_calls;
+	    cur = cur->next) {
+		call_t *call = list_get_instance(cur, call_t, link);
 		printf("Callid: %p Srctask:%" PRIu64 " M:%" PRIun
 		    " A1:%" PRIun " A2:%" PRIun " A3:%" PRIun
@@ -769,10 +770,11 @@
 		    call->flags);
 	}
-	/* Print answerbox - calls */
+	
+	/* Print answerbox - answers */
 	printf("ABOX - ANSWERS:\n");
-	for (tmp = task->answerbox.answers.next;
-	    tmp != &task->answerbox.answers;
-	    tmp = tmp->next) {
-		call = list_get_instance(tmp, call_t, link);
+	for (cur = task->answerbox.answers.next;
+	    cur != &task->answerbox.answers;
+	    cur = cur->next) {
+		call_t *call = list_get_instance(cur, call_t, link);
 		printf("Callid:%p M:%" PRIun " A1:%" PRIun " A2:%" PRIun
 		    " A3:%" PRIun " A4:%" PRIun " A5:%" PRIun " Flags:%x\n",
@@ -782,8 +784,7 @@
 		    call->flags);
 	}
-
-	spinlock_unlock(&task->answerbox.lock);
-	spinlock_unlock(&task->lock);
-	interrupts_restore(ipl);
+	
+	irq_spinlock_unlock(&task->answerbox.lock, false);
+	irq_spinlock_unlock(&task->lock, true);
 }
 
Index: kernel/generic/src/ipc/ipcrsc.c
===================================================================
--- kernel/generic/src/ipc/ipcrsc.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/ipc/ipcrsc.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -45,5 +45,5 @@
  * - hangup phone (the caller has hung up)
  * - hangup phone (the answerbox is exiting)
- * 
+ *
  * Locking strategy
  *
@@ -85,5 +85,5 @@
  *
  * Phone hangup
- * 
+ *
  * *** The caller hangs up (sys_ipc_hangup) ***
  * - The phone is disconnected (no more messages can be sent over this phone),
@@ -99,5 +99,5 @@
  *
  * Call forwarding
- * 
+ *
  * The call can be forwarded, so that the answer to call is passed directly
  * to the original sender. However, this poses special problems regarding 
@@ -114,5 +114,5 @@
  *
  * Cleanup strategy
- * 
+ *
  * 1) Disconnect all our phones ('ipc_phone_hangup').
  *
@@ -123,5 +123,5 @@
  *
  * 4) Wait for all async answers to arrive and dispose of them.
- * 
+ *
  */
 
@@ -137,19 +137,20 @@
  * @todo Some speedup (hash table?)
  *
- * @param callid	Userspace hash of the call. Currently it is the call
- *			structure kernel address.
- *
- * @return		NULL on not found, otherwise pointer to the call
- *			structure.
+ * @param callid Userspace hash of the call. Currently it is the call
+ *               structure kernel address.
+ *
+ * @return NULL on not found, otherwise pointer to the call
+ *         structure.
+ *
  */
 call_t *get_call(unative_t callid)
 {
 	link_t *lst;
-	call_t *call, *result = NULL;
-
-	spinlock_lock(&TASK->answerbox.lock);
+	call_t *result = NULL;
+	
+	irq_spinlock_lock(&TASK->answerbox.lock, true);
 	for (lst = TASK->answerbox.dispatched_calls.next;
 	    lst != &TASK->answerbox.dispatched_calls; lst = lst->next) {
-		call = list_get_instance(lst, call_t, link);
+		call_t *call = list_get_instance(lst, call_t, link);
 		if ((unative_t) call == callid) {
 			result = call;
@@ -157,5 +158,6 @@
 		}
 	}
-	spinlock_unlock(&TASK->answerbox.lock);
+	
+	irq_spinlock_unlock(&TASK->answerbox.lock, true);
 	return result;
 }
@@ -163,29 +165,31 @@
 /** Allocate new phone slot in the specified task.
  *
- * @param t		Task for which to allocate a new phone.
- *
- * @return		New phone handle or -1 if the phone handle limit is
- *			exceeded.
- */
-int phone_alloc(task_t *t)
-{
-	int i;
-
-	spinlock_lock(&t->lock);
+ * @param task Task for which to allocate a new phone.
+ *
+ * @return New phone handle or -1 if the phone handle limit is
+ *         exceeded.
+ *
+ */
+int phone_alloc(task_t *task)
+{
+	irq_spinlock_lock(&task->lock, true);
+	
+	size_t i;
 	for (i = 0; i < IPC_MAX_PHONES; i++) {
-		if (t->phones[i].state == IPC_PHONE_HUNGUP &&
-		    atomic_get(&t->phones[i].active_calls) == 0)
-			t->phones[i].state = IPC_PHONE_FREE;
-
-		if (t->phones[i].state == IPC_PHONE_FREE) {
-			t->phones[i].state = IPC_PHONE_CONNECTING;
+		if ((task->phones[i].state == IPC_PHONE_HUNGUP) &&
+		    (atomic_get(&task->phones[i].active_calls) == 0))
+			task->phones[i].state = IPC_PHONE_FREE;
+		
+		if (task->phones[i].state == IPC_PHONE_FREE) {
+			task->phones[i].state = IPC_PHONE_CONNECTING;
 			break;
 		}
 	}
-	spinlock_unlock(&t->lock);
-
+	
+	irq_spinlock_unlock(&task->lock, true);
+	
 	if (i == IPC_MAX_PHONES)
 		return -1;
-
+	
 	return i;
 }
@@ -193,5 +197,6 @@
 /** Mark a phone structure free.
  *
- * @param phone		Phone structure to be marked free.
+ * @param phone Phone structure to be marked free.
+ *
  */
 static void phone_deallocp(phone_t *phone)
@@ -199,5 +204,5 @@
 	ASSERT(phone->state == IPC_PHONE_CONNECTING);
 	
-	/* atomic operation */
+	/* Atomic operation */
 	phone->state = IPC_PHONE_FREE;
 }
@@ -207,5 +212,6 @@
  * All already sent messages will be correctly processed.
  *
- * @param phoneid	Phone handle of the phone to be freed.
+ * @param phoneid Phone handle of the phone to be freed.
+ *
  */
 void phone_dealloc(int phoneid)
@@ -216,10 +222,11 @@
 /** Connect phone to a given answerbox.
  *
- * @param phoneid 	Phone handle to be connected.
- * @param box		Answerbox to which to connect the phone handle.
+ * @param phoneid Phone handle to be connected.
+ * @param box     Answerbox to which to connect the phone handle.
  *
  * The procedure _enforces_ that the user first marks the phone
  * busy (e.g. via phone_alloc) and then connects the phone, otherwise
  * race condition may appear.
+ *
  */
 void phone_connect(int phoneid, answerbox_t *box)
Index: kernel/generic/src/ipc/irq.c
===================================================================
--- kernel/generic/src/ipc/irq.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/ipc/irq.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -31,4 +31,5 @@
  * @{
  */
+
 /**
  * @file
@@ -67,4 +68,5 @@
  *   structure are finished. Because we hold the hash table lock, we prevent new
  *   IRQs from taking new references to the IRQ structure.
+ *
  */
 
@@ -81,5 +83,6 @@
 /** Free the top-half pseudocode.
  *
- * @param code		Pointer to the top-half pseudocode.
+ * @param code Pointer to the top-half pseudocode.
+ *
  */
 static void code_free(irq_code_t *code)
@@ -93,16 +96,13 @@
 /** Copy the top-half pseudocode from userspace into the kernel.
  *
- * @param ucode		Userspace address of the top-half pseudocode.
- *
- * @return		Kernel address of the copied pseudocode.
+ * @param ucode Userspace address of the top-half pseudocode.
+ *
+ * @return Kernel address of the copied pseudocode.
+ *
  */
 static irq_code_t *code_from_uspace(irq_code_t *ucode)
 {
-	irq_code_t *code;
-	irq_cmd_t *ucmds;
-	int rc;
-
-	code = malloc(sizeof(*code), 0);
-	rc = copy_from_uspace(code, ucode, sizeof(*code));
+	irq_code_t *code = malloc(sizeof(*code), 0);
+	int rc = copy_from_uspace(code, ucode, sizeof(*code));
 	if (rc != 0) {
 		free(code);
@@ -114,5 +114,6 @@
 		return NULL;
 	}
-	ucmds = code->cmds;
+	
+	irq_cmd_t *ucmds = code->cmds;
 	code->cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
 	rc = copy_from_uspace(code->cmds, ucmds,
@@ -123,5 +124,5 @@
 		return NULL;
 	}
-
+	
 	return code;
 }
@@ -141,8 +142,4 @@
     unative_t method, irq_code_t *ucode)
 {
-	ipl_t ipl;
-	irq_code_t *code;
-	irq_t *irq;
-	link_t *hlp;
 	unative_t key[] = {
 		(unative_t) inr,
@@ -150,16 +147,17 @@
 	};
 	
+	irq_code_t *code;
 	if (ucode) {
 		code = code_from_uspace(ucode);
 		if (!code)
 			return EBADMEM;
-	} else {
+	} else
 		code = NULL;
-	}
 	
 	/*
 	 * Allocate and populate the IRQ structure.
 	 */
-	irq = malloc(sizeof(irq_t), 0);
+	irq_t *irq = malloc(sizeof(irq_t), 0);
+	
 	irq_initialize(irq);
 	irq->devno = devno;
@@ -177,29 +175,30 @@
 	 * answerbox's list.
 	 */
-	ipl = interrupts_disable();
-	spinlock_lock(&irq_uspace_hash_table_lock);
-	hlp = hash_table_find(&irq_uspace_hash_table, key);
+	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
+	
+	link_t *hlp = hash_table_find(&irq_uspace_hash_table, key);
 	if (hlp) {
-		irq_t *hirq __attribute__((unused))
-		    = hash_table_get_instance(hlp, irq_t, link);
+		irq_t *hirq = hash_table_get_instance(hlp, irq_t, link);
 		
 		/* hirq is locked */
-		spinlock_unlock(&hirq->lock);
+		irq_spinlock_unlock(&hirq->lock, false);
 		code_free(code);
-		spinlock_unlock(&irq_uspace_hash_table_lock);
+		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
+		
 		free(irq);
-		interrupts_restore(ipl);
 		return EEXISTS;
 	}
 	
-	spinlock_lock(&irq->lock);  /* Not really necessary, but paranoid */
-	spinlock_lock(&box->irq_lock);
+	/* Locking is not really necessary, but paranoid */
+	irq_spinlock_lock(&irq->lock, false);
+	irq_spinlock_lock(&box->irq_lock, false);
+	
 	hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
 	list_append(&irq->notif_cfg.link, &box->irq_head);
-	spinlock_unlock(&box->irq_lock);
-	spinlock_unlock(&irq->lock);
-	spinlock_unlock(&irq_uspace_hash_table_lock);
-	
-	interrupts_restore(ipl);
+	
+	irq_spinlock_unlock(&box->irq_lock, false);
+	irq_spinlock_unlock(&irq->lock, false);
+	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
+	
 	return EOK;
 }
@@ -207,29 +206,27 @@
 /** Unregister task from IRQ notification.
  *
- * @param box		Answerbox associated with the notification.
- * @param inr		IRQ number.
- * @param devno		Device number.
+ * @param box   Answerbox associated with the notification.
+ * @param inr   IRQ number.
+ * @param devno Device number.
+ *
  */
 int ipc_irq_unregister(answerbox_t *box, inr_t inr, devno_t devno)
 {
-	ipl_t ipl;
 	unative_t key[] = {
 		(unative_t) inr,
 		(unative_t) devno
 	};
-	link_t *lnk;
-	irq_t *irq;
-
-	ipl = interrupts_disable();
-	spinlock_lock(&irq_uspace_hash_table_lock);
-	lnk = hash_table_find(&irq_uspace_hash_table, key);
+	
+	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
+	link_t *lnk = hash_table_find(&irq_uspace_hash_table, key);
 	if (!lnk) {
-		spinlock_unlock(&irq_uspace_hash_table_lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
 		return ENOENT;
 	}
-	irq = hash_table_get_instance(lnk, irq_t, link);
+	
+	irq_t *irq = hash_table_get_instance(lnk, irq_t, link);
+	
 	/* irq is locked */
-	spinlock_lock(&box->irq_lock);
+	irq_spinlock_lock(&box->irq_lock, false);
 	
 	ASSERT(irq->notif_cfg.answerbox == box);
@@ -237,8 +234,8 @@
 	/* Free up the pseudo code and associated structures. */
 	code_free(irq->notif_cfg.code);
-
-	/* Remove the IRQ from the answerbox's list. */ 
+	
+	/* Remove the IRQ from the answerbox's list. */
 	list_remove(&irq->notif_cfg.link);
-
+	
 	/*
 	 * We need to drop the IRQ lock now because hash_table_remove() will try
@@ -248,19 +245,17 @@
 	 * the meantime.
 	 */
-	spinlock_unlock(&irq->lock);
-
+	irq_spinlock_unlock(&irq->lock, false);
+	
 	/* Remove the IRQ from the uspace IRQ hash table. */
 	hash_table_remove(&irq_uspace_hash_table, key, 2);
 	
-	spinlock_unlock(&irq_uspace_hash_table_lock);
-	spinlock_unlock(&box->irq_lock);
+	irq_spinlock_unlock(&box->irq_lock, false);
+	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
 	
 	/* Free up the IRQ structure. */
 	free(irq);
 	
-	interrupts_restore(ipl);
 	return EOK;
 }
-
 
 /** Disconnect all IRQ notifications from an answerbox.
@@ -270,35 +265,32 @@
  * send notifications to it.
  *
- * @param box		Answerbox for which we want to carry out the cleanup.
+ * @param box Answerbox for which we want to carry out the cleanup.
+ *
  */
 void ipc_irq_cleanup(answerbox_t *box)
 {
-	ipl_t ipl;
-	
 loop:
-	ipl = interrupts_disable();
-	spinlock_lock(&irq_uspace_hash_table_lock);
-	spinlock_lock(&box->irq_lock);
+	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
+	irq_spinlock_lock(&box->irq_lock, false);
 	
 	while (box->irq_head.next != &box->irq_head) {
-		link_t *cur = box->irq_head.next;
-		irq_t *irq;
 		DEADLOCK_PROBE_INIT(p_irqlock);
-		unative_t key[2];
-		
-		irq = list_get_instance(cur, irq_t, notif_cfg.link);
-		if (!spinlock_trylock(&irq->lock)) {
+		
+		irq_t *irq = list_get_instance(box->irq_head.next, irq_t,
+		    notif_cfg.link);
+		
+		if (!irq_spinlock_trylock(&irq->lock)) {
 			/*
 			 * Avoid deadlock by trying again.
 			 */
-			spinlock_unlock(&box->irq_lock);
-			spinlock_unlock(&irq_uspace_hash_table_lock);
-			interrupts_restore(ipl);
+			irq_spinlock_unlock(&box->irq_lock, false);
+			irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
 			DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
 			goto loop;
 		}
+		
+		unative_t key[2];
 		key[0] = irq->inr;
 		key[1] = irq->devno;
-		
 		
 		ASSERT(irq->notif_cfg.answerbox == box);
@@ -317,5 +309,5 @@
 		 * didn't drop the hash table lock in the meantime.
 		 */
-		spinlock_unlock(&irq->lock);
+		irq_spinlock_unlock(&irq->lock, false);
 		
 		/* Remove from the hash table. */
@@ -325,22 +317,22 @@
 	}
 	
-	spinlock_unlock(&box->irq_lock);
-	spinlock_unlock(&irq_uspace_hash_table_lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&box->irq_lock, false);
+	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
 }
 
 /** Add a call to the proper answerbox queue.
  *
- * Assume irq->lock is locked.
- *
- * @param irq		IRQ structure referencing the target answerbox.
- * @param call		IRQ notification call.
+ * Assume irq->lock is locked and interrupts disabled.
+ *
+ * @param irq  IRQ structure referencing the target answerbox.
+ * @param call IRQ notification call.
+ *
  */
 static void send_call(irq_t *irq, call_t *call)
 {
-	spinlock_lock(&irq->notif_cfg.answerbox->irq_lock);
+	irq_spinlock_lock(&irq->notif_cfg.answerbox->irq_lock, false);
 	list_append(&call->link, &irq->notif_cfg.answerbox->irq_notifs);
-	spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock);
-		
+	irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);
+	
 	waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
 }
@@ -348,16 +340,14 @@
 /** Apply the top-half pseudo code to find out whether to accept the IRQ or not.
  *
- * @param irq		IRQ structure.
- *
- * @return		IRQ_ACCEPT if the interrupt is accepted by the
- *			pseudocode. IRQ_DECLINE otherwise.
+ * @param irq IRQ structure.
+ *
+ * @return IRQ_ACCEPT if the interrupt is accepted by the
+ *         pseudocode, IRQ_DECLINE otherwise.
+ *
  */
 irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
 {
-	unsigned int i;
-	unative_t dstval;
 	irq_code_t *code = irq->notif_cfg.code;
-	unative_t *scratch = irq->notif_cfg.scratch;
-
+	uint32_t *scratch = irq->notif_cfg.scratch;
 	
 	if (!irq->notif_cfg.notify)
@@ -367,10 +357,13 @@
 		return IRQ_DECLINE;
 	
+	size_t i;
 	for (i = 0; i < code->cmdcount; i++) {
-		unsigned int srcarg = code->cmds[i].srcarg;
-		unsigned int dstarg = code->cmds[i].dstarg;
+		uint32_t dstval;
+		uintptr_t srcarg = code->cmds[i].srcarg;
+		uintptr_t dstarg = code->cmds[i].dstarg;
 		
 		if (srcarg >= IPC_CALL_LEN)
 			break;
+		
 		if (dstarg >= IPC_CALL_LEN)
 			break;
@@ -405,5 +398,5 @@
 			break;
 		case CMD_BTEST:
-			if (srcarg && dstarg) {
+			if ((srcarg) && (dstarg)) {
 				dstval = scratch[srcarg] & code->cmds[i].value;
 				scratch[dstarg] = dstval;
@@ -411,5 +404,5 @@
 			break;
 		case CMD_PREDICATE:
-			if (srcarg && !scratch[srcarg]) {
+			if ((srcarg) && (!scratch[srcarg])) {
 				i += code->cmds[i].value;
 				continue;
@@ -427,19 +420,17 @@
 }
 
-
 /* IRQ top-half handler.
  *
  * We expect interrupts to be disabled and the irq->lock already held.
  *
- * @param irq		IRQ structure.
+ * @param irq IRQ structure.
+ *
  */
 void ipc_irq_top_half_handler(irq_t *irq)
 {
 	ASSERT(irq);
-
+	
 	if (irq->notif_cfg.answerbox) {
-		call_t *call;
-
-		call = ipc_call_alloc(FRAME_ATOMIC);
+		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
 		if (!call)
 			return;
@@ -448,5 +439,5 @@
 		/* Put a counter to the message */
 		call->priv = ++irq->notif_cfg.counter;
-
+		
 		/* Set up args */
 		IPC_SET_METHOD(call->data, irq->notif_cfg.method);
@@ -456,5 +447,5 @@
 		IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]);
 		IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]);
-
+		
 		send_call(irq, call);
 	}
@@ -463,28 +454,28 @@
 /** Send notification message.
  *
- * @param irq		IRQ structure.
- * @param a1		Driver-specific payload argument.
- * @param a2		Driver-specific payload argument.
- * @param a3		Driver-specific payload argument.
- * @param a4		Driver-specific payload argument.
- * @param a5		Driver-specific payload argument.
+ * @param irq IRQ structure.
+ * @param a1  Driver-specific payload argument.
+ * @param a2  Driver-specific payload argument.
+ * @param a3  Driver-specific payload argument.
+ * @param a4  Driver-specific payload argument.
+ * @param a5  Driver-specific payload argument.
+ *
  */
 void ipc_irq_send_msg(irq_t *irq, unative_t a1, unative_t a2, unative_t a3,
     unative_t a4, unative_t a5)
 {
-	call_t *call;
-
-	spinlock_lock(&irq->lock);
-
+	irq_spinlock_lock(&irq->lock, true);
+	
 	if (irq->notif_cfg.answerbox) {
-		call = ipc_call_alloc(FRAME_ATOMIC);
+		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
 		if (!call) {
-			spinlock_unlock(&irq->lock);
+			irq_spinlock_unlock(&irq->lock, true);
 			return;
 		}
+		
 		call->flags |= IPC_CALL_NOTIF;
 		/* Put a counter to the message */
 		call->priv = ++irq->notif_cfg.counter;
-
+		
 		IPC_SET_METHOD(call->data, irq->notif_cfg.method);
 		IPC_SET_ARG1(call->data, a1);
@@ -496,5 +487,6 @@
 		send_call(irq, call);
 	}
-	spinlock_unlock(&irq->lock);
+	
+	irq_spinlock_unlock(&irq->lock, true);
 }
 
Index: kernel/generic/src/ipc/kbox.c
===================================================================
--- kernel/generic/src/ipc/kbox.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/ipc/kbox.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -47,7 +47,5 @@
 void ipc_kbox_cleanup(void)
 {
-	bool have_kb_thread;
-
-	/* 
+	/*
 	 * Only hold kb.cleanup_lock while setting kb.finished -
 	 * this is enough.
@@ -56,12 +54,12 @@
 	TASK->kb.finished = true;
 	mutex_unlock(&TASK->kb.cleanup_lock);
-
-	have_kb_thread = (TASK->kb.thread != NULL);
-
+	
+	bool have_kb_thread = (TASK->kb.thread != NULL);
+	
 	/*
 	 * From now on nobody will try to connect phones or attach
 	 * kbox threads
 	 */
-
+	
 	/*
 	 * Disconnect all phones connected to our kbox. Passing true for
@@ -71,6 +69,6 @@
 	 */
 	ipc_answerbox_slam_phones(&TASK->kb.box, have_kb_thread);
-
-	/* 
+	
+	/*
 	 * If the task was being debugged, clean up debugging session.
 	 * This is necessarry as slamming the phones won't force
@@ -80,5 +78,5 @@
 	udebug_task_cleanup(TASK);
 	mutex_unlock(&TASK->udebug.lock);
-
+	
 	if (have_kb_thread) {
 		LOG("Join kb.thread.");
@@ -88,44 +86,40 @@
 		TASK->kb.thread = NULL;
 	}
-
+	
 	/* Answer all messages in 'calls' and 'dispatched_calls' queues. */
-	spinlock_lock(&TASK->kb.box.lock);
+	irq_spinlock_lock(&TASK->kb.box.lock, true);
 	ipc_cleanup_call_list(&TASK->kb.box.dispatched_calls);
 	ipc_cleanup_call_list(&TASK->kb.box.calls);
-	spinlock_unlock(&TASK->kb.box.lock);
+	irq_spinlock_unlock(&TASK->kb.box.lock, true);
 }
 
 /** Handle hangup message in kbox.
  *
- * @param call	The IPC_M_PHONE_HUNGUP call structure.
- * @param last	Output, the function stores @c true here if
- *		this was the last phone, @c false otherwise.
- **/
+ * @param call The IPC_M_PHONE_HUNGUP call structure.
+ * @param last Output, the function stores @c true here if
+ *             this was the last phone, @c false otherwise.
+ *
+ */
 static void kbox_proc_phone_hungup(call_t *call, bool *last)
 {
-	ipl_t ipl;
-
 	/* Was it our debugger, who hung up? */
 	if (call->sender == TASK->udebug.debugger) {
 		/* Terminate debugging session (if any). */
 		LOG("Terminate debugging session.");
-		ipl = interrupts_disable();
-		spinlock_lock(&TASK->lock);
+		irq_spinlock_lock(&TASK->lock, true);
 		udebug_task_cleanup(TASK);
-		spinlock_unlock(&TASK->lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&TASK->lock, true);
 	} else {
 		LOG("Was not debugger.");
 	}
-
+	
 	LOG("Continue with hangup message.");
 	IPC_SET_RETVAL(call->data, 0);
 	ipc_answer(&TASK->kb.box, call);
-
+	
 	mutex_lock(&TASK->kb.cleanup_lock);
-
-	ipl = interrupts_disable();
-	spinlock_lock(&TASK->lock);
-	spinlock_lock(&TASK->kb.box.lock);
+	
+	irq_spinlock_lock(&TASK->lock, true);
+	irq_spinlock_lock(&TASK->kb.box.lock, false);
 	if (list_empty(&TASK->kb.box.connected_phones)) {
 		/*
@@ -133,5 +127,5 @@
 		 * gets freed and signal to the caller.
 		 */
-
+		
 		/* Only detach kbox thread unless already terminating. */
 		if (TASK->kb.finished == false) {
@@ -140,15 +134,13 @@
 			TASK->kb.thread = NULL;
 		}
-
+		
 		LOG("Phone list is empty.");
 		*last = true;
-	} else {
+	} else
 		*last = false;
-	}
-
-	spinlock_unlock(&TASK->kb.box.lock);
-	spinlock_unlock(&TASK->lock);
-	interrupts_restore(ipl);
-
+	
+	irq_spinlock_unlock(&TASK->kb.box.lock, true);
+	irq_spinlock_unlock(&TASK->lock, false);
+	
 	mutex_unlock(&TASK->kb.cleanup_lock);
 }
@@ -159,29 +151,27 @@
  * when all phones are disconnected from the kbox.
  *
- * @param arg	Ignored.
+ * @param arg Ignored.
+ *
  */
 static void kbox_thread_proc(void *arg)
 {
-	call_t *call;
-	bool done;
-
-	(void)arg;
+	(void) arg;
 	LOG("Starting.");
-	done = false;
-
+	bool done = false;
+	
 	while (!done) {
-		call = ipc_wait_for_call(&TASK->kb.box, SYNCH_NO_TIMEOUT,
+		call_t *call = ipc_wait_for_call(&TASK->kb.box, SYNCH_NO_TIMEOUT,
 			SYNCH_FLAGS_NONE);
-
+		
 		if (call == NULL)
-			continue;	/* Try again. */
-
+			continue;  /* Try again. */
+		
 		switch (IPC_GET_METHOD(call->data)) {
-
+		
 		case IPC_M_DEBUG_ALL:
 			/* Handle debug call. */
 			udebug_call_receive(call);
 			break;
-
+		
 		case IPC_M_PHONE_HUNGUP:
 			/*
@@ -192,5 +182,5 @@
 			kbox_proc_phone_hungup(call, &done);
 			break;
-
+		
 		default:
 			/* Ignore */
@@ -198,11 +188,10 @@
 		}
 	}
-
+	
 	LOG("Exiting.");
 }
 
 
-/**
- * Connect phone to a task kernel-box specified by id.
+/** Connect phone to a task kernel-box specified by id.
  *
  * Note that this is not completely atomic. For optimisation reasons, the task
@@ -211,68 +200,61 @@
  * cleanup code.
  *
- * @return 		Phone id on success, or negative error code.
+ * @return Phone id on success, or negative error code.
+ *
  */
 int ipc_connect_kbox(task_id_t taskid)
 {
-	int newphid;
-	task_t *ta;
-	thread_t *kb_thread;
-	ipl_t ipl;
-
-	ipl = interrupts_disable();
-	spinlock_lock(&tasks_lock);
-
-	ta = task_find_by_id(taskid);
-	if (ta == NULL) {
-		spinlock_unlock(&tasks_lock);
-		interrupts_restore(ipl);
+	irq_spinlock_lock(&tasks_lock, true);
+	
+	task_t *task = task_find_by_id(taskid);
+	if (task == NULL) {
+		irq_spinlock_unlock(&tasks_lock, true);
 		return ENOENT;
 	}
-
-	atomic_inc(&ta->refcount);
-
-	spinlock_unlock(&tasks_lock);
-	interrupts_restore(ipl);
-
-	mutex_lock(&ta->kb.cleanup_lock);
-
-	if (atomic_predec(&ta->refcount) == 0) {
-		mutex_unlock(&ta->kb.cleanup_lock);
-		task_destroy(ta);
+	
+	atomic_inc(&task->refcount);
+	
+	irq_spinlock_unlock(&tasks_lock, true);
+	
+	mutex_lock(&task->kb.cleanup_lock);
+	
+	if (atomic_predec(&task->refcount) == 0) {
+		mutex_unlock(&task->kb.cleanup_lock);
+		task_destroy(task);
 		return ENOENT;
 	}
-
-	if (ta->kb.finished != false) {
-		mutex_unlock(&ta->kb.cleanup_lock);
+	
+	if (task->kb.finished != false) {
+		mutex_unlock(&task->kb.cleanup_lock);
 		return EINVAL;
 	}
-
-	newphid = phone_alloc(TASK);
+	
+	int newphid = phone_alloc(TASK);
 	if (newphid < 0) {
-		mutex_unlock(&ta->kb.cleanup_lock);
+		mutex_unlock(&task->kb.cleanup_lock);
 		return ELIMIT;
 	}
-
+	
 	/* Connect the newly allocated phone to the kbox */
-	ipc_phone_connect(&TASK->phones[newphid], &ta->kb.box);
-
-	if (ta->kb.thread != NULL) {
-		mutex_unlock(&ta->kb.cleanup_lock);
+	ipc_phone_connect(&TASK->phones[newphid], &task->kb.box);
+	
+	if (task->kb.thread != NULL) {
+		mutex_unlock(&task->kb.cleanup_lock);
 		return newphid;
 	}
-
+	
 	/* Create a kbox thread */
-	kb_thread = thread_create(kbox_thread_proc, NULL, ta, 0,
+	thread_t *kb_thread = thread_create(kbox_thread_proc, NULL, task, 0,
 	    "kbox", false);
 	if (!kb_thread) {
-		mutex_unlock(&ta->kb.cleanup_lock);
+		mutex_unlock(&task->kb.cleanup_lock);
 		return ENOMEM;
 	}
-
-	ta->kb.thread = kb_thread;
+	
+	task->kb.thread = kb_thread;
 	thread_ready(kb_thread);
-
-	mutex_unlock(&ta->kb.cleanup_lock);
-
+	
+	mutex_unlock(&task->kb.cleanup_lock);
+	
 	return newphid;
 }
Index: kernel/generic/src/ipc/sysipc.c
===================================================================
--- kernel/generic/src/ipc/sysipc.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/ipc/sysipc.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -56,11 +56,15 @@
  * requests.
  */
-#define DATA_XFER_LIMIT		(64 * 1024)
+#define DATA_XFER_LIMIT  (64 * 1024)
+
+#define STRUCT_TO_USPACE(dst, src)  copy_to_uspace((dst), (src), sizeof(*(src)))
 
 /** Get phone from the current task by ID.
  *
- * @param phoneid	Phone ID.
- * @param phone		Place to store pointer to phone.
- * @return		EOK on success, EINVAL if ID is invalid.
+ * @param phoneid Phone ID.
+ * @param phone   Place to store pointer to phone.
+ *
+ * @return EOK on success, EINVAL if ID is invalid.
+ *
  */
 static int phone_get(unative_t phoneid, phone_t **phone)
@@ -68,23 +72,22 @@
 	if (phoneid >= IPC_MAX_PHONES)
 		return EINVAL;
-
+	
 	*phone = &TASK->phones[phoneid];
 	return EOK;
 }
 
-#define STRUCT_TO_USPACE(dst, src)	copy_to_uspace(dst, src, sizeof(*(src)))
-
 /** Decide if the method is a system method.
  *
- * @param method	Method to be decided.
- *
- * @return		Return 1 if the method is a system method.
- *			Otherwise return 0.
- */
-static inline int method_is_system(unative_t method)
+ * @param method Method to be decided.
+ *
+ * @return true if the method is a system method.
+ *
+ */
+static inline bool method_is_system(unative_t method)
 {
 	if (method <= IPC_M_LAST_SYSTEM)
-		return 1;
-	return 0;
+		return true;
+	
+	return false;
 }
 
@@ -94,10 +97,10 @@
  *   it is useless
  *
- * @param method	Method to be decided.
- *
- * @return		Return 1 if the method is forwardable.
- *			Otherwise return 0.
- */
-static inline int method_is_forwardable(unative_t method)
+ * @param method Method to be decided.
+ *
+ * @return true if the method is forwardable.
+ *
+ */
+static inline bool method_is_forwardable(unative_t method)
 {
 	switch (method) {
@@ -106,7 +109,7 @@
 	case IPC_M_PHONE_HUNGUP:
 		/* This message is meant only for the original recipient. */
-		return 0;
+		return false;
 	default:
-		return 1;
+		return true;
 	}
 }
@@ -116,10 +119,10 @@
  * - some system messages may be forwarded but their content cannot be altered
  *
- * @param method	Method to be decided.
- *
- * @return		Return 1 if the method is immutable on forward.
- *			Otherwise return 0.
- */
-static inline int method_is_immutable(unative_t method)
+ * @param method Method to be decided.
+ *
+ * @return true if the method is immutable on forward.
+ *
+ */
+static inline bool method_is_immutable(unative_t method)
 {
 	switch (method) {
@@ -128,7 +131,7 @@
 	case IPC_M_DATA_WRITE:
 	case IPC_M_DATA_READ:
-		return 1;
+		return true;
 	default:
-		return 0;
+		return false;
 	}
 }
@@ -142,10 +145,10 @@
  * for answer_preprocess().
  *
- * @param call		Call structure to be decided.
- *
- * @return		Return 1 if the old call contents should be saved.
- *			Return 0 otherwise.
- */
-static inline int answer_need_old(call_t *call)
+ * @param call Call structure to be decided.
+ *
+ * @return true if the old call contents should be saved.
+ *
+ */
+static inline bool answer_need_old(call_t *call)
 {
 	switch (IPC_GET_METHOD(call->data)) {
@@ -158,7 +161,7 @@
 	case IPC_M_DATA_WRITE:
 	case IPC_M_DATA_READ:
-		return 1;
+		return true;
 	default:
-		return 0;
+		return false;
 	}
 }
@@ -168,13 +171,12 @@
  * This function is called directly after sys_ipc_answer().
  *
- * @param answer	Call structure with the answer.
- * @param olddata	Saved data of the request.
- *
- * @return		Return 0 on success or an error code. 
+ * @param answer  Call structure with the answer.
+ * @param olddata Saved data of the request.
+ *
+ * @return Return 0 on success or an error code.
+ *
  */
 static inline int answer_preprocess(call_t *answer, ipc_data_t *olddata)
 {
-	int phoneid;
-
 	if ((native_t) IPC_GET_RETVAL(answer->data) == EHANGUP) {
 		/* In case of forward, hangup the forwared phone,
@@ -182,19 +184,20 @@
 		 */
 		mutex_lock(&answer->data.phone->lock);
-		spinlock_lock(&TASK->answerbox.lock);
+		irq_spinlock_lock(&TASK->answerbox.lock, true);
 		if (answer->data.phone->state == IPC_PHONE_CONNECTED) {
 			list_remove(&answer->data.phone->link);
 			answer->data.phone->state = IPC_PHONE_SLAMMED;
 		}
-		spinlock_unlock(&TASK->answerbox.lock);
+		irq_spinlock_unlock(&TASK->answerbox.lock, true);
 		mutex_unlock(&answer->data.phone->lock);
 	}
-
+	
 	if (!olddata)
 		return 0;
-
+	
 	if (IPC_GET_METHOD(*olddata) == IPC_M_CONNECTION_CLONE) {
-		phoneid = IPC_GET_ARG1(*olddata);
-		phone_t *phone = &TASK->phones[phoneid]; 
+		int phoneid = IPC_GET_ARG1(*olddata);
+		phone_t *phone = &TASK->phones[phoneid];
+		
 		if (IPC_GET_RETVAL(answer->data) != EOK) {
 			/*
@@ -208,13 +211,14 @@
 			mutex_lock(&phone->lock);
 			if (phone->state == IPC_PHONE_CONNECTED) {
-				spinlock_lock(&phone->callee->lock);
+				irq_spinlock_lock(&phone->callee->lock, true);
 				list_remove(&phone->link);
 				phone->state = IPC_PHONE_SLAMMED;
-				spinlock_unlock(&phone->callee->lock);
+				irq_spinlock_unlock(&phone->callee->lock, true);
 			}
 			mutex_unlock(&phone->lock);
 		}
 	} else if (IPC_GET_METHOD(*olddata) == IPC_M_CONNECT_ME) {
-		phone_t *phone = (phone_t *)IPC_GET_ARG5(*olddata);
+		phone_t *phone = (phone_t *) IPC_GET_ARG5(*olddata);
+		
 		if (IPC_GET_RETVAL(answer->data) != EOK) {
 			/*
@@ -226,13 +230,14 @@
 			mutex_lock(&phone->lock);
 			if (phone->state == IPC_PHONE_CONNECTED) {
-				spinlock_lock(&phone->callee->lock);
+				irq_spinlock_lock(&phone->callee->lock, true);
 				list_remove(&phone->link);
 				phone->state = IPC_PHONE_SLAMMED;
-				spinlock_unlock(&phone->callee->lock);
+				irq_spinlock_unlock(&phone->callee->lock, true);
 			}
 			mutex_unlock(&phone->lock);
 		}
 	} else if (IPC_GET_METHOD(*olddata) == IPC_M_CONNECT_TO_ME) {
-		phoneid = IPC_GET_ARG5(*olddata);
+		int phoneid = IPC_GET_ARG5(*olddata);
+		
 		if (IPC_GET_RETVAL(answer->data) != EOK) {
 			/* The connection was not accepted */
@@ -254,15 +259,10 @@
 		if (!IPC_GET_RETVAL(answer->data)) {
 			/* Accepted, handle as_area receipt */
-			ipl_t ipl;
-			int rc;
-			as_t *as;
 			
-			ipl = interrupts_disable();
-			spinlock_lock(&answer->sender->lock);
-			as = answer->sender->as;
-			spinlock_unlock(&answer->sender->lock);
-			interrupts_restore(ipl);
+			irq_spinlock_lock(&answer->sender->lock, true);
+			as_t *as = answer->sender->as;
+			irq_spinlock_unlock(&answer->sender->lock, true);
 			
-			rc = as_area_share(as, IPC_GET_ARG1(*olddata),
+			int rc = as_area_share(as, IPC_GET_ARG1(*olddata),
 			    IPC_GET_ARG2(*olddata), AS,
 			    IPC_GET_ARG1(answer->data), IPC_GET_ARG3(*olddata));
@@ -272,15 +272,9 @@
 	} else if (IPC_GET_METHOD(*olddata) == IPC_M_SHARE_IN) {
 		if (!IPC_GET_RETVAL(answer->data)) { 
-			ipl_t ipl;
-			as_t *as;
-			int rc;
+			irq_spinlock_lock(&answer->sender->lock, true);
+			as_t *as = answer->sender->as;
+			irq_spinlock_unlock(&answer->sender->lock, true);
 			
-			ipl = interrupts_disable();
-			spinlock_lock(&answer->sender->lock);
-			as = answer->sender->as;
-			spinlock_unlock(&answer->sender->lock);
-			interrupts_restore(ipl);
-			
-			rc = as_area_share(AS, IPC_GET_ARG1(answer->data),
+			int rc = as_area_share(AS, IPC_GET_ARG1(answer->data),
 			    IPC_GET_ARG2(*olddata), as, IPC_GET_ARG1(*olddata),
 			    IPC_GET_ARG2(answer->data));
@@ -301,5 +295,5 @@
 				 */
 				IPC_SET_ARG1(answer->data, dst);
-
+				
 				answer->buffer = malloc(size, 0);
 				int rc = copy_from_uspace(answer->buffer,
@@ -320,15 +314,10 @@
 		if (!IPC_GET_RETVAL(answer->data)) {
 			/* The recipient agreed to receive data. */
-			int rc;
-			uintptr_t dst;
-			size_t size;
-			size_t max_size;
-
-			dst = (uintptr_t)IPC_GET_ARG1(answer->data);
-			size = (size_t)IPC_GET_ARG2(answer->data);
-			max_size = (size_t)IPC_GET_ARG2(*olddata);
-
+			uintptr_t dst = (uintptr_t)IPC_GET_ARG1(answer->data);
+			size_t size = (size_t)IPC_GET_ARG2(answer->data);
+			size_t max_size = (size_t)IPC_GET_ARG2(*olddata);
+			
 			if (size <= max_size) {
-				rc = copy_to_uspace((void *) dst,
+				int rc = copy_to_uspace((void *) dst,
 				    answer->buffer, size);
 				if (rc)
@@ -341,4 +330,5 @@
 		answer->buffer = NULL;
 	}
+	
 	return 0;
 }
@@ -352,7 +342,6 @@
 		mutex_lock(&p2->lock);
 		mutex_lock(&p1->lock);
-	} else {
+	} else
 		mutex_lock(&p1->lock);
-	}
 }
 
@@ -366,24 +355,20 @@
 /** Called before the request is sent.
  *
- * @param call		Call structure with the request.
- * @param phone		Phone that the call will be sent through.
- *
- * @return 		Return 0 on success, ELIMIT or EPERM on error.
+ * @param call  Call structure with the request.
+ * @param phone Phone that the call will be sent through.
+ *
+ * @return Return 0 on success, ELIMIT or EPERM on error.
+ *
  */
 static int request_preprocess(call_t *call, phone_t *phone)
 {
-	int newphid;
-	size_t size;
-	uintptr_t src;
-	int rc;
-
 	switch (IPC_GET_METHOD(call->data)) {
 	case IPC_M_CONNECTION_CLONE: {
 		phone_t *cloned_phone;
-
 		if (phone_get(IPC_GET_ARG1(call->data), &cloned_phone) != EOK)
 			return ENOENT;
+		
 		phones_lock(cloned_phone, phone);
-
+		
 		if ((cloned_phone->state != IPC_PHONE_CONNECTED) ||
 		    phone->state != IPC_PHONE_CONNECTED) {
@@ -391,4 +376,5 @@
 			return EINVAL;
 		}
+		
 		/*
 		 * We can be pretty sure now that both tasks exist and we are
@@ -396,13 +382,16 @@
 		 * we are effectively preventing them from finishing their
 		 * potential cleanup.
+		 *
 		 */
-		newphid = phone_alloc(phone->callee->task);
+		int newphid = phone_alloc(phone->callee->task);
 		if (newphid < 0) {
 			phones_unlock(cloned_phone, phone);
 			return ELIMIT;
 		}
+		
 		ipc_phone_connect(&phone->callee->task->phones[newphid],
 		    cloned_phone->callee);
 		phones_unlock(cloned_phone, phone);
+		
 		/* Set the new phone for the callee. */
 		IPC_SET_ARG1(call->data, newphid);
@@ -412,8 +401,9 @@
 		IPC_SET_ARG5(call->data, (unative_t) phone);
 		break;
-	case IPC_M_CONNECT_ME_TO:
-		newphid = phone_alloc(TASK);
+	case IPC_M_CONNECT_ME_TO: {
+		int newphid = phone_alloc(TASK);
 		if (newphid < 0)
 			return ELIMIT;
+		
 		/* Set arg5 for server */
 		IPC_SET_ARG5(call->data, (unative_t) &TASK->phones[newphid]);
@@ -421,18 +411,23 @@
 		call->priv = newphid;
 		break;
-	case IPC_M_SHARE_OUT:
-		size = as_area_get_size(IPC_GET_ARG1(call->data));
+	}
+	case IPC_M_SHARE_OUT: {
+		size_t size = as_area_get_size(IPC_GET_ARG1(call->data));
 		if (!size)
 			return EPERM;
+		
 		IPC_SET_ARG2(call->data, size);
 		break;
-	case IPC_M_DATA_READ:
-		size = IPC_GET_ARG2(call->data);
+	}
+	case IPC_M_DATA_READ: {
+		size_t size = IPC_GET_ARG2(call->data);
 		if ((size <= 0 || (size > DATA_XFER_LIMIT)))
 			return ELIMIT;
+		
 		break;
-	case IPC_M_DATA_WRITE:
-		src = IPC_GET_ARG1(call->data);
-		size = IPC_GET_ARG2(call->data);
+	}
+	case IPC_M_DATA_WRITE: {
+		uintptr_t src = IPC_GET_ARG1(call->data);
+		size_t size = IPC_GET_ARG2(call->data);
 		
 		if (size > DATA_XFER_LIMIT)
@@ -440,10 +435,12 @@
 		
 		call->buffer = (uint8_t *) malloc(size, 0);
-		rc = copy_from_uspace(call->buffer, (void *) src, size);
+		int rc = copy_from_uspace(call->buffer, (void *) src, size);
 		if (rc != 0) {
 			free(call->buffer);
 			return rc;
 		}
+		
 		break;
+	}
 #ifdef CONFIG_UDEBUG
 	case IPC_M_DEBUG_ALL:
@@ -453,4 +450,5 @@
 		break;
 	}
+	
 	return 0;
 }
@@ -462,5 +460,6 @@
 /** Do basic kernel processing of received call answer.
  *
- * @param call		Call structure with the answer.
+ * @param call Call structure with the answer.
+ *
  */
 static void process_answer(call_t *call)
@@ -469,5 +468,5 @@
 	    (call->flags & IPC_CALL_FORWARDED))
 		IPC_SET_RETVAL(call->data, EFORWARD);
-
+	
 	if (call->flags & IPC_CALL_CONN_ME_TO) {
 		if (IPC_GET_RETVAL(call->data))
@@ -476,8 +475,11 @@
 			IPC_SET_ARG5(call->data, call->priv);
 	}
-
+	
 	if (call->buffer) {
-		/* This must be an affirmative answer to IPC_M_DATA_READ. */
-		/* or IPC_M_DEBUG_ALL/UDEBUG_M_MEM_READ... */
+		/*
+		 * This must be an affirmative answer to IPC_M_DATA_READ
+		 * or IPC_M_DEBUG_ALL/UDEBUG_M_MEM_READ...
+		 *
+		 */
 		uintptr_t dst = IPC_GET_ARG1(call->data);
 		size_t size = IPC_GET_ARG2(call->data);
@@ -492,16 +494,15 @@
 /** Do basic kernel processing of received call request.
  *
- * @param box		Destination answerbox structure.
- * @param call		Call structure with the request.
- *
- * @return 		Return 0 if the call should be passed to userspace.
- *			Return -1 if the call should be ignored.
+ * @param box  Destination answerbox structure.
+ * @param call Call structure with the request.
+ *
+ * @return 0 if the call should be passed to userspace.
+ * @return -1 if the call should be ignored.
+ *
  */
 static int process_request(answerbox_t *box, call_t *call)
 {
-	int phoneid;
-
 	if (IPC_GET_METHOD(call->data) == IPC_M_CONNECT_TO_ME) {
-		phoneid = phone_alloc(TASK);
+		int phoneid = phone_alloc(TASK);
 		if (phoneid < 0) { /* Failed to allocate phone */
 			IPC_SET_RETVAL(call->data, ELIMIT);
@@ -509,6 +510,8 @@
 			return -1;
 		}
+		
 		IPC_SET_ARG5(call->data, phoneid);
 	}
+	
 	switch (IPC_GET_METHOD(call->data)) {
 	case IPC_M_DEBUG_ALL:
@@ -517,4 +520,5 @@
 		break;
 	}
+	
 	return 0;
 }
@@ -525,31 +529,29 @@
  * the generic function (i.e. sys_ipc_call_sync_slow()).
  *
- * @param phoneid	Phone handle for the call.
- * @param method	Method of the call.
- * @param arg1		Service-defined payload argument.
- * @param arg2		Service-defined payload argument.
- * @param arg3		Service-defined payload argument.
- * @param data		Address of userspace structure where the reply call will
- *			be stored.
- *
- * @return		Returns 0 on success.
- *			Return ENOENT if there is no such phone handle.
+ * @param phoneid Phone handle for the call.
+ * @param method  Method of the call.
+ * @param arg1    Service-defined payload argument.
+ * @param arg2    Service-defined payload argument.
+ * @param arg3    Service-defined payload argument.
+ * @param data    Address of userspace structure where the reply call will
+ *                be stored.
+ *
+ * @return 0 on success.
+ * @return ENOENT if there is no such phone handle.
+ *
  */
 unative_t sys_ipc_call_sync_fast(unative_t phoneid, unative_t method,
     unative_t arg1, unative_t arg2, unative_t arg3, ipc_data_t *data)
 {
-	call_t *call;
 	phone_t *phone;
-	int res;
-	int rc;
-
 	if (phone_get(phoneid, &phone) != EOK)
 		return ENOENT;
 
-	call = ipc_call_alloc(0);
+	call_t *call = ipc_call_alloc(0);
 	IPC_SET_METHOD(call->data, method);
 	IPC_SET_ARG1(call->data, arg1);
 	IPC_SET_ARG2(call->data, arg2);
 	IPC_SET_ARG3(call->data, arg3);
+	
 	/*
 	 * To achieve deterministic behavior, zero out arguments that are beyond
@@ -558,6 +560,9 @@
 	IPC_SET_ARG4(call->data, 0);
 	IPC_SET_ARG5(call->data, 0);
-
-	if (!(res = request_preprocess(call, phone))) {
+	
+	int res = request_preprocess(call, phone);
+	int rc;
+	
+	if (!res) {
 #ifdef CONFIG_UDEBUG
 		udebug_stoppable_begin();
@@ -567,18 +572,20 @@
 		udebug_stoppable_end();
 #endif
+		
 		if (rc != EOK) {
 			/* The call will be freed by ipc_cleanup(). */
 			return rc;
 		}
+		
 		process_answer(call);
-
-	} else {
+		
+	} else
 		IPC_SET_RETVAL(call->data, res);
-	}
+	
 	rc = STRUCT_TO_USPACE(&data->args, &call->data.args);
 	ipc_call_free(call);
 	if (rc != 0)
 		return rc;
-
+	
 	return 0;
 }
@@ -586,24 +593,21 @@
 /** Make a synchronous IPC call allowing to transmit the entire payload.
  *
- * @param phoneid	Phone handle for the call.
- * @param question	Userspace address of call data with the request.
- * @param reply		Userspace address of call data where to store the
- *			answer.
- *
- * @return		Zero on success or an error code.
+ * @param phoneid  Phone handle for the call.
+ * @param question Userspace address of call data with the request.
+ * @param reply    Userspace address of call data where to store the
+ *                 answer.
+ *
+ * @return Zero on success or an error code.
+ *
  */
 unative_t sys_ipc_call_sync_slow(unative_t phoneid, ipc_data_t *question,
     ipc_data_t *reply)
 {
-	call_t *call;
 	phone_t *phone;
-	int res;
-	int rc;
-
 	if (phone_get(phoneid, &phone) != EOK)
 		return ENOENT;
 
-	call = ipc_call_alloc(0);
-	rc = copy_from_uspace(&call->data.args, &question->args,
+	call_t *call = ipc_call_alloc(0);
+	int rc = copy_from_uspace(&call->data.args, &question->args,
 	    sizeof(call->data.args));
 	if (rc != 0) {
@@ -611,7 +615,8 @@
 		return (unative_t) rc;
 	}
-
-
-	if (!(res = request_preprocess(call, phone))) {
+	
+	int res = request_preprocess(call, phone);
+	
+	if (!res) {
 #ifdef CONFIG_UDEBUG
 		udebug_stoppable_begin();
@@ -621,17 +626,19 @@
 		udebug_stoppable_end();
 #endif
+		
 		if (rc != EOK) {
 			/* The call will be freed by ipc_cleanup(). */
 			return rc;
 		}
+		
 		process_answer(call);
-	} else 
+	} else
 		IPC_SET_RETVAL(call->data, res);
-
+	
 	rc = STRUCT_TO_USPACE(&reply->args, &call->data.args);
 	ipc_call_free(call);
 	if (rc != 0)
 		return rc;
-
+	
 	return 0;
 }
@@ -639,5 +646,6 @@
 /** Check that the task did not exceed the allowed limit of asynchronous calls.
  *
- * @return 		Return 0 if limit not reached or -1 if limit exceeded.
+ * @return 0 if limit not reached or -1 if limit exceeded.
+ *
  */
 static int check_call_limit(void)
@@ -647,4 +655,5 @@
 		return -1;
 	}
+	
 	return 0;
 }
@@ -655,30 +664,28 @@
  * the generic function sys_ipc_call_async_slow().
  *
- * @param phoneid	Phone handle for the call.
- * @param method	Method of the call.
- * @param arg1		Service-defined payload argument.
- * @param arg2		Service-defined payload argument.
- * @param arg3		Service-defined payload argument.
- * @param arg4		Service-defined payload argument.
- *
- * @return 		Return call hash on success.
- *			Return IPC_CALLRET_FATAL in case of a fatal error and 
- *			IPC_CALLRET_TEMPORARY if there are too many pending
- *			asynchronous requests; answers should be handled first.
+ * @param phoneid Phone handle for the call.
+ * @param method  Method of the call.
+ * @param arg1    Service-defined payload argument.
+ * @param arg2    Service-defined payload argument.
+ * @param arg3    Service-defined payload argument.
+ * @param arg4    Service-defined payload argument.
+ *
+ * @return Call hash on success.
+ * @return IPC_CALLRET_FATAL in case of a fatal error.
+ * @return IPC_CALLRET_TEMPORARY if there are too many pending
+ *         asynchronous requests; answers should be handled first.
+ *
  */
 unative_t sys_ipc_call_async_fast(unative_t phoneid, unative_t method,
     unative_t arg1, unative_t arg2, unative_t arg3, unative_t arg4)
 {
-	call_t *call;
-	phone_t *phone;
-	int res;
-
 	if (check_call_limit())
 		return IPC_CALLRET_TEMPORARY;
-
+	
+	phone_t *phone;
 	if (phone_get(phoneid, &phone) != EOK)
 		return IPC_CALLRET_FATAL;
-
-	call = ipc_call_alloc(0);
+	
+	call_t *call = ipc_call_alloc(0);
 	IPC_SET_METHOD(call->data, method);
 	IPC_SET_ARG1(call->data, arg1);
@@ -686,4 +693,5 @@
 	IPC_SET_ARG3(call->data, arg3);
 	IPC_SET_ARG4(call->data, arg4);
+	
 	/*
 	 * To achieve deterministic behavior, zero out arguments that are beyond
@@ -691,10 +699,12 @@
 	 */
 	IPC_SET_ARG5(call->data, 0);
-
-	if (!(res = request_preprocess(call, phone)))
+	
+	int res = request_preprocess(call, phone);
+	
+	if (!res)
 		ipc_call(phone, call);
 	else
 		ipc_backsend_err(phone, call, res);
-
+	
 	return (unative_t) call;
 }
@@ -702,24 +712,21 @@
 /** Make an asynchronous IPC call allowing to transmit the entire payload.
  *
- * @param phoneid	Phone handle for the call.
- * @param data		Userspace address of call data with the request.
- *
- * @return		See sys_ipc_call_async_fast(). 
+ * @param phoneid Phone handle for the call.
+ * @param data    Userspace address of call data with the request.
+ *
+ * @return See sys_ipc_call_async_fast().
+ *
  */
 unative_t sys_ipc_call_async_slow(unative_t phoneid, ipc_data_t *data)
 {
-	call_t *call;
-	phone_t *phone;
-	int res;
-	int rc;
-
 	if (check_call_limit())
 		return IPC_CALLRET_TEMPORARY;
-
+	
+	phone_t *phone;
 	if (phone_get(phoneid, &phone) != EOK)
 		return IPC_CALLRET_FATAL;
 
-	call = ipc_call_alloc(0);
-	rc = copy_from_uspace(&call->data.args, &data->args,
+	call_t *call = ipc_call_alloc(0);
+	int rc = copy_from_uspace(&call->data.args, &data->args,
 	    sizeof(call->data.args));
 	if (rc != 0) {
@@ -727,45 +734,48 @@
 		return (unative_t) rc;
 	}
-	if (!(res = request_preprocess(call, phone)))
+	
+	int res = request_preprocess(call, phone);
+	
+	if (!res)
 		ipc_call(phone, call);
 	else
 		ipc_backsend_err(phone, call, res);
-
+	
 	return (unative_t) call;
 }
 
-/** Forward a received call to another destination - common code for both the
- * fast and the slow version.
- *
- * @param callid	Hash of the call to forward.
- * @param phoneid	Phone handle to use for forwarding.
- * @param method	New method to use for the forwarded call.
- * @param arg1		New value of the first argument for the forwarded call.
- * @param arg2		New value of the second argument for the forwarded call.
- * @param arg3		New value of the third argument for the forwarded call.
- * @param arg4		New value of the fourth argument for the forwarded call.
- * @param arg5		New value of the fifth argument for the forwarded call.
- * @param mode		Flags that specify mode of the forward operation.
- * @param slow		If true, arg3, arg4 and arg5 are considered. Otherwise
- *			the function considers only the fast version arguments:
- *			i.e. arg1 and arg2.
- *
- * @return		Return 0 on succes, otherwise return an error code.
- *
- * Warning:	Make sure that ARG5 is not rewritten for certain system IPC
+/** Forward a received call to another destination
+ *
+ * Common code for both the fast and the slow version.
+ *
+ * @param callid  Hash of the call to forward.
+ * @param phoneid Phone handle to use for forwarding.
+ * @param method  New method to use for the forwarded call.
+ * @param arg1    New value of the first argument for the forwarded call.
+ * @param arg2    New value of the second argument for the forwarded call.
+ * @param arg3    New value of the third argument for the forwarded call.
+ * @param arg4    New value of the fourth argument for the forwarded call.
+ * @param arg5    New value of the fifth argument for the forwarded call.
+ * @param mode    Flags that specify mode of the forward operation.
+ * @param slow    If true, arg3, arg4 and arg5 are considered. Otherwise
+ *                the function considers only the fast version arguments:
+ *                i.e. arg1 and arg2.
+ *
+ * @return 0 on success, otherwise an error code.
+ *
+ * Warning: Make sure that ARG5 is not rewritten for certain system IPC
+ *
  */
 static unative_t sys_ipc_forward_common(unative_t callid, unative_t phoneid,
     unative_t method, unative_t arg1, unative_t arg2, unative_t arg3,
-    unative_t arg4, unative_t arg5, int mode, bool slow)
-{
-	call_t *call;
-	phone_t *phone;
-
-	call = get_call(callid);
+    unative_t arg4, unative_t arg5, unsigned int mode, bool slow)
+{
+	call_t *call = get_call(callid);
 	if (!call)
 		return ENOENT;
 	
 	call->flags |= IPC_CALL_FORWARDED;
-
+	
+	phone_t *phone;
 	if (phone_get(phoneid, &phone) != EOK) {
 		IPC_SET_RETVAL(call->data, EFORWARD);
@@ -773,5 +783,5 @@
 		return ENOENT;
 	}
-
+	
 	if (!method_is_forwardable(IPC_GET_METHOD(call->data))) {
 		IPC_SET_RETVAL(call->data, EFORWARD);
@@ -779,5 +789,5 @@
 		return EPERM;
 	}
-
+	
 	/*
 	 * Userspace is not allowed to change method of system methods on
@@ -790,8 +800,9 @@
 			if (IPC_GET_METHOD(call->data) == IPC_M_CONNECT_TO_ME)
 				phone_dealloc(IPC_GET_ARG5(call->data));
-
+			
 			IPC_SET_ARG1(call->data, method);
 			IPC_SET_ARG2(call->data, arg1);
 			IPC_SET_ARG3(call->data, arg2);
+			
 			if (slow) {
 				IPC_SET_ARG4(call->data, arg3);
@@ -812,18 +823,9 @@
 		}
 	}
-
+	
 	return ipc_forward(call, phone, &TASK->answerbox, mode);
 }
 
 /** Forward a received call to another destination - fast version.
- *
- * @param callid	Hash of the call to forward.
- * @param phoneid	Phone handle to use for forwarding.
- * @param method	New method to use for the forwarded call.
- * @param arg1		New value of the first argument for the forwarded call.
- * @param arg2		New value of the second argument for the forwarded call.
- * @param mode		Flags that specify mode of the forward operation.
- *
- * @return		Return 0 on succes, otherwise return an error code.
  *
  * In case the original method is a system method, ARG1, ARG2 and ARG3 are
@@ -833,7 +835,17 @@
  * is a set of immutable methods, for which the new method and arguments are not
  * set and these values are ignored.
+ *
+ * @param callid  Hash of the call to forward.
+ * @param phoneid Phone handle to use for forwarding.
+ * @param method  New method to use for the forwarded call.
+ * @param arg1    New value of the first argument for the forwarded call.
+ * @param arg2    New value of the second argument for the forwarded call.
+ * @param mode    Flags that specify mode of the forward operation.
+ *
+ * @return 0 on success, otherwise an error code.
+ *
  */
 unative_t sys_ipc_forward_fast(unative_t callid, unative_t phoneid,
-    unative_t method, unative_t arg1, unative_t arg2, int mode)
+    unative_t method, unative_t arg1, unative_t arg2, unsigned int mode)
 {
 	return sys_ipc_forward_common(callid, phoneid, method, arg1, arg2, 0, 0,
@@ -842,11 +854,4 @@
 
 /** Forward a received call to another destination - slow version.
- *
- * @param callid	Hash of the call to forward.
- * @param phoneid	Phone handle to use for forwarding.
- * @param data		Userspace address of the new IPC data. 
- * @param mode		Flags that specify mode of the forward operation.
- *
- * @return		Return 0 on succes, otherwise return an error code.
  *
  * This function is the slow verision of the sys_ipc_forward_fast interface.
@@ -856,16 +861,22 @@
  * methods, it additionally stores the new value of arg3, arg4 and arg5,
  * respectively, to ARG3, ARG4 and ARG5, respectively.
+ *
+ * @param callid  Hash of the call to forward.
+ * @param phoneid Phone handle to use for forwarding.
+ * @param data    Userspace address of the new IPC data.
+ * @param mode    Flags that specify mode of the forward operation.
+ *
+ * @return 0 on success, otherwise an error code.
+ *
  */
 unative_t sys_ipc_forward_slow(unative_t callid, unative_t phoneid,
-    ipc_data_t *data, int mode)
+    ipc_data_t *data, unsigned int mode)
 {
 	ipc_data_t newdata;
-	int rc;
-
-	rc = copy_from_uspace(&newdata.args, &data->args,
+	int rc = copy_from_uspace(&newdata.args, &data->args,
 	    sizeof(newdata.args));
-	if (rc != 0) 
+	if (rc != 0)
 		return (unative_t) rc;
-
+	
 	return sys_ipc_forward_common(callid, phoneid,
 	    IPC_GET_METHOD(newdata), IPC_GET_ARG1(newdata),
@@ -879,34 +890,34 @@
  * than the generic sys_ipc_answer().
  *
- * @param callid	Hash of the call to be answered.
- * @param retval	Return value of the answer.
- * @param arg1		Service-defined return value.
- * @param arg2		Service-defined return value.
- * @param arg3		Service-defined return value.
- * @param arg4		Service-defined return value.
- *
- * @return		Return 0 on success, otherwise return an error code.	
+ * @param callid Hash of the call to be answered.
+ * @param retval Return value of the answer.
+ * @param arg1   Service-defined return value.
+ * @param arg2   Service-defined return value.
+ * @param arg3   Service-defined return value.
+ * @param arg4   Service-defined return value.
+ *
+ * @return 0 on success, otherwise an error code.
+ *
  */
 unative_t sys_ipc_answer_fast(unative_t callid, unative_t retval,
     unative_t arg1, unative_t arg2, unative_t arg3, unative_t arg4)
 {
-	call_t *call;
-	ipc_data_t saved_data;
-	int saveddata = 0;
-	int rc;
-
 	/* Do not answer notification callids */
 	if (callid & IPC_CALLID_NOTIFICATION)
 		return 0;
-
-	call = get_call(callid);
+	
+	call_t *call = get_call(callid);
 	if (!call)
 		return ENOENT;
-
+	
+	ipc_data_t saved_data;
+	bool saved;
+	
 	if (answer_need_old(call)) {
 		memcpy(&saved_data, &call->data, sizeof(call->data));
-		saveddata = 1;
-	}
-
+		saved = true;
+	} else
+		saved = false;
+	
 	IPC_SET_RETVAL(call->data, retval);
 	IPC_SET_ARG1(call->data, arg1);
@@ -914,4 +925,5 @@
 	IPC_SET_ARG3(call->data, arg3);
 	IPC_SET_ARG4(call->data, arg4);
+	
 	/*
 	 * To achieve deterministic behavior, zero out arguments that are beyond
@@ -919,6 +931,6 @@
 	 */
 	IPC_SET_ARG5(call->data, 0);
-	rc = answer_preprocess(call, saveddata ? &saved_data : NULL);
-
+	int rc = answer_preprocess(call, saved ? &saved_data : NULL);
+	
 	ipc_answer(&TASK->answerbox, call);
 	return rc;
@@ -927,37 +939,37 @@
 /** Answer an IPC call.
  *
- * @param callid	Hash of the call to be answered.
- * @param data		Userspace address of call data with the answer.
- *
- * @return		Return 0 on success, otherwise return an error code.
+ * @param callid Hash of the call to be answered.
+ * @param data   Userspace address of call data with the answer.
+ *
+ * @return 0 on success, otherwise an error code.
+ *
  */
 unative_t sys_ipc_answer_slow(unative_t callid, ipc_data_t *data)
 {
-	call_t *call;
-	ipc_data_t saved_data;
-	int saveddata = 0;
-	int rc;
-
 	/* Do not answer notification callids */
 	if (callid & IPC_CALLID_NOTIFICATION)
 		return 0;
-
-	call = get_call(callid);
+	
+	call_t *call = get_call(callid);
 	if (!call)
 		return ENOENT;
-
+	
+	ipc_data_t saved_data;
+	bool saved;
+	
 	if (answer_need_old(call)) {
 		memcpy(&saved_data, &call->data, sizeof(call->data));
-		saveddata = 1;
-	}
-	rc = copy_from_uspace(&call->data.args, &data->args, 
+		saved = true;
+	} else
+		saved = false;
+	
+	int rc = copy_from_uspace(&call->data.args, &data->args, 
 	    sizeof(call->data.args));
 	if (rc != 0)
 		return rc;
-
-	rc = answer_preprocess(call, saveddata ? &saved_data : NULL);
+	
+	rc = answer_preprocess(call, saved ? &saved_data : NULL);
 	
 	ipc_answer(&TASK->answerbox, call);
-
 	return rc;
 }
@@ -965,18 +977,19 @@
 /** Hang up a phone.
  *
- * @param		Phone handle of the phone to be hung up.
- *
- * @return		Return 0 on success or an error code.
+ * @param phoneid Phone handle of the phone to be hung up.
+ *
+ * @return 0 on success or an error code.
+ *
  */
 unative_t sys_ipc_hangup(unative_t phoneid)
 {
 	phone_t *phone;
-
+	
 	if (phone_get(phoneid, &phone) != EOK)
 		return ENOENT;
-
+	
 	if (ipc_phone_hangup(phone))
 		return -1;
-
+	
 	return 0;
 }
@@ -984,32 +997,36 @@
 /** Wait for an incoming IPC call or an answer.
  *
- * @param calldata	Pointer to buffer where the call/answer data is stored.
- * @param usec 		Timeout. See waitq_sleep_timeout() for explanation.
- * @param flags		Select mode of sleep operation. See waitq_sleep_timeout()
- *			for explanation.
- *
- * @return 		Hash of the call.
- *			If IPC_CALLID_NOTIFICATION bit is set in the hash, the
- *			call is a notification. IPC_CALLID_ANSWERED denotes an
- *			answer.
- */
-unative_t sys_ipc_wait_for_call(ipc_data_t *calldata, uint32_t usec, int flags)
+ * @param calldata Pointer to buffer where the call/answer data is stored.
+ * @param usec     Timeout. See waitq_sleep_timeout() for explanation.
+ * @param flags    Select mode of sleep operation. See waitq_sleep_timeout()
+ *                 for explanation.
+ *
+ * @return Hash of the call.
+ *         If IPC_CALLID_NOTIFICATION bit is set in the hash, the
+ *         call is a notification. IPC_CALLID_ANSWERED denotes an
+ *         answer.
+ *
+ */
+unative_t sys_ipc_wait_for_call(ipc_data_t *calldata, uint32_t usec,
+    unsigned int flags)
 {
 	call_t *call;
-
+	
 restart:
-
+	
 #ifdef CONFIG_UDEBUG
 	udebug_stoppable_begin();
-#endif	
+#endif
+	
 	call = ipc_wait_for_call(&TASK->answerbox, usec,
 	    flags | SYNCH_FLAGS_INTERRUPTIBLE);
-
+	
 #ifdef CONFIG_UDEBUG
 	udebug_stoppable_end();
 #endif
+	
 	if (!call)
 		return 0;
-
+	
 	if (call->flags & IPC_CALL_NOTIF) {
 		/* Set in_phone_hash to the interrupt counter */
@@ -1017,13 +1034,13 @@
 		
 		STRUCT_TO_USPACE(calldata, &call->data);
-
+		
 		ipc_call_free(call);
 		
 		return ((unative_t) call) | IPC_CALLID_NOTIFICATION;
 	}
-
+	
 	if (call->flags & IPC_CALL_ANSWERED) {
 		process_answer(call);
-
+		
 		if (call->flags & IPC_CALL_DISCARD_ANSWER) {
 			ipc_call_free(call);
@@ -1037,14 +1054,14 @@
 			atomic_dec(&TASK->active_calls);
 		}
-
+		
 		STRUCT_TO_USPACE(&calldata->args, &call->data.args);
 		ipc_call_free(call);
-
+		
 		return ((unative_t) call) | IPC_CALLID_ANSWERED;
 	}
-
+	
 	if (process_request(&TASK->answerbox, call))
 		goto restart;
-
+	
 	/* Include phone address('id') of the caller in the request,
 	 * copy whole call->data, not only call->data.args */
@@ -1055,23 +1072,27 @@
 		 */
 		ipc_data_t saved_data;
-		int saveddata = 0;
-
+		bool saved;
+		
 		if (answer_need_old(call)) {
 			memcpy(&saved_data, &call->data, sizeof(call->data));
-			saveddata = 1;
-		}
+			saved = true;
+		} else
+			saved = false;
 		
 		IPC_SET_RETVAL(call->data, EPARTY);
-		(void) answer_preprocess(call, saveddata ? &saved_data : NULL);
+		(void) answer_preprocess(call, saved ? &saved_data : NULL);
 		ipc_answer(&TASK->answerbox, call);
 		return 0;
 	}
-	return (unative_t)call;
-}
-
-/** Interrupt one thread from sys_ipc_wait_for_call(). */
+	
+	return (unative_t) call;
+}
+
+/** Interrupt one thread from sys_ipc_wait_for_call().
+ *
+ */
 unative_t sys_ipc_poke(void)
 {
-	waitq_unsleep(&TASK->answerbox.wq);	
+	waitq_unsleep(&TASK->answerbox.wq);
 	return EOK;
 }
@@ -1079,10 +1100,11 @@
 /** Connect an IRQ handler to a task.
  *
- * @param inr		IRQ number.
- * @param devno		Device number.
- * @param method	Method to be associated with the notification.
- * @param ucode		Uspace pointer to the top-half pseudocode.
- *
- * @return		EPERM or a return code returned by ipc_irq_register().
+ * @param inr    IRQ number.
+ * @param devno  Device number.
+ * @param method Method to be associated with the notification.
+ * @param ucode  Uspace pointer to the top-half pseudocode.
+ *
+ * @return EPERM or a return code returned by ipc_irq_register().
+ *
  */
 unative_t sys_ipc_register_irq(inr_t inr, devno_t devno, unative_t method,
@@ -1091,5 +1113,5 @@
 	if (!(cap_get(TASK) & CAP_IRQ_REG))
 		return EPERM;
-
+	
 	return ipc_irq_register(&TASK->answerbox, inr, devno, method, ucode);
 }
@@ -1097,8 +1119,9 @@
 /** Disconnect an IRQ handler from a task.
  *
- * @param inr		IRQ number.
- * @param devno		Device number.
- *
- * @return		Zero on success or EPERM on error..
+ * @param inr   IRQ number.
+ * @param devno Device number.
+ *
+ * @return Zero on success or EPERM on error.
+ *
  */
 unative_t sys_ipc_unregister_irq(inr_t inr, devno_t devno)
@@ -1106,7 +1129,7 @@
 	if (!(cap_get(TASK) & CAP_IRQ_REG))
 		return EPERM;
-
+	
 	ipc_irq_unregister(&TASK->answerbox, inr, devno);
-
+	
 	return 0;
 }
@@ -1114,8 +1137,8 @@
 #include <console/console.h>
 
-/**
- * Syscall connect to a task by id.
- *
- * @return 		Phone id on success, or negative error code.
+/** Syscall connect to a task by id.
+ *
+ * @return Phone id on success, or negative error code.
+ *
  */
 unative_t sys_ipc_connect_kbox(sysarg64_t *uspace_taskid_arg)
@@ -1123,12 +1146,10 @@
 #ifdef CONFIG_UDEBUG
 	sysarg64_t taskid_arg;
-	int rc;
-	
-	rc = copy_from_uspace(&taskid_arg, uspace_taskid_arg, sizeof(sysarg64_t));
+	int rc = copy_from_uspace(&taskid_arg, uspace_taskid_arg, sizeof(sysarg64_t));
 	if (rc != 0)
 		return (unative_t) rc;
-
+	
 	LOG("sys_ipc_connect_kbox(%" PRIu64 ")\n", taskid_arg.value);
-
+	
 	return ipc_connect_kbox(taskid_arg.value);
 #else
Index: kernel/generic/src/lib/elf.c
===================================================================
--- kernel/generic/src/lib/elf.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/lib/elf.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -28,5 +28,5 @@
  */
 
-/** @addtogroup generic	
+/** @addtogroup generic
  * @{
  */
@@ -34,5 +34,5 @@
 /**
  * @file
- * @brief	Kernel ELF loader.
+ * @brief Kernel ELF loader.
  */
 
@@ -57,75 +57,72 @@
 };
 
-static int segment_header(elf_segment_header_t *entry, elf_header_t *elf,
-    as_t *as, int flags);
-static int section_header(elf_section_header_t *entry, elf_header_t *elf,
-    as_t *as);
-static int load_segment(elf_segment_header_t *entry, elf_header_t *elf,
-    as_t *as);
+static int segment_header(elf_segment_header_t *, elf_header_t *, as_t *,
+    unsigned int);
+static int section_header(elf_section_header_t *, elf_header_t *, as_t *);
+static int load_segment(elf_segment_header_t *, elf_header_t *, as_t *);
 
 /** ELF loader
  *
  * @param header Pointer to ELF header in memory
- * @param as Created and properly mapped address space
- * @param flags A combination of ELD_F_*
+ * @param as     Created and properly mapped address space
+ * @param flags  A combination of ELD_F_*
+ *
  * @return EE_OK on success
- */
-unsigned int elf_load(elf_header_t *header, as_t * as, int flags)
-{
-	int i, rc;
-
+ *
+ */
+unsigned int elf_load(elf_header_t *header, as_t *as, unsigned int flags)
+{
 	/* Identify ELF */
-	if (header->e_ident[EI_MAG0] != ELFMAG0 ||
-	    header->e_ident[EI_MAG1] != ELFMAG1 || 
-	    header->e_ident[EI_MAG2] != ELFMAG2 ||
-	    header->e_ident[EI_MAG3] != ELFMAG3) {
+	if ((header->e_ident[EI_MAG0] != ELFMAG0) ||
+	    (header->e_ident[EI_MAG1] != ELFMAG1) ||
+	    (header->e_ident[EI_MAG2] != ELFMAG2) ||
+	    (header->e_ident[EI_MAG3] != ELFMAG3))
 		return EE_INVALID;
-	}
 	
 	/* Identify ELF compatibility */
-	if (header->e_ident[EI_DATA] != ELF_DATA_ENCODING ||
-	    header->e_machine != ELF_MACHINE || 
-	    header->e_ident[EI_VERSION] != EV_CURRENT ||
-	    header->e_version != EV_CURRENT ||
-	    header->e_ident[EI_CLASS] != ELF_CLASS) {
+	if ((header->e_ident[EI_DATA] != ELF_DATA_ENCODING) ||
+	    (header->e_machine != ELF_MACHINE) ||
+	    (header->e_ident[EI_VERSION] != EV_CURRENT) ||
+	    (header->e_version != EV_CURRENT) ||
+	    (header->e_ident[EI_CLASS] != ELF_CLASS))
 		return EE_INCOMPATIBLE;
-	}
-
+	
 	if (header->e_phentsize != sizeof(elf_segment_header_t))
 		return EE_INCOMPATIBLE;
-
+	
 	if (header->e_shentsize != sizeof(elf_section_header_t))
 		return EE_INCOMPATIBLE;
-
+	
 	/* Check if the object type is supported. */
 	if (header->e_type != ET_EXEC)
 		return EE_UNSUPPORTED;
-
+	
 	/* Check if the ELF image starts on a page boundary */
-	if (ALIGN_UP((uintptr_t)header, PAGE_SIZE) != (uintptr_t)header)
+	if (ALIGN_UP((uintptr_t) header, PAGE_SIZE) != (uintptr_t) header)
 		return EE_UNSUPPORTED;
-
+	
 	/* Walk through all segment headers and process them. */
+	elf_half i;
 	for (i = 0; i < header->e_phnum; i++) {
-		elf_segment_header_t *seghdr;
-
-		seghdr = &((elf_segment_header_t *)(((uint8_t *) header) +
+		elf_segment_header_t *seghdr =
+		    &((elf_segment_header_t *)(((uint8_t *) header) +
 		    header->e_phoff))[i];
-		rc = segment_header(seghdr, header, as, flags);
+		
+		int rc = segment_header(seghdr, header, as, flags);
 		if (rc != EE_OK)
 			return rc;
 	}
-
+	
 	/* Inspect all section headers and proccess them. */
 	for (i = 0; i < header->e_shnum; i++) {
-		elf_section_header_t *sechdr;
-
-		sechdr = &((elf_section_header_t *)(((uint8_t *) header) +
+		elf_section_header_t *sechdr =
+		    &((elf_section_header_t *)(((uint8_t *) header) +
 		    header->e_shoff))[i];
-		rc = section_header(sechdr, header, as);
+		
+		int rc = section_header(sechdr, header, as);
 		if (rc != EE_OK)
 			return rc;
 	}
-
+	
 	return EE_OK;
 }
@@ -136,9 +133,10 @@
  *
  * @return NULL terminated description of error.
+ *
  */
 const char *elf_error(unsigned int rc)
 {
 	ASSERT(rc < sizeof(error_codes) / sizeof(char *));
-
+	
 	return error_codes[rc];
 }
@@ -147,11 +145,12 @@
  *
  * @param entry Segment header.
- * @param elf ELF header.
- * @param as Address space into wich the ELF is being loaded.
+ * @param elf   ELF header.
+ * @param as    Address space into which the ELF is being loaded.
  *
  * @return EE_OK on success, error code otherwise.
+ *
  */
 static int segment_header(elf_segment_header_t *entry, elf_header_t *elf,
-    as_t *as, int flags)
+    as_t *as, unsigned int flags)
 {
 	switch (entry->p_type) {
@@ -170,7 +169,6 @@
 			return EE_UNSUPPORTED;
 		} */
-		if ((flags & ELD_F_LOADER) == 0) {
+		if ((flags & ELD_F_LOADER) == 0)
 			return EE_LOADER;
-		}
 		break;
 	case PT_SHLIB:
@@ -187,51 +185,53 @@
  *
  * @param entry Program header entry describing segment to be loaded.
- * @param elf ELF header.
- * @param as Address space into wich the ELF is being loaded.
+ * @param elf   ELF header.
+ * @param as    Address space into which the ELF is being loaded.
  *
  * @return EE_OK on success, error code otherwise.
+ *
  */
 int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as)
 {
-	as_area_t *a;
-	int flags = 0;
 	mem_backend_data_t backend_data;
-	uintptr_t base;
-	size_t mem_sz;
-	
 	backend_data.elf = elf;
 	backend_data.segment = entry;
-
+	
 	if (entry->p_align > 1) {
 		if ((entry->p_offset % entry->p_align) !=
-		    (entry->p_vaddr % entry->p_align)) {
+		    (entry->p_vaddr % entry->p_align))
 			return EE_INVALID;
-		}
-	}
-
+	}
+	
+	unsigned int flags = 0;
+	
 	if (entry->p_flags & PF_X)
 		flags |= AS_AREA_EXEC;
+	
 	if (entry->p_flags & PF_W)
 		flags |= AS_AREA_WRITE;
+	
 	if (entry->p_flags & PF_R)
 		flags |= AS_AREA_READ;
+	
 	flags |= AS_AREA_CACHEABLE;
-
-	/* 
+	
+	/*
 	 * Align vaddr down, inserting a little "gap" at the beginning.
 	 * Adjust area size, so that its end remains in place.
+	 *
 	 */
-	base = ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE);
-	mem_sz = entry->p_memsz + (entry->p_vaddr - base);
-
-	a = as_area_create(as, flags, mem_sz, base,
+	uintptr_t base = ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE);
+	size_t mem_sz = entry->p_memsz + (entry->p_vaddr - base);
+	
+	as_area_t *area = as_area_create(as, flags, mem_sz, base,
 	    AS_AREA_ATTR_NONE, &elf_backend, &backend_data);
-	if (!a)
+	if (!area)
 		return EE_MEMORY;
 	
 	/*
 	 * The segment will be mapped on demand by elf_page_fault().
+	 *
 	 */
-
+	
 	return EE_OK;
 }
@@ -240,8 +240,9 @@
  *
  * @param entry Segment header.
- * @param elf ELF header.
- * @param as Address space into wich the ELF is being loaded.
+ * @param elf   ELF header.
+ * @param as    Address space into which the ELF is being loaded.
  *
  * @return EE_OK on success, error code otherwise.
+ *
  */
 static int section_header(elf_section_header_t *entry, elf_header_t *elf,
Index: kernel/generic/src/main/kinit.c
===================================================================
--- kernel/generic/src/main/kinit.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/main/kinit.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -115,7 +115,7 @@
 		thread = thread_create(kmp, NULL, TASK, THREAD_FLAG_WIRED, "kmp", true);
 		if (thread != NULL) {
-			spinlock_lock(&thread->lock);
+			irq_spinlock_lock(&thread->lock, false);
 			thread->cpu = &cpus[0];
-			spinlock_unlock(&thread->lock);
+			irq_spinlock_unlock(&thread->lock, false);
 			thread_ready(thread);
 		} else
@@ -135,7 +135,7 @@
 			thread = thread_create(kcpulb, NULL, TASK, THREAD_FLAG_WIRED, "kcpulb", true);
 			if (thread != NULL) {
-				spinlock_lock(&thread->lock);
+				irq_spinlock_lock(&thread->lock, false);
 				thread->cpu = &cpus[i];
-				spinlock_unlock(&thread->lock);
+				irq_spinlock_unlock(&thread->lock, false);
 				thread_ready(thread);
 			} else
@@ -199,5 +199,5 @@
 		str_cpy(namebuf + INIT_PREFIX_LEN,
 		    TASK_NAME_BUFLEN - INIT_PREFIX_LEN, name);
-
+		
 		int rc = program_create_from_image((void *) init.tasks[i].addr,
 		    namebuf, &programs[i]);
@@ -222,5 +222,5 @@
 		}
 	}
-
+	
 	/*
 	 * Run user tasks.
@@ -230,5 +230,5 @@
 			program_ready(&programs[i]);
 	}
-
+	
 #ifdef CONFIG_KCONSOLE
 	if (!stdin) {
Index: kernel/generic/src/mm/as.c
===================================================================
--- kernel/generic/src/mm/as.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/mm/as.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -33,5 +33,5 @@
 /**
  * @file
- * @brief	Address space related functions.
+ * @brief Address space related functions.
  *
  * This file contains address space manipulation functions.
@@ -86,4 +86,5 @@
  * Each architecture decides what functions will be used to carry out
  * address space operations such as creating or locking page tables.
+ *
  */
 as_operations_t *as_operations = NULL;
@@ -91,4 +92,5 @@
 /**
  * Slab for as_t objects.
+ *
  */
 static slab_cache_t *as_slab;
@@ -100,4 +102,5 @@
  * - as->asid for each as of the as_t type
  * - asids_allocated counter
+ *
  */
 SPINLOCK_INITIALIZE(asidlock);
@@ -106,4 +109,5 @@
  * This list contains address spaces that are not active on any
  * processor and that have valid ASID.
+ *
  */
 LIST_INITIALIZE(inactive_as_with_asid_head);
@@ -112,26 +116,24 @@
 as_t *AS_KERNEL = NULL;
 
-static int area_flags_to_page_flags(int);
+static unsigned int area_flags_to_page_flags(unsigned int);
 static as_area_t *find_area_and_lock(as_t *, uintptr_t);
 static bool check_area_conflicts(as_t *, uintptr_t, size_t, as_area_t *);
 static void sh_info_remove_reference(share_info_t *);
 
-static int as_constructor(void *obj, int flags)
+static int as_constructor(void *obj, unsigned int flags)
 {
 	as_t *as = (as_t *) obj;
-	int rc;
-
+	
 	link_initialize(&as->inactive_as_with_asid_link);
 	mutex_initialize(&as->lock, MUTEX_PASSIVE);
 	
-	rc = as_constructor_arch(as, flags);
+	int rc = as_constructor_arch(as, flags);
 	
 	return rc;
 }
 
-static int as_destructor(void *obj)
+static size_t as_destructor(void *obj)
 {
 	as_t *as = (as_t *) obj;
-
 	return as_destructor_arch(as);
 }
@@ -141,5 +143,5 @@
 {
 	as_arch_init();
-
+	
 	as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
 	    as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
@@ -157,12 +159,11 @@
 /** Create address space.
  *
- * @param flags		Flags that influence the way in wich the address space
- * 			is created.
- */
-as_t *as_create(int flags)
-{
-	as_t *as;
-
-	as = (as_t *) slab_alloc(as_slab, 0);
+ * @param flags Flags that influence the way in which the address
+ *              space is created.
+ *
+ */
+as_t *as_create(unsigned int flags)
+{
+	as_t *as = (as_t *) slab_alloc(as_slab, 0);
 	(void) as_create_arch(as, 0);
 	
@@ -176,4 +177,5 @@
 	atomic_set(&as->refcount, 0);
 	as->cpu_refcount = 0;
+	
 #ifdef AS_PAGE_TABLE
 	as->genarch.page_table = page_table_create(flags);
@@ -192,10 +194,9 @@
  * We know that we don't hold any spinlock.
  *
- * @param as		Address space to be destroyed.
+ * @param as Address space to be destroyed.
+ *
  */
 void as_destroy(as_t *as)
 {
-	ipl_t ipl;
-	bool cond;
 	DEADLOCK_PROBE_INIT(p_asidlock);
 
@@ -214,7 +215,9 @@
 	 * disabled to prevent nested context switches. We also depend on the
 	 * fact that so far no spinlocks are held.
+	 *
 	 */
 	preemption_disable();
-	ipl = interrupts_read();
+	ipl_t ipl = interrupts_read();
+	
 retry:
 	interrupts_disable();
@@ -224,30 +227,37 @@
 		goto retry;
 	}
-	preemption_enable();	/* Interrupts disabled, enable preemption */
-	if (as->asid != ASID_INVALID && as != AS_KERNEL) {
+	
+	/* Interrupts disabled, enable preemption */
+	preemption_enable();
+	
+	if ((as->asid != ASID_INVALID) && (as != AS_KERNEL)) {
 		if (as->cpu_refcount == 0)
 			list_remove(&as->inactive_as_with_asid_link);
+		
 		asid_put(as->asid);
 	}
+	
 	spinlock_unlock(&asidlock);
-
+	
 	/*
 	 * Destroy address space areas of the address space.
 	 * The B+tree must be walked carefully because it is
 	 * also being destroyed.
-	 */	
-	for (cond = true; cond; ) {
-		btree_node_t *node;
-
+	 *
+	 */
+	bool cond = true;
+	while (cond) {
 		ASSERT(!list_empty(&as->as_area_btree.leaf_head));
-		node = list_get_instance(as->as_area_btree.leaf_head.next,
+		
+		btree_node_t *node =
+		    list_get_instance(as->as_area_btree.leaf_head.next,
 		    btree_node_t, leaf_link);
-
-		if ((cond = node->keys)) {
+		
+		if ((cond = node->keys))
 			as_area_destroy(as, node->key[0]);
-		}
-	}
-
+	}
+	
 	btree_destroy(&as->as_area_btree);
+	
 #ifdef AS_PAGE_TABLE
 	page_table_destroy(as->genarch.page_table);
@@ -255,7 +265,7 @@
 	page_table_destroy(NULL);
 #endif
-
+	
 	interrupts_restore(ipl);
-
+	
 	slab_free(as_slab, as);
 }
@@ -266,5 +276,6 @@
  * space.
  *
- * @param a		Address space to be held.
+ * @param as Address space to be held.
+ *
  */
 void as_hold(as_t *as)
@@ -278,5 +289,6 @@
  * space.
  *
- * @param a		Address space to be released.
+ * @param as Address space to be released.
+ *
  */
 void as_release(as_t *as)
@@ -290,32 +302,30 @@
  * The created address space area is added to the target address space.
  *
- * @param as		Target address space.
- * @param flags		Flags of the area memory.
- * @param size		Size of area.
- * @param base		Base address of area.
- * @param attrs		Attributes of the area.
- * @param backend	Address space area backend. NULL if no backend is used.
- * @param backend_data	NULL or a pointer to an array holding two void *.
- *
- * @return		Address space area on success or NULL on failure.
- */
-as_area_t *
-as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
-    mem_backend_t *backend, mem_backend_data_t *backend_data)
-{
-	ipl_t ipl;
-	as_area_t *a;
-	
+ * @param as           Target address space.
+ * @param flags        Flags of the area memory.
+ * @param size         Size of area.
+ * @param base         Base address of area.
+ * @param attrs        Attributes of the area.
+ * @param backend      Address space area backend. NULL if no backend is used.
+ * @param backend_data NULL or a pointer to an array holding two void *.
+ *
+ * @return Address space area on success or NULL on failure.
+ *
+ */
+as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size,
+    uintptr_t base, unsigned int attrs, mem_backend_t *backend,
+    mem_backend_data_t *backend_data)
+{
 	if (base % PAGE_SIZE)
 		return NULL;
-
+	
 	if (!size)
 		return NULL;
-
+	
 	/* Writeable executable areas are not supported. */
 	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
 		return NULL;
 	
-	ipl = interrupts_disable();
+	ipl_t ipl = interrupts_disable();
 	mutex_lock(&as->lock);
 	
@@ -326,54 +336,52 @@
 	}
 	
-	a = (as_area_t *) malloc(sizeof(as_area_t), 0);
-
-	mutex_initialize(&a->lock, MUTEX_PASSIVE);
-	
-	a->as = as;
-	a->flags = flags;
-	a->attributes = attrs;
-	a->pages = SIZE2FRAMES(size);
-	a->base = base;
-	a->sh_info = NULL;
-	a->backend = backend;
+	as_area_t *area = (as_area_t *) malloc(sizeof(as_area_t), 0);
+	
+	mutex_initialize(&area->lock, MUTEX_PASSIVE);
+	
+	area->as = as;
+	area->flags = flags;
+	area->attributes = attrs;
+	area->pages = SIZE2FRAMES(size);
+	area->base = base;
+	area->sh_info = NULL;
+	area->backend = backend;
+	
 	if (backend_data)
-		a->backend_data = *backend_data;
+		area->backend_data = *backend_data;
 	else
-		memsetb(&a->backend_data, sizeof(a->backend_data), 0);
-
-	btree_create(&a->used_space);
-	
-	btree_insert(&as->as_area_btree, base, (void *) a, NULL);
-
+		memsetb(&area->backend_data, sizeof(area->backend_data), 0);
+	
+	btree_create(&area->used_space);
+	btree_insert(&as->as_area_btree, base, (void *) area, NULL);
+	
 	mutex_unlock(&as->lock);
 	interrupts_restore(ipl);
-
-	return a;
+	
+	return area;
 }
 
 /** Find address space area and change it.
  *
- * @param as		Address space.
- * @param address	Virtual address belonging to the area to be changed.
- * 			Must be page-aligned.
- * @param size		New size of the virtual memory block starting at
- * 			address. 
- * @param flags		Flags influencing the remap operation. Currently unused.
- *
- * @return		Zero on success or a value from @ref errno.h otherwise.
- */ 
-int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
-{
-	as_area_t *area;
-	ipl_t ipl;
-	size_t pages;
-	
-	ipl = interrupts_disable();
+ * @param as      Address space.
+ * @param address Virtual address belonging to the area to be changed.
+ *                Must be page-aligned.
+ * @param size    New size of the virtual memory block starting at
+ *                address.
+ * @param flags   Flags influencing the remap operation. Currently unused.
+ *
+ * @return Zero on success or a value from @ref errno.h otherwise.
+ *
+ */
+int as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags)
+{
+	ipl_t ipl = interrupts_disable();
 	mutex_lock(&as->lock);
 	
 	/*
 	 * Locate the area.
-	 */
-	area = find_area_and_lock(as, address);
+	 *
+	 */
+	as_area_t *area = find_area_and_lock(as, address);
 	if (!area) {
 		mutex_unlock(&as->lock);
@@ -381,9 +389,10 @@
 		return ENOENT;
 	}
-
+	
 	if (area->backend == &phys_backend) {
 		/*
 		 * Remapping of address space areas associated
 		 * with memory mapped devices is not supported.
+		 *
 		 */
 		mutex_unlock(&area->lock);
@@ -392,8 +401,10 @@
 		return ENOTSUP;
 	}
+	
 	if (area->sh_info) {
 		/*
-		 * Remapping of shared address space areas 
+		 * Remapping of shared address space areas
 		 * is not supported.
+		 *
 		 */
 		mutex_unlock(&area->lock);
@@ -402,9 +413,10 @@
 		return ENOTSUP;
 	}
-
-	pages = SIZE2FRAMES((address - area->base) + size);
+	
+	size_t pages = SIZE2FRAMES((address - area->base) + size);
 	if (!pages) {
 		/*
 		 * Zero size address space areas are not allowed.
+		 *
 		 */
 		mutex_unlock(&area->lock);
@@ -415,20 +427,21 @@
 	
 	if (pages < area->pages) {
-		bool cond;
 		uintptr_t start_free = area->base + pages * PAGE_SIZE;
-
+		
 		/*
 		 * Shrinking the area.
 		 * No need to check for overlaps.
-		 */
-
+		 *
+		 */
+		
 		page_table_lock(as, false);
-
+		
 		/*
 		 * Start TLB shootdown sequence.
+		 *
 		 */
 		tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base +
 		    pages * PAGE_SIZE, area->pages - pages);
-
+		
 		/*
 		 * Remove frames belonging to used space starting from
@@ -437,42 +450,46 @@
 		 * is also the right way to remove part of the used_space
 		 * B+tree leaf list.
-		 */		
-		for (cond = true; cond;) {
-			btree_node_t *node;
-		
+		 *
+		 */
+		bool cond = true;
+		while (cond) {
 			ASSERT(!list_empty(&area->used_space.leaf_head));
-			node = 
+			
+			btree_node_t *node =
 			    list_get_instance(area->used_space.leaf_head.prev,
 			    btree_node_t, leaf_link);
+			
 			if ((cond = (bool) node->keys)) {
-				uintptr_t b = node->key[node->keys - 1];
-				size_t c =
+				uintptr_t ptr = node->key[node->keys - 1];
+				size_t size =
 				    (size_t) node->value[node->keys - 1];
-				unsigned int i = 0;
-			
-				if (overlaps(b, c * PAGE_SIZE, area->base,
+				size_t i = 0;
+				
+				if (overlaps(ptr, size * PAGE_SIZE, area->base,
 				    pages * PAGE_SIZE)) {
 					
-					if (b + c * PAGE_SIZE <= start_free) {
+					if (ptr + size * PAGE_SIZE <= start_free) {
 						/*
 						 * The whole interval fits
 						 * completely in the resized
 						 * address space area.
+						 *
 						 */
 						break;
 					}
-		
+					
 					/*
 					 * Part of the interval corresponding
 					 * to b and c overlaps with the resized
 					 * address space area.
+					 *
 					 */
-		
-					cond = false;	/* we are almost done */
-					i = (start_free - b) >> PAGE_WIDTH;
+					
+					/* We are almost done */
+					cond = false;
+					i = (start_free - ptr) >> PAGE_WIDTH;
 					if (!used_space_remove(area, start_free,
-					    c - i))
-						panic("Cannot remove used "
-						    "space.");
+					    size - i))
+						panic("Cannot remove used space.");
 				} else {
 					/*
@@ -480,58 +497,61 @@
 					 * completely removed.
 					 */
-					if (!used_space_remove(area, b, c))
-						panic("Cannot remove used "
-						    "space.");
+					if (!used_space_remove(area, ptr, size))
+						panic("Cannot remove used space.");
 				}
-			
-				for (; i < c; i++) {
-					pte_t *pte;
-			
-					pte = page_mapping_find(as, b +
+				
+				for (; i < size; i++) {
+					pte_t *pte = page_mapping_find(as, ptr +
 					    i * PAGE_SIZE);
-					ASSERT(pte && PTE_VALID(pte) &&
-					    PTE_PRESENT(pte));
-					if (area->backend &&
-					    area->backend->frame_free) {
+					
+					ASSERT(pte);
+					ASSERT(PTE_VALID(pte));
+					ASSERT(PTE_PRESENT(pte));
+					
+					if ((area->backend) &&
+					    (area->backend->frame_free)) {
 						area->backend->frame_free(area,
-						    b + i * PAGE_SIZE,
+						    ptr + i * PAGE_SIZE,
 						    PTE_GET_FRAME(pte));
 					}
-					page_mapping_remove(as, b +
+					
+					page_mapping_remove(as, ptr +
 					    i * PAGE_SIZE);
 				}
 			}
 		}
-
+		
 		/*
 		 * Finish TLB shootdown sequence.
-		 */
-
+		 *
+		 */
+		
 		tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
 		    area->pages - pages);
-
+		
 		/*
 		 * Invalidate software translation caches (e.g. TSB on sparc64).
+		 *
 		 */
 		as_invalidate_translation_cache(as, area->base +
 		    pages * PAGE_SIZE, area->pages - pages);
 		tlb_shootdown_finalize();
-
+		
 		page_table_unlock(as, false);
-		
 	} else {
 		/*
 		 * Growing the area.
 		 * Check for overlaps with other address space areas.
+		 *
 		 */
 		if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
 		    area)) {
 			mutex_unlock(&area->lock);
-			mutex_unlock(&as->lock);		
+			mutex_unlock(&as->lock);
 			interrupts_restore(ipl);
 			return EADDRNOTAVAIL;
 		}
-	} 
-
+	}
+	
 	area->pages = pages;
 	
@@ -539,5 +559,5 @@
 	mutex_unlock(&as->lock);
 	interrupts_restore(ipl);
-
+	
 	return 0;
 }
@@ -545,20 +565,16 @@
 /** Destroy address space area.
  *
- * @param as		Address space.
- * @param address	Address within the area to be deleted.
- *
- * @return		Zero on success or a value from @ref errno.h on failure.
+ * @param as      Address space.
+ * @param address Address within the area to be deleted.
+ *
+ * @return Zero on success or a value from @ref errno.h on failure.
+ *
  */
 int as_area_destroy(as_t *as, uintptr_t address)
 {
-	as_area_t *area;
-	uintptr_t base;
-	link_t *cur;
-	ipl_t ipl;
-
-	ipl = interrupts_disable();
+	ipl_t ipl = interrupts_disable();
 	mutex_lock(&as->lock);
-
-	area = find_area_and_lock(as, address);
+	
+	as_area_t *area = find_area_and_lock(as, address);
 	if (!area) {
 		mutex_unlock(&as->lock);
@@ -566,68 +582,75 @@
 		return ENOENT;
 	}
-
-	base = area->base;
-
+	
+	uintptr_t base = area->base;
+	
 	page_table_lock(as, false);
-
+	
 	/*
 	 * Start TLB shootdown sequence.
 	 */
 	tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
-
+	
 	/*
 	 * Visit only the pages mapped by used_space B+tree.
 	 */
+	link_t *cur;
 	for (cur = area->used_space.leaf_head.next;
 	    cur != &area->used_space.leaf_head; cur = cur->next) {
 		btree_node_t *node;
-		unsigned int i;
+		btree_key_t i;
 		
 		node = list_get_instance(cur, btree_node_t, leaf_link);
 		for (i = 0; i < node->keys; i++) {
-			uintptr_t b = node->key[i];
-			size_t j;
-			pte_t *pte;
+			uintptr_t ptr = node->key[i];
+			size_t size;
 			
-			for (j = 0; j < (size_t) node->value[i]; j++) {
-				pte = page_mapping_find(as, b + j * PAGE_SIZE);
-				ASSERT(pte && PTE_VALID(pte) &&
-				    PTE_PRESENT(pte));
-				if (area->backend &&
-				    area->backend->frame_free) {
-					area->backend->frame_free(area,	b +
-					    j * PAGE_SIZE, PTE_GET_FRAME(pte));
+			for (size = 0; size < (size_t) node->value[i]; size++) {
+				pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE);
+				
+				ASSERT(pte);
+				ASSERT(PTE_VALID(pte));
+				ASSERT(PTE_PRESENT(pte));
+				
+				if ((area->backend) &&
+				    (area->backend->frame_free)) {
+					area->backend->frame_free(area,
+					    ptr + size * PAGE_SIZE, PTE_GET_FRAME(pte));
 				}
-				page_mapping_remove(as, b + j * PAGE_SIZE);				
+				
+				page_mapping_remove(as, ptr + size * PAGE_SIZE);
 			}
 		}
 	}
-
+	
 	/*
 	 * Finish TLB shootdown sequence.
-	 */
-
+	 *
+	 */
+	
 	tlb_invalidate_pages(as->asid, area->base, area->pages);
-
+	
 	/*
 	 * Invalidate potential software translation caches (e.g. TSB on
 	 * sparc64).
+	 *
 	 */
 	as_invalidate_translation_cache(as, area->base, area->pages);
 	tlb_shootdown_finalize();
-
+	
 	page_table_unlock(as, false);
 	
 	btree_destroy(&area->used_space);
-
+	
 	area->attributes |= AS_AREA_ATTR_PARTIAL;
 	
 	if (area->sh_info)
 		sh_info_remove_reference(area->sh_info);
-		
+	
 	mutex_unlock(&area->lock);
-
+	
 	/*
 	 * Remove the empty area from address space.
+	 *
 	 */
 	btree_remove(&as->as_area_btree, base, NULL);
@@ -647,36 +670,31 @@
  * sh_info of the source area. The process of duplicating the
  * mapping is done through the backend share function.
- * 
- * @param src_as	Pointer to source address space.
- * @param src_base	Base address of the source address space area.
- * @param acc_size	Expected size of the source area.
- * @param dst_as	Pointer to destination address space.
- * @param dst_base	Target base address.
+ *
+ * @param src_as         Pointer to source address space.
+ * @param src_base       Base address of the source address space area.
+ * @param acc_size       Expected size of the source area.
+ * @param dst_as         Pointer to destination address space.
+ * @param dst_base       Target base address.
  * @param dst_flags_mask Destination address space area flags mask.
  *
- * @return		Zero on success or ENOENT if there is no such task or if
- * 			there is no such address space area, EPERM if there was
- * 			a problem in accepting the area or ENOMEM if there was a
- * 			problem in allocating destination address space area.
- * 			ENOTSUP is returned if the address space area backend
- * 			does not support sharing.
+ * @return Zero on success.
+ * @return ENOENT if there is no such task or such address space.
+ * @return EPERM if there was a problem in accepting the area.
+ * @return ENOMEM if there was a problem in allocating destination
+ *         address space area.
+ * @return ENOTSUP if the address space area backend does not support
+ *         sharing.
+ *
  */
 int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
-    as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
-{
-	ipl_t ipl;
-	int src_flags;
-	size_t src_size;
-	as_area_t *src_area, *dst_area;
-	share_info_t *sh_info;
-	mem_backend_t *src_backend;
-	mem_backend_data_t src_backend_data;
-	
-	ipl = interrupts_disable();
+    as_t *dst_as, uintptr_t dst_base, unsigned int dst_flags_mask)
+{
+	ipl_t ipl = interrupts_disable();
 	mutex_lock(&src_as->lock);
-	src_area = find_area_and_lock(src_as, src_base);
+	as_area_t *src_area = find_area_and_lock(src_as, src_base);
 	if (!src_area) {
 		/*
 		 * Could not find the source address space area.
+		 *
 		 */
 		mutex_unlock(&src_as->lock);
@@ -684,9 +702,10 @@
 		return ENOENT;
 	}
-
-	if (!src_area->backend || !src_area->backend->share) {
+	
+	if ((!src_area->backend) || (!src_area->backend->share)) {
 		/*
 		 * There is no backend or the backend does not
 		 * know how to share the area.
+		 *
 		 */
 		mutex_unlock(&src_area->lock);
@@ -696,15 +715,15 @@
 	}
 	
-	src_size = src_area->pages * PAGE_SIZE;
-	src_flags = src_area->flags;
-	src_backend = src_area->backend;
-	src_backend_data = src_area->backend_data;
-
+	size_t src_size = src_area->pages * PAGE_SIZE;
+	unsigned int src_flags = src_area->flags;
+	mem_backend_t *src_backend = src_area->backend;
+	mem_backend_data_t src_backend_data = src_area->backend_data;
+	
 	/* Share the cacheable flag from the original mapping */
 	if (src_flags & AS_AREA_CACHEABLE)
 		dst_flags_mask |= AS_AREA_CACHEABLE;
-
-	if (src_size != acc_size ||
-	    (src_flags & dst_flags_mask) != dst_flags_mask) {
+	
+	if ((src_size != acc_size) ||
+	    ((src_flags & dst_flags_mask) != dst_flags_mask)) {
 		mutex_unlock(&src_area->lock);
 		mutex_unlock(&src_as->lock);
@@ -712,11 +731,12 @@
 		return EPERM;
 	}
-
+	
 	/*
 	 * Now we are committed to sharing the area.
 	 * First, prepare the area for sharing.
 	 * Then it will be safe to unlock it.
-	 */
-	sh_info = src_area->sh_info;
+	 *
+	 */
+	share_info_t *sh_info = src_area->sh_info;
 	if (!sh_info) {
 		sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
@@ -725,6 +745,8 @@
 		btree_create(&sh_info->pagemap);
 		src_area->sh_info = sh_info;
+		
 		/*
 		 * Call the backend to setup sharing.
+		 *
 		 */
 		src_area->backend->share(src_area);
@@ -734,8 +756,8 @@
 		mutex_unlock(&sh_info->lock);
 	}
-
+	
 	mutex_unlock(&src_area->lock);
 	mutex_unlock(&src_as->lock);
-
+	
 	/*
 	 * Create copy of the source address space area.
@@ -745,7 +767,8 @@
 	 * The flags of the source area are masked against dst_flags_mask
 	 * to support sharing in less privileged mode.
-	 */
-	dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
-	    AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
+	 *
+	 */
+	as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size,
+	    dst_base, AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
 	if (!dst_area) {
 		/*
@@ -757,17 +780,18 @@
 		return ENOMEM;
 	}
-
+	
 	/*
 	 * Now the destination address space area has been
 	 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
 	 * attribute and set the sh_info.
-	 */	
-	mutex_lock(&dst_as->lock);	
+	 *
+	 */
+	mutex_lock(&dst_as->lock);
 	mutex_lock(&dst_area->lock);
 	dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
 	dst_area->sh_info = sh_info;
 	mutex_unlock(&dst_area->lock);
-	mutex_unlock(&dst_as->lock);	
-
+	mutex_unlock(&dst_as->lock);
+	
 	interrupts_restore(ipl);
 	
@@ -779,9 +803,10 @@
  * The address space area must be locked prior to this call.
  *
- * @param area		Address space area.
- * @param access	Access mode.
- *
- * @return		False if access violates area's permissions, true
- * 			otherwise.
+ * @param area   Address space area.
+ * @param access Access mode.
+ *
+ * @return False if access violates area's permissions, true
+ *         otherwise.
+ *
  */
 bool as_area_check_access(as_area_t *area, pf_access_t access)
@@ -792,5 +817,5 @@
 		[PF_ACCESS_EXEC] = AS_AREA_EXEC
 	};
-
+	
 	if (!(area->flags & flagmap[access]))
 		return false;
@@ -813,21 +838,13 @@
  *
  */
-int as_area_change_flags(as_t *as, int flags, uintptr_t address)
-{
-	as_area_t *area;
-	link_t *cur;
-	ipl_t ipl;
-	int page_flags;
-	uintptr_t *old_frame;
-	size_t frame_idx;
-	size_t used_pages;
-	
+int as_area_change_flags(as_t *as, unsigned int flags, uintptr_t address)
+{
 	/* Flags for the new memory mapping */
-	page_flags = area_flags_to_page_flags(flags);
-
-	ipl = interrupts_disable();
+	unsigned int page_flags = area_flags_to_page_flags(flags);
+	
+	ipl_t ipl = interrupts_disable();
 	mutex_lock(&as->lock);
-
-	area = find_area_and_lock(as, address);
+	
+	as_area_t *area = find_area_and_lock(as, address);
 	if (!area) {
 		mutex_unlock(&as->lock);
@@ -835,5 +852,5 @@
 		return ENOENT;
 	}
-
+	
 	if ((area->sh_info) || (area->backend != &anon_backend)) {
 		/* Copying shared areas not supported yet */
@@ -844,64 +861,70 @@
 		return ENOTSUP;
 	}
-
+	
 	/*
 	 * Compute total number of used pages in the used_space B+tree
-	 */
-	used_pages = 0;
-
+	 *
+	 */
+	size_t used_pages = 0;
+	link_t *cur;
+	
 	for (cur = area->used_space.leaf_head.next;
 	    cur != &area->used_space.leaf_head; cur = cur->next) {
-		btree_node_t *node;
-		unsigned int i;
-		
-		node = list_get_instance(cur, btree_node_t, leaf_link);
-		for (i = 0; i < node->keys; i++) {
+		btree_node_t *node
+		    = list_get_instance(cur, btree_node_t, leaf_link);
+		btree_key_t i;
+		
+		for (i = 0; i < node->keys; i++)
 			used_pages += (size_t) node->value[i];
-		}
-	}
-
+	}
+	
 	/* An array for storing frame numbers */
-	old_frame = malloc(used_pages * sizeof(uintptr_t), 0);
-
+	uintptr_t *old_frame = malloc(used_pages * sizeof(uintptr_t), 0);
+	
 	page_table_lock(as, false);
-
+	
 	/*
 	 * Start TLB shootdown sequence.
+	 *
 	 */
 	tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
-
+	
 	/*
 	 * Remove used pages from page tables and remember their frame
 	 * numbers.
-	 */
-	frame_idx = 0;
-
+	 *
+	 */
+	size_t frame_idx = 0;
+	
 	for (cur = area->used_space.leaf_head.next;
 	    cur != &area->used_space.leaf_head; cur = cur->next) {
-		btree_node_t *node;
-		unsigned int i;
-		
-		node = list_get_instance(cur, btree_node_t, leaf_link);
+		btree_node_t *node
+		    = list_get_instance(cur, btree_node_t, leaf_link);
+		btree_key_t i;
+		
 		for (i = 0; i < node->keys; i++) {
-			uintptr_t b = node->key[i];
-			size_t j;
-			pte_t *pte;
+			uintptr_t ptr = node->key[i];
+			size_t size;
 			
-			for (j = 0; j < (size_t) node->value[i]; j++) {
-				pte = page_mapping_find(as, b + j * PAGE_SIZE);
-				ASSERT(pte && PTE_VALID(pte) &&
-				    PTE_PRESENT(pte));
+			for (size = 0; size < (size_t) node->value[i]; size++) {
+				pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE);
+				
+				ASSERT(pte);
+				ASSERT(PTE_VALID(pte));
+				ASSERT(PTE_PRESENT(pte));
+				
 				old_frame[frame_idx++] = PTE_GET_FRAME(pte);
-
+				
 				/* Remove old mapping */
-				page_mapping_remove(as, b + j * PAGE_SIZE);
+				page_mapping_remove(as, ptr + size * PAGE_SIZE);
 			}
 		}
 	}
-
+	
 	/*
 	 * Finish TLB shootdown sequence.
-	 */
-
+	 *
+	 */
+	
 	tlb_invalidate_pages(as->asid, area->base, area->pages);
 	
@@ -909,15 +932,16 @@
 	 * Invalidate potential software translation caches (e.g. TSB on
 	 * sparc64).
+	 *
 	 */
 	as_invalidate_translation_cache(as, area->base, area->pages);
 	tlb_shootdown_finalize();
-
+	
 	page_table_unlock(as, false);
-
+	
 	/*
 	 * Set the new flags.
 	 */
 	area->flags = flags;
-
+	
 	/*
 	 * Map pages back in with new flags. This step is kept separate
@@ -926,36 +950,35 @@
 	 */
 	frame_idx = 0;
-
+	
 	for (cur = area->used_space.leaf_head.next;
 	    cur != &area->used_space.leaf_head; cur = cur->next) {
-		btree_node_t *node;
-		unsigned int i;
-		
-		node = list_get_instance(cur, btree_node_t, leaf_link);
+		btree_node_t *node
+		    = list_get_instance(cur, btree_node_t, leaf_link);
+		btree_key_t i;
+		
 		for (i = 0; i < node->keys; i++) {
-			uintptr_t b = node->key[i];
-			size_t j;
+			uintptr_t ptr = node->key[i];
+			size_t size;
 			
-			for (j = 0; j < (size_t) node->value[i]; j++) {
+			for (size = 0; size < (size_t) node->value[i]; size++) {
 				page_table_lock(as, false);
-
+				
 				/* Insert the new mapping */
-				page_mapping_insert(as, b + j * PAGE_SIZE,
+				page_mapping_insert(as, ptr + size * PAGE_SIZE,
 				    old_frame[frame_idx++], page_flags);
-
+				
 				page_table_unlock(as, false);
 			}
 		}
 	}
-
+	
 	free(old_frame);
-
+	
 	mutex_unlock(&area->lock);
 	mutex_unlock(&as->lock);
 	interrupts_restore(ipl);
-
+	
 	return 0;
 }
-
 
 /** Handle page fault within the current address space.
@@ -967,18 +990,17 @@
  * Interrupts are assumed disabled.
  *
- * @param page		Faulting page.
- * @param access	Access mode that caused the page fault (i.e.
- * 			read/write/exec).
- * @param istate	Pointer to the interrupted state.
- *
- * @return		AS_PF_FAULT on page fault, AS_PF_OK on success or
- * 			AS_PF_DEFER if the fault was caused by copy_to_uspace()
- * 			or copy_from_uspace().
+ * @param page   Faulting page.
+ * @param access Access mode that caused the page fault (i.e.
+ *               read/write/exec).
+ * @param istate Pointer to the interrupted state.
+ *
+ * @return AS_PF_FAULT on page fault.
+ * @return AS_PF_OK on success.
+ * @return AS_PF_DEFER if the fault was caused by copy_to_uspace()
+ *         or copy_from_uspace().
+ *
  */
 int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
 {
-	pte_t *pte;
-	as_area_t *area;
-	
 	if (!THREAD)
 		return AS_PF_FAULT;
@@ -988,14 +1010,15 @@
 	
 	mutex_lock(&AS->lock);
-	area = find_area_and_lock(AS, page);
+	as_area_t *area = find_area_and_lock(AS, page);
 	if (!area) {
 		/*
 		 * No area contained mapping for 'page'.
 		 * Signal page fault to low-level handler.
+		 *
 		 */
 		mutex_unlock(&AS->lock);
 		goto page_fault;
 	}
-
+	
 	if (area->attributes & AS_AREA_ATTR_PARTIAL) {
 		/*
@@ -1005,17 +1028,18 @@
 		mutex_unlock(&area->lock);
 		mutex_unlock(&AS->lock);
-		goto page_fault;		
-	}
-
-	if (!area->backend || !area->backend->page_fault) {
+		goto page_fault;
+	}
+	
+	if ((!area->backend) || (!area->backend->page_fault)) {
 		/*
 		 * The address space area is not backed by any backend
 		 * or the backend cannot handle page faults.
+		 *
 		 */
 		mutex_unlock(&area->lock);
 		mutex_unlock(&AS->lock);
-		goto page_fault;		
-	}
-
+		goto page_fault;
+	}
+	
 	page_table_lock(AS, false);
 	
@@ -1023,5 +1047,7 @@
 	 * To avoid race condition between two page faults on the same address,
 	 * we need to make sure the mapping has not been already inserted.
-	 */
+	 *
+	 */
+	pte_t *pte;
 	if ((pte = page_mapping_find(AS, page))) {
 		if (PTE_PRESENT(pte)) {
@@ -1039,4 +1065,5 @@
 	/*
 	 * Resort to the backend page fault handler.
+	 *
 	 */
 	if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
@@ -1051,5 +1078,5 @@
 	mutex_unlock(&AS->lock);
 	return AS_PF_OK;
-
+	
 page_fault:
 	if (THREAD->in_copy_from_uspace) {
@@ -1064,5 +1091,5 @@
 		return AS_PF_FAULT;
 	}
-
+	
 	return AS_PF_DEFER;
 }
@@ -1076,6 +1103,7 @@
  * When this function is enetered, no spinlocks may be held.
  *
- * @param old		Old address space or NULL.
- * @param new		New address space.
+ * @param old Old address space or NULL.
+ * @param new New address space.
+ *
  */
 void as_switch(as_t *old_as, as_t *new_as)
@@ -1083,12 +1111,14 @@
 	DEADLOCK_PROBE_INIT(p_asidlock);
 	preemption_disable();
+	
 retry:
 	(void) interrupts_disable();
 	if (!spinlock_trylock(&asidlock)) {
-		/* 
+		/*
 		 * Avoid deadlock with TLB shootdown.
 		 * We can enable interrupts here because
 		 * preemption is disabled. We should not be
 		 * holding any other lock.
+		 *
 		 */
 		(void) interrupts_enable();
@@ -1097,11 +1127,12 @@
 	}
 	preemption_enable();
-
+	
 	/*
 	 * First, take care of the old address space.
-	 */	
+	 */
 	if (old_as) {
 		ASSERT(old_as->cpu_refcount);
-		if((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
+		
+		if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
 			/*
 			 * The old address space is no longer active on
@@ -1109,19 +1140,23 @@
 			 * list of inactive address spaces with assigned
 			 * ASID.
+			 *
 			 */
 			ASSERT(old_as->asid != ASID_INVALID);
+			
 			list_append(&old_as->inactive_as_with_asid_link,
 			    &inactive_as_with_asid_head);
 		}
-
+		
 		/*
 		 * Perform architecture-specific tasks when the address space
 		 * is being removed from the CPU.
+		 *
 		 */
 		as_deinstall_arch(old_as);
 	}
-
+	
 	/*
 	 * Second, prepare the new address space.
+	 *
 	 */
 	if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
@@ -1131,4 +1166,5 @@
 			new_as->asid = asid_get();
 	}
+	
 #ifdef AS_PAGE_TABLE
 	SET_PTL0_ADDRESS(new_as->genarch.page_table);
@@ -1138,7 +1174,8 @@
 	 * Perform architecture-specific steps.
 	 * (e.g. write ASID to hardware register etc.)
+	 *
 	 */
 	as_install_arch(new_as);
-
+	
 	spinlock_unlock(&asidlock);
 	
@@ -1148,13 +1185,12 @@
 /** Convert address space area flags to page flags.
  *
- * @param aflags	Flags of some address space area.
- *
- * @return		Flags to be passed to page_mapping_insert().
- */
-int area_flags_to_page_flags(int aflags)
-{
-	int flags;
-
-	flags = PAGE_USER | PAGE_PRESENT;
+ * @param aflags Flags of some address space area.
+ *
+ * @return Flags to be passed to page_mapping_insert().
+ *
+ */
+unsigned int area_flags_to_page_flags(unsigned int aflags)
+{
+	unsigned int flags = PAGE_USER | PAGE_PRESENT;
 	
 	if (aflags & AS_AREA_READ)
@@ -1169,5 +1205,5 @@
 	if (aflags & AS_AREA_CACHEABLE)
 		flags |= PAGE_CACHEABLE;
-		
+	
 	return flags;
 }
@@ -1178,11 +1214,12 @@
  * Interrupts must be disabled.
  *
- * @param a		Address space area.
- *
- * @return		Flags to be used in page_mapping_insert().
- */
-int as_area_get_flags(as_area_t *a)
-{
-	return area_flags_to_page_flags(a->flags);
+ * @param area Address space area.
+ *
+ * @return Flags to be used in page_mapping_insert().
+ *
+ */
+unsigned int as_area_get_flags(as_area_t *area)
+{
+	return area_flags_to_page_flags(area->flags);
 }
 
@@ -1192,10 +1229,11 @@
  * table.
  *
- * @param flags		Flags saying whether the page table is for the kernel
- * 			address space.
- *
- * @return		First entry of the page table.
- */
-pte_t *page_table_create(int flags)
+ * @param flags Flags saying whether the page table is for the kernel
+ *              address space.
+ *
+ * @return First entry of the page table.
+ *
+ */
+pte_t *page_table_create(unsigned int flags)
 {
 	ASSERT(as_operations);
@@ -1209,5 +1247,6 @@
  * Destroy page table in architecture specific way.
  *
- * @param page_table	Physical address of PTL0.
+ * @param page_table Physical address of PTL0.
+ *
  */
 void page_table_destroy(pte_t *page_table)
@@ -1223,11 +1262,12 @@
  * This function should be called before any page_mapping_insert(),
  * page_mapping_remove() and page_mapping_find().
- * 
+ *
  * Locking order is such that address space areas must be locked
  * prior to this call. Address space can be locked prior to this
  * call in which case the lock argument is false.
  *
- * @param as		Address space.
- * @param lock		If false, do not attempt to lock as->lock.
+ * @param as   Address space.
+ * @param lock If false, do not attempt to lock as->lock.
+ *
  */
 void page_table_lock(as_t *as, bool lock)
@@ -1241,6 +1281,7 @@
 /** Unlock page table.
  *
- * @param as		Address space.
- * @param unlock	If false, do not attempt to unlock as->lock.
+ * @param as     Address space.
+ * @param unlock If false, do not attempt to unlock as->lock.
+ *
  */
 void page_table_unlock(as_t *as, bool unlock)
@@ -1257,21 +1298,19 @@
  * The address space must be locked and interrupts must be disabled.
  *
- * @param as		Address space.
- * @param va		Virtual address.
- *
- * @return		Locked address space area containing va on success or
- * 			NULL on failure.
+ * @param as Address space.
+ * @param va Virtual address.
+ *
+ * @return Locked address space area containing va on success or
+ *         NULL on failure.
+ *
  */
 as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
 {
-	as_area_t *a;
-	btree_node_t *leaf, *lnode;
-	unsigned int i;
-	
-	a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
-	if (a) {
+	btree_node_t *leaf;
+	as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
+	if (area) {
 		/* va is the base address of an address space area */
-		mutex_lock(&a->lock);
-		return a;
+		mutex_lock(&area->lock);
+		return area;
 	}
 	
@@ -1280,30 +1319,38 @@
 	 * to find out whether this is a miss or va belongs to an address
 	 * space area found there.
+	 *
 	 */
 	
 	/* First, search the leaf node itself. */
+	btree_key_t i;
+	
 	for (i = 0; i < leaf->keys; i++) {
-		a = (as_area_t *) leaf->value[i];
-		mutex_lock(&a->lock);
-		if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
-			return a;
-		}
-		mutex_unlock(&a->lock);
-	}
-
+		area = (as_area_t *) leaf->value[i];
+		
+		mutex_lock(&area->lock);
+		
+		if ((area->base <= va) && (va < area->base + area->pages * PAGE_SIZE))
+			return area;
+		
+		mutex_unlock(&area->lock);
+	}
+	
 	/*
 	 * Second, locate the left neighbour and test its last record.
 	 * Because of its position in the B+tree, it must have base < va.
-	 */
-	lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
+	 *
+	 */
+	btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
 	if (lnode) {
-		a = (as_area_t *) lnode->value[lnode->keys - 1];
-		mutex_lock(&a->lock);
-		if (va < a->base + a->pages * PAGE_SIZE) {
-			return a;
-		}
-		mutex_unlock(&a->lock);
-	}
-
+		area = (as_area_t *) lnode->value[lnode->keys - 1];
+		
+		mutex_lock(&area->lock);
+		
+		if (va < area->base + area->pages * PAGE_SIZE)
+			return area;
+		
+		mutex_unlock(&area->lock);
+	}
+	
 	return NULL;
 }
@@ -1313,20 +1360,18 @@
  * The address space must be locked and interrupts must be disabled.
  *
- * @param as		Address space.
- * @param va		Starting virtual address of the area being tested.
- * @param size		Size of the area being tested.
- * @param avoid_area	Do not touch this area. 
- *
- * @return		True if there is no conflict, false otherwise.
- */
-bool
-check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area)
-{
-	as_area_t *a;
-	btree_node_t *leaf, *node;
-	unsigned int i;
-	
+ * @param as         Address space.
+ * @param va         Starting virtual address of the area being tested.
+ * @param size       Size of the area being tested.
+ * @param avoid_area Do not touch this area.
+ *
+ * @return True if there is no conflict, false otherwise.
+ *
+ */
+bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
+    as_area_t *avoid_area)
+{
 	/*
 	 * We don't want any area to have conflicts with NULL page.
+	 *
 	 */
 	if (overlaps(va, size, NULL, PAGE_SIZE))
@@ -1339,57 +1384,73 @@
 	 * record in the left neighbour, the leftmost record in the right
 	 * neighbour and all records in the leaf node itself.
-	 */
-	
-	if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
-		if (a != avoid_area)
+	 *
+	 */
+	btree_node_t *leaf;
+	as_area_t *area =
+	    (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
+	if (area) {
+		if (area != avoid_area)
 			return false;
 	}
 	
 	/* First, check the two border cases. */
-	if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
-		a = (as_area_t *) node->value[node->keys - 1];
-		mutex_lock(&a->lock);
-		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
-			mutex_unlock(&a->lock);
+	btree_node_t *node =
+	    btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
+	if (node) {
+		area = (as_area_t *) node->value[node->keys - 1];
+		
+		mutex_lock(&area->lock);
+		
+		if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
+			mutex_unlock(&area->lock);
 			return false;
 		}
-		mutex_unlock(&a->lock);
-	}
+		
+		mutex_unlock(&area->lock);
+	}
+	
 	node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
 	if (node) {
-		a = (as_area_t *) node->value[0];
-		mutex_lock(&a->lock);
-		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
-			mutex_unlock(&a->lock);
+		area = (as_area_t *) node->value[0];
+		
+		mutex_lock(&area->lock);
+		
+		if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
+			mutex_unlock(&area->lock);
 			return false;
 		}
-		mutex_unlock(&a->lock);
+		
+		mutex_unlock(&area->lock);
 	}
 	
 	/* Second, check the leaf node. */
+	btree_key_t i;
 	for (i = 0; i < leaf->keys; i++) {
-		a = (as_area_t *) leaf->value[i];
-	
-		if (a == avoid_area)
+		area = (as_area_t *) leaf->value[i];
+		
+		if (area == avoid_area)
 			continue;
-	
-		mutex_lock(&a->lock);
-		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
-			mutex_unlock(&a->lock);
+		
+		mutex_lock(&area->lock);
+		
+		if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
+			mutex_unlock(&area->lock);
 			return false;
 		}
-		mutex_unlock(&a->lock);
-	}
-
+		
+		mutex_unlock(&area->lock);
+	}
+	
 	/*
 	 * So far, the area does not conflict with other areas.
 	 * Check if it doesn't conflict with kernel address space.
-	 */	 
+	 *
+	 */
 	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
-		return !overlaps(va, size, 
+		return !overlaps(va, size,
 		    KERNEL_ADDRESS_SPACE_START,
 		    KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
 	}
-
+	
 	return true;
 }
@@ -1397,23 +1458,23 @@
 /** Return size of the address space area with given base.
  *
- * @param base		Arbitrary address insede the address space area.
- *
- * @return		Size of the address space area in bytes or zero if it
- *			does not exist.
+ * @param base Arbitrary address inside the address space area.
+ *
+ * @return Size of the address space area in bytes or zero if it
+ *         does not exist.
+ *
  */
 size_t as_area_get_size(uintptr_t base)
 {
-	ipl_t ipl;
-	as_area_t *src_area;
 	size_t size;
-
-	ipl = interrupts_disable();
-	src_area = find_area_and_lock(AS, base);
+	
+	ipl_t ipl = interrupts_disable();
+	as_area_t *src_area = find_area_and_lock(AS, base);
+	
 	if (src_area) {
 		size = src_area->pages * PAGE_SIZE;
 		mutex_unlock(&src_area->lock);
-	} else {
+	} else
 		size = 0;
-	}
+	
 	interrupts_restore(ipl);
 	return size;
@@ -1424,33 +1485,32 @@
  * The address space area must be already locked.
  *
- * @param a		Address space area.
- * @param page		First page to be marked.
- * @param count		Number of page to be marked.
- *
- * @return		Zero on failure and non-zero on success.
- */
-int used_space_insert(as_area_t *a, uintptr_t page, size_t count)
-{
-	btree_node_t *leaf, *node;
-	size_t pages;
-	unsigned int i;
-
+ * @param area  Address space area.
+ * @param page  First page to be marked.
+ * @param count Number of page to be marked.
+ *
+ * @return Zero on failure and non-zero on success.
+ *
+ */
+int used_space_insert(as_area_t *area, uintptr_t page, size_t count)
+{
 	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
 	ASSERT(count);
-
-	pages = (size_t) btree_search(&a->used_space, page, &leaf);
+	
+	btree_node_t *leaf;
+	size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
 	if (pages) {
 		/*
 		 * We hit the beginning of some used space.
+		 *
 		 */
 		return 0;
 	}
-
+	
 	if (!leaf->keys) {
-		btree_insert(&a->used_space, page, (void *) count, leaf);
+		btree_insert(&area->used_space, page, (void *) count, leaf);
 		return 1;
 	}
-
-	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
+	
+	btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf);
 	if (node) {
 		uintptr_t left_pg = node->key[node->keys - 1];
@@ -1463,6 +1523,7 @@
 		 * somewhere between the rightmost interval of
 		 * the left neigbour and the first interval of the leaf.
-		 */
-		 
+		 *
+		 */
+		
 		if (page >= right_pg) {
 			/* Do nothing. */
@@ -1474,5 +1535,5 @@
 		    right_cnt * PAGE_SIZE)) {
 			/* The interval intersects with the right interval. */
-			return 0;			
+			return 0;
 		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
 		    (page + count * PAGE_SIZE == right_pg)) {
@@ -1480,12 +1541,14 @@
 			 * The interval can be added by merging the two already
 			 * present intervals.
+			 *
 			 */
 			node->value[node->keys - 1] += count + right_cnt;
-			btree_remove(&a->used_space, right_pg, leaf);
-			return 1; 
+			btree_remove(&area->used_space, right_pg, leaf);
+			return 1;
 		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
-			/* 
+			/*
 			 * The interval can be added by simply growing the left
 			 * interval.
+			 *
 			 */
 			node->value[node->keys - 1] += count;
@@ -1496,4 +1559,5 @@
 			 * the right interval down and increasing its size
 			 * accordingly.
+			 *
 			 */
 			leaf->value[0] += count;
@@ -1504,6 +1568,7 @@
 			 * The interval is between both neigbouring intervals,
 			 * but cannot be merged with any of them.
+			 *
 			 */
-			btree_insert(&a->used_space, page, (void *) count,
+			btree_insert(&area->used_space, page, (void *) count,
 			    leaf);
 			return 1;
@@ -1512,10 +1577,11 @@
 		uintptr_t right_pg = leaf->key[0];
 		size_t right_cnt = (size_t) leaf->value[0];
-	
+		
 		/*
 		 * Investigate the border case in which the left neighbour does
 		 * not exist but the interval fits from the left.
-		 */
-		 
+		 *
+		 */
+		
 		if (overlaps(page, count * PAGE_SIZE, right_pg,
 		    right_cnt * PAGE_SIZE)) {
@@ -1527,4 +1593,5 @@
 			 * right interval down and increasing its size
 			 * accordingly.
+			 *
 			 */
 			leaf->key[0] = page;
@@ -1535,12 +1602,13 @@
 			 * The interval doesn't adjoin with the right interval.
 			 * It must be added individually.
+			 *
 			 */
-			btree_insert(&a->used_space, page, (void *) count,
+			btree_insert(&area->used_space, page, (void *) count,
 			    leaf);
 			return 1;
 		}
 	}
-
-	node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
+	
+	node = btree_leaf_node_right_neighbour(&area->used_space, leaf);
 	if (node) {
 		uintptr_t left_pg = leaf->key[leaf->keys - 1];
@@ -1553,6 +1621,7 @@
 		 * somewhere between the leftmost interval of
 		 * the right neigbour and the last interval of the leaf.
-		 */
-
+		 *
+		 */
+		
 		if (page < left_pg) {
 			/* Do nothing. */
@@ -1564,5 +1633,5 @@
 		    right_cnt * PAGE_SIZE)) {
 			/* The interval intersects with the right interval. */
-			return 0;			
+			return 0;
 		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
 		    (page + count * PAGE_SIZE == right_pg)) {
@@ -1570,13 +1639,15 @@
 			 * The interval can be added by merging the two already
 			 * present intervals.
-			 * */
+			 *
+			 */
 			leaf->value[leaf->keys - 1] += count + right_cnt;
-			btree_remove(&a->used_space, right_pg, node);
-			return 1; 
+			btree_remove(&area->used_space, right_pg, node);
+			return 1;
 		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
 			/*
 			 * The interval can be added by simply growing the left
 			 * interval.
-			 * */
+			 *
+			 */
 			leaf->value[leaf->keys - 1] +=  count;
 			return 1;
@@ -1586,4 +1657,5 @@
 			 * the right interval down and increasing its size
 			 * accordingly.
+			 *
 			 */
 			node->value[0] += count;
@@ -1594,6 +1666,7 @@
 			 * The interval is between both neigbouring intervals,
 			 * but cannot be merged with any of them.
+			 *
 			 */
-			btree_insert(&a->used_space, page, (void *) count,
+			btree_insert(&area->used_space, page, (void *) count,
 			    leaf);
 			return 1;
@@ -1602,10 +1675,11 @@
 		uintptr_t left_pg = leaf->key[leaf->keys - 1];
 		size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
-	
+		
 		/*
 		 * Investigate the border case in which the right neighbour
 		 * does not exist but the interval fits from the right.
-		 */
-		 
+		 *
+		 */
+		
 		if (overlaps(page, count * PAGE_SIZE, left_pg,
 		    left_cnt * PAGE_SIZE)) {
@@ -1616,4 +1690,5 @@
 			 * The interval can be added by growing the left
 			 * interval.
+			 *
 			 */
 			leaf->value[leaf->keys - 1] += count;
@@ -1623,6 +1698,7 @@
 			 * The interval doesn't adjoin with the left interval.
 			 * It must be added individually.
+			 *
 			 */
-			btree_insert(&a->used_space, page, (void *) count,
+			btree_insert(&area->used_space, page, (void *) count,
 			    leaf);
 			return 1;
@@ -1634,5 +1710,7 @@
 	 * only between two other intervals of the leaf. The two border cases
 	 * were already resolved.
-	 */
+	 *
+	 */
+	btree_key_t i;
 	for (i = 1; i < leaf->keys; i++) {
 		if (page < leaf->key[i]) {
@@ -1641,9 +1719,10 @@
 			size_t left_cnt = (size_t) leaf->value[i - 1];
 			size_t right_cnt = (size_t) leaf->value[i];
-
+			
 			/*
 			 * The interval fits between left_pg and right_pg.
+			 *
 			 */
-
+			
 			if (overlaps(page, count * PAGE_SIZE, left_pg,
 			    left_cnt * PAGE_SIZE)) {
@@ -1651,4 +1730,5 @@
 				 * The interval intersects with the left
 				 * interval.
+				 *
 				 */
 				return 0;
@@ -1658,6 +1738,7 @@
 				 * The interval intersects with the right
 				 * interval.
+				 *
 				 */
-				return 0;			
+				return 0;
 			} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
 			    (page + count * PAGE_SIZE == right_pg)) {
@@ -1665,12 +1746,14 @@
 				 * The interval can be added by merging the two
 				 * already present intervals.
+				 *
 				 */
 				leaf->value[i - 1] += count + right_cnt;
-				btree_remove(&a->used_space, right_pg, leaf);
-				return 1; 
+				btree_remove(&area->used_space, right_pg, leaf);
+				return 1;
 			} else if (page == left_pg + left_cnt * PAGE_SIZE) {
 				/*
 				 * The interval can be added by simply growing
 				 * the left interval.
+				 *
 				 */
 				leaf->value[i - 1] += count;
@@ -1678,8 +1761,9 @@
 			} else if (page + count * PAGE_SIZE == right_pg) {
 				/*
-			         * The interval can be addded by simply moving
+				 * The interval can be added by simply moving
 				 * base of the right interval down and
 				 * increasing its size accordingly.
-			 	 */
+				 *
+				 */
 				leaf->value[i] += count;
 				leaf->key[i] = page;
@@ -1690,6 +1774,7 @@
 				 * intervals, but cannot be merged with any of
 				 * them.
+				 *
 				 */
-				btree_insert(&a->used_space, page,
+				btree_insert(&area->used_space, page,
 				    (void *) count, leaf);
 				return 1;
@@ -1697,5 +1782,5 @@
 		}
 	}
-
+	
 	panic("Inconsistency detected while adding %" PRIs " pages of used "
 	    "space at %p.", count, page);
@@ -1706,28 +1791,27 @@
  * The address space area must be already locked.
  *
- * @param a		Address space area.
- * @param page		First page to be marked.
- * @param count		Number of page to be marked.
- *
- * @return		Zero on failure and non-zero on success.
- */
-int used_space_remove(as_area_t *a, uintptr_t page, size_t count)
-{
-	btree_node_t *leaf, *node;
-	size_t pages;
-	unsigned int i;
-
+ * @param area  Address space area.
+ * @param page  First page to be marked.
+ * @param count Number of pages to be marked.
+ *
+ * @return Zero on failure and non-zero on success.
+ *
+ */
+int used_space_remove(as_area_t *area, uintptr_t page, size_t count)
+{
 	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
 	ASSERT(count);
-
-	pages = (size_t) btree_search(&a->used_space, page, &leaf);
+	
+	btree_node_t *leaf;
+	size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
 	if (pages) {
 		/*
 		 * We are lucky, page is the beginning of some interval.
+		 *
 		 */
 		if (count > pages) {
 			return 0;
 		} else if (count == pages) {
-			btree_remove(&a->used_space, page, leaf);
+			btree_remove(&area->used_space, page, leaf);
 			return 1;
 		} else {
@@ -1735,5 +1819,7 @@
 			 * Find the respective interval.
 			 * Decrease its size and relocate its start address.
+			 *
 			 */
+			btree_key_t i;
 			for (i = 0; i < leaf->keys; i++) {
 				if (leaf->key[i] == page) {
@@ -1746,10 +1832,10 @@
 		}
 	}
-
-	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
-	if (node && page < leaf->key[0]) {
+	
+	btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf);
+	if ((node) && (page < leaf->key[0])) {
 		uintptr_t left_pg = node->key[node->keys - 1];
 		size_t left_cnt = (size_t) node->value[node->keys - 1];
-
+		
 		if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
 		    count * PAGE_SIZE)) {
@@ -1761,4 +1847,5 @@
 				 * removed by updating the size of the bigger
 				 * interval.
+				 *
 				 */
 				node->value[node->keys - 1] -= count;
@@ -1766,6 +1853,4 @@
 			} else if (page + count * PAGE_SIZE <
 			    left_pg + left_cnt*PAGE_SIZE) {
-				size_t new_cnt;
-				
 				/*
 				 * The interval is contained in the rightmost
@@ -1774,9 +1859,10 @@
 				 * the original interval and also inserting a
 				 * new interval.
+				 *
 				 */
-				new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
+				size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
 				    (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
 				node->value[node->keys - 1] -= count + new_cnt;
-				btree_insert(&a->used_space, page +
+				btree_insert(&area->used_space, page +
 				    count * PAGE_SIZE, (void *) new_cnt, leaf);
 				return 1;
@@ -1784,15 +1870,14 @@
 		}
 		return 0;
-	} else if (page < leaf->key[0]) {
+	} else if (page < leaf->key[0])
 		return 0;
-	}
 	
 	if (page > leaf->key[leaf->keys - 1]) {
 		uintptr_t left_pg = leaf->key[leaf->keys - 1];
 		size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
-
+		
 		if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
 		    count * PAGE_SIZE)) {
-			if (page + count * PAGE_SIZE == 
+			if (page + count * PAGE_SIZE ==
 			    left_pg + left_cnt * PAGE_SIZE) {
 				/*
@@ -1800,4 +1885,5 @@
 				 * interval of the leaf and can be removed by
 				 * updating the size of the bigger interval.
+				 *
 				 */
 				leaf->value[leaf->keys - 1] -= count;
@@ -1805,6 +1891,4 @@
 			} else if (page + count * PAGE_SIZE < left_pg +
 			    left_cnt * PAGE_SIZE) {
-				size_t new_cnt;
-				
 				/*
 				 * The interval is contained in the rightmost
@@ -1813,9 +1897,10 @@
 				 * original interval and also inserting a new
 				 * interval.
+				 *
 				 */
-				new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
+				size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
 				    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
 				leaf->value[leaf->keys - 1] -= count + new_cnt;
-				btree_insert(&a->used_space, page +
+				btree_insert(&area->used_space, page +
 				    count * PAGE_SIZE, (void *) new_cnt, leaf);
 				return 1;
@@ -1823,5 +1908,5 @@
 		}
 		return 0;
-	}	
+	}
 	
 	/*
@@ -1829,9 +1914,10 @@
 	 * Now the interval can be only between intervals of the leaf. 
 	 */
+	btree_key_t i;
 	for (i = 1; i < leaf->keys - 1; i++) {
 		if (page < leaf->key[i]) {
 			uintptr_t left_pg = leaf->key[i - 1];
 			size_t left_cnt = (size_t) leaf->value[i - 1];
-
+			
 			/*
 			 * Now the interval is between intervals corresponding
@@ -1847,4 +1933,5 @@
 					 * be removed by updating the size of
 					 * the bigger interval.
+					 *
 					 */
 					leaf->value[i - 1] -= count;
@@ -1852,6 +1939,4 @@
 				} else if (page + count * PAGE_SIZE <
 				    left_pg + left_cnt * PAGE_SIZE) {
-					size_t new_cnt;
-				
 					/*
 					 * The interval is contained in the
@@ -1861,10 +1946,10 @@
 					 * also inserting a new interval.
 					 */
-					new_cnt = ((left_pg +
+					size_t new_cnt = ((left_pg +
 					    left_cnt * PAGE_SIZE) -
 					    (page + count * PAGE_SIZE)) >>
 					    PAGE_WIDTH;
 					leaf->value[i - 1] -= count + new_cnt;
-					btree_insert(&a->used_space, page +
+					btree_insert(&area->used_space, page +
 					    count * PAGE_SIZE, (void *) new_cnt,
 					    leaf);
@@ -1875,5 +1960,5 @@
 		}
 	}
-
+	
 error:
 	panic("Inconsistency detected while removing %" PRIs " pages of used "
@@ -1885,12 +1970,14 @@
  * If the reference count drops to 0, the sh_info is deallocated.
  *
- * @param sh_info	Pointer to address space area share info.
+ * @param sh_info Pointer to address space area share info.
+ *
  */
 void sh_info_remove_reference(share_info_t *sh_info)
 {
 	bool dealloc = false;
-
+	
 	mutex_lock(&sh_info->lock);
 	ASSERT(sh_info->refcount);
+	
 	if (--sh_info->refcount == 0) {
 		dealloc = true;
@@ -1903,9 +1990,9 @@
 		for (cur = sh_info->pagemap.leaf_head.next;
 		    cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
-			btree_node_t *node;
-			unsigned int i;
+			btree_node_t *node
+			    = list_get_instance(cur, btree_node_t, leaf_link);
+			btree_key_t i;
 			
-			node = list_get_instance(cur, btree_node_t, leaf_link);
-			for (i = 0; i < node->keys; i++) 
+			for (i = 0; i < node->keys; i++)
 				frame_free((uintptr_t) node->value[i]);
 		}
@@ -1925,5 +2012,5 @@
 
 /** Wrapper for as_area_create(). */
-unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
+unative_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags)
 {
 	if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
@@ -1935,5 +2022,5 @@
 
 /** Wrapper for as_area_resize(). */
-unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
+unative_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
 {
 	return (unative_t) as_area_resize(AS, address, size, 0);
@@ -1941,5 +2028,5 @@
 
 /** Wrapper for as_area_change_flags(). */
-unative_t sys_as_area_change_flags(uintptr_t address, int flags)
+unative_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
 {
 	return (unative_t) as_area_change_flags(AS, flags, address);
@@ -1954,77 +2041,69 @@
 /** Get list of adress space areas.
  *
- * @param as		Address space.
- * @param obuf		Place to save pointer to returned buffer.
- * @param osize		Place to save size of returned buffer.
+ * @param as    Address space.
+ * @param obuf  Place to save pointer to returned buffer.
+ * @param osize Place to save size of returned buffer.
+ *
  */
 void as_get_area_info(as_t *as, as_area_info_t **obuf, size_t *osize)
 {
-	ipl_t ipl;
-	size_t area_cnt, area_idx, i;
+	ipl_t ipl = interrupts_disable();
+	mutex_lock(&as->lock);
+	
+	/* First pass, count number of areas. */
+	
+	size_t area_cnt = 0;
 	link_t *cur;
-
-	as_area_info_t *info;
-	size_t isize;
-
-	ipl = interrupts_disable();
-	mutex_lock(&as->lock);
-
-	/* First pass, count number of areas. */
-
-	area_cnt = 0;
-
+	
 	for (cur = as->as_area_btree.leaf_head.next;
 	    cur != &as->as_area_btree.leaf_head; cur = cur->next) {
-		btree_node_t *node;
-
-		node = list_get_instance(cur, btree_node_t, leaf_link);
+		btree_node_t *node =
+		    list_get_instance(cur, btree_node_t, leaf_link);
 		area_cnt += node->keys;
 	}
-
-        isize = area_cnt * sizeof(as_area_info_t);
-	info = malloc(isize, 0);
-
+	
+	size_t isize = area_cnt * sizeof(as_area_info_t);
+	as_area_info_t *info = malloc(isize, 0);
+	
 	/* Second pass, record data. */
-
-	area_idx = 0;
-
+	
+	size_t area_idx = 0;
+	
 	for (cur = as->as_area_btree.leaf_head.next;
 	    cur != &as->as_area_btree.leaf_head; cur = cur->next) {
-		btree_node_t *node;
-
-		node = list_get_instance(cur, btree_node_t, leaf_link);
-
+		btree_node_t *node =
+		    list_get_instance(cur, btree_node_t, leaf_link);
+		btree_key_t i;
+		
 		for (i = 0; i < node->keys; i++) {
 			as_area_t *area = node->value[i];
-
+			
 			ASSERT(area_idx < area_cnt);
 			mutex_lock(&area->lock);
-
+			
 			info[area_idx].start_addr = area->base;
 			info[area_idx].size = FRAMES2SIZE(area->pages);
 			info[area_idx].flags = area->flags;
 			++area_idx;
-
+			
 			mutex_unlock(&area->lock);
 		}
 	}
-
+	
 	mutex_unlock(&as->lock);
 	interrupts_restore(ipl);
-
+	
 	*obuf = info;
 	*osize = isize;
 }
 
-
 /** Print out information about address space.
  *
- * @param as		Address space.
+ * @param as Address space.
+ *
  */
 void as_print(as_t *as)
 {
-	ipl_t ipl;
-	
-	ipl = interrupts_disable();
+	ipl_t ipl = interrupts_disable();
 	mutex_lock(&as->lock);
 	
@@ -2033,12 +2112,11 @@
 	for (cur = as->as_area_btree.leaf_head.next;
 	    cur != &as->as_area_btree.leaf_head; cur = cur->next) {
-		btree_node_t *node;
-		
-		node = list_get_instance(cur, btree_node_t, leaf_link);
-		
-		unsigned int i;
+		btree_node_t *node
+		    = list_get_instance(cur, btree_node_t, leaf_link);
+		btree_key_t i;
+		
 		for (i = 0; i < node->keys; i++) {
 			as_area_t *area = node->value[i];
-		
+			
 			mutex_lock(&area->lock);
 			printf("as_area: %p, base=%p, pages=%" PRIs
Index: kernel/generic/src/mm/frame.c
===================================================================
--- kernel/generic/src/mm/frame.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/mm/frame.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -66,8 +66,8 @@
  * available.
  */
-mutex_t mem_avail_mtx;
-condvar_t mem_avail_cv;
-size_t mem_avail_req = 0;  /**< Number of frames requested. */
-size_t mem_avail_gen = 0;  /**< Generation counter. */
+static mutex_t mem_avail_mtx;
+static condvar_t mem_avail_cv;
+static size_t mem_avail_req = 0;  /**< Number of frames requested. */
+static size_t mem_avail_gen = 0;  /**< Generation counter. */
 
 /********************/
@@ -171,5 +171,5 @@
 	return total;
 }
-#endif
+#endif /* CONFIG_DEBUG */
 
 /** Find a zone with a given frames.
@@ -199,4 +199,5 @@
 		if (i >= zones.count)
 			i = 0;
+		
 	} while (i != hint);
 	
@@ -242,4 +243,5 @@
 		if (i >= zones.count)
 			i = 0;
+		
 	} while (i != hint);
 	
@@ -296,5 +298,5 @@
 		index = (frame_index(zone, frame)) +
 		    (1 << frame->buddy_order);
-	} else {	/* is_right */
+	} else {  /* is_right */
 		index = (frame_index(zone, frame)) -
 		    (1 << frame->buddy_order);
@@ -673,6 +675,5 @@
 bool zone_merge(size_t z1, size_t z2)
 {
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&zones.lock);
+	irq_spinlock_lock(&zones.lock, true);
 	
 	bool ret = true;
@@ -744,6 +745,5 @@
 	
 errout:
-	spinlock_unlock(&zones.lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&zones.lock, true);
 	
 	return ret;
@@ -777,5 +777,6 @@
  *
  */
-static void zone_construct(zone_t *zone, buddy_system_t *buddy, pfn_t start, size_t count, zone_flags_t flags)
+static void zone_construct(zone_t *zone, buddy_system_t *buddy, pfn_t start,
+    size_t count, zone_flags_t flags)
 {
 	zone->base = start;
@@ -841,8 +842,8 @@
  *
  */
-size_t zone_create(pfn_t start, size_t count, pfn_t confframe, zone_flags_t flags)
-{
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&zones.lock);
+size_t zone_create(pfn_t start, size_t count, pfn_t confframe,
+    zone_flags_t flags)
+{
+	irq_spinlock_lock(&zones.lock, true);
 	
 	if (zone_flags_available(flags)) {  /* Create available zone */
@@ -889,6 +890,5 @@
 		size_t znum = zones_insert_zone(start, count);
 		if (znum == (size_t) -1) {
-			spinlock_unlock(&zones.lock);
-			interrupts_restore(ipl);
+			irq_spinlock_unlock(&zones.lock, true);
 			return (size_t) -1;
 		}
@@ -905,6 +905,5 @@
 		}
 		
-		spinlock_unlock(&zones.lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&zones.lock, true);
 		
 		return znum;
@@ -914,12 +913,10 @@
 	size_t znum = zones_insert_zone(start, count);
 	if (znum == (size_t) -1) {
-		spinlock_unlock(&zones.lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&zones.lock, true);
 		return (size_t) -1;
 	}
 	zone_construct(&zones.info[znum], NULL, start, count, flags);
 	
-	spinlock_unlock(&zones.lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&zones.lock, true);
 	
 	return znum;
@@ -933,6 +930,5 @@
 void frame_set_parent(pfn_t pfn, void *data, size_t hint)
 {
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&zones.lock);
+	irq_spinlock_lock(&zones.lock, true);
 	
 	size_t znum = find_zone(pfn, 1, hint);
@@ -943,12 +939,10 @@
 	    pfn - zones.info[znum].base)->parent = data;
 	
-	spinlock_unlock(&zones.lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&zones.lock, true);
 }
 
 void *frame_get_parent(pfn_t pfn, size_t hint)
 {
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&zones.lock);
+	irq_spinlock_lock(&zones.lock, true);
 	
 	size_t znum = find_zone(pfn, 1, hint);
@@ -959,6 +953,5 @@
 	    pfn - zones.info[znum].base)->parent;
 	
-	spinlock_unlock(&zones.lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&zones.lock, true);
 	
 	return res;
@@ -977,10 +970,8 @@
 {
 	size_t size = ((size_t) 1) << order;
-	ipl_t ipl;
 	size_t hint = pzone ? (*pzone) : 0;
 	
 loop:
-	ipl = interrupts_disable();
-	spinlock_lock(&zones.lock);
+	irq_spinlock_lock(&zones.lock, true);
 	
 	/*
@@ -993,11 +984,7 @@
 	   if it does not help, reclaim all */
 	if ((znum == (size_t) -1) && (!(flags & FRAME_NO_RECLAIM))) {
-		spinlock_unlock(&zones.lock);
-		interrupts_restore(ipl);
-		
+		irq_spinlock_unlock(&zones.lock, true);
 		size_t freed = slab_reclaim(0);
-		
-		ipl = interrupts_disable();
-		spinlock_lock(&zones.lock);
+		irq_spinlock_lock(&zones.lock, true);
 		
 		if (freed > 0)
@@ -1006,11 +993,7 @@
 		
 		if (znum == (size_t) -1) {
-			spinlock_unlock(&zones.lock);
-			interrupts_restore(ipl);
-			
+			irq_spinlock_unlock(&zones.lock, true);
 			freed = slab_reclaim(SLAB_RECLAIM_ALL);
-			
-			ipl = interrupts_disable();
-			spinlock_lock(&zones.lock);
+			irq_spinlock_lock(&zones.lock, true);
 			
 			if (freed > 0)
@@ -1022,6 +1005,5 @@
 	if (znum == (size_t) -1) {
 		if (flags & FRAME_ATOMIC) {
-			spinlock_unlock(&zones.lock);
-			interrupts_restore(ipl);
+			irq_spinlock_unlock(&zones.lock, true);
 			return NULL;
 		}
@@ -1031,7 +1013,6 @@
 #endif
 		
-		spinlock_unlock(&zones.lock);
-		interrupts_restore(ipl);
-
+		irq_spinlock_unlock(&zones.lock, true);
+		
 		if (!THREAD)
 			panic("Cannot wait for memory to become available.");
@@ -1069,6 +1050,5 @@
 	    + zones.info[znum].base;
 	
-	spinlock_unlock(&zones.lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&zones.lock, true);
 	
 	if (pzone)
@@ -1092,6 +1072,5 @@
 void frame_free(uintptr_t frame)
 {
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&zones.lock);
+	irq_spinlock_lock(&zones.lock, true);
 	
 	/*
@@ -1105,6 +1084,5 @@
 	zone_frame_free(&zones.info[znum], pfn - zones.info[znum].base);
 	
-	spinlock_unlock(&zones.lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&zones.lock, true);
 	
 	/*
@@ -1132,6 +1110,5 @@
 void frame_reference_add(pfn_t pfn)
 {
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&zones.lock);
+	irq_spinlock_lock(&zones.lock, true);
 	
 	/*
@@ -1144,13 +1121,13 @@
 	zones.info[znum].frames[pfn - zones.info[znum].base].refcount++;
 	
-	spinlock_unlock(&zones.lock);
-	interrupts_restore(ipl);
-}
-
-/** Mark given range unavailable in frame zones. */
+	irq_spinlock_unlock(&zones.lock, true);
+}
+
+/** Mark given range unavailable in frame zones.
+ *
+ */
 void frame_mark_unavailable(pfn_t start, size_t count)
 {
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&zones.lock);
+	irq_spinlock_lock(&zones.lock, true);
 	
 	size_t i;
@@ -1164,14 +1141,15 @@
 	}
 	
-	spinlock_unlock(&zones.lock);
-	interrupts_restore(ipl);
-}
-
-/** Initialize physical memory management. */
+	irq_spinlock_unlock(&zones.lock, true);
+}
+
+/** Initialize physical memory management.
+ *
+ */
 void frame_init(void)
 {
 	if (config.cpu_active == 1) {
 		zones.count = 0;
-		spinlock_initialize(&zones.lock, "zones.lock");
+		irq_spinlock_initialize(&zones.lock, "frame.zones.lock");
 		mutex_initialize(&mem_avail_mtx, MUTEX_ACTIVE);
 		condvar_initialize(&mem_avail_cv);
@@ -1204,9 +1182,10 @@
 }
 
-/** Return total size of all zones. */
+/** Return total size of all zones.
+ *
+ */
 uint64_t zones_total_size(void)
 {
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&zones.lock);
+	irq_spinlock_lock(&zones.lock, true);
 	
 	uint64_t total = 0;
@@ -1215,6 +1194,5 @@
 		total += (uint64_t) FRAMES2SIZE(zones.info[i].count);
 	
-	spinlock_unlock(&zones.lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&zones.lock, true);
 	
 	return total;
@@ -1229,6 +1207,5 @@
 	ASSERT(free != NULL);
 	
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&zones.lock);
+	irq_spinlock_lock(&zones.lock, true);
 	
 	*total = 0;
@@ -1248,9 +1225,10 @@
 	}
 	
-	spinlock_unlock(&zones.lock);
-	interrupts_restore(ipl);
-}
-
-/** Prints list of zones. */
+	irq_spinlock_unlock(&zones.lock, true);
+}
+
+/** Prints list of zones.
+ *
+ */
 void zones_print_list(void)
 {
@@ -1278,10 +1256,8 @@
 	size_t i;
 	for (i = 0;; i++) {
-		ipl_t ipl = interrupts_disable();
-		spinlock_lock(&zones.lock);
+		irq_spinlock_lock(&zones.lock, true);
 		
 		if (i >= zones.count) {
-			spinlock_unlock(&zones.lock);
-			interrupts_restore(ipl);
+			irq_spinlock_unlock(&zones.lock, true);
 			break;
 		}
@@ -1293,6 +1269,5 @@
 		size_t busy_count = zones.info[i].busy_count;
 		
-		spinlock_unlock(&zones.lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&zones.lock, true);
 		
 		bool available = zone_flags_available(flags);
@@ -1328,6 +1303,5 @@
 void zone_print_one(size_t num)
 {
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&zones.lock);
+	irq_spinlock_lock(&zones.lock, true);
 	size_t znum = (size_t) -1;
 	
@@ -1341,6 +1315,5 @@
 	
 	if (znum == (size_t) -1) {
-		spinlock_unlock(&zones.lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&zones.lock, true);
 		printf("Zone not found.\n");
 		return;
@@ -1353,6 +1326,5 @@
 	size_t busy_count = zones.info[i].busy_count;
 	
-	spinlock_unlock(&zones.lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&zones.lock, true);
 	
 	bool available = zone_flags_available(flags);
Index: kernel/generic/src/mm/page.c
===================================================================
--- kernel/generic/src/mm/page.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/mm/page.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -33,5 +33,5 @@
 /**
  * @file
- * @brief	Virtual Address Translation subsystem.
+ * @brief Virtual Address Translation subsystem.
  *
  * This file contains code for creating, destroying and searching
@@ -39,4 +39,5 @@
  * Functions here are mere wrappers that call the real implementation.
  * They however, define the single interface. 
+ *
  */
 
@@ -55,4 +56,5 @@
  * will do an implicit serialization by virtue of running the TLB shootdown
  * interrupt handler.
+ *
  */
 
@@ -83,18 +85,18 @@
  * of page boundaries.
  *
- * @param s		Address of the structure.
- * @param size		Size of the structure.
+ * @param addr Address of the structure.
+ * @param size Size of the structure.
+ *
  */
-void map_structure(uintptr_t s, size_t size)
+void map_structure(uintptr_t addr, size_t size)
 {
-	int i, cnt, length;
-
-	length = size + (s - (s & ~(PAGE_SIZE - 1)));
-	cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0);
-
+	size_t length = size + (addr - (addr & ~(PAGE_SIZE - 1)));
+	size_t cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0);
+	
+	size_t i;
 	for (i = 0; i < cnt; i++)
-		page_mapping_insert(AS_KERNEL, s + i * PAGE_SIZE,
-		    s + i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE);
-
+		page_mapping_insert(AS_KERNEL, addr + i * PAGE_SIZE,
+		    addr + i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE);
+	
 	/* Repel prefetched accesses to the old mapping. */
 	memory_barrier();
@@ -108,11 +110,13 @@
  * The page table must be locked and interrupts must be disabled.
  *
- * @param as		Address space to wich page belongs.
- * @param page		Virtual address of the page to be mapped.
- * @param frame		Physical address of memory frame to which the mapping is
- * 			done.
- * @param flags		Flags to be used for mapping.
+ * @param as    Address space to which page belongs.
+ * @param page  Virtual address of the page to be mapped.
+ * @param frame Physical address of memory frame to which the mapping is
+ *              done.
+ * @param flags Flags to be used for mapping.
+ *
  */
-void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags)
+void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
+    unsigned int flags)
 {
 	ASSERT(page_mapping_operations);
@@ -133,6 +137,7 @@
  * The page table must be locked and interrupts must be disabled.
  *
- * @param as		Address space to wich page belongs.
- * @param page		Virtual address of the page to be demapped.
+ * @param as   Address space to which page belongs.
+ * @param page Virtual address of the page to be demapped.
+ *
  */
 void page_mapping_remove(as_t *as, uintptr_t page)
@@ -142,5 +147,5 @@
 	
 	page_mapping_operations->mapping_remove(as, page);
-
+	
 	/* Repel prefetched accesses to the old mapping. */
 	memory_barrier();
@@ -153,9 +158,10 @@
  * The page table must be locked and interrupts must be disabled.
  *
- * @param as		Address space to wich page belongs.
- * @param page		Virtual page.
+ * @param as   Address space to which page belongs.
+ * @param page Virtual page.
  *
- * @return		NULL if there is no such mapping; requested mapping
- * 			otherwise.
+ * @return NULL if there is no such mapping; requested mapping
+ *         otherwise.
+ *
  */
 pte_t *page_mapping_find(as_t *as, uintptr_t page)
@@ -163,5 +169,5 @@
 	ASSERT(page_mapping_operations);
 	ASSERT(page_mapping_operations->mapping_find);
-
+	
 	return page_mapping_operations->mapping_find(as, page);
 }
Index: kernel/generic/src/mm/slab.c
===================================================================
--- kernel/generic/src/mm/slab.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/mm/slab.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -33,5 +33,5 @@
 /**
  * @file
- * @brief	Slab allocator.
+ * @brief Slab allocator.
  *
  * The slab allocator is closely modelled after OpenSolaris slab allocator.
@@ -50,5 +50,5 @@
  *
  * The slab allocator supports per-CPU caches ('magazines') to facilitate
- * good SMP scaling. 
+ * good SMP scaling.
  *
  * When a new object is being allocated, it is first checked, if it is 
@@ -65,9 +65,9 @@
  * thrashing when somebody is allocating/deallocating 1 item at the magazine
  * size boundary. LIFO order is enforced, which should avoid fragmentation
- * as much as possible. 
- *  
+ * as much as possible.
+ *
  * Every cache contains list of full slabs and list of partially full slabs.
  * Empty slabs are immediately freed (thrashing will be avoided because
- * of magazines). 
+ * of magazines).
  *
  * The slab information structure is kept inside the data area, if possible.
@@ -95,7 +95,8 @@
  *
  * @todo
- * it might be good to add granularity of locks even to slab level,
+ * It might be good to add granularity of locks even to slab level,
  * we could then try_spinlock over all partial slabs and thus improve
- * scalability even on slab level
+ * scalability even on slab level.
+ *
  */
 
@@ -114,11 +115,13 @@
 #include <macros.h>
 
-SPINLOCK_INITIALIZE(slab_cache_lock);
+IRQ_SPINLOCK_STATIC_INITIALIZE(slab_cache_lock);
 static LIST_INITIALIZE(slab_cache_list);
 
 /** Magazine cache */
 static slab_cache_t mag_cache;
+
 /** Cache for cache descriptors */
 static slab_cache_t slab_cache_cache;
+
 /** Cache for external slab descriptors
  * This time we want per-cpu cache, so do not make it static
@@ -128,6 +131,8 @@
  */
 static slab_cache_t *slab_extern_cache;
+
 /** Caches for malloc */
 static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
+
 static const char *malloc_names[] =  {
 	"malloc-16",
@@ -154,34 +159,36 @@
 /** Slab descriptor */
 typedef struct {
-	slab_cache_t *cache; 	/**< Pointer to parent cache. */
-	link_t link;       	/**< List of full/partial slabs. */
-	void *start;       	/**< Start address of first available item. */
-	size_t available; 	/**< Count of available items in this slab. */
-	size_t nextavail; 	/**< The index of next available item. */
+	slab_cache_t *cache;  /**< Pointer to parent cache. */
+	link_t link;          /**< List of full/partial slabs. */
+	void *start;          /**< Start address of first available item. */
+	size_t available;     /**< Count of available items in this slab. */
+	size_t nextavail;     /**< The index of next available item. */
 } slab_t;
 
 #ifdef CONFIG_DEBUG
-static int _slab_initialized = 0;
+static unsigned int _slab_initialized = 0;
 #endif
 
 /**************************************/
 /* Slab allocation functions          */
-
-/**
- * Allocate frames for slab space and initialize
- *
- */
-static slab_t *slab_space_alloc(slab_cache_t *cache, int flags)
-{
-	void *data;
+/**************************************/
+
+/** Allocate frames for slab space and initialize
+ *
+ */
+static slab_t *slab_space_alloc(slab_cache_t *cache, unsigned int flags)
+{
+	
+	
+	size_t zone = 0;
+	
+	void *data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
+	if (!data) {
+		return NULL;
+	}
+	
 	slab_t *slab;
 	size_t fsize;
-	unsigned int i;
-	size_t zone = 0;
-	
-	data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
-	if (!data) {
-		return NULL;
-	}
+	
 	if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
 		slab = slab_alloc(slab_extern_cache, flags);
@@ -196,61 +203,62 @@
 	
 	/* Fill in slab structures */
-	for (i = 0; i < ((unsigned int) 1 << cache->order); i++)
+	size_t i;
+	for (i = 0; i < ((size_t) 1 << cache->order); i++)
 		frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone);
-
+	
 	slab->start = data;
 	slab->available = cache->objects;
 	slab->nextavail = 0;
 	slab->cache = cache;
-
+	
 	for (i = 0; i < cache->objects; i++)
-		*((int *) (slab->start + i*cache->size)) = i + 1;
-
+		*((size_t *) (slab->start + i * cache->size)) = i + 1;
+	
 	atomic_inc(&cache->allocated_slabs);
 	return slab;
 }
 
-/**
- * Deallocate space associated with slab
+/** Deallocate space associated with slab
  *
  * @return number of freed frames
+ *
  */
 static size_t slab_space_free(slab_cache_t *cache, slab_t *slab)
 {
 	frame_free(KA2PA(slab->start));
-	if (! (cache->flags & SLAB_CACHE_SLINSIDE))
+	if (!(cache->flags & SLAB_CACHE_SLINSIDE))
 		slab_free(slab_extern_cache, slab);
-
+	
 	atomic_dec(&cache->allocated_slabs);
 	
-	return 1 << cache->order;
+	return (1 << cache->order);
 }
 
 /** Map object to slab structure */
-static slab_t * obj2slab(void *obj)
+static slab_t *obj2slab(void *obj)
 {
 	return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
 }
 
-/**************************************/
+/******************/
 /* Slab functions */
-
-
-/**
- * Return object to slab and call a destructor
+/******************/
+
+/** Return object to slab and call a destructor
  *
  * @param slab If the caller knows directly slab of the object, otherwise NULL
  *
  * @return Number of freed pages
+ *
  */
 static size_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab)
 {
-	int freed = 0;
-
 	if (!slab)
 		slab = obj2slab(obj);
-
+	
 	ASSERT(slab->cache == cache);
-
+	
+	size_t freed = 0;
+	
 	if (cache->destructor)
 		freed = cache->destructor(obj);
@@ -258,9 +266,9 @@
 	spinlock_lock(&cache->slablock);
 	ASSERT(slab->available < cache->objects);
-
-	*((int *)obj) = slab->nextavail;
+	
+	*((size_t *) obj) = slab->nextavail;
 	slab->nextavail = (obj - slab->start) / cache->size;
 	slab->available++;
-
+	
 	/* Move it to correct list */
 	if (slab->available == cache->objects) {
@@ -268,7 +276,6 @@
 		list_remove(&slab->link);
 		spinlock_unlock(&cache->slablock);
-
+		
 		return freed + slab_space_free(cache, slab);
-
 	} else if (slab->available == 1) {
 		/* It was in full, move to partial */
@@ -276,26 +283,28 @@
 		list_prepend(&slab->link, &cache->partial_slabs);
 	}
+	
 	spinlock_unlock(&cache->slablock);
 	return freed;
 }
 
-/**
- * Take new object from slab or create new if needed
+/** Take new object from slab or create new if needed
  *
  * @return Object address or null
+ *
  */
 static void *slab_obj_create(slab_cache_t *cache, int flags)
 {
+	spinlock_lock(&cache->slablock);
+	
 	slab_t *slab;
-	void *obj;
-
-	spinlock_lock(&cache->slablock);
-
+	
 	if (list_empty(&cache->partial_slabs)) {
-		/* Allow recursion and reclaiming
+		/*
+		 * Allow recursion and reclaiming
 		 * - this should work, as the slab control structures
 		 *   are small and do not need to allocate with anything
 		 *   other than frame_alloc when they are allocating,
 		 *   that's why we should get recursion at most 1-level deep
+		 *
 		 */
 		spinlock_unlock(&cache->slablock);
@@ -303,4 +312,5 @@
 		if (!slab)
 			return NULL;
+		
 		spinlock_lock(&cache->slablock);
 	} else {
@@ -309,37 +319,39 @@
 		list_remove(&slab->link);
 	}
-	obj = slab->start + slab->nextavail * cache->size;
-	slab->nextavail = *((int *)obj);
+	
+	void *obj = slab->start + slab->nextavail * cache->size;
+	slab->nextavail = *((size_t *) obj);
 	slab->available--;
-
+	
 	if (!slab->available)
 		list_prepend(&slab->link, &cache->full_slabs);
 	else
 		list_prepend(&slab->link, &cache->partial_slabs);
-
+	
 	spinlock_unlock(&cache->slablock);
-
-	if (cache->constructor && cache->constructor(obj, flags)) {
+	
+	if ((cache->constructor) && (cache->constructor(obj, flags))) {
 		/* Bad, bad, construction failed */
 		slab_obj_destroy(cache, obj, slab);
 		return NULL;
 	}
+	
 	return obj;
 }
 
-/**************************************/
+/****************************/
 /* CPU-Cache slab functions */
-
-/**
- * Finds a full magazine in cache, takes it from list
- * and returns it 
- *
- * @param first If true, return first, else last mag
- */
-static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, int first)
+/****************************/
+
+/** Find a full magazine in cache, take it from list and return it
+ *
+ * @param first If true, return first, else last mag.
+ *
+ */
+static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, bool first)
 {
 	slab_magazine_t *mag = NULL;
 	link_t *cur;
-
+	
 	spinlock_lock(&cache->maglock);
 	if (!list_empty(&cache->magazines)) {
@@ -348,17 +360,21 @@
 		else
 			cur = cache->magazines.prev;
+		
 		mag = list_get_instance(cur, slab_magazine_t, link);
 		list_remove(&mag->link);
 		atomic_dec(&cache->magazine_counter);
 	}
+	
 	spinlock_unlock(&cache->maglock);
 	return mag;
 }
 
-/** Prepend magazine to magazine list in cache */
+/** Prepend magazine to magazine list in cache
+ *
+ */
 static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
 {
 	spinlock_lock(&cache->maglock);
-
+	
 	list_prepend(&mag->link, &cache->magazines);
 	atomic_inc(&cache->magazine_counter);
@@ -367,14 +383,14 @@
 }
 
-/**
- * Free all objects in magazine and free memory associated with magazine
+/** Free all objects in magazine and free memory associated with magazine
  *
  * @return Number of freed pages
+ *
  */
 static size_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag)
 {
-	unsigned int i;
+	size_t i;
 	size_t frames = 0;
-
+	
 	for (i = 0; i < mag->busy; i++) {
 		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
@@ -383,24 +399,23 @@
 	
 	slab_free(&mag_cache, mag);
-
+	
 	return frames;
 }
 
-/**
- * Find full magazine, set it as current and return it
+/** Find full magazine, set it as current and return it
  *
  * Assume cpu_magazine lock is held
+ *
  */
 static slab_magazine_t *get_full_current_mag(slab_cache_t *cache)
 {
-	slab_magazine_t *cmag, *lastmag, *newmag;
-
-	cmag = cache->mag_cache[CPU->id].current;
-	lastmag = cache->mag_cache[CPU->id].last;
+	slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
+	slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
+	
 	if (cmag) { /* First try local CPU magazines */
 		if (cmag->busy)
 			return cmag;
-
-		if (lastmag && lastmag->busy) {
+		
+		if ((lastmag) && (lastmag->busy)) {
 			cache->mag_cache[CPU->id].current = lastmag;
 			cache->mag_cache[CPU->id].last = cmag;
@@ -408,39 +423,40 @@
 		}
 	}
+	
 	/* Local magazines are empty, import one from magazine list */
-	newmag = get_mag_from_cache(cache, 1);
+	slab_magazine_t *newmag = get_mag_from_cache(cache, 1);
 	if (!newmag)
 		return NULL;
-
+	
 	if (lastmag)
 		magazine_destroy(cache, lastmag);
-
+	
 	cache->mag_cache[CPU->id].last = cmag;
 	cache->mag_cache[CPU->id].current = newmag;
+	
 	return newmag;
 }
 
-/**
- * Try to find object in CPU-cache magazines
+/** Try to find object in CPU-cache magazines
  *
  * @return Pointer to object or NULL if not available
+ *
  */
 static void *magazine_obj_get(slab_cache_t *cache)
 {
-	slab_magazine_t *mag;
-	void *obj;
-
 	if (!CPU)
 		return NULL;
-
+	
 	spinlock_lock(&cache->mag_cache[CPU->id].lock);
-
-	mag = get_full_current_mag(cache);
+	
+	slab_magazine_t *mag = get_full_current_mag(cache);
 	if (!mag) {
 		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
 		return NULL;
 	}
-	obj = mag->objs[--mag->busy];
+	
+	void *obj = mag->objs[--mag->busy];
 	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
+	
 	atomic_dec(&cache->cached_objs);
 	
@@ -448,28 +464,25 @@
 }
 
-/**
- * Assure that the current magazine is empty, return pointer to it, or NULL if 
- * no empty magazine is available and cannot be allocated
+/** Assure that the current magazine is empty, return pointer to it,
+ * or NULL if no empty magazine is available and cannot be allocated
  *
  * Assume mag_cache[CPU->id].lock is held
  *
- * We have 2 magazines bound to processor. 
- * First try the current. 
- *  If full, try the last.
- *   If full, put to magazines list.
- *   allocate new, exchange last & current
+ * We have 2 magazines bound to processor.
+ * First try the current.
+ * If full, try the last.
+ * If full, put to magazines list.
  *
  */
 static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache)
 {
-	slab_magazine_t *cmag,*lastmag,*newmag;
-
-	cmag = cache->mag_cache[CPU->id].current;
-	lastmag = cache->mag_cache[CPU->id].last;
-
+	slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
+	slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
+	
 	if (cmag) {
 		if (cmag->busy < cmag->size)
 			return cmag;
-		if (lastmag && lastmag->busy < lastmag->size) {
+		
+		if ((lastmag) && (lastmag->busy < lastmag->size)) {
 			cache->mag_cache[CPU->id].last = cmag;
 			cache->mag_cache[CPU->id].current = lastmag;
@@ -477,40 +490,45 @@
 		}
 	}
+	
 	/* current | last are full | nonexistent, allocate new */
-	/* We do not want to sleep just because of caching */
-	/* Especially we do not want reclaiming to start, as 
-	 * this would deadlock */
-	newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
+	
+	/*
+	 * We do not want to sleep just because of caching,
+	 * especially we do not want reclaiming to start, as
+	 * this would deadlock.
+	 *
+	 */
+	slab_magazine_t *newmag = slab_alloc(&mag_cache,
+	    FRAME_ATOMIC | FRAME_NO_RECLAIM);
 	if (!newmag)
 		return NULL;
+	
 	newmag->size = SLAB_MAG_SIZE;
 	newmag->busy = 0;
-
+	
 	/* Flush last to magazine list */
 	if (lastmag)
 		put_mag_to_cache(cache, lastmag);
-
+	
 	/* Move current as last, save new as current */
-	cache->mag_cache[CPU->id].last = cmag;	
-	cache->mag_cache[CPU->id].current = newmag;	
-
+	cache->mag_cache[CPU->id].last = cmag;
+	cache->mag_cache[CPU->id].current = newmag;
+	
 	return newmag;
 }
 
-/**
- * Put object into CPU-cache magazine
- *
- * @return 0 - success, -1 - could not get memory
+/** Put object into CPU-cache magazine
+ *
+ * @return 0 on success, -1 on no memory
+ *
  */
 static int magazine_obj_put(slab_cache_t *cache, void *obj)
 {
-	slab_magazine_t *mag;
-
 	if (!CPU)
 		return -1;
-
+	
 	spinlock_lock(&cache->mag_cache[CPU->id].lock);
-
-	mag = make_empty_current_mag(cache);
+	
+	slab_magazine_t *mag = make_empty_current_mag(cache);
 	if (!mag) {
 		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
@@ -519,94 +537,101 @@
 	
 	mag->objs[mag->busy++] = obj;
-
+	
 	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
+	
 	atomic_inc(&cache->cached_objs);
+	
 	return 0;
 }
 
-
-/**************************************/
+/************************/
 /* Slab cache functions */
-
-/** Return number of objects that fit in certain cache size */
-static unsigned int comp_objects(slab_cache_t *cache)
+/************************/
+
+/** Return number of objects that fit in certain cache size
+ *
+ */
+static size_t comp_objects(slab_cache_t *cache)
 {
 	if (cache->flags & SLAB_CACHE_SLINSIDE)
-		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) /
-		    cache->size;
-	else 
+		return ((PAGE_SIZE << cache->order)
+		    - sizeof(slab_t)) / cache->size;
+	else
 		return (PAGE_SIZE << cache->order) / cache->size;
 }
 
-/** Return wasted space in slab */
-static unsigned int badness(slab_cache_t *cache)
-{
-	unsigned int objects;
-	unsigned int ssize;
-
-	objects = comp_objects(cache);
-	ssize = PAGE_SIZE << cache->order;
+/** Return wasted space in slab
+ *
+ */
+static size_t badness(slab_cache_t *cache)
+{
+	size_t objects = comp_objects(cache);
+	size_t ssize = PAGE_SIZE << cache->order;
+	
 	if (cache->flags & SLAB_CACHE_SLINSIDE)
 		ssize -= sizeof(slab_t);
+	
 	return ssize - objects * cache->size;
 }
 
-/**
- * Initialize mag_cache structure in slab cache
+/** Initialize mag_cache structure in slab cache
+ *
  */
 static bool make_magcache(slab_cache_t *cache)
 {
-	unsigned int i;
-	
 	ASSERT(_slab_initialized >= 2);
-
+	
 	cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,
 	    FRAME_ATOMIC);
 	if (!cache->mag_cache)
 		return false;
-
+	
+	size_t i;
 	for (i = 0; i < config.cpu_count; i++) {
 		memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
 		spinlock_initialize(&cache->mag_cache[i].lock,
-		    "slab_maglock_cpu");
-	}
+		    "slab.cache.mag_cache[].lock");
+	}
+	
 	return true;
 }
 
-/** Initialize allocated memory as a slab cache */
+/** Initialize allocated memory as a slab cache
+ *
+ */
 static void _slab_cache_create(slab_cache_t *cache, const char *name,
-    size_t size, size_t align, int (*constructor)(void *obj, int kmflag),
-    int (*destructor)(void *obj), int flags)
-{
-	int pages;
-	ipl_t ipl;
-
+    size_t size, size_t align, int (*constructor)(void *obj,
+    unsigned int kmflag), size_t (*destructor)(void *obj), unsigned int flags)
+{
 	memsetb(cache, sizeof(*cache), 0);
 	cache->name = name;
-
+	
 	if (align < sizeof(unative_t))
 		align = sizeof(unative_t);
+	
 	size = ALIGN_UP(size, align);
-		
+	
 	cache->size = size;
-
 	cache->constructor = constructor;
 	cache->destructor = destructor;
 	cache->flags = flags;
-
+	
 	list_initialize(&cache->full_slabs);
 	list_initialize(&cache->partial_slabs);
 	list_initialize(&cache->magazines);
-	spinlock_initialize(&cache->slablock, "slab_lock");
-	spinlock_initialize(&cache->maglock, "slab_maglock");
+	
+	spinlock_initialize(&cache->slablock, "slab.cache.slablock");
+	spinlock_initialize(&cache->maglock, "slab.cache.maglock");
+	
 	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
 		(void) make_magcache(cache);
-
+	
 	/* Compute slab sizes, object counts in slabs etc. */
 	if (cache->size < SLAB_INSIDE_SIZE)
 		cache->flags |= SLAB_CACHE_SLINSIDE;
-
+	
 	/* Minimum slab order */
-	pages = SIZE2FRAMES(cache->size);
+	size_t pages = SIZE2FRAMES(cache->size);
+	
 	/* We need the 2^order >= pages */
 	if (pages == 1)
@@ -614,59 +639,58 @@
 	else
 		cache->order = fnzb(pages - 1) + 1;
-
-	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
+	
+	while (badness(cache) > SLAB_MAX_BADNESS(cache))
 		cache->order += 1;
-	}
+	
 	cache->objects = comp_objects(cache);
+	
 	/* If info fits in, put it inside */
 	if (badness(cache) > sizeof(slab_t))
 		cache->flags |= SLAB_CACHE_SLINSIDE;
-
+	
 	/* Add cache to cache list */
-	ipl = interrupts_disable();
-	spinlock_lock(&slab_cache_lock);
-
+	irq_spinlock_lock(&slab_cache_lock, true);
 	list_append(&cache->link, &slab_cache_list);
-
-	spinlock_unlock(&slab_cache_lock);
-	interrupts_restore(ipl);
-}
-
-/** Create slab cache  */
+	irq_spinlock_unlock(&slab_cache_lock, true);
+}
+
+/** Create slab cache
+ *
+ */
 slab_cache_t *slab_cache_create(const char *name, size_t size, size_t align,
-    int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
-    int flags)
-{
-	slab_cache_t *cache;
-
-	cache = slab_alloc(&slab_cache_cache, 0);
+    int (*constructor)(void *obj, unsigned int kmflag),
+    size_t (*destructor)(void *obj), unsigned int flags)
+{
+	slab_cache_t *cache = slab_alloc(&slab_cache_cache, 0);
 	_slab_cache_create(cache, name, size, align, constructor, destructor,
 	    flags);
+	
 	return cache;
 }
 
-/** 
- * Reclaim space occupied by objects that are already free
+/** Reclaim space occupied by objects that are already free
  *
  * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
+ *
  * @return Number of freed pages
- */
-static size_t _slab_reclaim(slab_cache_t *cache, int flags)
-{
-	unsigned int i;
+ *
+ */
+static size_t _slab_reclaim(slab_cache_t *cache, unsigned int flags)
+{
+	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
+		return 0; /* Nothing to do */
+	
+	/*
+	 * We count up to original magazine count to avoid
+	 * endless loop
+	 */
+	atomic_count_t magcount = atomic_get(&cache->magazine_counter);
+	
 	slab_magazine_t *mag;
 	size_t frames = 0;
-	int magcount;
-	
-	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
-		return 0; /* Nothing to do */
-
-	/* We count up to original magazine count to avoid
-	 * endless loop 
-	 */
-	magcount = atomic_get(&cache->magazine_counter);
-	while (magcount-- && (mag=get_mag_from_cache(cache, 0))) {
-		frames += magazine_destroy(cache,mag);
-		if (!(flags & SLAB_RECLAIM_ALL) && frames)
+	
+	while ((magcount--) && (mag = get_mag_from_cache(cache, 0))) {
+		frames += magazine_destroy(cache, mag);
+		if ((!(flags & SLAB_RECLAIM_ALL)) && (frames))
 			break;
 	}
@@ -675,7 +699,8 @@
 		/* Free cpu-bound magazines */
 		/* Destroy CPU magazines */
+		size_t i;
 		for (i = 0; i < config.cpu_count; i++) {
 			spinlock_lock(&cache->mag_cache[i].lock);
-
+			
 			mag = cache->mag_cache[i].current;
 			if (mag)
@@ -687,85 +712,88 @@
 				frames += magazine_destroy(cache, mag);
 			cache->mag_cache[i].last = NULL;
-
+			
 			spinlock_unlock(&cache->mag_cache[i].lock);
 		}
 	}
-
+	
 	return frames;
 }
 
-/** Check that there are no slabs and remove cache from system  */
+/** Check that there are no slabs and remove cache from system
+ *
+ */
 void slab_cache_destroy(slab_cache_t *cache)
 {
-	ipl_t ipl;
-
-	/* First remove cache from link, so that we don't need
+	/*
+	 * First remove cache from link, so that we don't need
 	 * to disable interrupts later
+	 *
 	 */
-
-	ipl = interrupts_disable();
-	spinlock_lock(&slab_cache_lock);
-
+	irq_spinlock_lock(&slab_cache_lock, true);
 	list_remove(&cache->link);
-
-	spinlock_unlock(&slab_cache_lock);
-	interrupts_restore(ipl);
-
-	/* Do not lock anything, we assume the software is correct and
-	 * does not touch the cache when it decides to destroy it */
+	irq_spinlock_unlock(&slab_cache_lock, true);
+	
+	/*
+	 * Do not lock anything, we assume the software is correct and
+	 * does not touch the cache when it decides to destroy it
+	 *
+	 */
 	
 	/* Destroy all magazines */
 	_slab_reclaim(cache, SLAB_RECLAIM_ALL);
-
+	
 	/* All slabs must be empty */
-	if (!list_empty(&cache->full_slabs) ||
-	    !list_empty(&cache->partial_slabs))
+	if ((!list_empty(&cache->full_slabs)) ||
+	    (!list_empty(&cache->partial_slabs)))
 		panic("Destroying cache that is not empty.");
-
+	
 	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
 		free(cache->mag_cache);
+	
 	slab_free(&slab_cache_cache, cache);
 }
 
-/** Allocate new object from cache - if no flags given, always returns memory */
-void *slab_alloc(slab_cache_t *cache, int flags)
-{
-	ipl_t ipl;
+/** Allocate new object from cache - if no flags given, always returns memory
+ *
+ */
+void *slab_alloc(slab_cache_t *cache, unsigned int flags)
+{
+	/* Disable interrupts to avoid deadlocks with interrupt handlers */
+	ipl_t ipl = interrupts_disable();
+	
 	void *result = NULL;
 	
-	/* Disable interrupts to avoid deadlocks with interrupt handlers */
-	ipl = interrupts_disable();
-
-	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
+	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
 		result = magazine_obj_get(cache);
-	}
+	
 	if (!result)
 		result = slab_obj_create(cache, flags);
-
+	
 	interrupts_restore(ipl);
-
+	
 	if (result)
 		atomic_inc(&cache->allocated_objs);
-
+	
 	return result;
 }
 
-/** Return object to cache, use slab if known  */
+/** Return object to cache, use slab if known
+ *
+ */
 static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
 {
-	ipl_t ipl;
-
-	ipl = interrupts_disable();
-
+	ipl_t ipl = interrupts_disable();
+	
 	if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
-	    magazine_obj_put(cache, obj)) {
+	    (magazine_obj_put(cache, obj)))
 		slab_obj_destroy(cache, obj, slab);
-
-	}
+	
 	interrupts_restore(ipl);
 	atomic_dec(&cache->allocated_objs);
 }
 
-/** Return slab object to cache */
+/** Return slab object to cache
+ *
+ */
 void slab_free(slab_cache_t *cache, void *obj)
 {
@@ -773,45 +801,39 @@
 }
 
-/* Go through all caches and reclaim what is possible */
-size_t slab_reclaim(int flags)
-{
-	slab_cache_t *cache;
+/** Go through all caches and reclaim what is possible
+ *
+ * Interrupts must be disabled before calling this function,
+ * otherwise memory allocation from interrupts can deadlock.
+ *
+ */
+size_t slab_reclaim(unsigned int flags)
+{
+	irq_spinlock_lock(&slab_cache_lock, false);
+	
+	size_t frames = 0;
 	link_t *cur;
-	size_t frames = 0;
-
-	spinlock_lock(&slab_cache_lock);
-
-	/* TODO: Add assert, that interrupts are disabled, otherwise
-	 * memory allocation from interrupts can deadlock.
-	 */
-
 	for (cur = slab_cache_list.next; cur != &slab_cache_list;
 	    cur = cur->next) {
-		cache = list_get_instance(cur, slab_cache_t, link);
+		slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
 		frames += _slab_reclaim(cache, flags);
 	}
-
-	spinlock_unlock(&slab_cache_lock);
-
+	
+	irq_spinlock_unlock(&slab_cache_lock, false);
+	
 	return frames;
 }
 
-
-/* Print list of slabs */
+/** Print list of slabs
+ *
+ */
 void slab_print_list(void)
 {
-	int skip = 0;
-
-	printf("slab name        size     pages  obj/pg slabs  cached allocated"
+	printf("slab name        size     pages  obj/pg   slabs  cached allocated"
 	    " ctl\n");
-	printf("---------------- -------- ------ ------ ------ ------ ---------"
+	printf("---------------- -------- ------ -------- ------ ------ ---------"
 	    " ---\n");
-
+	
+	size_t skip = 0;
 	while (true) {
-		slab_cache_t *cache;
-		link_t *cur;
-		ipl_t ipl;
-		int i;
-
 		/*
 		 * We must not hold the slab_cache_lock spinlock when printing
@@ -836,36 +858,34 @@
 		 * statistics.
 		 */
-
-		ipl = interrupts_disable();
-		spinlock_lock(&slab_cache_lock);
-
+		
+		irq_spinlock_lock(&slab_cache_lock, true);
+		
+		link_t *cur;
+		size_t i;
 		for (i = 0, cur = slab_cache_list.next;
-		    i < skip && cur != &slab_cache_list;
-		    i++, cur = cur->next)
-			;
-
+		    (i < skip) && (cur != &slab_cache_list);
+		    i++, cur = cur->next);
+		
 		if (cur == &slab_cache_list) {
-			spinlock_unlock(&slab_cache_lock);
-			interrupts_restore(ipl);
+			irq_spinlock_unlock(&slab_cache_lock, true);
 			break;
 		}
-
+		
 		skip++;
-
-		cache = list_get_instance(cur, slab_cache_t, link);
-
+		
+		slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
+		
 		const char *name = cache->name;
 		uint8_t order = cache->order;
 		size_t size = cache->size;
-		unsigned int objects = cache->objects;
+		size_t objects = cache->objects;
 		long allocated_slabs = atomic_get(&cache->allocated_slabs);
 		long cached_objs = atomic_get(&cache->cached_objs);
 		long allocated_objs = atomic_get(&cache->allocated_objs);
-		int flags = cache->flags;
-		
-		spinlock_unlock(&slab_cache_lock);
-		interrupts_restore(ipl);
-		
-		printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n",
+		unsigned int flags = cache->flags;
+		
+		irq_spinlock_unlock(&slab_cache_lock, true);
+		
+		printf("%-16s %8" PRIs " %6u %8" PRIs " %6ld %6ld %9ld %-3s\n",
 		    name, size, (1 << order), objects, allocated_slabs,
 		    cached_objs, allocated_objs,
@@ -876,6 +896,4 @@
 void slab_cache_init(void)
 {
-	int i, size;
-
 	/* Initialize magazine cache */
 	_slab_cache_create(&mag_cache, "slab_magazine",
@@ -883,13 +901,18 @@
 	    sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE |
 	    SLAB_CACHE_SLINSIDE);
+	
 	/* Initialize slab_cache cache */
 	_slab_cache_create(&slab_cache_cache, "slab_cache",
 	    sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL,
 	    SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
+	
 	/* Initialize external slab cache */
 	slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0,
 	    NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
-
+	
 	/* Initialize structures for malloc */
+	size_t i;
+	size_t size;
+	
 	for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
 	    i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
@@ -898,4 +921,5 @@
 		    NULL, NULL, SLAB_CACHE_MAGDEFERRED);
 	}
+	
 #ifdef CONFIG_DEBUG
 	_slab_initialized = 1;
@@ -906,35 +930,32 @@
  *
  * Kernel calls this function, when it knows the real number of
- * processors. 
- * Allocate slab for cpucache and enable it on all existing
- * slabs that are SLAB_CACHE_MAGDEFERRED
+ * processors. Allocate slab for cpucache and enable it on all
+ * existing slabs that are SLAB_CACHE_MAGDEFERRED
+ *
  */
 void slab_enable_cpucache(void)
 {
-	link_t *cur;
-	slab_cache_t *s;
-
 #ifdef CONFIG_DEBUG
 	_slab_initialized = 2;
 #endif
-
-	spinlock_lock(&slab_cache_lock);
-	
+	
+	irq_spinlock_lock(&slab_cache_lock, false);
+	
+	link_t *cur;
 	for (cur = slab_cache_list.next; cur != &slab_cache_list;
-	    cur = cur->next){
-		s = list_get_instance(cur, slab_cache_t, link);
-		if ((s->flags & SLAB_CACHE_MAGDEFERRED) !=
+	    cur = cur->next) {
+		slab_cache_t *slab = list_get_instance(cur, slab_cache_t, link);
+		if ((slab->flags & SLAB_CACHE_MAGDEFERRED) !=
 		    SLAB_CACHE_MAGDEFERRED)
 			continue;
-		(void) make_magcache(s);
-		s->flags &= ~SLAB_CACHE_MAGDEFERRED;
-	}
-
-	spinlock_unlock(&slab_cache_lock);
-}
-
-/**************************************/
-/* kalloc/kfree functions             */
-void *malloc(unsigned int size, int flags)
+		
+		(void) make_magcache(slab);
+		slab->flags &= ~SLAB_CACHE_MAGDEFERRED;
+	}
+	
+	irq_spinlock_unlock(&slab_cache_lock, false);
+}
+
+void *malloc(size_t size, unsigned int flags)
 {
 	ASSERT(_slab_initialized);
@@ -943,11 +964,11 @@
 	if (size < (1 << SLAB_MIN_MALLOC_W))
 		size = (1 << SLAB_MIN_MALLOC_W);
-
-	int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
-
+	
+	uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
+	
 	return slab_alloc(malloc_caches[idx], flags);
 }
 
-void *realloc(void *ptr, unsigned int size, int flags)
+void *realloc(void *ptr, size_t size, unsigned int flags)
 {
 	ASSERT(_slab_initialized);
@@ -959,5 +980,5 @@
 		if (size < (1 << SLAB_MIN_MALLOC_W))
 			size = (1 << SLAB_MIN_MALLOC_W);
-		int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
+		uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
 		
 		new_ptr = slab_alloc(malloc_caches[idx], flags);
@@ -980,5 +1001,5 @@
 	if (!ptr)
 		return;
-
+	
 	slab_t *slab = obj2slab(ptr);
 	_slab_free(slab->cache, ptr, slab);
Index: kernel/generic/src/mm/tlb.c
===================================================================
--- kernel/generic/src/mm/tlb.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/mm/tlb.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -33,5 +33,5 @@
 /**
  * @file
- * @brief	Generic TLB shootdown algorithm.
+ * @brief Generic TLB shootdown algorithm.
  *
  * The algorithm implemented here is based on the CMU TLB shootdown
@@ -53,11 +53,4 @@
 #include <cpu.h>
 
-/**
- * This lock is used for synchronisation between sender and
- * recipients of TLB shootdown message. It must be acquired
- * before CPU structure lock.
- */
-SPINLOCK_INITIALIZE(tlblock);
-
 void tlb_init(void)
 {
@@ -66,4 +59,12 @@
 
 #ifdef CONFIG_SMP
+
+/**
+ * This lock is used for synchronisation between sender and
+ * recipients of TLB shootdown message. It must be acquired
+ * before CPU structure lock.
+ *
+ */
+IRQ_SPINLOCK_STATIC_INITIALIZE(tlblock);
 
 /** Send TLB shootdown message.
@@ -78,13 +79,13 @@
  * @param page Virtual page address, if required by type.
  * @param count Number of pages, if required by type.
+ *
  */
 void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid,
     uintptr_t page, size_t count)
 {
-	unsigned int i;
-
-	CPU->tlb_active = 0;
-	spinlock_lock(&tlblock);
+	CPU->tlb_active = false;
+	irq_spinlock_lock(&tlblock, false);
 	
+	size_t i;
 	for (i = 0; i < config.cpu_count; i++) {
 		cpu_t *cpu;
@@ -92,7 +93,7 @@
 		if (i == CPU->id)
 			continue;
-
+		
 		cpu = &cpus[i];
-		spinlock_lock(&cpu->lock);
+		irq_spinlock_lock(&cpu->lock, false);
 		if (cpu->tlb_messages_count == TLB_MESSAGE_QUEUE_LEN) {
 			/*
@@ -115,10 +116,10 @@
 			cpu->tlb_messages[idx].count = count;
 		}
-		spinlock_unlock(&cpu->lock);
+		irq_spinlock_unlock(&cpu->lock, false);
 	}
 	
 	tlb_shootdown_ipi_send();
-
-busy_wait:	
+	
+busy_wait:
 	for (i = 0; i < config.cpu_count; i++)
 		if (cpus[i].tlb_active)
@@ -126,9 +127,11 @@
 }
 
-/** Finish TLB shootdown sequence. */
+/** Finish TLB shootdown sequence.
+ *
+ */
 void tlb_shootdown_finalize(void)
 {
-	spinlock_unlock(&tlblock);
-	CPU->tlb_active = 1;
+	irq_spinlock_unlock(&tlblock, false);
+	CPU->tlb_active = true;
 }
 
@@ -138,28 +141,25 @@
 }
 
-/** Receive TLB shootdown message. */
+/** Receive TLB shootdown message.
+ *
+ */
 void tlb_shootdown_ipi_recv(void)
 {
-	tlb_invalidate_type_t type;
-	asid_t asid;
-	uintptr_t page;
-	size_t count;
-	unsigned int i;
-	
 	ASSERT(CPU);
 	
-	CPU->tlb_active = 0;
-	spinlock_lock(&tlblock);
-	spinlock_unlock(&tlblock);
+	CPU->tlb_active = false;
+	irq_spinlock_lock(&tlblock, false);
+	irq_spinlock_unlock(&tlblock, false);
 	
-	spinlock_lock(&CPU->lock);
+	irq_spinlock_lock(&CPU->lock, false);
 	ASSERT(CPU->tlb_messages_count <= TLB_MESSAGE_QUEUE_LEN);
-
+	
+	size_t i;
 	for (i = 0; i < CPU->tlb_messages_count; CPU->tlb_messages_count--) {
-		type = CPU->tlb_messages[i].type;
-		asid = CPU->tlb_messages[i].asid;
-		page = CPU->tlb_messages[i].page;
-		count = CPU->tlb_messages[i].count;
-
+		tlb_invalidate_type_t type = CPU->tlb_messages[i].type;
+		asid_t asid = CPU->tlb_messages[i].asid;
+		uintptr_t page = CPU->tlb_messages[i].page;
+		size_t count = CPU->tlb_messages[i].count;
+		
 		switch (type) {
 		case TLB_INVL_ALL:
@@ -170,5 +170,5 @@
 			break;
 		case TLB_INVL_PAGES:
-		    	ASSERT(count);
+			ASSERT(count);
 			tlb_invalidate_pages(asid, page, count);
 			break;
@@ -177,10 +177,11 @@
 			break;
 		}
+		
 		if (type == TLB_INVL_ALL)
 			break;
 	}
 	
-	spinlock_unlock(&CPU->lock);
-	CPU->tlb_active = 1;
+	irq_spinlock_unlock(&CPU->lock, false);
+	CPU->tlb_active = true;
 }
 
Index: kernel/generic/src/printf/vprintf.c
===================================================================
--- kernel/generic/src/printf/vprintf.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/printf/vprintf.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -42,5 +42,5 @@
 #include <str.h>
 
-SPINLOCK_STATIC_INITIALIZE_NAME(printf_lock, "*printf_lock");
+IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(printf_lock, "*printf_lock");
 
 static int vprintf_str_write(const char *str, size_t size, void *data)
@@ -93,11 +93,7 @@
 	};
 	
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&printf_lock);
-	
+	irq_spinlock_lock(&printf_lock, true);
 	int ret = printf_core(fmt, &ps, ap);
-	
-	spinlock_unlock(&printf_lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&printf_lock, true);
 	
 	return ret;
Index: kernel/generic/src/proc/scheduler.c
===================================================================
--- kernel/generic/src/proc/scheduler.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/proc/scheduler.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -33,5 +33,5 @@
 /**
  * @file
- * @brief	Scheduler and load balancing.
+ * @brief Scheduler and load balancing.
  *
  * This file contains the scheduler and kcpulb kernel thread which
@@ -68,5 +68,5 @@
 static void scheduler_separated_stack(void);
 
-atomic_t nrdy;	/**< Number of ready threads in the system. */
+atomic_t nrdy;  /**< Number of ready threads in the system. */
 
 /** Carry out actions before new task runs. */
@@ -89,8 +89,8 @@
 	before_thread_runs_arch();
 #ifdef CONFIG_FPU_LAZY
-	if(THREAD == CPU->fpu_owner) 
+	if(THREAD == CPU->fpu_owner)
 		fpu_enable();
 	else
-		fpu_disable(); 
+		fpu_disable();
 #else
 	fpu_enable();
@@ -123,17 +123,18 @@
 restart:
 	fpu_enable();
-	spinlock_lock(&CPU->lock);
-
+	irq_spinlock_lock(&CPU->lock, false);
+	
 	/* Save old context */
-	if (CPU->fpu_owner != NULL) {  
-		spinlock_lock(&CPU->fpu_owner->lock);
+	if (CPU->fpu_owner != NULL) {
+		irq_spinlock_lock(&CPU->fpu_owner->lock, false);
 		fpu_context_save(CPU->fpu_owner->saved_fpu_context);
-		/* don't prevent migration */
+		
+		/* Don't prevent migration */
 		CPU->fpu_owner->fpu_context_engaged = 0;
-		spinlock_unlock(&CPU->fpu_owner->lock);
+		irq_spinlock_unlock(&CPU->fpu_owner->lock, false);
 		CPU->fpu_owner = NULL;
 	}
-
-	spinlock_lock(&THREAD->lock);
+	
+	irq_spinlock_lock(&THREAD->lock, false);
 	if (THREAD->fpu_context_exists) {
 		fpu_context_restore(THREAD->saved_fpu_context);
@@ -142,21 +143,23 @@
 		if (!THREAD->saved_fpu_context) {
 			/* Might sleep */
-			spinlock_unlock(&THREAD->lock);
-			spinlock_unlock(&CPU->lock);
+			irq_spinlock_unlock(&THREAD->lock, false);
+			irq_spinlock_unlock(&CPU->lock, false);
 			THREAD->saved_fpu_context =
 			    (fpu_context_t *) slab_alloc(fpu_context_slab, 0);
+			
 			/* We may have switched CPUs during slab_alloc */
-			goto restart; 
+			goto restart;
 		}
 		fpu_init();
 		THREAD->fpu_context_exists = 1;
 	}
+	
 	CPU->fpu_owner = THREAD;
 	THREAD->fpu_context_engaged = 1;
-	spinlock_unlock(&THREAD->lock);
-
-	spinlock_unlock(&CPU->lock);
-}
-#endif
+	irq_spinlock_unlock(&THREAD->lock, false);
+	
+	irq_spinlock_unlock(&CPU->lock, false);
+}
+#endif /* CONFIG_FPU_LAZY */
 
 /** Initialize scheduler
@@ -180,10 +183,6 @@
 static thread_t *find_best_thread(void)
 {
-	thread_t *t;
-	runq_t *r;
-	int i;
-
 	ASSERT(CPU != NULL);
-
+	
 loop:
 	
@@ -194,61 +193,60 @@
 		 * This improves energy saving and hyperthreading.
 		 */
-
+		
 		 /* Mark CPU as it was idle this clock tick */
-		 spinlock_lock(&CPU->lock);
-		 CPU->idle = true;
-		 spinlock_unlock(&CPU->lock);
-
-		 interrupts_enable();
-		 /*
+		irq_spinlock_lock(&CPU->lock, false);
+		CPU->idle = true;
+		irq_spinlock_unlock(&CPU->lock, false);
+		
+		interrupts_enable();
+		/*
 		 * An interrupt might occur right now and wake up a thread.
 		 * In such case, the CPU will continue to go to sleep
 		 * even though there is a runnable thread.
 		 */
-		 cpu_sleep();
-		 interrupts_disable();
-		 goto loop;
-	}
-	
+		cpu_sleep();
+		interrupts_disable();
+		goto loop;
+	}
+	
+	unsigned int i;
 	for (i = 0; i < RQ_COUNT; i++) {
-		r = &CPU->rq[i];
-		spinlock_lock(&r->lock);
-		if (r->n == 0) {
+		irq_spinlock_lock(&(CPU->rq[i].lock), false);
+		if (CPU->rq[i].n == 0) {
 			/*
 			 * If this queue is empty, try a lower-priority queue.
 			 */
-			spinlock_unlock(&r->lock);
+			irq_spinlock_unlock(&(CPU->rq[i].lock), false);
 			continue;
 		}
-
+		
 		atomic_dec(&CPU->nrdy);
 		atomic_dec(&nrdy);
-		r->n--;
-
+		CPU->rq[i].n--;
+		
 		/*
 		 * Take the first thread from the queue.
 		 */
-		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
-		list_remove(&t->rq_link);
-
-		spinlock_unlock(&r->lock);
-
-		spinlock_lock(&t->lock);
-		t->cpu = CPU;
-
-		t->ticks = us2ticks((i + 1) * 10000);
-		t->priority = i;	/* correct rq index */
-
+		thread_t *thread =
+		    list_get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link);
+		list_remove(&thread->rq_link);
+		
+		irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);
+		
+		thread->cpu = CPU;
+		thread->ticks = us2ticks((i + 1) * 10000);
+		thread->priority = i;  /* Correct rq index */
+		
 		/*
 		 * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
 		 * when load balancing needs emerge.
 		 */
-		t->flags &= ~THREAD_FLAG_STOLEN;
-		spinlock_unlock(&t->lock);
-
-		return t;
-	}
+		thread->flags &= ~THREAD_FLAG_STOLEN;
+		irq_spinlock_unlock(&thread->lock, false);
+		
+		return thread;
+	}
+	
 	goto loop;
-
 }
 
@@ -267,30 +265,31 @@
 {
 	link_t head;
-	runq_t *r;
-	int i, n;
-
+	
 	list_initialize(&head);
-	spinlock_lock(&CPU->lock);
+	irq_spinlock_lock(&CPU->lock, false);
+	
 	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
+		int i;
 		for (i = start; i < RQ_COUNT - 1; i++) {
-			/* remember and empty rq[i + 1] */
-			r = &CPU->rq[i + 1];
-			spinlock_lock(&r->lock);
-			list_concat(&head, &r->rq_head);
-			n = r->n;
-			r->n = 0;
-			spinlock_unlock(&r->lock);
-		
-			/* append rq[i + 1] to rq[i] */
-			r = &CPU->rq[i];
-			spinlock_lock(&r->lock);
-			list_concat(&r->rq_head, &head);
-			r->n += n;
-			spinlock_unlock(&r->lock);
+			/* Remember and empty rq[i + 1] */
+			
+			irq_spinlock_lock(&CPU->rq[i + 1].lock, false);
+			list_concat(&head, &CPU->rq[i + 1].rq_head);
+			size_t n = CPU->rq[i + 1].n;
+			CPU->rq[i + 1].n = 0;
+			irq_spinlock_unlock(&CPU->rq[i + 1].lock, false);
+			
+			/* Append rq[i + 1] to rq[i] */
+			
+			irq_spinlock_lock(&CPU->rq[i].lock, false);
+			list_concat(&CPU->rq[i].rq_head, &head);
+			CPU->rq[i].n += n;
+			irq_spinlock_unlock(&CPU->rq[i].lock, false);
 		}
+		
 		CPU->needs_relink = 0;
 	}
-	spinlock_unlock(&CPU->lock);
-
+	
+	irq_spinlock_unlock(&CPU->lock, false);
 }
 
@@ -305,14 +304,14 @@
 {
 	volatile ipl_t ipl;
-
+	
 	ASSERT(CPU != NULL);
-
+	
 	ipl = interrupts_disable();
-
+	
 	if (atomic_get(&haltstate))
 		halt();
 	
 	if (THREAD) {
-		spinlock_lock(&THREAD->lock);
+		irq_spinlock_lock(&THREAD->lock, false);
 		
 		/* Update thread kernel accounting */
@@ -330,25 +329,27 @@
 			THREAD->last_cycle = get_cycle();
 			
-			spinlock_unlock(&THREAD->lock);
+			irq_spinlock_unlock(&THREAD->lock, false);
 			interrupts_restore(THREAD->saved_context.ipl);
 			
 			return;
 		}
-
+		
 		/*
 		 * Interrupt priority level of preempted thread is recorded
 		 * here to facilitate scheduler() invocations from
-		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()). 
+		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
+		 *
 		 */
 		THREAD->saved_context.ipl = ipl;
 	}
-
+	
 	/*
 	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
 	 * and preemption counter. At this point THE could be coming either
 	 * from THREAD's or CPU's stack.
+	 *
 	 */
 	the_copy(THE, (the_t *) CPU->stack);
-
+	
 	/*
 	 * We may not keep the old stack.
@@ -362,4 +363,5 @@
 	 * Therefore the scheduler() function continues in
 	 * scheduler_separated_stack().
+	 *
 	 */
 	context_save(&CPU->saved_context);
@@ -367,5 +369,6 @@
 	    (uintptr_t) CPU->stack, CPU_STACK_SIZE);
 	context_restore(&CPU->saved_context);
-	/* not reached */
+	
+	/* Not reached */
 }
 
@@ -377,12 +380,12 @@
  *
  * Assume THREAD->lock is held.
+ *
  */
 void scheduler_separated_stack(void)
 {
-	int priority;
 	DEADLOCK_PROBE_INIT(p_joinwq);
 	task_t *old_task = TASK;
 	as_t *old_as = AS;
-
+	
 	ASSERT(CPU != NULL);
 	
@@ -391,36 +394,40 @@
 	 * possible destruction should thread_destroy() be called on this or any
 	 * other processor while the scheduler is still using them.
+	 *
 	 */
 	if (old_task)
 		task_hold(old_task);
+	
 	if (old_as)
 		as_hold(old_as);
-
+	
 	if (THREAD) {
-		/* must be run after the switch to scheduler stack */
+		/* Must be run after the switch to scheduler stack */
 		after_thread_ran();
-
+		
 		switch (THREAD->state) {
 		case Running:
-			spinlock_unlock(&THREAD->lock);
+			irq_spinlock_unlock(&THREAD->lock, false);
 			thread_ready(THREAD);
 			break;
-
+		
 		case Exiting:
 repeat:
 			if (THREAD->detached) {
-				thread_destroy(THREAD);
+				thread_destroy(THREAD, false);
 			} else {
 				/*
 				 * The thread structure is kept allocated until
 				 * somebody calls thread_detach() on it.
+				 *
 				 */
-				if (!spinlock_trylock(&THREAD->join_wq.lock)) {
+				if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) {
 					/*
 					 * Avoid deadlock.
+					 *
 					 */
-					spinlock_unlock(&THREAD->lock);
+					irq_spinlock_unlock(&THREAD->lock, false);
 					delay(HZ);
-					spinlock_lock(&THREAD->lock);
+					irq_spinlock_lock(&THREAD->lock, false);
 					DEADLOCK_PROBE(p_joinwq,
 					    DEADLOCK_THRESHOLD);
@@ -429,8 +436,8 @@
 				_waitq_wakeup_unsafe(&THREAD->join_wq,
 				    WAKEUP_FIRST);
-				spinlock_unlock(&THREAD->join_wq.lock);
+				irq_spinlock_unlock(&THREAD->join_wq.lock, false);
 				
 				THREAD->state = Lingering;
-				spinlock_unlock(&THREAD->lock);
+				irq_spinlock_unlock(&THREAD->lock, false);
 			}
 			break;
@@ -439,17 +446,20 @@
 			/*
 			 * Prefer the thread after it's woken up.
+			 *
 			 */
 			THREAD->priority = -1;
-
+			
 			/*
 			 * We need to release wq->lock which we locked in
 			 * waitq_sleep(). Address of wq->lock is kept in
 			 * THREAD->sleep_queue.
+			 *
 			 */
-			spinlock_unlock(&THREAD->sleep_queue->lock);
-
+			irq_spinlock_unlock(&THREAD->sleep_queue->lock, false);
+			
 			/*
 			 * Check for possible requests for out-of-context
 			 * invocation.
+			 *
 			 */
 			if (THREAD->call_me) {
@@ -458,12 +468,13 @@
 				THREAD->call_me_with = NULL;
 			}
-
-			spinlock_unlock(&THREAD->lock);
-
+			
+			irq_spinlock_unlock(&THREAD->lock, false);
+			
 			break;
-
+		
 		default:
 			/*
 			 * Entering state is unexpected.
+			 *
 			 */
 			panic("tid%" PRIu64 ": unexpected state %s.",
@@ -471,19 +482,20 @@
 			break;
 		}
-
+		
 		THREAD = NULL;
 	}
-
+	
 	THREAD = find_best_thread();
 	
-	spinlock_lock(&THREAD->lock);
-	priority = THREAD->priority;
-	spinlock_unlock(&THREAD->lock);	
-
-	relink_rq(priority);		
-
+	irq_spinlock_lock(&THREAD->lock, false);
+	int priority = THREAD->priority;
+	irq_spinlock_unlock(&THREAD->lock, false);
+	
+	relink_rq(priority);
+	
 	/*
 	 * If both the old and the new task are the same, lots of work is
 	 * avoided.
+	 *
 	 */
 	if (TASK != THREAD->task) {
@@ -493,4 +505,5 @@
 		 * Note that it is possible for two tasks to share one address
 		 * space.
+		 *
 		 */
 		if (old_as != new_as) {
@@ -498,26 +511,28 @@
 			 * Both tasks and address spaces are different.
 			 * Replace the old one with the new one.
+			 *
 			 */
 			as_switch(old_as, new_as);
 		}
-
+		
 		TASK = THREAD->task;
 		before_task_runs();
 	}
-
+	
 	if (old_task)
 		task_release(old_task);
+	
 	if (old_as)
 		as_release(old_as);
 	
-	spinlock_lock(&THREAD->lock);	
+	irq_spinlock_lock(&THREAD->lock, false);
 	THREAD->state = Running;
-
+	
 #ifdef SCHEDULER_VERBOSE
 	printf("cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64 
 	    ", nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority,
 	    THREAD->ticks, atomic_get(&CPU->nrdy));
-#endif	
-
+#endif
+	
 	/*
 	 * Some architectures provide late kernel PA2KA(identity)
@@ -527,15 +542,18 @@
 	 * necessary, is to be mapped in before_thread_runs(). This
 	 * function must be executed before the switch to the new stack.
+	 *
 	 */
 	before_thread_runs();
-
+	
 	/*
 	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
 	 * thread's stack.
+	 *
 	 */
 	the_copy(THE, (the_t *) THREAD->kstack);
 	
 	context_restore(&THREAD->saved_context);
-	/* not reached */
+	
+	/* Not reached */
 }
 
@@ -551,12 +569,7 @@
 void kcpulb(void *arg)
 {
-	thread_t *t;
-	int count;
 	atomic_count_t average;
-	unsigned int i;
-	int j;
-	int k = 0;
-	ipl_t ipl;
-
+	atomic_count_t rdy;
+	
 	/*
 	 * Detach kcpulb as nobody will call thread_join_timeout() on it.
@@ -569,5 +582,5 @@
 	 */
 	thread_sleep(1);
-
+	
 not_satisfied:
 	/*
@@ -575,46 +588,53 @@
 	 * other CPU's. Note that situation can have changed between two
 	 * passes. Each time get the most up to date counts.
+	 *
 	 */
 	average = atomic_get(&nrdy) / config.cpu_active + 1;
-	count = average - atomic_get(&CPU->nrdy);
-
-	if (count <= 0)
+	rdy = atomic_get(&CPU->nrdy);
+	
+	if (average <= rdy)
 		goto satisfied;
-
+	
+	atomic_count_t count = average - rdy;
+	
 	/*
 	 * Searching least priority queues on all CPU's first and most priority
 	 * queues on all CPU's last.
-	 */
-	for (j = RQ_COUNT - 1; j >= 0; j--) {
-		for (i = 0; i < config.cpu_active; i++) {
-			link_t *l;
-			runq_t *r;
-			cpu_t *cpu;
-
-			cpu = &cpus[(i + k) % config.cpu_active];
-
+	 *
+	 */
+	size_t acpu;
+	size_t acpu_bias = 0;
+	int rq;
+	
+	for (rq = RQ_COUNT - 1; rq >= 0; rq--) {
+		for (acpu = 0; acpu < config.cpu_active; acpu++) {
+			cpu_t *cpu = &cpus[(acpu + acpu_bias) % config.cpu_active];
+			
 			/*
 			 * Not interested in ourselves.
 			 * Doesn't require interrupt disabling for kcpulb has
 			 * THREAD_FLAG_WIRED.
+			 *
 			 */
 			if (CPU == cpu)
 				continue;
+			
 			if (atomic_get(&cpu->nrdy) <= average)
 				continue;
-
-			ipl = interrupts_disable();
-			r = &cpu->rq[j];
-			spinlock_lock(&r->lock);
-			if (r->n == 0) {
-				spinlock_unlock(&r->lock);
-				interrupts_restore(ipl);
+			
+			irq_spinlock_lock(&(cpu->rq[rq].lock), true);
+			if (cpu->rq[rq].n == 0) {
+				irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
 				continue;
 			}
-		
-			t = NULL;
-			l = r->rq_head.prev;	/* search rq from the back */
-			while (l != &r->rq_head) {
-				t = list_get_instance(l, thread_t, rq_link);
+			
+			thread_t *thread = NULL;
+			
+			/* Search rq from the back */
+			link_t *link = cpu->rq[rq].rq_head.prev;
+			
+			while (link != &(cpu->rq[rq].rq_head)) {
+				thread = (thread_t *) list_get_instance(link, thread_t, rq_link);
+				
 				/*
 				 * We don't want to steal CPU-wired threads
@@ -624,33 +644,38 @@
 				 * steal threads whose FPU context is still in
 				 * CPU.
+				 *
 				 */
-				spinlock_lock(&t->lock);
-				if ((!(t->flags & (THREAD_FLAG_WIRED |
-				    THREAD_FLAG_STOLEN))) &&
-				    (!(t->fpu_context_engaged))) {
+				irq_spinlock_lock(&thread->lock, false);
+				
+				if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)))
+				    && (!(thread->fpu_context_engaged))) {
 					/*
-					 * Remove t from r.
+					 * Remove thread from ready queue.
 					 */
-					spinlock_unlock(&t->lock);
+					irq_spinlock_unlock(&thread->lock, false);
 					
 					atomic_dec(&cpu->nrdy);
 					atomic_dec(&nrdy);
-
-					r->n--;
-					list_remove(&t->rq_link);
-
+					
+					cpu->rq[rq].n--;
+					list_remove(&thread->rq_link);
+					
 					break;
 				}
-				spinlock_unlock(&t->lock);
-				l = l->prev;
-				t = NULL;
+				
+				irq_spinlock_unlock(&thread->lock, false);
+				
+				link = link->prev;
+				thread = NULL;
 			}
-			spinlock_unlock(&r->lock);
-
-			if (t) {
+			
+			if (thread) {
 				/*
-				 * Ready t on local CPU
+				 * Ready thread on local CPU
+				 *
 				 */
-				spinlock_lock(&t->lock);
+				
+				irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock);
+				
 #ifdef KCPULB_VERBOSE
 				printf("kcpulb%u: TID %" PRIu64 " -> cpu%u, "
@@ -659,30 +684,32 @@
 				    atomic_get(&nrdy) / config.cpu_active);
 #endif
-				t->flags |= THREAD_FLAG_STOLEN;
-				t->state = Entering;
-				spinlock_unlock(&t->lock);
-	
-				thread_ready(t);
-
-				interrupts_restore(ipl);
-	
+				
+				thread->flags |= THREAD_FLAG_STOLEN;
+				thread->state = Entering;
+				
+				irq_spinlock_unlock(&thread->lock, true);
+				thread_ready(thread);
+				
 				if (--count == 0)
 					goto satisfied;
-					
+				
 				/*
 				 * We are not satisfied yet, focus on another
 				 * CPU next time.
+				 *
 				 */
-				k++;
+				acpu_bias++;
 				
 				continue;
-			}
-			interrupts_restore(ipl);
+			} else
+				irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
+			
 		}
 	}
-
+	
 	if (atomic_get(&CPU->nrdy)) {
 		/*
 		 * Be a little bit light-weight and let migrated threads run.
+		 *
 		 */
 		scheduler();
@@ -691,60 +718,56 @@
 		 * We failed to migrate a single thread.
 		 * Give up this turn.
+		 *
 		 */
 		goto loop;
 	}
-		
+	
 	goto not_satisfied;
-
+	
 satisfied:
 	goto loop;
 }
-
 #endif /* CONFIG_SMP */
 
-
-/** Print information about threads & scheduler queues */
+/** Print information about threads & scheduler queues
+ *
+ */
 void sched_print_list(void)
 {
-	ipl_t ipl;
-	unsigned int cpu, i;
-	runq_t *r;
-	thread_t *t;
-	link_t *cur;
-
-	/* We are going to mess with scheduler structures,
-	 * let's not be interrupted */
-	ipl = interrupts_disable();
+	size_t cpu;
 	for (cpu = 0; cpu < config.cpu_count; cpu++) {
-
 		if (!cpus[cpu].active)
 			continue;
-
-		spinlock_lock(&cpus[cpu].lock);
+		
+		irq_spinlock_lock(&cpus[cpu].lock, true);
+		
 		printf("cpu%u: address=%p, nrdy=%ld, needs_relink=%" PRIs "\n",
 		    cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
 		    cpus[cpu].needs_relink);
 		
+		unsigned int i;
 		for (i = 0; i < RQ_COUNT; i++) {
-			r = &cpus[cpu].rq[i];
-			spinlock_lock(&r->lock);
-			if (!r->n) {
-				spinlock_unlock(&r->lock);
+			irq_spinlock_lock(&(cpus[cpu].rq[i].lock), false);
+			if (cpus[cpu].rq[i].n == 0) {
+				irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
 				continue;
 			}
+			
 			printf("\trq[%u]: ", i);
-			for (cur = r->rq_head.next; cur != &r->rq_head;
-				cur = cur->next) {
-				t = list_get_instance(cur, thread_t, rq_link);
-				printf("%" PRIu64 "(%s) ", t->tid,
-				    thread_states[t->state]);
+			link_t *cur;
+			for (cur = cpus[cpu].rq[i].rq_head.next;
+			    cur != &(cpus[cpu].rq[i].rq_head);
+			    cur = cur->next) {
+				thread_t *thread = list_get_instance(cur, thread_t, rq_link);
+				printf("%" PRIu64 "(%s) ", thread->tid,
+				    thread_states[thread->state]);
 			}
 			printf("\n");
-			spinlock_unlock(&r->lock);
+			
+			irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
 		}
-		spinlock_unlock(&cpus[cpu].lock);
-	}
-	
-	interrupts_restore(ipl);
+		
+		irq_spinlock_unlock(&cpus[cpu].lock, true);
+	}
 }
 
Index: kernel/generic/src/proc/task.c
===================================================================
--- kernel/generic/src/proc/task.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/proc/task.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -60,5 +60,5 @@
 
 /** Spinlock protecting the tasks_tree AVL tree. */
-SPINLOCK_INITIALIZE(tasks_lock);
+IRQ_SPINLOCK_INITIALIZE(tasks_lock);
 
 /** AVL tree of active tasks.
@@ -81,7 +81,9 @@
 /* Forward declarations. */
 static void task_kill_internal(task_t *);
-static int tsk_constructor(void *, int);
-
-/** Initialize kernel tasks support. */
+static int tsk_constructor(void *, unsigned int);
+
+/** Initialize kernel tasks support.
+ *
+ */
 void task_init(void)
 {
@@ -92,19 +94,23 @@
 }
 
-/*
+/** Task finish walker.
+ *
  * The idea behind this walker is to kill and count all tasks different from
  * TASK.
+ *
  */
 static bool task_done_walker(avltree_node_t *node, void *arg)
 {
-	task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
-	unsigned *cnt = (unsigned *) arg;
-	
-	if (t != TASK) {
+	task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);
+	size_t *cnt = (size_t *) arg;
+	
+	if (task != TASK) {
 		(*cnt)++;
+		
 #ifdef CONFIG_DEBUG
-		printf("[%"PRIu64"] ", t->taskid);
-#endif
-		task_kill_internal(t);
+		printf("[%"PRIu64"] ", task->taskid);
+#endif
+		
+		task_kill_internal(task);
 	}
 	
@@ -113,51 +119,55 @@
 }
 
-/** Kill all tasks except the current task. */
+/** Kill all tasks except the current task.
+ *
+ */
 void task_done(void)
 {
-	unsigned tasks_left;
-	
-	do { /* Repeat until there are any tasks except TASK */
-		/* Messing with task structures, avoid deadlock */
+	size_t tasks_left;
+	
+	/* Repeat until there are any tasks except TASK */
+	do {
 #ifdef CONFIG_DEBUG
 		printf("Killing tasks... ");
 #endif
-		ipl_t ipl = interrupts_disable();
-		spinlock_lock(&tasks_lock);
+		
+		irq_spinlock_lock(&tasks_lock, true);
 		tasks_left = 0;
 		avltree_walk(&tasks_tree, task_done_walker, &tasks_left);
-		spinlock_unlock(&tasks_lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&tasks_lock, true);
+		
 		thread_sleep(1);
+		
 #ifdef CONFIG_DEBUG
 		printf("\n");
 #endif
-	} while (tasks_left);
-}
-
-int tsk_constructor(void *obj, int kmflags)
-{
-	task_t *ta = obj;
-	int i;
-	
-	atomic_set(&ta->refcount, 0);
-	atomic_set(&ta->lifecount, 0);
-	atomic_set(&ta->active_calls, 0);
-	
-	spinlock_initialize(&ta->lock, "task_ta_lock");
-	mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE);
-	
-	list_initialize(&ta->th_head);
-	list_initialize(&ta->sync_box_head);
-	
-	ipc_answerbox_init(&ta->answerbox, ta);
+	} while (tasks_left > 0);
+}
+
+int tsk_constructor(void *obj, unsigned int kmflags)
+{
+	task_t *task = (task_t *) obj;
+	
+	atomic_set(&task->refcount, 0);
+	atomic_set(&task->lifecount, 0);
+	atomic_set(&task->active_calls, 0);
+	
+	irq_spinlock_initialize(&task->lock, "task_t_lock");
+	mutex_initialize(&task->futexes_lock, MUTEX_PASSIVE);
+	
+	list_initialize(&task->th_head);
+	list_initialize(&task->sync_box_head);
+	
+	ipc_answerbox_init(&task->answerbox, task);
+	
+	size_t i;
 	for (i = 0; i < IPC_MAX_PHONES; i++)
-		ipc_phone_init(&ta->phones[i]);
+		ipc_phone_init(&task->phones[i]);
 	
 #ifdef CONFIG_UDEBUG
 	/* Init kbox stuff */
-	ta->kb.thread = NULL;
-	ipc_answerbox_init(&ta->kb.box, ta);
-	mutex_initialize(&ta->kb.cleanup_lock, MUTEX_PASSIVE);
+	task->kb.thread = NULL;
+	ipc_answerbox_init(&task->kb.box, task);
+	mutex_initialize(&task->kb.cleanup_lock, MUTEX_PASSIVE);
 #endif
 	
@@ -175,86 +185,83 @@
 task_t *task_create(as_t *as, const char *name)
 {
-	ipl_t ipl;
-	task_t *ta;
-	
-	ta = (task_t *) slab_alloc(task_slab, 0);
-	task_create_arch(ta);
-	ta->as = as;
-	memcpy(ta->name, name, TASK_NAME_BUFLEN);
-	ta->name[TASK_NAME_BUFLEN - 1] = 0;
-	
-	ta->context = CONTEXT;
-	ta->capabilities = 0;
-	ta->ucycles = 0;
-	ta->kcycles = 0;
-
-	ta->ipc_info.call_sent = 0;
-	ta->ipc_info.call_recieved = 0;
-	ta->ipc_info.answer_sent = 0;
-	ta->ipc_info.answer_recieved = 0;
-	ta->ipc_info.irq_notif_recieved = 0;
-	ta->ipc_info.forwarded = 0;
-
+	task_t *task = (task_t *) slab_alloc(task_slab, 0);
+	task_create_arch(task);
+	
+	task->as = as;
+	str_cpy(task->name, TASK_NAME_BUFLEN, name);
+	
+	task->context = CONTEXT;
+	task->capabilities = 0;
+	task->ucycles = 0;
+	task->kcycles = 0;
+	
+	task->ipc_info.call_sent = 0;
+	task->ipc_info.call_recieved = 0;
+	task->ipc_info.answer_sent = 0;
+	task->ipc_info.answer_recieved = 0;
+	task->ipc_info.irq_notif_recieved = 0;
+	task->ipc_info.forwarded = 0;
+	
 #ifdef CONFIG_UDEBUG
 	/* Init debugging stuff */
-	udebug_task_init(&ta->udebug);
+	udebug_task_init(&task->udebug);
 	
 	/* Init kbox stuff */
-	ta->kb.finished = false;
+	task->kb.finished = false;
 #endif
 	
 	if ((ipc_phone_0) &&
-	    (context_check(ipc_phone_0->task->context, ta->context)))
-		ipc_phone_connect(&ta->phones[0], ipc_phone_0);
-	
-	btree_create(&ta->futexes);
+	    (context_check(ipc_phone_0->task->context, task->context)))
+		ipc_phone_connect(&task->phones[0], ipc_phone_0);
+	
+	btree_create(&task->futexes);
 	
 	/*
 	 * Get a reference to the address space.
 	 */
-	as_hold(ta->as);
-
-	ipl = interrupts_disable();
-	spinlock_lock(&tasks_lock);
-	ta->taskid = ++task_counter;
-	avltree_node_initialize(&ta->tasks_tree_node);
-	ta->tasks_tree_node.key = ta->taskid; 
-	avltree_insert(&tasks_tree, &ta->tasks_tree_node);
-	spinlock_unlock(&tasks_lock);
-	interrupts_restore(ipl);
-	
-	return ta;
+	as_hold(task->as);
+	
+	irq_spinlock_lock(&tasks_lock, true);
+	
+	task->taskid = ++task_counter;
+	avltree_node_initialize(&task->tasks_tree_node);
+	task->tasks_tree_node.key = task->taskid;
+	avltree_insert(&tasks_tree, &task->tasks_tree_node);
+	
+	irq_spinlock_unlock(&tasks_lock, true);
+	
+	return task;
 }
 
 /** Destroy task.
  *
- * @param t Task to be destroyed.
- *
- */
-void task_destroy(task_t *t)
+ * @param task Task to be destroyed.
+ *
+ */
+void task_destroy(task_t *task)
 {
 	/*
 	 * Remove the task from the task B+tree.
 	 */
-	spinlock_lock(&tasks_lock);
-	avltree_delete(&tasks_tree, &t->tasks_tree_node);
-	spinlock_unlock(&tasks_lock);
+	irq_spinlock_lock(&tasks_lock, true);
+	avltree_delete(&tasks_tree, &task->tasks_tree_node);
+	irq_spinlock_unlock(&tasks_lock, true);
 	
 	/*
 	 * Perform architecture specific task destruction.
 	 */
-	task_destroy_arch(t);
+	task_destroy_arch(task);
 	
 	/*
 	 * Free up dynamically allocated state.
 	 */
-	btree_destroy(&t->futexes);
+	btree_destroy(&task->futexes);
 	
 	/*
 	 * Drop our reference to the address space.
 	 */
-	as_release(t->as);
-	
-	slab_free(task_slab, t);
+	as_release(task->as);
+	
+	slab_free(task_slab, task);
 }
 
@@ -263,9 +270,10 @@
  * Holding a reference to a task prevents destruction of that task.
  *
- * @param t		Task to be held.
- */
-void task_hold(task_t *t)
-{
-	atomic_inc(&t->refcount);
+ * @param task Task to be held.
+ *
+ */
+void task_hold(task_t *task)
+{
+	atomic_inc(&task->refcount);
 }
 
@@ -274,10 +282,11 @@
  * The last one to release a reference to a task destroys the task.
  *
- * @param t		Task to be released.
- */
-void task_release(task_t *t)
-{
-	if ((atomic_predec(&t->refcount)) == 0)
-		task_destroy(t);
+ * @param task Task to be released.
+ *
+ */
+void task_release(task_t *task)
+{
+	if ((atomic_predec(&task->refcount)) == 0)
+		task_destroy(task);
 }
 
@@ -346,5 +355,5 @@
 	
 	if (node)
-		return avltree_get_instance(node, task_t, tasks_tree_node); 
+		return avltree_get_instance(node, task_t, tasks_tree_node);
 	
 	return NULL;
@@ -356,31 +365,34 @@
  * already disabled.
  *
- * @param t       Pointer to thread.
+ * @param task    Pointer to the task.
  * @param ucycles Out pointer to sum of all user cycles.
  * @param kcycles Out pointer to sum of all kernel cycles.
  *
  */
-void task_get_accounting(task_t *t, uint64_t *ucycles, uint64_t *kcycles)
+void task_get_accounting(task_t *task, uint64_t *ucycles, uint64_t *kcycles)
 {
 	/* Accumulated values of task */
-	uint64_t uret = t->ucycles;
-	uint64_t kret = t->kcycles;
+	uint64_t uret = task->ucycles;
+	uint64_t kret = task->kcycles;
 	
 	/* Current values of threads */
 	link_t *cur;
-	for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
-		thread_t *thr = list_get_instance(cur, thread_t, th_link);
-		
-		spinlock_lock(&thr->lock);
+	for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
+		thread_t *thread = list_get_instance(cur, thread_t, th_link);
+		
+		irq_spinlock_lock(&thread->lock, false);
+		
 		/* Process only counted threads */
-		if (!thr->uncounted) {
-			if (thr == THREAD) {
+		if (!thread->uncounted) {
+			if (thread == THREAD) {
 				/* Update accounting of current thread */
 				thread_update_accounting(false);
-			} 
-			uret += thr->ucycles;
-			kret += thr->kcycles;
+			}
+			
+			uret += thread->ucycles;
+			kret += thread->kcycles;
 		}
-		spinlock_unlock(&thr->lock);
+		
+		irq_spinlock_unlock(&thread->lock, false);
 	}
 	
@@ -389,5 +401,5 @@
 }
 
-static void task_kill_internal(task_t *ta)
+static void task_kill_internal(task_t *task)
 {
 	link_t *cur;
@@ -396,21 +408,22 @@
 	 * Interrupt all threads.
 	 */
-	spinlock_lock(&ta->lock);
-	for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
-		thread_t *thr;
+	irq_spinlock_lock(&task->lock, false);
+	for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
+		thread_t *thread = list_get_instance(cur, thread_t, th_link);
 		bool sleeping = false;
 		
-		thr = list_get_instance(cur, thread_t, th_link);
-		
-		spinlock_lock(&thr->lock);
-		thr->interrupted = true;
-		if (thr->state == Sleeping)
+		irq_spinlock_lock(&thread->lock, false);
+		
+		thread->interrupted = true;
+		if (thread->state == Sleeping)
 			sleeping = true;
-		spinlock_unlock(&thr->lock);
+		
+		irq_spinlock_unlock(&thread->lock, false);
 		
 		if (sleeping)
-			waitq_interrupt_sleep(thr);
+			waitq_interrupt_sleep(thread);
 	}
-	spinlock_unlock(&ta->lock);
+	
+	irq_spinlock_unlock(&task->lock, false);
 }
 
@@ -427,58 +440,55 @@
 int task_kill(task_id_t id)
 {
-	ipl_t ipl;
-	task_t *ta;
-
 	if (id == 1)
 		return EPERM;
 	
-	ipl = interrupts_disable();
-	spinlock_lock(&tasks_lock);
-	if (!(ta = task_find_by_id(id))) {
-		spinlock_unlock(&tasks_lock);
-		interrupts_restore(ipl);
+	irq_spinlock_lock(&tasks_lock, true);
+	
+	task_t *task = task_find_by_id(id);
+	if (!task) {
+		irq_spinlock_unlock(&tasks_lock, true);
 		return ENOENT;
 	}
-	task_kill_internal(ta);
-	spinlock_unlock(&tasks_lock);
-	interrupts_restore(ipl);
-	return 0;
+	
+	task_kill_internal(task);
+	irq_spinlock_unlock(&tasks_lock, true);
+	
+	return EOK;
 }
 
 static bool task_print_walker(avltree_node_t *node, void *arg)
 {
-	task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
-	int j;
-	
-	spinlock_lock(&t->lock);
+	task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);
+	irq_spinlock_lock(&task->lock, false);
 	
 	uint64_t ucycles;
 	uint64_t kcycles;
 	char usuffix, ksuffix;
-	task_get_accounting(t, &ucycles, &kcycles);
+	task_get_accounting(task, &ucycles, &kcycles);
 	order_suffix(ucycles, &ucycles, &usuffix);
 	order_suffix(kcycles, &kcycles, &ksuffix);
 	
-#ifdef __32_BITS__	
+#ifdef __32_BITS__
 	printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %10p %10p %9" PRIu64 "%c %9"
-		PRIu64 "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as,
-		ucycles, usuffix, kcycles, ksuffix, atomic_get(&t->refcount),
-		atomic_get(&t->active_calls));
+	    PRIu64 "%c %7ld %6ld", task->taskid, task->name, task->context,
+	    task, task->as, ucycles, usuffix, kcycles, ksuffix,
+	    atomic_get(&task->refcount), atomic_get(&task->active_calls));
 #endif
 	
 #ifdef __64_BITS__
 	printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %18p %18p %9" PRIu64 "%c %9"
-		PRIu64 "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as,
-		ucycles, usuffix, kcycles, ksuffix, atomic_get(&t->refcount),
-		atomic_get(&t->active_calls));
-#endif
-	
-	for (j = 0; j < IPC_MAX_PHONES; j++) {
-		if (t->phones[j].callee)
-			printf(" %d:%p", j, t->phones[j].callee);
+	    PRIu64 "%c %7ld %6ld", task->taskid, task->name, task->context,
+	    task, task->as, ucycles, usuffix, kcycles, ksuffix,
+	    atomic_get(&task->refcount), atomic_get(&task->active_calls));
+#endif
+	
+	size_t i;
+	for (i = 0; i < IPC_MAX_PHONES; i++) {
+		if (task->phones[i].callee)
+			printf(" %" PRIs ":%p", i, task->phones[i].callee);
 	}
 	printf("\n");
 	
-	spinlock_unlock(&t->lock);
+	irq_spinlock_unlock(&task->lock, false);
 	return true;
 }
@@ -487,9 +497,6 @@
 void task_print_list(void)
 {
-	ipl_t ipl;
-	
 	/* Messing with task structures, avoid deadlock */
-	ipl = interrupts_disable();
-	spinlock_lock(&tasks_lock);
+	irq_spinlock_lock(&tasks_lock, true);
 	
 #ifdef __32_BITS__
@@ -509,6 +516,5 @@
 	avltree_walk(&tasks_tree, task_print_walker, NULL);
 	
-	spinlock_unlock(&tasks_lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&tasks_lock, true);
 }
 
Index: kernel/generic/src/proc/thread.c
===================================================================
--- kernel/generic/src/proc/thread.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/proc/thread.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -33,5 +33,5 @@
 /**
  * @file
- * @brief	Thread management functions.
+ * @brief Thread management functions.
  */
 
@@ -94,6 +94,7 @@
  *
  * For locking rules, see declaration thereof.
- */
-SPINLOCK_INITIALIZE(threads_lock);
+ *
+ */
+IRQ_SPINLOCK_INITIALIZE(threads_lock);
 
 /** AVL tree of all threads.
@@ -101,11 +102,13 @@
  * When a thread is found in the threads_tree AVL tree, it is guaranteed to
  * exist as long as the threads_lock is held.
- */
-avltree_t threads_tree;		
-
-SPINLOCK_INITIALIZE(tidlock);
-thread_id_t last_tid = 0;
+ *
+ */
+avltree_t threads_tree;
+
+IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
+static thread_id_t last_tid = 0;
 
 static slab_cache_t *thread_slab;
+
 #ifdef CONFIG_FPU
 slab_cache_t *fpu_context_slab;
@@ -125,15 +128,13 @@
 	void *arg = THREAD->thread_arg;
 	THREAD->last_cycle = get_cycle();
-
+	
 	/* This is where each thread wakes up after its creation */
-	spinlock_unlock(&THREAD->lock);
+	irq_spinlock_unlock(&THREAD->lock, false);
 	interrupts_enable();
-
+	
 	f(arg);
 	
 	/* Accumulate accounting to the task */
-	ipl_t ipl = interrupts_disable();
-	
-	spinlock_lock(&THREAD->lock);
+	irq_spinlock_lock(&THREAD->lock, true);
 	if (!THREAD->uncounted) {
 		thread_update_accounting(true);
@@ -142,73 +143,74 @@
 		uint64_t kcycles = THREAD->kcycles;
 		THREAD->kcycles = 0;
-
-		spinlock_unlock(&THREAD->lock);
 		
-		spinlock_lock(&TASK->lock);
+		irq_spinlock_pass(&THREAD->lock, &TASK->lock);
 		TASK->ucycles += ucycles;
 		TASK->kcycles += kcycles;
-		spinlock_unlock(&TASK->lock);
+		irq_spinlock_unlock(&TASK->lock, true);
 	} else
-		spinlock_unlock(&THREAD->lock);
-	
-	interrupts_restore(ipl);
+		irq_spinlock_unlock(&THREAD->lock, true);
 	
 	thread_exit();
-	/* not reached */
-}
-
-/** Initialization and allocation for thread_t structure */
-static int thr_constructor(void *obj, int kmflags)
-{
-	thread_t *t = (thread_t *) obj;
-
-	spinlock_initialize(&t->lock, "thread_t_lock");
-	link_initialize(&t->rq_link);
-	link_initialize(&t->wq_link);
-	link_initialize(&t->th_link);
-
+	
+	/* Not reached */
+}
+
+/** Initialization and allocation for thread_t structure
+ *
+ */
+static int thr_constructor(void *obj, unsigned int kmflags)
+{
+	thread_t *thread = (thread_t *) obj;
+	
+	irq_spinlock_initialize(&thread->lock, "thread_t_lock");
+	link_initialize(&thread->rq_link);
+	link_initialize(&thread->wq_link);
+	link_initialize(&thread->th_link);
+	
 	/* call the architecture-specific part of the constructor */
-	thr_constructor_arch(t);
+	thr_constructor_arch(thread);
 	
 #ifdef CONFIG_FPU
 #ifdef CONFIG_FPU_LAZY
-	t->saved_fpu_context = NULL;
-#else
-	t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
-	if (!t->saved_fpu_context)
+	thread->saved_fpu_context = NULL;
+#else /* CONFIG_FPU_LAZY */
+	thread->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
+	if (!thread->saved_fpu_context)
 		return -1;
-#endif
-#endif
-
-	t->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
-	if (!t->kstack) {
+#endif /* CONFIG_FPU_LAZY */
+#endif /* CONFIG_FPU */
+	
+	thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
+	if (!thread->kstack) {
 #ifdef CONFIG_FPU
-		if (t->saved_fpu_context)
-			slab_free(fpu_context_slab, t->saved_fpu_context);
+		if (thread->saved_fpu_context)
+			slab_free(fpu_context_slab, thread->saved_fpu_context);
 #endif
 		return -1;
 	}
-
+	
 #ifdef CONFIG_UDEBUG
-	mutex_initialize(&t->udebug.lock, MUTEX_PASSIVE);
-#endif
-
+	mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
+#endif
+	
 	return 0;
 }
 
 /** Destruction of thread_t object */
-static int thr_destructor(void *obj)
-{
-	thread_t *t = (thread_t *) obj;
-
+static size_t thr_destructor(void *obj)
+{
+	thread_t *thread = (thread_t *) obj;
+	
 	/* call the architecture-specific part of the destructor */
-	thr_destructor_arch(t);
-
-	frame_free(KA2PA(t->kstack));
+	thr_destructor_arch(thread);
+	
+	frame_free(KA2PA(thread->kstack));
+	
 #ifdef CONFIG_FPU
-	if (t->saved_fpu_context)
-		slab_free(fpu_context_slab, t->saved_fpu_context);
-#endif
-	return 1; /* One page freed */
+	if (thread->saved_fpu_context)
+		slab_free(fpu_context_slab, thread->saved_fpu_context);
+#endif
+	
+	return 1;  /* One page freed */
 }
 
@@ -221,13 +223,14 @@
 {
 	THREAD = NULL;
+	
 	atomic_set(&nrdy, 0);
 	thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
 	    thr_constructor, thr_destructor, 0);
-
+	
 #ifdef CONFIG_FPU
 	fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
 	    FPU_CONTEXT_ALIGN, NULL, NULL, 0);
 #endif
-
+	
 	avltree_create(&threads_tree);
 }
@@ -235,145 +238,132 @@
 /** Make thread ready
  *
- * Switch thread t to the ready state.
+ * Switch thread to the ready state.
  *
  * @param t Thread to make ready.
  *
  */
-void thread_ready(thread_t *t)
-{
-	cpu_t *cpu;
-	runq_t *r;
-	ipl_t ipl;
-	int i, avg;
-
-	ipl = interrupts_disable();
-
-	spinlock_lock(&t->lock);
-
-	ASSERT(!(t->state == Ready));
-
-	i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;
-	
-	cpu = CPU;
-	if (t->flags & THREAD_FLAG_WIRED) {
-		ASSERT(t->cpu != NULL);
-		cpu = t->cpu;
+void thread_ready(thread_t *thread)
+{
+	irq_spinlock_lock(&thread->lock, true);
+	
+	ASSERT(!(thread->state == Ready));
+	
+	int i = (thread->priority < RQ_COUNT - 1)
+	    ? ++thread->priority : thread->priority;
+	
+	cpu_t *cpu = CPU;
+	if (thread->flags & THREAD_FLAG_WIRED) {
+		ASSERT(thread->cpu != NULL);
+		cpu = thread->cpu;
 	}
-	t->state = Ready;
-	spinlock_unlock(&t->lock);
+	thread->state = Ready;
+	
+	irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
 	
 	/*
-	 * Append t to respective ready queue on respective processor.
+	 * Append thread to respective ready queue
+	 * on respective processor.
 	 */
-	r = &cpu->rq[i];
-	spinlock_lock(&r->lock);
-	list_append(&t->rq_link, &r->rq_head);
-	r->n++;
-	spinlock_unlock(&r->lock);
-
+	
+	list_append(&thread->rq_link, &cpu->rq[i].rq_head);
+	cpu->rq[i].n++;
+	irq_spinlock_unlock(&(cpu->rq[i].lock), true);
+	
 	atomic_inc(&nrdy);
-	// FIXME: Why is the avg value never read?
-	avg = atomic_get(&nrdy) / config.cpu_active;
+	// FIXME: Why is the avg value not used
+	// avg = atomic_get(&nrdy) / config.cpu_active;
 	atomic_inc(&cpu->nrdy);
-
+}
+
+/** Create new thread
+ *
+ * Create a new thread.
+ *
+ * @param func      Thread's implementing function.
+ * @param arg       Thread's implementing function argument.
+ * @param task      Task to which the thread belongs. The caller must
+ *                  guarantee that the task won't cease to exist during the
+ *                  call. The task's lock may not be held.
+ * @param flags     Thread flags.
+ * @param name      Symbolic name (a copy is made).
+ * @param uncounted Thread's accounting doesn't affect accumulated task
+ *                  accounting.
+ *
+ * @return New thread's structure on success, NULL on failure.
+ *
+ */
+thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
+    unsigned int flags, const char *name, bool uncounted)
+{
+	thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0);
+	if (!thread)
+		return NULL;
+	
+	/* Not needed, but good for debugging */
+	memsetb(thread->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
+	
+	irq_spinlock_lock(&tidlock, true);
+	thread->tid = ++last_tid;
+	irq_spinlock_unlock(&tidlock, true);
+	
+	context_save(&thread->saved_context);
+	context_set(&thread->saved_context, FADDR(cushion),
+	    (uintptr_t) thread->kstack, THREAD_STACK_SIZE);
+	
+	the_initialize((the_t *) thread->kstack);
+	
+	ipl_t ipl = interrupts_disable();
+	thread->saved_context.ipl = interrupts_read();
 	interrupts_restore(ipl);
-}
-
-/** Create new thread
- *
- * Create a new thread.
- *
- * @param func		Thread's implementing function.
- * @param arg		Thread's implementing function argument.
- * @param task		Task to which the thread belongs. The caller must
- * 			guarantee that the task won't cease to exist during the
- * 			call. The task's lock may not be held.
- * @param flags		Thread flags.
- * @param name		Symbolic name (a copy is made).
- * @param uncounted	Thread's accounting doesn't affect accumulated task
- * 			accounting.
- *
- * @return 		New thread's structure on success, NULL on failure.
- *
- */
-thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
-    int flags, const char *name, bool uncounted)
-{
-	thread_t *t;
-	ipl_t ipl;
-	
-	t = (thread_t *) slab_alloc(thread_slab, 0);
-	if (!t)
-		return NULL;
-	
-	/* Not needed, but good for debugging */
-	memsetb(t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
-	
-	ipl = interrupts_disable();
-	spinlock_lock(&tidlock);
-	t->tid = ++last_tid;
-	spinlock_unlock(&tidlock);
-	interrupts_restore(ipl);
-	
-	context_save(&t->saved_context);
-	context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack,
-	    THREAD_STACK_SIZE);
-	
-	the_initialize((the_t *) t->kstack);
-	
-	ipl = interrupts_disable();
-	t->saved_context.ipl = interrupts_read();
-	interrupts_restore(ipl);
-	
-	memcpy(t->name, name, THREAD_NAME_BUFLEN);
-	t->name[THREAD_NAME_BUFLEN - 1] = 0;
-	
-	t->thread_code = func;
-	t->thread_arg = arg;
-	t->ticks = -1;
-	t->ucycles = 0;
-	t->kcycles = 0;
-	t->uncounted = uncounted;
-	t->priority = -1;		/* start in rq[0] */
-	t->cpu = NULL;
-	t->flags = flags;
-	t->state = Entering;
-	t->call_me = NULL;
-	t->call_me_with = NULL;
-	
-	timeout_initialize(&t->sleep_timeout);
-	t->sleep_interruptible = false;
-	t->sleep_queue = NULL;
-	t->timeout_pending = 0;
-
-	t->in_copy_from_uspace = false;
-	t->in_copy_to_uspace = false;
-
-	t->interrupted = false;	
-	t->detached = false;
-	waitq_initialize(&t->join_wq);
-	
-	t->rwlock_holder_type = RWLOCK_NONE;
-		
-	t->task = task;
-	
-	t->fpu_context_exists = 0;
-	t->fpu_context_engaged = 0;
-
-	avltree_node_initialize(&t->threads_tree_node);
-	t->threads_tree_node.key = (uintptr_t) t;
-
+	
+	str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
+	
+	thread->thread_code = func;
+	thread->thread_arg = arg;
+	thread->ticks = -1;
+	thread->ucycles = 0;
+	thread->kcycles = 0;
+	thread->uncounted = uncounted;
+	thread->priority = -1;          /* Start in rq[0] */
+	thread->cpu = NULL;
+	thread->flags = flags;
+	thread->state = Entering;
+	thread->call_me = NULL;
+	thread->call_me_with = NULL;
+	
+	timeout_initialize(&thread->sleep_timeout);
+	thread->sleep_interruptible = false;
+	thread->sleep_queue = NULL;
+	thread->timeout_pending = false;
+	
+	thread->in_copy_from_uspace = false;
+	thread->in_copy_to_uspace = false;
+	
+	thread->interrupted = false;
+	thread->detached = false;
+	waitq_initialize(&thread->join_wq);
+	
+	thread->rwlock_holder_type = RWLOCK_NONE;
+	
+	thread->task = task;
+	
+	thread->fpu_context_exists = 0;
+	thread->fpu_context_engaged = 0;
+	
+	avltree_node_initialize(&thread->threads_tree_node);
+	thread->threads_tree_node.key = (uintptr_t) thread;
+	
 #ifdef CONFIG_UDEBUG
 	/* Init debugging stuff */
-	udebug_thread_initialize(&t->udebug);
-#endif
-
-	/* might depend on previous initialization */
-	thread_create_arch(t);	
-
+	udebug_thread_initialize(&thread->udebug);
+#endif
+	
+	/* Might depend on previous initialization */
+	thread_create_arch(thread);
+	
 	if (!(flags & THREAD_FLAG_NOATTACH))
-		thread_attach(t, task);
-
-	return t;
+		thread_attach(thread, task);
+	
+	return thread;
 }
 
@@ -381,37 +371,39 @@
  *
  * Detach thread from all queues, cpus etc. and destroy it.
- *
- * Assume thread->lock is held!!
- */
-void thread_destroy(thread_t *t)
-{
-	ASSERT(t->state == Exiting || t->state == Lingering);
-	ASSERT(t->task);
-	ASSERT(t->cpu);
-
-	spinlock_lock(&t->cpu->lock);
-	if (t->cpu->fpu_owner == t)
-		t->cpu->fpu_owner = NULL;
-	spinlock_unlock(&t->cpu->lock);
-
-	spinlock_unlock(&t->lock);
-
-	spinlock_lock(&threads_lock);
-	avltree_delete(&threads_tree, &t->threads_tree_node);
-	spinlock_unlock(&threads_lock);
-
+ * Assume thread->lock is held!
+ *
+ * @param thread  Thread to be destroyed.
+ * @param irq_res Indicate whether it should unlock thread->lock
+ *                in interrupts-restore mode.
+ *
+ */
+void thread_destroy(thread_t *thread, bool irq_res)
+{
+	ASSERT((thread->state == Exiting) || (thread->state == Lingering));
+	ASSERT(thread->task);
+	ASSERT(thread->cpu);
+	
+	irq_spinlock_lock(&thread->cpu->lock, false);
+	if (thread->cpu->fpu_owner == thread)
+		thread->cpu->fpu_owner = NULL;
+	irq_spinlock_unlock(&thread->cpu->lock, false);
+	
+	irq_spinlock_pass(&thread->lock, &threads_lock);
+	
+	avltree_delete(&threads_tree, &thread->threads_tree_node);
+	
+	irq_spinlock_pass(&threads_lock, &thread->task->lock);
+	
 	/*
 	 * Detach from the containing task.
 	 */
-	spinlock_lock(&t->task->lock);
-	list_remove(&t->th_link);
-	spinlock_unlock(&t->task->lock);	
-
+	list_remove(&thread->th_link);
+	irq_spinlock_unlock(&thread->task->lock, irq_res);
+	
 	/*
 	 * Drop the reference to the containing task.
 	 */
-	task_release(t->task);
-	
-	slab_free(thread_slab, t);
+	task_release(thread->task);
+	slab_free(thread_slab, thread);
 }
 
@@ -421,46 +413,41 @@
  * threads_tree.
  *
- * @param t	Thread to be attached to the task.
- * @param task	Task to which the thread is to be attached.
- */
-void thread_attach(thread_t *t, task_t *task)
-{
-	ipl_t ipl;
-
+ * @param thread Thread to be attached to the task.
+ * @param task Task to which the thread is to be attached.
+ *
+ */
+void thread_attach(thread_t *thread, task_t *task)
+{
 	/*
 	 * Attach to the specified task.
 	 */
-	ipl = interrupts_disable();
-	spinlock_lock(&task->lock);
-
+	irq_spinlock_lock(&task->lock, true);
+	
 	/* Hold a reference to the task. */
 	task_hold(task);
-
+	
 	/* Must not count kbox thread into lifecount */
-	if (t->flags & THREAD_FLAG_USPACE)
+	if (thread->flags & THREAD_FLAG_USPACE)
 		atomic_inc(&task->lifecount);
-
-	list_append(&t->th_link, &task->th_head);
-	spinlock_unlock(&task->lock);
-
+	
+	list_append(&thread->th_link, &task->th_head);
+	
+	irq_spinlock_pass(&task->lock, &threads_lock);
+	
 	/*
 	 * Register this thread in the system-wide list.
 	 */
-	spinlock_lock(&threads_lock);
-	avltree_insert(&threads_tree, &t->threads_tree_node);
-	spinlock_unlock(&threads_lock);
-	
-	interrupts_restore(ipl);
+	avltree_insert(&threads_tree, &thread->threads_tree_node);
+	irq_spinlock_unlock(&threads_lock, true);
 }
 
 /** Terminate thread.
  *
- * End current thread execution and switch it to the exiting state. All pending
- * timeouts are executed.
+ * End current thread execution and switch it to the exiting state.
+ * All pending timeouts are executed.
+ *
  */
 void thread_exit(void)
 {
-	ipl_t ipl;
-
 	if (THREAD->flags & THREAD_FLAG_USPACE) {
 #ifdef CONFIG_UDEBUG
@@ -475,4 +462,5 @@
 			 * can only be created by threads of the same task.
 			 * We are safe to perform cleanup.
+			 *
 			 */
 			ipc_cleanup();
@@ -481,24 +469,21 @@
 		}
 	}
-
+	
 restart:
-	ipl = interrupts_disable();
-	spinlock_lock(&THREAD->lock);
-	if (THREAD->timeout_pending) { 
-		/* busy waiting for timeouts in progress */
-		spinlock_unlock(&THREAD->lock);
-		interrupts_restore(ipl);
+	irq_spinlock_lock(&THREAD->lock, true);
+	if (THREAD->timeout_pending) {
+		/* Busy waiting for timeouts in progress */
+		irq_spinlock_unlock(&THREAD->lock, true);
 		goto restart;
 	}
 	
 	THREAD->state = Exiting;
-	spinlock_unlock(&THREAD->lock);
+	irq_spinlock_unlock(&THREAD->lock, true);
+	
 	scheduler();
-
+	
 	/* Not reached */
-	while (1)
-		;
-}
-
+	while (true);
+}
 
 /** Thread sleep
@@ -515,5 +500,5 @@
 	while (sec > 0) {
 		uint32_t period = (sec > 1000) ? 1000 : sec;
-	
+		
 		thread_usleep(period * 1000000);
 		sec -= period;
@@ -523,18 +508,16 @@
 /** Wait for another thread to exit.
  *
- * @param t Thread to join on exit.
- * @param usec Timeout in microseconds.
- * @param flags Mode of operation.
+ * @param thread Thread to join on exit.
+ * @param usec   Timeout in microseconds.
+ * @param flags  Mode of operation.
  *
  * @return An error code from errno.h or an error code from synch.h.
- */
-int thread_join_timeout(thread_t *t, uint32_t usec, int flags)
-{
-	ipl_t ipl;
-	int rc;
-
-	if (t == THREAD)
+ *
+ */
+int thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
+{
+	if (thread == THREAD)
 		return EINVAL;
-
+	
 	/*
 	 * Since thread join can only be called once on an undetached thread,
@@ -542,13 +525,9 @@
 	 */
 	
-	ipl = interrupts_disable();
-	spinlock_lock(&t->lock);
-	ASSERT(!t->detached);
-	spinlock_unlock(&t->lock);
-	interrupts_restore(ipl);
-	
-	rc = waitq_sleep_timeout(&t->join_wq, usec, flags);
-	
-	return rc;	
+	irq_spinlock_lock(&thread->lock, true);
+	ASSERT(!thread->detached);
+	irq_spinlock_unlock(&thread->lock, true);
+	
+	return waitq_sleep_timeout(&thread->join_wq, usec, flags);
 }
 
@@ -558,26 +537,28 @@
  * state, deallocate its resources.
  *
- * @param t Thread to be detached.
- */
-void thread_detach(thread_t *t)
-{
-	ipl_t ipl;
-
+ * @param thread Thread to be detached.
+ *
+ */
+void thread_detach(thread_t *thread)
+{
 	/*
 	 * Since the thread is expected not to be already detached,
 	 * pointer to it must be still valid.
 	 */
-	ipl = interrupts_disable();
-	spinlock_lock(&t->lock);
-	ASSERT(!t->detached);
-	if (t->state == Lingering) {
-		thread_destroy(t);	/* unlocks &t->lock */
-		interrupts_restore(ipl);
+	irq_spinlock_lock(&thread->lock, true);
+	ASSERT(!thread->detached);
+	
+	if (thread->state == Lingering) {
+		/*
+		 * Unlock &thread->lock and restore
+		 * interrupts in thread_destroy().
+		 */
+		thread_destroy(thread, true);
 		return;
 	} else {
-		t->detached = true;
+		thread->detached = true;
 	}
-	spinlock_unlock(&t->lock);
-	interrupts_restore(ipl);
+	
+	irq_spinlock_unlock(&thread->lock, true);
 }
 
@@ -601,5 +582,6 @@
  *
  * Register a function and its argument to be executed
- * on next context switch to the current thread.
+ * on next context switch to the current thread. Must
+ * be called with interrupts disabled.
  *
  * @param call_me      Out-of-context function.
@@ -609,67 +591,62 @@
 void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
 {
-	ipl_t ipl;
-	
-	ipl = interrupts_disable();
-	spinlock_lock(&THREAD->lock);
+	irq_spinlock_lock(&THREAD->lock, false);
 	THREAD->call_me = call_me;
 	THREAD->call_me_with = call_me_with;
-	spinlock_unlock(&THREAD->lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&THREAD->lock, false);
 }
 
 static bool thread_walker(avltree_node_t *node, void *arg)
 {
-	thread_t *t = avltree_get_instance(node, thread_t, threads_tree_node);
+	thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);
 	
 	uint64_t ucycles, kcycles;
 	char usuffix, ksuffix;
-	order_suffix(t->ucycles, &ucycles, &usuffix);
-	order_suffix(t->kcycles, &kcycles, &ksuffix);
-
+	order_suffix(thread->ucycles, &ucycles, &usuffix);
+	order_suffix(thread->kcycles, &kcycles, &ksuffix);
+	
 #ifdef __32_BITS__
 	printf("%-6" PRIu64" %-10s %10p %-8s %10p %-3" PRIu32 " %10p %10p %9"
-		PRIu64 "%c %9" PRIu64 "%c ", t->tid, t->name, t,
-		thread_states[t->state], t->task, t->task->context, t->thread_code,
-		t->kstack, ucycles, usuffix, kcycles, ksuffix);
-#endif
-
+		PRIu64 "%c %9" PRIu64 "%c ", thread->tid, thread->name, thread,
+		thread_states[thread->state], thread->task, thread->task->context,
+		thread->thread_code, thread->kstack, ucycles, usuffix, kcycles, ksuffix);
+#endif
+	
 #ifdef __64_BITS__
 	printf("%-6" PRIu64" %-10s %18p %-8s %18p %-3" PRIu32 " %18p %18p %9"
-		PRIu64 "%c %9" PRIu64 "%c ", t->tid, t->name, t,
-		thread_states[t->state], t->task, t->task->context, t->thread_code,
-		t->kstack, ucycles, usuffix, kcycles, ksuffix);
-#endif
-			
-	if (t->cpu)
-		printf("%-4u", t->cpu->id);
+		PRIu64 "%c %9" PRIu64 "%c ", thread->tid, thread->name, thread,
+		thread_states[thread->state], thread->task, thread->task->context,
+		thread->thread_code, thread->kstack, ucycles, usuffix, kcycles, ksuffix);
+#endif
+	
+	if (thread->cpu)
+		printf("%-4u", thread->cpu->id);
 	else
 		printf("none");
-			
-	if (t->state == Sleeping) {
+	
+	if (thread->state == Sleeping) {
 #ifdef __32_BITS__
-		printf(" %10p", t->sleep_queue);
-#endif
-
+		printf(" %10p", thread->sleep_queue);
+#endif
+		
 #ifdef __64_BITS__
-		printf(" %18p", t->sleep_queue);
+		printf(" %18p", thread->sleep_queue);
 #endif
 	}
-			
+	
 	printf("\n");
-
+	
 	return true;
 }
 
-/** Print list of threads debug info */
+/** Print list of threads debug info
+ *
+ */
 void thread_print_list(void)
 {
-	ipl_t ipl;
-	
 	/* Messing with thread structures, avoid deadlock */
-	ipl = interrupts_disable();
-	spinlock_lock(&threads_lock);
-
-#ifdef __32_BITS__	
+	irq_spinlock_lock(&threads_lock, true);
+	
+#ifdef __32_BITS__
 	printf("tid    name       address    state    task       "
 		"ctx code       stack      ucycles    kcycles    cpu  "
@@ -679,5 +656,5 @@
 		"----------\n");
 #endif
-
+	
 #ifdef __64_BITS__
 	printf("tid    name       address            state    task               "
@@ -688,9 +665,8 @@
 		"------------------\n");
 #endif
-
+	
 	avltree_walk(&threads_tree, thread_walker, NULL);
-
-	spinlock_unlock(&threads_lock);
-	interrupts_restore(ipl);
+	
+	irq_spinlock_unlock(&threads_lock, true);
 }
 
@@ -700,13 +676,13 @@
  * interrupts must be already disabled.
  *
- * @param t Pointer to thread.
+ * @param thread Pointer to thread.
  *
  * @return True if thread t is known to the system, false otherwise.
- */
-bool thread_exists(thread_t *t)
-{
-	avltree_node_t *node;
-
-	node = avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) t));
+ *
+ */
+bool thread_exists(thread_t *thread)
+{
+	avltree_node_t *node =
+	    avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) thread));
 	
 	return node != NULL;
@@ -718,14 +694,16 @@
  * interrupts must be already disabled.
  *
- * @param user	True to update user accounting, false for kernel.
+ * @param user True to update user accounting, false for kernel.
+ *
  */
 void thread_update_accounting(bool user)
 {
 	uint64_t time = get_cycle();
-	if (user) {
+	
+	if (user)
 		THREAD->ucycles += time - THREAD->last_cycle;
-	} else {
+	else
 		THREAD->kcycles += time - THREAD->last_cycle;
-	}
+	
 	THREAD->last_cycle = time;
 }
@@ -774,23 +752,21 @@
     size_t name_len, thread_id_t *uspace_thread_id)
 {
-	thread_t *t;
-	char namebuf[THREAD_NAME_BUFLEN];
-	uspace_arg_t *kernel_uarg;
-	int rc;
-
 	if (name_len > THREAD_NAME_BUFLEN - 1)
 		name_len = THREAD_NAME_BUFLEN - 1;
-
-	rc = copy_from_uspace(namebuf, uspace_name, name_len);
+	
+	char namebuf[THREAD_NAME_BUFLEN];
+	int rc = copy_from_uspace(namebuf, uspace_name, name_len);
 	if (rc != 0)
 		return (unative_t) rc;
-
+	
 	namebuf[name_len] = 0;
-
+	
 	/*
 	 * In case of failure, kernel_uarg will be deallocated in this function.
 	 * In case of success, kernel_uarg will be freed in uinit().
+	 *
 	 */
-	kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
+	uspace_arg_t *kernel_uarg =
+	    (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
 	
 	rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
@@ -799,13 +775,11 @@
 		return (unative_t) rc;
 	}
-
-	t = thread_create(uinit, kernel_uarg, TASK,
+	
+	thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
 	    THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
-	if (t) {
+	if (thread) {
 		if (uspace_thread_id != NULL) {
-			int rc;
-
-			rc = copy_to_uspace(uspace_thread_id, &t->tid,
-			    sizeof(t->tid));
+			rc = copy_to_uspace(uspace_thread_id, &thread->tid,
+			    sizeof(thread->tid));
 			if (rc != 0) {
 				/*
@@ -813,6 +787,7 @@
 				 * has already been created. We need to undo its
 				 * creation now.
+				 *
 				 */
-
+				
 				/*
 				 * The new thread structure is initialized, but
@@ -820,10 +795,11 @@
 				 * We can safely deallocate it.
 				 */
-				slab_free(thread_slab, t);
-			 	free(kernel_uarg);
-
+				slab_free(thread_slab, thread);
+				free(kernel_uarg);
+				
 				return (unative_t) rc;
 			 }
 		}
+		
 #ifdef CONFIG_UDEBUG
 		/*
@@ -833,15 +809,16 @@
 		 * THREAD_B events for threads that already existed
 		 * and could be detected with THREAD_READ before.
+		 *
 		 */
-		udebug_thread_b_event_attach(t, TASK);
+		udebug_thread_b_event_attach(thread, TASK);
 #else
-		thread_attach(t, TASK);
-#endif
-		thread_ready(t);
-
+		thread_attach(thread, TASK);
+#endif
+		thread_ready(thread);
+		
 		return 0;
 	} else
 		free(kernel_uarg);
-
+	
 	return (unative_t) ENOMEM;
 }
@@ -853,4 +830,5 @@
 {
 	thread_exit();
+	
 	/* Unreachable */
 	return 0;
@@ -863,4 +841,5 @@
  *
  * @return 0 on success or an error code from @ref errno.h.
+ *
  */
 unative_t sys_thread_get_id(thread_id_t *uspace_thread_id)
@@ -869,4 +848,5 @@
 	 * No need to acquire lock on THREAD because tid
 	 * remains constant for the lifespan of the thread.
+	 *
 	 */
 	return (unative_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
Index: kernel/generic/src/security/cap.c
===================================================================
--- kernel/generic/src/security/cap.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/security/cap.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -27,15 +27,15 @@
  */
 
-/** @addtogroup generic	
+/** @addtogroup generic
  * @{
  */
 
 /**
- * @file	cap.c
- * @brief	Capabilities control.
+ * @file cap.c
+ * @brief Capabilities control.
  *
  * @see cap.h
  */
- 
+
 #include <security/cap.h>
 #include <proc/task.h>
@@ -48,37 +48,27 @@
 /** Set capabilities.
  *
- * @param t Task whose capabilities are to be changed.
+ * @param task Task whose capabilities are to be changed.
  * @param caps New set of capabilities.
+ *
  */
-void cap_set(task_t *t, cap_t caps)
+void cap_set(task_t *task, cap_t caps)
 {
-	ipl_t ipl;
-	
-	ipl = interrupts_disable();
-	spinlock_lock(&t->lock);
-	
-	t->capabilities = caps;
-	
-	spinlock_unlock(&t->lock);
-	interrupts_restore(ipl);
+	irq_spinlock_lock(&task->lock, true);
+	task->capabilities = caps;
+	irq_spinlock_unlock(&task->lock, true);
 }
 
 /** Get capabilities.
  *
- * @param t Task whose capabilities are to be returned.
+ * @param task Task whose capabilities are to be returned.
+ *
  * @return Task's capabilities.
+ *
  */
-cap_t cap_get(task_t *t)
+cap_t cap_get(task_t *task)
 {
-	ipl_t ipl;
-	cap_t caps;
-	
-	ipl = interrupts_disable();
-	spinlock_lock(&t->lock);
-	
-	caps = t->capabilities;
-	
-	spinlock_unlock(&t->lock);
-	interrupts_restore(ipl);
+	irq_spinlock_lock(&task->lock, true);
+	cap_t caps = task->capabilities;
+	irq_spinlock_unlock(&task->lock, true);
 	
 	return caps;
@@ -93,34 +83,29 @@
  *
  * @return Zero on success or an error code from @ref errno.h.
+ *
  */
 unative_t sys_cap_grant(sysarg64_t *uspace_taskid_arg, cap_t caps)
 {
-	sysarg64_t taskid_arg;
-	task_t *t;
-	ipl_t ipl;
-	int rc;
-	
 	if (!(cap_get(TASK) & CAP_CAP))
 		return (unative_t) EPERM;
 	
-	rc = copy_from_uspace(&taskid_arg, uspace_taskid_arg, sizeof(sysarg64_t));
+	sysarg64_t taskid_arg;
+	int rc = copy_from_uspace(&taskid_arg, uspace_taskid_arg, sizeof(sysarg64_t));
 	if (rc != 0)
 		return (unative_t) rc;
-		
-	ipl = interrupts_disable();
-	spinlock_lock(&tasks_lock);
-	t = task_find_by_id((task_id_t) taskid_arg.value);
-	if ((!t) || (!context_check(CONTEXT, t->context))) {
-		spinlock_unlock(&tasks_lock);
-		interrupts_restore(ipl);
+	
+	irq_spinlock_lock(&tasks_lock, true);
+	task_t *task = task_find_by_id((task_id_t) taskid_arg.value);
+	
+	if ((!task) || (!context_check(CONTEXT, task->context))) {
+		irq_spinlock_unlock(&tasks_lock, true);
 		return (unative_t) ENOENT;
 	}
 	
-	spinlock_lock(&t->lock);
-	cap_set(t, cap_get(t) | caps);
-	spinlock_unlock(&t->lock);
+	irq_spinlock_lock(&task->lock, false);
+	task->capabilities |= caps;
+	irq_spinlock_unlock(&task->lock, false);
 	
-	spinlock_unlock(&tasks_lock);
-	interrupts_restore(ipl);	
+	irq_spinlock_unlock(&tasks_lock, true);
 	return 0;
 }
@@ -135,25 +120,21 @@
  *
  * @return Zero on success or an error code from @ref errno.h.
+ *
  */
 unative_t sys_cap_revoke(sysarg64_t *uspace_taskid_arg, cap_t caps)
 {
 	sysarg64_t taskid_arg;
-	task_t *t;
-	ipl_t ipl;
-	int rc;
-	
-	rc = copy_from_uspace(&taskid_arg, uspace_taskid_arg, sizeof(sysarg64_t));
+	int rc = copy_from_uspace(&taskid_arg, uspace_taskid_arg, sizeof(sysarg64_t));
 	if (rc != 0)
 		return (unative_t) rc;
-
-	ipl = interrupts_disable();
-	spinlock_lock(&tasks_lock);	
-	t = task_find_by_id((task_id_t) taskid_arg.value);
-	if ((!t) || (!context_check(CONTEXT, t->context))) {
-		spinlock_unlock(&tasks_lock);
-		interrupts_restore(ipl);
+	
+	irq_spinlock_lock(&tasks_lock, true);
+	
+	task_t *task = task_find_by_id((task_id_t) taskid_arg.value);
+	if ((!task) || (!context_check(CONTEXT, task->context))) {
+		irq_spinlock_unlock(&tasks_lock, true);
 		return (unative_t) ENOENT;
 	}
-
+	
 	/*
 	 * Revoking capabilities is different from granting them in that
@@ -161,17 +142,16 @@
 	 * doesn't have CAP_CAP.
 	 */
-	if (!(cap_get(TASK) & CAP_CAP) || !(t == TASK)) {
-		spinlock_unlock(&tasks_lock);
-		interrupts_restore(ipl);
+	irq_spinlock_lock(&TASK->lock, false);
+	
+	if ((!(TASK->capabilities & CAP_CAP)) || (task != TASK)) {
+		irq_spinlock_unlock(&TASK->lock, false);
+		irq_spinlock_unlock(&tasks_lock, true);
 		return (unative_t) EPERM;
 	}
 	
-	spinlock_lock(&t->lock);
-	cap_set(t, cap_get(t) & ~caps);
-	spinlock_unlock(&t->lock);
-
-	spinlock_unlock(&tasks_lock);
-
-	interrupts_restore(ipl);
+	task->capabilities &= ~caps;
+	irq_spinlock_unlock(&TASK->lock, false);
+	
+	irq_spinlock_unlock(&tasks_lock, true);
 	return 0;
 }
@@ -179,3 +159,2 @@
 /** @}
  */
-
Index: kernel/generic/src/synch/mutex.c
===================================================================
--- kernel/generic/src/synch/mutex.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/synch/mutex.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -67,5 +67,5 @@
  *
  */
-int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, int flags)
+int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, unsigned int flags)
 {
 	int rc;
Index: kernel/generic/src/synch/rwlock.c
===================================================================
--- kernel/generic/src/synch/rwlock.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/synch/rwlock.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -33,5 +33,5 @@
 /**
  * @file
- * @brief	Reader/Writer locks.
+ * @brief Reader/Writer locks.
  *
  * A reader/writer lock can be held by multiple readers at a time.
@@ -57,5 +57,5 @@
  * each thread can block on only one rwlock at a time.
  */
- 
+
 #include <synch/rwlock.h>
 #include <synch/spinlock.h>
@@ -69,9 +69,6 @@
 #include <panic.h>
 
-#define ALLOW_ALL		0
-#define ALLOW_READERS_ONLY	1
-
-static void let_others_in(rwlock_t *rwl, int readers_only);
-static void release_spinlock(void *arg);
+#define ALLOW_ALL           0
+#define ALLOW_READERS_ONLY  1
 
 /** Initialize reader/writer lock
@@ -80,11 +77,101 @@
  *
  * @param rwl Reader/Writer lock.
+ *
  */
 void rwlock_initialize(rwlock_t *rwl) {
-	spinlock_initialize(&rwl->lock, "rwlock_t");
+	irq_spinlock_initialize(&rwl->lock, "rwl.lock");
 	mutex_initialize(&rwl->exclusive, MUTEX_PASSIVE);
 	rwl->readers_in = 0;
 }
 
+/** Direct handoff of reader/writer lock ownership.
+ *
+ * Direct handoff of reader/writer lock ownership
+ * to waiting readers or a writer.
+ *
+ * Must be called with rwl->lock locked.
+ * Must be called with interrupts_disable()'d.
+ *
+ * @param rwl          Reader/Writer lock.
+ * @param readers_only See the description below.
+ *
+ * If readers_only is false: (unlock scenario)
+ * Let the first sleeper on 'exclusive' mutex in, no matter
+ * whether it is a reader or a writer. If there are more leading
+ * readers in line, let each of them in.
+ *
+ * Otherwise: (timeout scenario)
+ * Let all leading readers in.
+ *
+ */
+static void let_others_in(rwlock_t *rwl, int readers_only)
+{
+	rwlock_type_t type = RWLOCK_NONE;
+	thread_t *thread = NULL;
+	bool one_more = true;
+	
+	irq_spinlock_lock(&rwl->exclusive.sem.wq.lock, false);
+	
+	if (!list_empty(&rwl->exclusive.sem.wq.head))
+		thread = list_get_instance(rwl->exclusive.sem.wq.head.next,
+		    thread_t, wq_link);
+	
+	do {
+		if (thread) {
+			irq_spinlock_lock(&thread->lock, false);
+			type = thread->rwlock_holder_type;
+			irq_spinlock_unlock(&thread->lock, false);
+		}
+		
+		/*
+		 * If readers_only is true, we wake all leading readers
+		 * if and only if rwl is locked by another reader.
+		 * Assumption: readers_only ==> rwl->readers_in
+		 *
+		 */
+		if ((readers_only) && (type != RWLOCK_READER))
+			break;
+		
+		if (type == RWLOCK_READER) {
+			/*
+			 * Waking up a reader.
+			 * We are responsible for incrementing rwl->readers_in
+			 * for it.
+			 *
+			 */
+			 rwl->readers_in++;
+		}
+		
+		/*
+		 * Only the last iteration through this loop can increment
+		 * rwl->exclusive.sem.wq.missed_wakeups. All preceding
+		 * iterations will wake up a thread.
+		 *
+		 */
+		
+		/*
+		 * We call the internal version of waitq_wakeup, which
+		 * relies on the fact that the waitq is already locked.
+		 *
+		 */
+		_waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);
+		
+		thread = NULL;
+		if (!list_empty(&rwl->exclusive.sem.wq.head)) {
+			thread = list_get_instance(rwl->exclusive.sem.wq.head.next,
+			    thread_t, wq_link);
+			
+			if (thread) {
+				irq_spinlock_lock(&thread->lock, false);
+				if (thread->rwlock_holder_type != RWLOCK_READER)
+					one_more = false;
+				irq_spinlock_unlock(&thread->lock, false);
+			}
+		}
+	} while ((type == RWLOCK_READER) && (thread) && (one_more));
+	
+	irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
+}
+
 /** Acquire reader/writer lock for reading
  *
@@ -92,6 +179,6 @@
  * Timeout and willingness to block may be specified.
  *
- * @param rwl Reader/Writer lock.
- * @param usec Timeout in microseconds.
+ * @param rwl   Reader/Writer lock.
+ * @param usec  Timeout in microseconds.
  * @param flags Specify mode of operation.
  *
@@ -100,43 +187,54 @@
  *
  * @return See comment for waitq_sleep_timeout().
- */
-int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags)
-{
-	ipl_t ipl;
-	int rc;
-	
-	ipl = interrupts_disable();
-	spinlock_lock(&THREAD->lock);
+ *
+ */
+int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, unsigned int flags)
+{
+	irq_spinlock_lock(&THREAD->lock, true);
 	THREAD->rwlock_holder_type = RWLOCK_WRITER;
-	spinlock_unlock(&THREAD->lock);	
-	interrupts_restore(ipl);
-
+	irq_spinlock_unlock(&THREAD->lock, true);
+	
 	/*
 	 * Writers take the easy part.
 	 * They just need to acquire the exclusive mutex.
+	 *
 	 */
-	rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
+	int rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
 	if (SYNCH_FAILED(rc)) {
-
 		/*
 		 * Lock operation timed out or was interrupted.
 		 * The state of rwl is UNKNOWN at this point.
 		 * No claims about its holder can be made.
-		 */
-		 
-		ipl = interrupts_disable();
-		spinlock_lock(&rwl->lock);
+		 *
+		 */
+		irq_spinlock_lock(&rwl->lock, true);
+		
 		/*
 		 * Now when rwl is locked, we can inspect it again.
 		 * If it is held by some readers already, we can let
 		 * readers from the head of the wait queue in.
+		 *
 		 */
 		if (rwl->readers_in)
 			let_others_in(rwl, ALLOW_READERS_ONLY);
-		spinlock_unlock(&rwl->lock);
-		interrupts_restore(ipl);
+		
+		irq_spinlock_unlock(&rwl->lock, true);
 	}
 	
 	return rc;
+}
+
+/** Release spinlock callback
+ *
+ * This is a callback function invoked from the scheduler.
+ * The callback is registered in _rwlock_read_lock_timeout().
+ *
+ * @param arg Spinlock.
+ *
+ */
+static void release_spinlock(void *arg)
+{
+	if (arg != NULL)
+		irq_spinlock_unlock((irq_spinlock_t *) arg, false);
 }
 
@@ -146,6 +244,6 @@
  * Timeout and willingness to block may be specified.
  *
- * @param rwl Reader/Writer lock.
- * @param usec Timeout in microseconds.
+ * @param rwl   Reader/Writer lock.
+ * @param usec  Timeout in microseconds.
  * @param flags Select mode of operation.
  *
@@ -154,23 +252,27 @@
  *
  * @return See comment for waitq_sleep_timeout().
- */
-int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags)
-{
-	int rc;
-	ipl_t ipl;
-	
-	ipl = interrupts_disable();
-	spinlock_lock(&THREAD->lock);
+ *
+ */
+int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, unsigned int flags)
+{
+	/*
+	 * Since the locking scenarios get a little bit too
+	 * complicated, we do not rely on internal irq_spinlock_t
+	 * interrupt disabling logic here and control interrupts
+	 * manually.
+	 *
+	 */
+	ipl_t ipl = interrupts_disable();
+	
+	irq_spinlock_lock(&THREAD->lock, false);
 	THREAD->rwlock_holder_type = RWLOCK_READER;
-	spinlock_unlock(&THREAD->lock);	
-
-	spinlock_lock(&rwl->lock);
-
+	irq_spinlock_pass(&THREAD->lock, &rwl->lock);
+	
 	/*
 	 * Find out whether we can get what we want without blocking.
+	 *
 	 */
-	rc = mutex_trylock(&rwl->exclusive);
+	int rc = mutex_trylock(&rwl->exclusive);
 	if (SYNCH_FAILED(rc)) {
-
 		/*
 		 * 'exclusive' mutex is being held by someone else.
@@ -178,18 +280,19 @@
 		 * else waiting for it, we can enter the critical
 		 * section.
-		 */
-
+		 *
+		 */
+		
 		if (rwl->readers_in) {
-			spinlock_lock(&rwl->exclusive.sem.wq.lock);
+			irq_spinlock_lock(&rwl->exclusive.sem.wq.lock, false);
 			if (list_empty(&rwl->exclusive.sem.wq.head)) {
 				/*
 				 * We can enter.
 				 */
-				spinlock_unlock(&rwl->exclusive.sem.wq.lock);
+				irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
 				goto shortcut;
 			}
-			spinlock_unlock(&rwl->exclusive.sem.wq.lock);
+			irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
 		}
-
+		
 		/*
 		 * In order to prevent a race condition when a reader
@@ -197,11 +300,12 @@
 		 * we register a function to unlock rwl->lock
 		 * after this thread is put asleep.
-		 */
-		#ifdef CONFIG_SMP
+		 *
+		 */
+#ifdef CONFIG_SMP
 		thread_register_call_me(release_spinlock, &rwl->lock);
-		#else
+#else
 		thread_register_call_me(release_spinlock, NULL);
-		#endif
-				 
+#endif
+		
 		rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
 		switch (rc) {
@@ -209,7 +313,8 @@
 			/*
 			 * release_spinlock() wasn't called
+			 *
 			 */
 			thread_register_call_me(NULL, NULL);
-			spinlock_unlock(&rwl->lock);
+			irq_spinlock_unlock(&rwl->lock, false);
 		case ESYNCH_TIMEOUT:
 		case ESYNCH_INTERRUPTED:
@@ -217,6 +322,7 @@
 			 * The sleep timed out.
 			 * We just restore interrupt priority level.
+			 *
 			 */
-		case ESYNCH_OK_BLOCKED:		
+		case ESYNCH_OK_BLOCKED:
 			/*
 			 * We were woken with rwl->readers_in already
@@ -228,4 +334,5 @@
 			 * 'readers_in' is incremented. Same time means both
 			 * events happen atomically when rwl->lock is held.)
+			 *
 			 */
 			interrupts_restore(ipl);
@@ -240,16 +347,15 @@
 		return rc;
 	}
-
+	
 shortcut:
-
 	/*
 	 * We can increment readers_in only if we didn't go to sleep.
 	 * For sleepers, rwlock_let_others_in() will do the job.
+	 *
 	 */
 	rwl->readers_in++;
-	
-	spinlock_unlock(&rwl->lock);
+	irq_spinlock_unlock(&rwl->lock, false);
 	interrupts_restore(ipl);
-
+	
 	return ESYNCH_OK_ATOMIC;
 }
@@ -262,15 +368,11 @@
  *
  * @param rwl Reader/Writer lock.
+ *
  */
 void rwlock_write_unlock(rwlock_t *rwl)
 {
-	ipl_t ipl;
-	
-	ipl = interrupts_disable();
-	spinlock_lock(&rwl->lock);
+	irq_spinlock_lock(&rwl->lock, true);
 	let_others_in(rwl, ALLOW_ALL);
-	spinlock_unlock(&rwl->lock);
-	interrupts_restore(ipl);
-	
+	irq_spinlock_unlock(&rwl->lock, true);
 }
 
@@ -283,109 +385,14 @@
  *
  * @param rwl Reader/Writer lock.
+ *
  */
 void rwlock_read_unlock(rwlock_t *rwl)
 {
-	ipl_t ipl;
-
-	ipl = interrupts_disable();
-	spinlock_lock(&rwl->lock);
+	irq_spinlock_lock(&rwl->lock, true);
+	
 	if (!--rwl->readers_in)
 		let_others_in(rwl, ALLOW_ALL);
-	spinlock_unlock(&rwl->lock);
-	interrupts_restore(ipl);
-}
-
-
-/** Direct handoff of reader/writer lock ownership.
- *
- * Direct handoff of reader/writer lock ownership
- * to waiting readers or a writer.
- *
- * Must be called with rwl->lock locked.
- * Must be called with interrupts_disable()'d.
- *
- * @param rwl Reader/Writer lock.
- * @param readers_only See the description below.
- *
- * If readers_only is false: (unlock scenario)
- * Let the first sleeper on 'exclusive' mutex in, no matter
- * whether it is a reader or a writer. If there are more leading
- * readers in line, let each of them in.
- *
- * Otherwise: (timeout scenario)
- * Let all leading readers in.
- */
-void let_others_in(rwlock_t *rwl, int readers_only)
-{
-	rwlock_type_t type = RWLOCK_NONE;
-	thread_t *t = NULL;
-	bool one_more = true;
-	
-	spinlock_lock(&rwl->exclusive.sem.wq.lock);
-
-	if (!list_empty(&rwl->exclusive.sem.wq.head))
-		t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t,
-		    wq_link);
-	do {
-		if (t) {
-			spinlock_lock(&t->lock);
-			type = t->rwlock_holder_type;
-			spinlock_unlock(&t->lock);			
-		}
-	
-		/*
-		 * If readers_only is true, we wake all leading readers
-		 * if and only if rwl is locked by another reader.
-		 * Assumption: readers_only ==> rwl->readers_in
-		 */
-		if (readers_only && (type != RWLOCK_READER))
-			break;
-
-
-		if (type == RWLOCK_READER) {
-			/*
-			 * Waking up a reader.
-			 * We are responsible for incrementing rwl->readers_in
-			 * for it.
-			 */
-			 rwl->readers_in++;
-		}
-
-		/*
-		 * Only the last iteration through this loop can increment
-		 * rwl->exclusive.sem.wq.missed_wakeup's. All preceeding
-		 * iterations will wake up a thread.
-		 */
-		/* We call the internal version of waitq_wakeup, which
-		 * relies on the fact that the waitq is already locked.
-		 */
-		_waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);
-		
-		t = NULL;
-		if (!list_empty(&rwl->exclusive.sem.wq.head)) {
-			t = list_get_instance(rwl->exclusive.sem.wq.head.next,
-			    thread_t, wq_link);
-			if (t) {
-				spinlock_lock(&t->lock);
-				if (t->rwlock_holder_type != RWLOCK_READER)
-					one_more = false;
-				spinlock_unlock(&t->lock);	
-			}
-		}
-	} while ((type == RWLOCK_READER) && t && one_more);
-
-	spinlock_unlock(&rwl->exclusive.sem.wq.lock);
-}
-
-/** Release spinlock callback
- *
- * This is a callback function invoked from the scheduler.
- * The callback is registered in _rwlock_read_lock_timeout().
- *
- * @param arg Spinlock.
- */
-void release_spinlock(void *arg)
-{
-	spinlock_unlock((spinlock_t *) arg);
+	
+	irq_spinlock_unlock(&rwl->lock, true);
 }
 
Index: kernel/generic/src/synch/semaphore.c
===================================================================
--- kernel/generic/src/synch/semaphore.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/synch/semaphore.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -33,5 +33,5 @@
 /**
  * @file
- * @brief	Semaphores.
+ * @brief Semaphores.
  */
 
@@ -47,20 +47,15 @@
  * Initialize semaphore.
  *
- * @param s Semaphore.
+ * @param sem Semaphore.
  * @param val Maximal number of threads allowed to enter critical section.
+ *
  */
-void semaphore_initialize(semaphore_t *s, int val)
+void semaphore_initialize(semaphore_t *sem, int val)
 {
-	ipl_t ipl;
+	waitq_initialize(&sem->wq);
 	
-	waitq_initialize(&s->wq);
-	
-	ipl = interrupts_disable();
-
-	spinlock_lock(&s->wq.lock);
-	s->wq.missed_wakeups = val;
-	spinlock_unlock(&s->wq.lock);
-
-	interrupts_restore(ipl);
+	irq_spinlock_lock(&sem->wq.lock, true);
+	sem->wq.missed_wakeups = val;
+	irq_spinlock_unlock(&sem->wq.lock, true);
 }
 
@@ -70,6 +65,6 @@
  * Conditional mode and mode with timeout can be requested.
  *
- * @param s Semaphore.
- * @param usec Timeout in microseconds.
+ * @param sem   Semaphore.
+ * @param usec  Timeout in microseconds.
  * @param flags Select mode of operation.
  *
@@ -78,8 +73,9 @@
  *
  * @return See comment for waitq_sleep_timeout().
+ *
  */
-int _semaphore_down_timeout(semaphore_t *s, uint32_t usec, int flags)
+int _semaphore_down_timeout(semaphore_t *sem, uint32_t usec, unsigned int flags)
 {
-	return waitq_sleep_timeout(&s->wq, usec, flags); 
+	return waitq_sleep_timeout(&sem->wq, usec, flags);
 }
 
@@ -89,8 +85,9 @@
  *
  * @param s Semaphore.
+ *
  */
-void semaphore_up(semaphore_t *s)
+void semaphore_up(semaphore_t *sem)
 {
-	waitq_wakeup(&s->wq, WAKEUP_FIRST);
+	waitq_wakeup(&sem->wq, WAKEUP_FIRST);
 }
 
Index: kernel/generic/src/synch/waitq.c
===================================================================
--- kernel/generic/src/synch/waitq.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/synch/waitq.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -33,5 +33,5 @@
 /**
  * @file
- * @brief	Wait queue.
+ * @brief Wait queue.
  *
  * Wait queue is the basic synchronization primitive upon which all
@@ -41,4 +41,5 @@
  * fashion. Conditional operation as well as timeouts and interruptions
  * are supported.
+ *
  */
 
@@ -56,5 +57,5 @@
 #include <arch/cycle.h>
 
-static void waitq_sleep_timed_out(void *data);
+static void waitq_sleep_timed_out(void *);
 
 /** Initialize wait queue
@@ -62,9 +63,10 @@
  * Initialize wait queue.
  *
- * @param wq		Pointer to wait queue to be initialized.
+ * @param wq Pointer to wait queue to be initialized.
+ *
  */
 void waitq_initialize(waitq_t *wq)
 {
-	spinlock_initialize(&wq->lock, "waitq_lock");
+	irq_spinlock_initialize(&wq->lock, "wq.lock");
 	list_initialize(&wq->head);
 	wq->missed_wakeups = 0;
@@ -81,41 +83,44 @@
  * timeout at all.
  *
- * @param data		Pointer to the thread that called waitq_sleep_timeout().
+ * @param data Pointer to the thread that called waitq_sleep_timeout().
+ *
  */
 void waitq_sleep_timed_out(void *data)
 {
-	thread_t *t = (thread_t *) data;
-	waitq_t *wq;
+	thread_t *thread = (thread_t *) data;
 	bool do_wakeup = false;
 	DEADLOCK_PROBE_INIT(p_wqlock);
-
-	spinlock_lock(&threads_lock);
-	if (!thread_exists(t))
+	
+	irq_spinlock_lock(&threads_lock, false);
+	if (!thread_exists(thread))
 		goto out;
-
+	
 grab_locks:
-	spinlock_lock(&t->lock);
-	if ((wq = t->sleep_queue)) {		/* assignment */
-		if (!spinlock_trylock(&wq->lock)) {
-			spinlock_unlock(&t->lock);
+	irq_spinlock_lock(&thread->lock, false);
+	
+	waitq_t *wq;
+	if ((wq = thread->sleep_queue)) {  /* Assignment */
+		if (!irq_spinlock_trylock(&wq->lock)) {
+			irq_spinlock_unlock(&thread->lock, false);
 			DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
-			goto grab_locks;	/* avoid deadlock */
-		}
-
-		list_remove(&t->wq_link);
-		t->saved_context = t->sleep_timeout_context;
+			/* Avoid deadlock */
+			goto grab_locks;
+		}
+		
+		list_remove(&thread->wq_link);
+		thread->saved_context = thread->sleep_timeout_context;
 		do_wakeup = true;
-		t->sleep_queue = NULL;
-		spinlock_unlock(&wq->lock);
-	}
-	
-	t->timeout_pending = false;
-	spinlock_unlock(&t->lock);
+		thread->sleep_queue = NULL;
+		irq_spinlock_unlock(&wq->lock, false);
+	}
+	
+	thread->timeout_pending = false;
+	irq_spinlock_unlock(&thread->lock, false);
 	
 	if (do_wakeup)
-		thread_ready(t);
-
+		thread_ready(thread);
+	
 out:
-	spinlock_unlock(&threads_lock);
+	irq_spinlock_unlock(&threads_lock, false);
 }
 
@@ -125,52 +130,54 @@
  * If the thread is not found sleeping, no action is taken.
  *
- * @param t		Thread to be interrupted.
- */
-void waitq_interrupt_sleep(thread_t *t)
-{
+ * @param thread Thread to be interrupted.
+ *
+ */
+void waitq_interrupt_sleep(thread_t *thread)
+{
+	bool do_wakeup = false;
+	DEADLOCK_PROBE_INIT(p_wqlock);
+	
+	irq_spinlock_lock(&threads_lock, true);
+	if (!thread_exists(thread))
+		goto out;
+	
+grab_locks:
+	irq_spinlock_lock(&thread->lock, false);
+	
 	waitq_t *wq;
-	bool do_wakeup = false;
-	ipl_t ipl;
-	DEADLOCK_PROBE_INIT(p_wqlock);
-
-	ipl = interrupts_disable();
-	spinlock_lock(&threads_lock);
-	if (!thread_exists(t))
-		goto out;
-
-grab_locks:
-	spinlock_lock(&t->lock);
-	if ((wq = t->sleep_queue)) {		/* assignment */
-		if (!(t->sleep_interruptible)) {
+	if ((wq = thread->sleep_queue)) {  /* Assignment */
+		if (!(thread->sleep_interruptible)) {
 			/*
 			 * The sleep cannot be interrupted.
+			 *
 			 */
-			spinlock_unlock(&t->lock);
+			irq_spinlock_unlock(&thread->lock, false);
 			goto out;
 		}
-			
-		if (!spinlock_trylock(&wq->lock)) {
-			spinlock_unlock(&t->lock);
+		
+		if (!irq_spinlock_trylock(&wq->lock)) {
+			irq_spinlock_unlock(&thread->lock, false);
 			DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
-			goto grab_locks;	/* avoid deadlock */
-		}
-
-		if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
-			t->timeout_pending = false;
-
-		list_remove(&t->wq_link);
-		t->saved_context = t->sleep_interruption_context;
+			/* Avoid deadlock */
+			goto grab_locks;
+		}
+		
+		if ((thread->timeout_pending) &&
+		    (timeout_unregister(&thread->sleep_timeout)))
+			thread->timeout_pending = false;
+		
+		list_remove(&thread->wq_link);
+		thread->saved_context = thread->sleep_interruption_context;
 		do_wakeup = true;
-		t->sleep_queue = NULL;
-		spinlock_unlock(&wq->lock);
-	}
-	spinlock_unlock(&t->lock);
-
+		thread->sleep_queue = NULL;
+		irq_spinlock_unlock(&wq->lock, false);
+	}
+	irq_spinlock_unlock(&thread->lock, false);
+	
 	if (do_wakeup)
-		thread_ready(t);
-
+		thread_ready(thread);
+	
 out:
-	spinlock_unlock(&threads_lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&threads_lock, true);
 }
 
@@ -180,30 +187,31 @@
  * is sleeping interruptibly.
  *
- * @param wq		Pointer to wait queue.
+ * @param wq Pointer to wait queue.
+ *
  */
 void waitq_unsleep(waitq_t *wq)
 {
-	ipl_t ipl;
-
-	ipl = interrupts_disable();
-	spinlock_lock(&wq->lock);
-
+	irq_spinlock_lock(&wq->lock, true);
+	
 	if (!list_empty(&wq->head)) {
-		thread_t *t;
-		
-		t = list_get_instance(wq->head.next, thread_t, wq_link);
-		spinlock_lock(&t->lock);
-		ASSERT(t->sleep_interruptible);
-		if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
-			t->timeout_pending = false;
-		list_remove(&t->wq_link);
-		t->saved_context = t->sleep_interruption_context;
-		t->sleep_queue = NULL;
-		spinlock_unlock(&t->lock);
-		thread_ready(t);
-	}
-
-	spinlock_unlock(&wq->lock);
-	interrupts_restore(ipl);
+		thread_t *thread = list_get_instance(wq->head.next, thread_t, wq_link);
+		
+		irq_spinlock_lock(&thread->lock, false);
+		
+		ASSERT(thread->sleep_interruptible);
+		
+		if ((thread->timeout_pending) &&
+		    (timeout_unregister(&thread->sleep_timeout)))
+			thread->timeout_pending = false;
+		
+		list_remove(&thread->wq_link);
+		thread->saved_context = thread->sleep_interruption_context;
+		thread->sleep_queue = NULL;
+		
+		irq_spinlock_unlock(&thread->lock, false);
+		thread_ready(thread);
+	}
+	
+	irq_spinlock_unlock(&wq->lock, true);
 }
 
@@ -221,14 +229,14 @@
  * and all the *_timeout() functions use it.
  *
- * @param wq		Pointer to wait queue.
- * @param usec		Timeout in microseconds.
- * @param flags		Specify mode of the sleep.
+ * @param wq    Pointer to wait queue.
+ * @param usec  Timeout in microseconds.
+ * @param flags Specify mode of the sleep.
  *
  * The sleep can be interrupted only if the
  * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
- * 
+ *
  * If usec is greater than zero, regardless of the value of the
  * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either
- * timeout, interruption or wakeup comes. 
+ * timeout, interruption or wakeup comes.
  *
  * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags,
@@ -238,31 +246,22 @@
  * call will immediately return, reporting either success or failure.
  *
- * @return		Returns one of ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT,
- * 			ESYNCH_INTERRUPTED, ESYNCH_OK_ATOMIC and
- * 			ESYNCH_OK_BLOCKED.
- *
- * @li	ESYNCH_WOULD_BLOCK means that the sleep failed because at the time of
- *	the call there was no pending wakeup.
- *
- * @li	ESYNCH_TIMEOUT means that the sleep timed out.
- *
- * @li	ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread.
- *
- * @li	ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was
- * 	a pending wakeup at the time of the call. The caller was not put
- * 	asleep at all.
- * 
- * @li	ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was 
- * 	attempted.
- */
-int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, int flags)
-{
-	ipl_t ipl;
-	int rc;
-
+ * @return ESYNCH_WOULD_BLOCK, meaning that the sleep failed because at the
+ *         time of the call there was no pending wakeup
+ * @return ESYNCH_TIMEOUT, meaning that the sleep timed out.
+ * @return ESYNCH_INTERRUPTED, meaning that somebody interrupted the sleeping
+ *         thread.
+ * @return ESYNCH_OK_ATOMIC, meaning that the sleep succeeded and that there
+ *         was a pending wakeup at the time of the call. The caller was not put
+ *         asleep at all.
+ * @return ESYNCH_OK_BLOCKED, meaning that the sleep succeeded; the full sleep
+ *         was attempted.
+ *
+ */
+int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags)
+{
 	ASSERT((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));
 	
-	ipl = waitq_sleep_prepare(wq);
-	rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
+	ipl_t ipl = waitq_sleep_prepare(wq);
+	int rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
 	waitq_sleep_finish(wq, rc, ipl);
 	return rc;
@@ -274,7 +273,8 @@
  * and interrupts disabled.
  *
- * @param wq		Wait queue.
- *
- * @return		Interrupt level as it existed on entry to this function.
+ * @param wq Wait queue.
+ *
+ * @return Interrupt level as it existed on entry to this function.
+ *
  */
 ipl_t waitq_sleep_prepare(waitq_t *wq)
@@ -284,6 +284,6 @@
 restart:
 	ipl = interrupts_disable();
-
-	if (THREAD) {	/* needed during system initiailzation */
+	
+	if (THREAD) {  /* Needed during system initialization */
 		/*
 		 * Busy waiting for a delayed timeout.
@@ -292,15 +292,18 @@
 		 * Simply, the thread is not allowed to go to sleep if
 		 * there are timeouts in progress.
+		 *
 		 */
-		spinlock_lock(&THREAD->lock);
+		irq_spinlock_lock(&THREAD->lock, false);
+		
 		if (THREAD->timeout_pending) {
-			spinlock_unlock(&THREAD->lock);
+			irq_spinlock_unlock(&THREAD->lock, false);
 			interrupts_restore(ipl);
 			goto restart;
 		}
-		spinlock_unlock(&THREAD->lock);
-	}
-													
-	spinlock_lock(&wq->lock);
+		
+		irq_spinlock_unlock(&THREAD->lock, false);
+	}
+	
+	irq_spinlock_lock(&wq->lock, false);
 	return ipl;
 }
@@ -312,7 +315,8 @@
  * lock is released.
  *
- * @param wq		Wait queue.
- * @param rc		Return code of waitq_sleep_timeout_unsafe().
- * @param ipl		Interrupt level returned by waitq_sleep_prepare().
+ * @param wq  Wait queue.
+ * @param rc  Return code of waitq_sleep_timeout_unsafe().
+ * @param ipl Interrupt level returned by waitq_sleep_prepare().
+ *
  */
 void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
@@ -321,9 +325,10 @@
 	case ESYNCH_WOULD_BLOCK:
 	case ESYNCH_OK_ATOMIC:
-		spinlock_unlock(&wq->lock);
+		irq_spinlock_unlock(&wq->lock, false);
 		break;
 	default:
 		break;
 	}
+	
 	interrupts_restore(ipl);
 }
@@ -335,20 +340,20 @@
  * and followed by a call to waitq_sleep_finish().
  *
- * @param wq		See waitq_sleep_timeout().
- * @param usec		See waitq_sleep_timeout().
- * @param flags		See waitq_sleep_timeout().
- *
- * @return		See waitq_sleep_timeout().
- */
-int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, int flags)
-{
-	/* checks whether to go to sleep at all */
+ * @param wq    See waitq_sleep_timeout().
+ * @param usec  See waitq_sleep_timeout().
+ * @param flags See waitq_sleep_timeout().
+ *
+ * @return See waitq_sleep_timeout().
+ *
+ */
+int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags)
+{
+	/* Checks whether to go to sleep at all */
 	if (wq->missed_wakeups) {
 		wq->missed_wakeups--;
 		return ESYNCH_OK_ATOMIC;
-	}
-	else {
+	} else {
 		if (PARAM_NON_BLOCKING(flags, usec)) {
-			/* return immediatelly instead of going to sleep */
+			/* Return immediately instead of going to sleep */
 			return ESYNCH_WOULD_BLOCK;
 		}
@@ -357,22 +362,24 @@
 	/*
 	 * Now we are firmly decided to go to sleep.
+	 *
 	 */
-	spinlock_lock(&THREAD->lock);
-
+	irq_spinlock_lock(&THREAD->lock, false);
+	
 	if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {
-
 		/*
 		 * If the thread was already interrupted,
 		 * don't go to sleep at all.
+		 *
 		 */
 		if (THREAD->interrupted) {
-			spinlock_unlock(&THREAD->lock);
-			spinlock_unlock(&wq->lock);
+			irq_spinlock_unlock(&THREAD->lock, false);
+			irq_spinlock_unlock(&wq->lock, false);
 			return ESYNCH_INTERRUPTED;
 		}
-
+		
 		/*
 		 * Set context that will be restored if the sleep
 		 * of this thread is ever interrupted.
+		 *
 		 */
 		THREAD->sleep_interruptible = true;
@@ -380,12 +387,10 @@
 			/* Short emulation of scheduler() return code. */
 			THREAD->last_cycle = get_cycle();
-			spinlock_unlock(&THREAD->lock);
+			irq_spinlock_unlock(&THREAD->lock, false);
 			return ESYNCH_INTERRUPTED;
 		}
-
-	} else {
+	} else
 		THREAD->sleep_interruptible = false;
-	}
-
+	
 	if (usec) {
 		/* We use the timeout variant. */
@@ -393,28 +398,29 @@
 			/* Short emulation of scheduler() return code. */
 			THREAD->last_cycle = get_cycle();
-			spinlock_unlock(&THREAD->lock);
+			irq_spinlock_unlock(&THREAD->lock, false);
 			return ESYNCH_TIMEOUT;
 		}
+		
 		THREAD->timeout_pending = true;
 		timeout_register(&THREAD->sleep_timeout, (uint64_t) usec,
 		    waitq_sleep_timed_out, THREAD);
 	}
-
+	
 	list_append(&THREAD->wq_link, &wq->head);
-
+	
 	/*
 	 * Suspend execution.
+	 *
 	 */
 	THREAD->state = Sleeping;
 	THREAD->sleep_queue = wq;
-
-	spinlock_unlock(&THREAD->lock);
-
+	
+	irq_spinlock_unlock(&THREAD->lock, false);
+	
 	/* wq->lock is released in scheduler_separated_stack() */
-	scheduler(); 
+	scheduler();
 	
 	return ESYNCH_OK_BLOCKED;
 }
-
 
 /** Wake up first thread sleeping in a wait queue
@@ -426,18 +432,13 @@
  * timeout.
  *
- * @param wq		Pointer to wait queue.
- * @param mode		Wakeup mode.
+ * @param wq   Pointer to wait queue.
+ * @param mode Wakeup mode.
+ *
  */
 void waitq_wakeup(waitq_t *wq, wakeup_mode_t mode)
 {
-	ipl_t ipl;
-
-	ipl = interrupts_disable();
-	spinlock_lock(&wq->lock);
-
+	irq_spinlock_lock(&wq->lock, true);
 	_waitq_wakeup_unsafe(wq, mode);
-
-	spinlock_unlock(&wq->lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&wq->lock, true);
 }
 
@@ -447,26 +448,27 @@
  * assumes wq->lock is already locked and interrupts are already disabled.
  *
- * @param wq		Pointer to wait queue.
- * @param mode		If mode is WAKEUP_FIRST, then the longest waiting
- * 			thread, if any, is woken up. If mode is WAKEUP_ALL, then
- *			all waiting threads, if any, are woken up. If there are
- *			no waiting threads to be woken up, the missed wakeup is
- *			recorded in the wait queue.
+ * @param wq   Pointer to wait queue.
+ * @param mode If mode is WAKEUP_FIRST, then the longest waiting
+ *             thread, if any, is woken up. If mode is WAKEUP_ALL, then
+ *             all waiting threads, if any, are woken up. If there are
+ *             no waiting threads to be woken up, the missed wakeup is
+ *             recorded in the wait queue.
+ *
  */
 void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode)
 {
-	thread_t *t;
 	size_t count = 0;
-
-loop:	
+	
+loop:
 	if (list_empty(&wq->head)) {
 		wq->missed_wakeups++;
-		if (count && mode == WAKEUP_ALL)
+		if ((count) && (mode == WAKEUP_ALL))
 			wq->missed_wakeups--;
+		
 		return;
 	}
-
+	
 	count++;
-	t = list_get_instance(wq->head.next, thread_t, wq_link);
+	thread_t *thread = list_get_instance(wq->head.next, thread_t, wq_link);
 	
 	/*
@@ -480,20 +482,23 @@
 	 * invariant must hold:
 	 *
-	 * t->sleep_queue != NULL <=> t sleeps in a wait queue
+	 * thread->sleep_queue != NULL <=> thread sleeps in a wait queue
 	 *
 	 * For an observer who locks the thread, the invariant
 	 * holds only when the lock is held prior to removing
 	 * it from the wait queue.
+	 *
 	 */
-	spinlock_lock(&t->lock);
-	list_remove(&t->wq_link);
-	
-	if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
-		t->timeout_pending = false;
-	t->sleep_queue = NULL;
-	spinlock_unlock(&t->lock);
-
-	thread_ready(t);
-
+	irq_spinlock_lock(&thread->lock, false);
+	list_remove(&thread->wq_link);
+	
+	if ((thread->timeout_pending) &&
+	    (timeout_unregister(&thread->sleep_timeout)))
+		thread->timeout_pending = false;
+	
+	thread->sleep_queue = NULL;
+	irq_spinlock_unlock(&thread->lock, false);
+	
+	thread_ready(thread);
+	
 	if (mode == WAKEUP_ALL)
 		goto loop;
Index: kernel/generic/src/syscall/syscall.c
===================================================================
--- kernel/generic/src/syscall/syscall.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/syscall/syscall.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -59,24 +59,20 @@
     unative_t a4, unative_t a5, unative_t a6, unative_t id)
 {
-	unative_t rc;
-	ipl_t ipl;
-
 	/* Do userpace accounting */
-	ipl = interrupts_disable();
-	spinlock_lock(&THREAD->lock);
+	irq_spinlock_lock(&THREAD->lock, true);
 	thread_update_accounting(true);
-	spinlock_unlock(&THREAD->lock);
-	interrupts_restore(ipl);
-
+	irq_spinlock_unlock(&THREAD->lock, true);
+	
 #ifdef CONFIG_UDEBUG
 	/*
 	 * Early check for undebugged tasks. We do not lock anything as this
 	 * test need not be precise in either direction.
+	 *
 	 */
-	if (THREAD->udebug.active) {
+	if (THREAD->udebug.active)
 		udebug_syscall_event(a1, a2, a3, a4, a5, a6, id, 0, false);
-	}
 #endif
 	
+	unative_t rc;
 	if (id < SYSCALL_END) {
 		rc = syscall_table[id](a1, a2, a3, a4, a5, a6);
@@ -93,5 +89,5 @@
 	if (THREAD->udebug.active) {
 		udebug_syscall_event(a1, a2, a3, a4, a5, a6, id, rc, true);
-	
+		
 		/*
 		 * Stopping point needed for tasks that only invoke
@@ -103,11 +99,9 @@
 	}
 #endif
-
+	
 	/* Do kernel accounting */
-	(void) interrupts_disable();
-	spinlock_lock(&THREAD->lock);
+	irq_spinlock_lock(&THREAD->lock, true);
 	thread_update_accounting(false);
-	spinlock_unlock(&THREAD->lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&THREAD->lock, true);
 	
 	return rc;
Index: kernel/generic/src/sysinfo/stats.c
===================================================================
--- kernel/generic/src/sysinfo/stats.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/sysinfo/stats.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -110,10 +110,7 @@
 	}
 	
-	/* Each CPU structure is locked separatelly */
-	ipl_t ipl = interrupts_disable();
-	
 	size_t i;
 	for (i = 0; i < config.cpu_count; i++) {
-		spinlock_lock(&cpus[i].lock);
+		irq_spinlock_lock(&cpus[i].lock, true);
 		
 		stats_cpus[i].id = cpus[i].id;
@@ -123,8 +120,6 @@
 		stats_cpus[i].idle_ticks = cpus[i].idle_ticks;
 		
-		spinlock_unlock(&cpus[i].lock);
-	}
-	
-	interrupts_restore(ipl);
+		irq_spinlock_unlock(&cpus[i].lock, true);
+	}
 	
 	return ((void *) stats_cpus);
@@ -235,5 +230,5 @@
 	
 	/* Interrupts are already disabled */
-	spinlock_lock(&(task->lock));
+	irq_spinlock_lock(&(task->lock), false);
 	
 	/* Record the statistics and increment the iterator */
@@ -241,5 +236,5 @@
 	(*iterator)++;
 	
-	spinlock_unlock(&(task->lock));
+	irq_spinlock_unlock(&(task->lock), false);
 	
 	return true;
@@ -260,6 +255,5 @@
 {
 	/* Messing with task structures, avoid deadlock */
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&tasks_lock);
+	irq_spinlock_lock(&tasks_lock, true);
 	
 	/* First walk the task tree to count the tasks */
@@ -269,7 +263,5 @@
 	if (count == 0) {
 		/* No tasks found (strange) */
-		spinlock_unlock(&tasks_lock);
-		interrupts_restore(ipl);
-		
+		irq_spinlock_unlock(&tasks_lock, true);
 		*size = 0;
 		return NULL;
@@ -278,6 +270,5 @@
 	*size = sizeof(stats_task_t) * count;
 	if (dry_run) {
-		spinlock_unlock(&tasks_lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&tasks_lock, true);
 		return NULL;
 	}
@@ -286,7 +277,5 @@
 	if (stats_tasks == NULL) {
 		/* No free space for allocation */
-		spinlock_unlock(&tasks_lock);
-		interrupts_restore(ipl);
-		
+		irq_spinlock_unlock(&tasks_lock, true);
 		*size = 0;
 		return NULL;
@@ -297,6 +286,5 @@
 	avltree_walk(&tasks_tree, task_serialize_walker, (void *) &iterator);
 	
-	spinlock_unlock(&tasks_lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&tasks_lock, true);
 	
 	return ((void *) stats_tasks);
@@ -346,5 +334,5 @@
 	
 	/* Interrupts are already disabled */
-	spinlock_lock(&thread->lock);
+	irq_spinlock_lock(&thread->lock, false);
 	
 	/* Record the statistics and increment the iterator */
@@ -352,5 +340,5 @@
 	(*iterator)++;
 	
-	spinlock_unlock(&thread->lock);
+	irq_spinlock_unlock(&thread->lock, false);
 	
 	return true;
@@ -371,6 +359,5 @@
 {
 	/* Messing with threads structures, avoid deadlock */
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&threads_lock);
+	irq_spinlock_lock(&threads_lock, true);
 	
 	/* First walk the thread tree to count the threads */
@@ -380,7 +367,5 @@
 	if (count == 0) {
 		/* No threads found (strange) */
-		spinlock_unlock(&threads_lock);
-		interrupts_restore(ipl);
-		
+		irq_spinlock_unlock(&threads_lock, true);
 		*size = 0;
 		return NULL;
@@ -389,6 +374,5 @@
 	*size = sizeof(stats_thread_t) * count;
 	if (dry_run) {
-		spinlock_unlock(&threads_lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&threads_lock, true);
 		return NULL;
 	}
@@ -397,7 +381,5 @@
 	if (stats_threads == NULL) {
 		/* No free space for allocation */
-		spinlock_unlock(&threads_lock);
-		interrupts_restore(ipl);
-		
+		irq_spinlock_unlock(&threads_lock, true);
 		*size = 0;
 		return NULL;
@@ -408,6 +390,5 @@
 	avltree_walk(&threads_tree, thread_serialize_walker, (void *) &iterator);
 	
-	spinlock_unlock(&threads_lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&threads_lock, true);
 	
 	return ((void *) stats_threads);
@@ -443,12 +424,10 @@
 	
 	/* Messing with task structures, avoid deadlock */
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&tasks_lock);
+	irq_spinlock_lock(&tasks_lock, true);
 	
 	task_t *task = task_find_by_id(task_id);
 	if (task == NULL) {
 		/* No task with this ID */
-		spinlock_unlock(&tasks_lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&tasks_lock, true);
 		return ret;
 	}
@@ -459,5 +438,5 @@
 		ret.data.size = sizeof(stats_task_t);
 		
-		spinlock_unlock(&tasks_lock);
+		irq_spinlock_unlock(&tasks_lock, true);
 	} else {
 		/* Allocate stats_task_t structure */
@@ -465,6 +444,5 @@
 		    (stats_task_t *) malloc(sizeof(stats_task_t), FRAME_ATOMIC);
 		if (stats_task == NULL) {
-			spinlock_unlock(&tasks_lock);
-			interrupts_restore(ipl);
+			irq_spinlock_unlock(&tasks_lock, true);
 			return ret;
 		}
@@ -474,15 +452,12 @@
 		ret.data.data = (void *) stats_task;
 		ret.data.size = sizeof(stats_task_t);
-	
+		
 		/* Hand-over-hand locking */
-		spinlock_lock(&task->lock);
-		spinlock_unlock(&tasks_lock);
+		irq_spinlock_exchange(&tasks_lock, &task->lock);
 		
 		produce_stats_task(task, stats_task);
 		
-		spinlock_unlock(&task->lock);
-	}
-	
-	interrupts_restore(ipl);
+		irq_spinlock_unlock(&task->lock, true);
+	}
 	
 	return ret;
@@ -518,12 +493,10 @@
 	
 	/* Messing with threads structures, avoid deadlock */
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&threads_lock);
+	irq_spinlock_lock(&threads_lock, true);
 	
 	thread_t *thread = thread_find_by_id(thread_id);
 	if (thread == NULL) {
 		/* No thread with this ID */
-		spinlock_unlock(&threads_lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&threads_lock, true);
 		return ret;
 	}
@@ -534,5 +507,5 @@
 		ret.data.size = sizeof(stats_thread_t);
 		
-		spinlock_unlock(&threads_lock);
+		irq_spinlock_unlock(&threads_lock, true);
 	} else {
 		/* Allocate stats_thread_t structure */
@@ -540,6 +513,5 @@
 		    (stats_thread_t *) malloc(sizeof(stats_thread_t), FRAME_ATOMIC);
 		if (stats_thread == NULL) {
-			spinlock_unlock(&threads_lock);
-			interrupts_restore(ipl);
+			irq_spinlock_unlock(&threads_lock, true);
 			return ret;
 		}
@@ -551,13 +523,10 @@
 		
 		/* Hand-over-hand locking */
-		spinlock_lock(&thread->lock);
-		spinlock_unlock(&threads_lock);
+		irq_spinlock_exchange(&threads_lock, &thread->lock);
 		
 		produce_stats_thread(thread, stats_thread);
 		
-		spinlock_unlock(&thread->lock);
-	}
-	
-	interrupts_restore(ipl);
+		irq_spinlock_unlock(&thread->lock, true);
+	}
 	
 	return ret;
@@ -673,5 +642,5 @@
 {
 	mutex_initialize(&load_lock, MUTEX_PASSIVE);
-
+	
 	sysinfo_set_item_fn_val("system.uptime", NULL, get_stats_uptime);
 	sysinfo_set_item_fn_data("system.cpus", NULL, get_stats_cpus);
Index: kernel/generic/src/sysinfo/sysinfo.c
===================================================================
--- kernel/generic/src/sysinfo/sysinfo.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/sysinfo/sysinfo.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -58,5 +58,5 @@
  *
  */
-static int sysinfo_item_constructor(void *obj, int kmflag)
+static int sysinfo_item_constructor(void *obj, unsigned int kmflag)
 {
 	sysinfo_item_t *item = (sysinfo_item_t *) obj;
@@ -78,5 +78,5 @@
  *
  */
-static int sysinfo_item_destructor(void *obj)
+static size_t sysinfo_item_destructor(void *obj)
 {
 	sysinfo_item_t *item = (sysinfo_item_t *) obj;
Index: kernel/generic/src/time/clock.c
===================================================================
--- kernel/generic/src/time/clock.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/time/clock.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -33,11 +33,12 @@
 /**
  * @file
- * @brief	High-level clock interrupt handler.
+ * @brief High-level clock interrupt handler.
  *
  * This file contains the clock() function which is the source
  * of preemption. It is also responsible for executing expired
  * timeouts.
- */
- 
+ *
+ */
+
 #include <time/clock.h>
 #include <time/timeout.h>
@@ -63,6 +64,8 @@
 static parea_t clock_parea;
 
-/* Variable holding fragment of second, so that we would update
- * seconds correctly
+/** Fragment of second
+ *
+ * For updating seconds correctly.
+ *
  */
 static unative_t secfrag = 0;
@@ -73,10 +76,9 @@
  * information about realtime data. We allocate 1 page with these 
  * data and update it periodically.
+ *
  */
 void clock_counter_init(void)
 {
-	void *faddr;
-
-	faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
+	void *faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
 	if (!faddr)
 		panic("Cannot allocate page for clock.");
@@ -87,12 +89,13 @@
 	uptime->seconds2 = 0;
 	uptime->useconds = 0;
-
+	
 	clock_parea.pbase = (uintptr_t) faddr;
 	clock_parea.frames = 1;
 	ddi_parea_register(&clock_parea);
-
+	
 	/*
 	 * Prepare information for the userspace so that it can successfully
 	 * physmem_map() the clock_parea.
+	 *
 	 */
 	sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true);
@@ -100,9 +103,9 @@
 }
 
-
 /** Update public counters
  *
  * Update it only on first processor
- * TODO: Do we really need so many write barriers? 
+ * TODO: Do we really need so many write barriers?
+ *
  */
 static void clock_update_counters(void)
@@ -131,60 +134,64 @@
 void clock(void)
 {
-	link_t *l;
-	timeout_t *h;
-	timeout_handler_t f;
-	void *arg;
 	size_t missed_clock_ticks = CPU->missed_clock_ticks;
-	unsigned int i;
-
+	
 	/* Account lost ticks to CPU usage */
-	if (CPU->idle) {
+	if (CPU->idle)
 		CPU->idle_ticks += missed_clock_ticks + 1;
-	} else {
+	else
 		CPU->busy_ticks += missed_clock_ticks + 1;
-	}
+	
 	CPU->idle = false;
-
+	
 	/*
 	 * To avoid lock ordering problems,
 	 * run all expired timeouts as you visit them.
+	 *
 	 */
+	size_t i;
 	for (i = 0; i <= missed_clock_ticks; i++) {
 		clock_update_counters();
-		spinlock_lock(&CPU->timeoutlock);
-		while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
-			h = list_get_instance(l, timeout_t, link);
-			spinlock_lock(&h->lock);
-			if (h->ticks-- != 0) {
-				spinlock_unlock(&h->lock);
+		irq_spinlock_lock(&CPU->timeoutlock, false);
+		
+		link_t *cur;
+		while ((cur = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
+			timeout_t *timeout = list_get_instance(cur, timeout_t, link);
+			
+			irq_spinlock_lock(&timeout->lock, false);
+			if (timeout->ticks-- != 0) {
+				irq_spinlock_unlock(&timeout->lock, false);
 				break;
 			}
-			list_remove(l);
-			f = h->handler;
-			arg = h->arg;
-			timeout_reinitialize(h);
-			spinlock_unlock(&h->lock);	
-			spinlock_unlock(&CPU->timeoutlock);
-
-			f(arg);
-
-			spinlock_lock(&CPU->timeoutlock);
+			
+			list_remove(cur);
+			timeout_handler_t handler = timeout->handler;
+			void *arg = timeout->arg;
+			timeout_reinitialize(timeout);
+			
+			irq_spinlock_unlock(&timeout->lock, false);
+			irq_spinlock_unlock(&CPU->timeoutlock, false);
+			
+			handler(arg);
+			
+			irq_spinlock_lock(&CPU->timeoutlock, false);
 		}
-		spinlock_unlock(&CPU->timeoutlock);
+		
+		irq_spinlock_unlock(&CPU->timeoutlock, false);
 	}
 	CPU->missed_clock_ticks = 0;
-
+	
 	/*
 	 * Do CPU usage accounting and find out whether to preempt THREAD.
+	 *
 	 */
-
+	
 	if (THREAD) {
 		uint64_t ticks;
 		
-		spinlock_lock(&CPU->lock);
+		irq_spinlock_lock(&CPU->lock, false);
 		CPU->needs_relink += 1 + missed_clock_ticks;
-		spinlock_unlock(&CPU->lock);	
-	
-		spinlock_lock(&THREAD->lock);
+		irq_spinlock_unlock(&CPU->lock, false);
+		
+		irq_spinlock_lock(&THREAD->lock, false);
 		if ((ticks = THREAD->ticks)) {
 			if (ticks >= 1 + missed_clock_ticks)
@@ -193,10 +200,7 @@
 				THREAD->ticks = 0;
 		}
-		spinlock_unlock(&THREAD->lock);
+		irq_spinlock_unlock(&THREAD->lock, false);
 		
 		if ((!ticks) && (!PREEMPTION_DISABLED)) {
-#ifdef CONFIG_UDEBUG
-			istate_t *istate;
-#endif
 			scheduler();
 #ifdef CONFIG_UDEBUG
@@ -205,11 +209,10 @@
 			 * before it begins executing userspace code.
 			 */
-			istate = THREAD->udebug.uspace_state;
-			if (istate && istate_from_uspace(istate))
+			istate_t *istate = THREAD->udebug.uspace_state;
+			if ((istate) && (istate_from_uspace(istate)))
 				udebug_before_thread_runs();
 #endif
 		}
 	}
-
 }
 
Index: kernel/generic/src/time/timeout.c
===================================================================
--- kernel/generic/src/time/timeout.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/time/timeout.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -33,5 +33,5 @@
 /**
  * @file
- * @brief		Timeout management functions.
+ * @brief Timeout management functions.
  */
 
@@ -53,25 +53,23 @@
 void timeout_init(void)
 {
-	spinlock_initialize(&CPU->timeoutlock, "timeout_lock");
+	irq_spinlock_initialize(&CPU->timeoutlock, "cpu.timeoutlock");
 	list_initialize(&CPU->timeout_active_head);
 }
 
-
-/** Reinitialize timeout 
+/** Reinitialize timeout
  *
  * Initialize all members except the lock.
  *
- * @param t		Timeout to be initialized.
- *
- */
-void timeout_reinitialize(timeout_t *t)
-{
-	t->cpu = NULL;
-	t->ticks = 0;
-	t->handler = NULL;
-	t->arg = NULL;
-	link_initialize(&t->link);
-}
-
+ * @param timeout Timeout to be initialized.
+ *
+ */
+void timeout_reinitialize(timeout_t *timeout)
+{
+	timeout->cpu = NULL;
+	timeout->ticks = 0;
+	timeout->handler = NULL;
+	timeout->arg = NULL;
+	link_initialize(&timeout->link);
+}
 
 /** Initialize timeout
@@ -79,13 +77,12 @@
  * Initialize all members including the lock.
  *
- * @param t		Timeout to be initialized.
- *
- */
-void timeout_initialize(timeout_t *t)
-{
-	spinlock_initialize(&t->lock, "timeout_t_lock");
-	timeout_reinitialize(t);
-}
-
+ * @param timeout Timeout to be initialized.
+ *
+ */
+void timeout_initialize(timeout_t *timeout)
+{
+	irq_spinlock_initialize(&timeout->lock, "timeout_t_lock");
+	timeout_reinitialize(timeout);
+}
 
 /** Register timeout
@@ -95,70 +92,67 @@
  * time microseconds (or slightly more).
  *
- * @param t		Timeout structure.
- * @param time		Number of usec in the future to execute the handler.
- * @param f		Timeout handler function.
- * @param arg		Timeout handler argument.
- *
- */
-void
-timeout_register(timeout_t *t, uint64_t time, timeout_handler_t f, void *arg)
-{
-	timeout_t *hlp = NULL;
-	link_t *l, *m;
-	ipl_t ipl;
-	uint64_t sum;
-
-	ipl = interrupts_disable();
-	spinlock_lock(&CPU->timeoutlock);
-	spinlock_lock(&t->lock);
-
-	if (t->cpu)
-		panic("Unexpected: t->cpu != 0.");
-
-	t->cpu = CPU;
-	t->ticks = us2ticks(time);
-	
-	t->handler = f;
-	t->arg = arg;
-
-	/*
-	 * Insert t into the active timeouts list according to t->ticks.
-	 */
-	sum = 0;
-	l = CPU->timeout_active_head.next;
-	while (l != &CPU->timeout_active_head) {
-		hlp = list_get_instance(l, timeout_t, link);
-		spinlock_lock(&hlp->lock);
-		if (t->ticks < sum + hlp->ticks) {
-			spinlock_unlock(&hlp->lock);
+ * @param timeout Timeout structure.
+ * @param time    Number of usec in the future to execute the handler.
+ * @param handler Timeout handler function.
+ * @param arg     Timeout handler argument.
+ *
+ */
+void timeout_register(timeout_t *timeout, uint64_t time,
+    timeout_handler_t handler, void *arg)
+{
+	irq_spinlock_lock(&CPU->timeoutlock, true);
+	irq_spinlock_lock(&timeout->lock, false);
+	
+	if (timeout->cpu)
+		panic("Unexpected: timeout->cpu != 0.");
+	
+	timeout->cpu = CPU;
+	timeout->ticks = us2ticks(time);
+	
+	timeout->handler = handler;
+	timeout->arg = arg;
+	
+	/*
+	 * Insert timeout into the active timeouts list according to timeout->ticks.
+	 */
+	uint64_t sum = 0;
+	timeout_t *target = NULL;
+	link_t *cur;
+	for (cur = CPU->timeout_active_head.next;
+	    cur != &CPU->timeout_active_head; cur = cur->next) {
+		target = list_get_instance(cur, timeout_t, link);
+		irq_spinlock_lock(&target->lock, false);
+		
+		if (timeout->ticks < sum + target->ticks) {
+			irq_spinlock_unlock(&target->lock, false);
 			break;
 		}
-		sum += hlp->ticks;
-		spinlock_unlock(&hlp->lock);
-		l = l->next;
-	}
-
-	m = l->prev;
-	list_prepend(&t->link, m); /* avoid using l->prev */
-
-	/*
-	 * Adjust t->ticks according to ticks accumulated in h's predecessors.
-	 */
-	t->ticks -= sum;
-
-	/*
-	 * Decrease ticks of t's immediate succesor by t->ticks.
-	 */
-	if (l != &CPU->timeout_active_head) {
-		spinlock_lock(&hlp->lock);
-		hlp->ticks -= t->ticks;
-		spinlock_unlock(&hlp->lock);
-	}
-
-	spinlock_unlock(&t->lock);
-	spinlock_unlock(&CPU->timeoutlock);
-	interrupts_restore(ipl);
-}
-
+		
+		sum += target->ticks;
+		irq_spinlock_unlock(&target->lock, false);
+	}
+	
+	/* Avoid using cur->prev directly */
+	link_t *prev = cur->prev;
+	list_prepend(&timeout->link, prev);
+	
+	/*
+	 * Adjust timeout->ticks according to ticks
+	 * accumulated in target's predecessors.
+	 */
+	timeout->ticks -= sum;
+	
+	/*
+	 * Decrease ticks of timeout's immediate successor by timeout->ticks.
+	 */
+	if (cur != &CPU->timeout_active_head) {
+		irq_spinlock_lock(&target->lock, false);
+		target->ticks -= timeout->ticks;
+		irq_spinlock_unlock(&target->lock, false);
+	}
+	
+	irq_spinlock_unlock(&timeout->lock, false);
+	irq_spinlock_unlock(&CPU->timeoutlock, true);
+}
 
 /** Unregister timeout
@@ -166,26 +160,22 @@
  * Remove timeout from timeout list.
  *
- * @param t		Timeout to unregister.
- *
- * @return		True on success, false on failure.
- */
-bool timeout_unregister(timeout_t *t)
-{
-	timeout_t *hlp;
-	link_t *l;
-	ipl_t ipl;
+ * @param timeout Timeout to unregister.
+ *
+ * @return True on success, false on failure.
+ *
+ */
+bool timeout_unregister(timeout_t *timeout)
+{
 	DEADLOCK_PROBE_INIT(p_tolock);
-
+	
 grab_locks:
-	ipl = interrupts_disable();
-	spinlock_lock(&t->lock);
-	if (!t->cpu) {
-		spinlock_unlock(&t->lock);
-		interrupts_restore(ipl);
+	irq_spinlock_lock(&timeout->lock, true);
+	if (!timeout->cpu) {
+		irq_spinlock_unlock(&timeout->lock, true);
 		return false;
 	}
-	if (!spinlock_trylock(&t->cpu->timeoutlock)) {
-		spinlock_unlock(&t->lock);
-		interrupts_restore(ipl);
+	
+	if (!irq_spinlock_trylock(&timeout->cpu->timeoutlock)) {
+		irq_spinlock_unlock(&timeout->lock, true);
 		DEADLOCK_PROBE(p_tolock, DEADLOCK_THRESHOLD);
 		goto grab_locks;
@@ -193,23 +183,22 @@
 	
 	/*
-	 * Now we know for sure that t hasn't been activated yet
-	 * and is lurking in t->cpu->timeout_active_head queue.
-	 */
-
-	l = t->link.next;
-	if (l != &t->cpu->timeout_active_head) {
-		hlp = list_get_instance(l, timeout_t, link);
-		spinlock_lock(&hlp->lock);
-		hlp->ticks += t->ticks;
-		spinlock_unlock(&hlp->lock);
-	}
-	
-	list_remove(&t->link);
-	spinlock_unlock(&t->cpu->timeoutlock);
-
-	timeout_reinitialize(t);
-	spinlock_unlock(&t->lock);
-
-	interrupts_restore(ipl);
+	 * Now we know for sure that timeout hasn't been activated yet
+	 * and is lurking in timeout->cpu->timeout_active_head queue.
+	 */
+	
+	link_t *cur = timeout->link.next;
+	if (cur != &timeout->cpu->timeout_active_head) {
+		timeout_t *tmp = list_get_instance(cur, timeout_t, link);
+		irq_spinlock_lock(&tmp->lock, false);
+		tmp->ticks += timeout->ticks;
+		irq_spinlock_unlock(&tmp->lock, false);
+	}
+	
+	list_remove(&timeout->link);
+	irq_spinlock_unlock(&timeout->cpu->timeoutlock, false);
+	
+	timeout_reinitialize(timeout);
+	irq_spinlock_unlock(&timeout->lock, true);
+	
 	return true;
 }
Index: kernel/generic/src/udebug/udebug.c
===================================================================
--- kernel/generic/src/udebug/udebug.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/udebug/udebug.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -33,9 +33,9 @@
 /**
  * @file
- * @brief	Udebug hooks and data structure management.
+ * @brief Udebug hooks and data structure management.
  *
  * Udebug is an interface that makes userspace debuggers possible.
  */
- 
+
 #include <synch/waitq.h>
 #include <debug.h>
@@ -45,9 +45,9 @@
 #include <arch.h>
 
-
 /** Initialize udebug part of task structure.
  *
  * Called as part of task structure initialization.
- * @param ut	Pointer to the structure to initialize.
+ * @param ut Pointer to the structure to initialize.
+ *
  */
 void udebug_task_init(udebug_task_t *ut)
@@ -63,5 +63,7 @@
  *
  * Called as part of thread structure initialization.
- * @param ut	Pointer to the structure to initialize.
+ *
+ * @param ut Pointer to the structure to initialize.
+ *
  */
 void udebug_thread_initialize(udebug_thread_t *ut)
@@ -70,5 +72,5 @@
 	waitq_initialize(&ut->go_wq);
 	condvar_initialize(&ut->active_cv);
-
+	
 	ut->go_call = NULL;
 	ut->uspace_state = NULL;
@@ -76,5 +78,5 @@
 	ut->stoppable = true;
 	ut->active = false;
-	ut->cur_event = 0; /* none */
+	ut->cur_event = 0;  /* None */
 }
 
@@ -85,16 +87,14 @@
  * is received.
  *
- * @param wq	The wait queue used by the thread to wait for GO messages.
+ * @param wq The wait queue used by the thread to wait for GO messages.
+ *
  */
 static void udebug_wait_for_go(waitq_t *wq)
 {
-	int rc;
-	ipl_t ipl;
-
-	ipl = waitq_sleep_prepare(wq);
-
-	wq->missed_wakeups = 0;	/* Enforce blocking. */
-	rc = waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
-
+	ipl_t ipl = waitq_sleep_prepare(wq);
+	
+	wq->missed_wakeups = 0;  /* Enforce blocking. */
+	int rc = waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
+	
 	waitq_sleep_finish(wq, rc, ipl);
 }
@@ -102,69 +102,68 @@
 /** Start of stoppable section.
  *
- * A stoppable section is a section of code where if the thread can be stoped. In other words,
- * if a STOP operation is issued, the thread is guaranteed not to execute
- * any userspace instructions until the thread is resumed.
+ * A stoppable section is a section of code where the thread can
+ * be stopped. In other words, if a STOP operation is issued, the thread
+ * is guaranteed not to execute any userspace instructions until the
+ * thread is resumed.
  *
  * Having stoppable sections is better than having stopping points, since
  * a thread can be stopped even when it is blocked indefinitely in a system
  * call (whereas it would not reach any stopping point).
+ *
  */
 void udebug_stoppable_begin(void)
 {
-	int nsc;
-	call_t *db_call, *go_call;
-
 	ASSERT(THREAD);
 	ASSERT(TASK);
-
+	
 	mutex_lock(&TASK->udebug.lock);
-
-	nsc = --TASK->udebug.not_stoppable_count;
-
+	
+	int nsc = --TASK->udebug.not_stoppable_count;
+	
 	/* Lock order OK, THREAD->udebug.lock is after TASK->udebug.lock */
 	mutex_lock(&THREAD->udebug.lock);
 	ASSERT(THREAD->udebug.stoppable == false);
 	THREAD->udebug.stoppable = true;
-
-	if (TASK->udebug.dt_state == UDEBUG_TS_BEGINNING && nsc == 0) {
+	
+	if ((TASK->udebug.dt_state == UDEBUG_TS_BEGINNING) && (nsc == 0)) {
 		/*
 		 * This was the last non-stoppable thread. Reply to
 		 * DEBUG_BEGIN call.
+		 *
 		 */
-
-		db_call = TASK->udebug.begin_call;
+		
+		call_t *db_call = TASK->udebug.begin_call;
 		ASSERT(db_call);
-
+		
 		TASK->udebug.dt_state = UDEBUG_TS_ACTIVE;
 		TASK->udebug.begin_call = NULL;
-
+		
 		IPC_SET_RETVAL(db_call->data, 0);
-		ipc_answer(&TASK->answerbox, db_call);		
-
+		ipc_answer(&TASK->answerbox, db_call);
 	} else if (TASK->udebug.dt_state == UDEBUG_TS_ACTIVE) {
 		/*
 		 * Active debugging session
 		 */
-
+		
 		if (THREAD->udebug.active == true &&
 		    THREAD->udebug.go == false) {
 			/*
 			 * Thread was requested to stop - answer go call
+			 *
 			 */
-
+			
 			/* Make sure nobody takes this call away from us */
-			go_call = THREAD->udebug.go_call;
+			call_t *go_call = THREAD->udebug.go_call;
 			THREAD->udebug.go_call = NULL;
 			ASSERT(go_call);
-
+			
 			IPC_SET_RETVAL(go_call->data, 0);
 			IPC_SET_ARG1(go_call->data, UDEBUG_EVENT_STOP);
-
+			
 			THREAD->udebug.cur_event = UDEBUG_EVENT_STOP;
-
-	    		ipc_answer(&TASK->answerbox, go_call);
+			ipc_answer(&TASK->answerbox, go_call);
 		}
 	}
-
+	
 	mutex_unlock(&THREAD->udebug.lock);
         mutex_unlock(&TASK->udebug.lock);
@@ -174,5 +173,7 @@
  *
  * This is the point where the thread will block if it is stopped.
- * (As, by definition, a stopped thread must not leave its stoppable section).
+ * (As, by definition, a stopped thread must not leave its stoppable
+ * section).
+ *
  */
 void udebug_stoppable_end(void)
@@ -181,11 +182,11 @@
 	mutex_lock(&TASK->udebug.lock);
 	mutex_lock(&THREAD->udebug.lock);
-
-	if (THREAD->udebug.active && THREAD->udebug.go == false) {
+	
+	if ((THREAD->udebug.active) && (THREAD->udebug.go == false)) {
 		mutex_unlock(&THREAD->udebug.lock);
 		mutex_unlock(&TASK->udebug.lock);
-
+		
 		udebug_wait_for_go(&THREAD->udebug.go_wq);
-
+		
 		goto restart;
 		/* Must try again - have to lose stoppability atomically. */
@@ -194,5 +195,5 @@
 		ASSERT(THREAD->udebug.stoppable == true);
 		THREAD->udebug.stoppable = false;
-
+		
 		mutex_unlock(&THREAD->udebug.lock);
 		mutex_unlock(&TASK->udebug.lock);
@@ -203,4 +204,5 @@
  *
  * This function is called from clock().
+ *
  */
 void udebug_before_thread_runs(void)
@@ -215,4 +217,5 @@
  * Must be called before and after servicing a system call. This generates
  * a SYSCALL_B or SYSCALL_E event, depending on the value of @a end_variant.
+ *
  */
 void udebug_syscall_event(unative_t a1, unative_t a2, unative_t a3,
@@ -220,12 +223,10 @@
     bool end_variant)
 {
-	call_t *call;
-	udebug_event_t etype;
-
-	etype = end_variant ? UDEBUG_EVENT_SYSCALL_E : UDEBUG_EVENT_SYSCALL_B;
-
+	udebug_event_t etype =
+	    end_variant ? UDEBUG_EVENT_SYSCALL_E : UDEBUG_EVENT_SYSCALL_B;
+	
 	mutex_lock(&TASK->udebug.lock);
 	mutex_lock(&THREAD->udebug.lock);
-
+	
 	/* Must only generate events when in debugging session and is go. */
 	if (THREAD->udebug.active != true || THREAD->udebug.go == false ||
@@ -235,14 +236,14 @@
 		return;
 	}
-
+	
 	/* Fill in the GO response. */
-	call = THREAD->udebug.go_call;
+	call_t *call = THREAD->udebug.go_call;
 	THREAD->udebug.go_call = NULL;
-
+	
 	IPC_SET_RETVAL(call->data, 0);
 	IPC_SET_ARG1(call->data, etype);
 	IPC_SET_ARG2(call->data, id);
 	IPC_SET_ARG3(call->data, rc);
-
+	
 	THREAD->udebug.syscall_args[0] = a1;
 	THREAD->udebug.syscall_args[1] = a2;
@@ -251,18 +252,19 @@
 	THREAD->udebug.syscall_args[4] = a5;
 	THREAD->udebug.syscall_args[5] = a6;
-
+	
 	/*
 	 * Make sure udebug.go is false when going to sleep
 	 * in case we get woken up by DEBUG_END. (At which
 	 * point it must be back to the initial true value).
+	 *
 	 */
 	THREAD->udebug.go = false;
 	THREAD->udebug.cur_event = etype;
-
+	
 	ipc_answer(&TASK->answerbox, call);
-
+	
 	mutex_unlock(&THREAD->udebug.lock);
 	mutex_unlock(&TASK->udebug.lock);
-
+	
 	udebug_wait_for_go(&THREAD->udebug.go_wq);
 }
@@ -280,49 +282,52 @@
  * and get a THREAD_B event for them.
  *
- * @param t	Structure of the thread being created. Not locked, as the
- *		thread is not executing yet.
- * @param ta	Task to which the thread should be attached.
- */
-void udebug_thread_b_event_attach(struct thread *t, struct task *ta)
-{
-	call_t *call;
-
+ * @param thread Structure of the thread being created. Not locked, as the
+ *               thread is not executing yet.
+ * @param task   Task to which the thread should be attached.
+ *
+ */
+void udebug_thread_b_event_attach(struct thread *thread, struct task *task)
+{
 	mutex_lock(&TASK->udebug.lock);
 	mutex_lock(&THREAD->udebug.lock);
-
-	thread_attach(t, ta);
-
+	
+	thread_attach(thread, task);
+	
 	LOG("Check state");
-
+	
 	/* Must only generate events when in debugging session */
 	if (THREAD->udebug.active != true) {
 		LOG("udebug.active: %s, udebug.go: %s",
-			THREAD->udebug.active ? "Yes(+)" : "No",
-			THREAD->udebug.go ? "Yes(-)" : "No");
+		    THREAD->udebug.active ? "Yes(+)" : "No",
+		    THREAD->udebug.go ? "Yes(-)" : "No");
+		
 		mutex_unlock(&THREAD->udebug.lock);
 		mutex_unlock(&TASK->udebug.lock);
 		return;
 	}
-
+	
 	LOG("Trigger event");
-	call = THREAD->udebug.go_call;
+	
+	call_t *call = THREAD->udebug.go_call;
+	
 	THREAD->udebug.go_call = NULL;
 	IPC_SET_RETVAL(call->data, 0);
 	IPC_SET_ARG1(call->data, UDEBUG_EVENT_THREAD_B);
-	IPC_SET_ARG2(call->data, (unative_t)t);
-
+	IPC_SET_ARG2(call->data, (unative_t) thread);
+	
 	/*
 	 * Make sure udebug.go is false when going to sleep
 	 * in case we get woken up by DEBUG_END. (At which
 	 * point it must be back to the initial true value).
+	 *
 	 */
 	THREAD->udebug.go = false;
 	THREAD->udebug.cur_event = UDEBUG_EVENT_THREAD_B;
-
+	
 	ipc_answer(&TASK->answerbox, call);
-
+	
 	mutex_unlock(&THREAD->udebug.lock);
 	mutex_unlock(&TASK->udebug.lock);
-
+	
 	LOG("Wait for Go");
 	udebug_wait_for_go(&THREAD->udebug.go_wq);
@@ -333,128 +338,125 @@
  * Must be called when the current thread is terminating.
  * Generates a THREAD_E event.
+ *
  */
 void udebug_thread_e_event(void)
 {
-	call_t *call;
-
 	mutex_lock(&TASK->udebug.lock);
 	mutex_lock(&THREAD->udebug.lock);
-
+	
 	LOG("Check state");
-
+	
 	/* Must only generate events when in debugging session. */
 	if (THREAD->udebug.active != true) {
 		LOG("udebug.active: %s, udebug.go: %s",
-			THREAD->udebug.active ? "Yes" : "No",
-			THREAD->udebug.go ? "Yes" : "No");
+		    THREAD->udebug.active ? "Yes" : "No",
+		    THREAD->udebug.go ? "Yes" : "No");
+		
 		mutex_unlock(&THREAD->udebug.lock);
 		mutex_unlock(&TASK->udebug.lock);
 		return;
 	}
-
+	
 	LOG("Trigger event");
-	call = THREAD->udebug.go_call;
+	
+	call_t *call = THREAD->udebug.go_call;
+	
 	THREAD->udebug.go_call = NULL;
 	IPC_SET_RETVAL(call->data, 0);
 	IPC_SET_ARG1(call->data, UDEBUG_EVENT_THREAD_E);
-
+	
 	/* Prevent any further debug activity in thread. */
 	THREAD->udebug.active = false;
-	THREAD->udebug.cur_event = 0;		/* none */
-	THREAD->udebug.go = false;	/* set to initial value */
-
+	THREAD->udebug.cur_event = 0;   /* None */
+	THREAD->udebug.go = false;      /* Set to initial value */
+	
 	ipc_answer(&TASK->answerbox, call);
-
+	
 	mutex_unlock(&THREAD->udebug.lock);
 	mutex_unlock(&TASK->udebug.lock);
-
-	/* 
+	
+	/*
 	 * This event does not sleep - debugging has finished
 	 * in this thread.
+	 *
 	 */
 }
 
-/**
- * Terminate task debugging session.
- *
- * Gracefully terminates the debugging session for a task. If the debugger
+/** Terminate task debugging session.
+ *
+ * Gracefully terminate the debugging session for a task. If the debugger
  * is still waiting for events on some threads, it will receive a
  * FINISHED event for each of them.
  *
- * @param ta	Task structure. ta->udebug.lock must be already locked.
- * @return	Zero on success or negative error code.
- */
-int udebug_task_cleanup(struct task *ta)
-{
-	thread_t *t;
+ * @param task Task structure. task->udebug.lock must be already locked.
+ *
+ * @return Zero on success or negative error code.
+ *
+ */
+int udebug_task_cleanup(struct task *task)
+{
+	if ((task->udebug.dt_state != UDEBUG_TS_BEGINNING) &&
+	    (task->udebug.dt_state != UDEBUG_TS_ACTIVE)) {
+		return EINVAL;
+	}
+	
+	LOG("Task %" PRIu64, task->taskid);
+	
+	/* Finish debugging of all userspace threads */
 	link_t *cur;
-	int flags;
-	ipl_t ipl;
-
-	if (ta->udebug.dt_state != UDEBUG_TS_BEGINNING &&
-	    ta->udebug.dt_state != UDEBUG_TS_ACTIVE) {
-		return EINVAL;
-	}
-
-	LOG("Task %" PRIu64, ta->taskid);
-
-	/* Finish debugging of all userspace threads */
-	for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
-		t = list_get_instance(cur, thread_t, th_link);
-
-		mutex_lock(&t->udebug.lock);
-
-		ipl = interrupts_disable();
-		spinlock_lock(&t->lock);
-
-		flags = t->flags;
-
-		spinlock_unlock(&t->lock);
-		interrupts_restore(ipl);
-
+	for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
+		thread_t *thread = list_get_instance(cur, thread_t, th_link);
+		
+		mutex_lock(&thread->udebug.lock);
+		unsigned int flags = thread->flags;
+		
 		/* Only process userspace threads. */
 		if ((flags & THREAD_FLAG_USPACE) != 0) {
 			/* Prevent any further debug activity in thread. */
-			t->udebug.active = false;
-			t->udebug.cur_event = 0;	/* none */
-
+			thread->udebug.active = false;
+			thread->udebug.cur_event = 0;   /* None */
+			
 			/* Is the thread still go? */
-			if (t->udebug.go == true) {
+			if (thread->udebug.go == true) {
 				/*
-				* Yes, so clear go. As active == false,
+				 * Yes, so clear go. As active == false,
 				 * this doesn't affect anything.
+				 *
 				 */
-				t->udebug.go = false;	
-
+				thread->udebug.go = false;
+				
 				/* Answer GO call */
 				LOG("Answer GO call with EVENT_FINISHED.");
-				IPC_SET_RETVAL(t->udebug.go_call->data, 0);
-				IPC_SET_ARG1(t->udebug.go_call->data,
+				
+				IPC_SET_RETVAL(thread->udebug.go_call->data, 0);
+				IPC_SET_ARG1(thread->udebug.go_call->data,
 				    UDEBUG_EVENT_FINISHED);
-
-				ipc_answer(&ta->answerbox, t->udebug.go_call);
-				t->udebug.go_call = NULL;
+				
+				ipc_answer(&task->answerbox, thread->udebug.go_call);
+				thread->udebug.go_call = NULL;
 			} else {
 				/*
 				 * Debug_stop is already at initial value.
 				 * Yet this means the thread needs waking up.
+				 *
 				 */
-
+				
 				/*
-				 * t's lock must not be held when calling
+				 * thread's lock must not be held when calling
 				 * waitq_wakeup.
+				 *
 				 */
-				waitq_wakeup(&t->udebug.go_wq, WAKEUP_FIRST);
+				waitq_wakeup(&thread->udebug.go_wq, WAKEUP_FIRST);
 			}
-			mutex_unlock(&t->udebug.lock);
-			condvar_broadcast(&t->udebug.active_cv);
-		} else {
-			mutex_unlock(&t->udebug.lock);
-		}
-	}
-
-	ta->udebug.dt_state = UDEBUG_TS_INACTIVE;
-	ta->udebug.debugger = NULL;
-
+			
+			mutex_unlock(&thread->udebug.lock);
+			condvar_broadcast(&thread->udebug.active_cv);
+		} else
+			mutex_unlock(&thread->udebug.lock);
+	}
+	
+	task->udebug.dt_state = UDEBUG_TS_INACTIVE;
+	task->udebug.debugger = NULL;
+	
 	return 0;
 }
@@ -466,9 +468,10 @@
  * a chance to examine the faulting thead/task. When the debugging session
  * is over, this function returns (so that thread/task cleanup can continue).
+ *
  */
 void udebug_thread_fault(void)
 {
 	udebug_stoppable_begin();
-
+	
 	/* Wait until a debugger attends to us. */
 	mutex_lock(&THREAD->udebug.lock);
@@ -476,5 +479,5 @@
 		condvar_wait(&THREAD->udebug.active_cv, &THREAD->udebug.lock);
 	mutex_unlock(&THREAD->udebug.lock);
-
+	
 	/* Make sure the debugging session is over before proceeding. */
 	mutex_lock(&THREAD->udebug.lock);
@@ -482,5 +485,5 @@
 		condvar_wait(&THREAD->udebug.active_cv, &THREAD->udebug.lock);
 	mutex_unlock(&THREAD->udebug.lock);
-
+	
 	udebug_stoppable_end();
 }
Index: kernel/generic/src/udebug/udebug_ops.c
===================================================================
--- kernel/generic/src/udebug/udebug_ops.c	(revision 666f492f56681952d58041f88d5bbb968548091c)
+++ kernel/generic/src/udebug/udebug_ops.c	(revision da1bafb8cf9a3b3be8ef21bc114daaa476a85190)
@@ -33,5 +33,5 @@
 /**
  * @file
- * @brief	Udebug operations.
+ * @brief Udebug operations.
  *
  * Udebug operations on tasks and threads are implemented here. The
@@ -39,5 +39,5 @@
  * when servicing udebug IPC messages.
  */
- 
+
 #include <debug.h>
 #include <proc/task.h>
@@ -53,6 +53,5 @@
 #include <memstr.h>
 
-/**
- * Prepare a thread for a debugging operation.
+/** Prepare a thread for a debugging operation.
  *
  * Simply put, return thread t with t->udebug.lock held,
@@ -73,89 +72,84 @@
  * the t->lock spinlock to the t->udebug.lock mutex.
  *
- * @param t		Pointer, need not at all be valid.
- * @param being_go	Required thread state.
+ * @param thread   Pointer, need not at all be valid.
+ * @param being_go Required thread state.
  *
  * Returns EOK if all went well, or an error code otherwise.
- */
-static int _thread_op_begin(thread_t *t, bool being_go)
-{
-	ipl_t ipl;
-
-	mutex_lock(&TASK->udebug.lock);
-
+ *
+ */
+static int _thread_op_begin(thread_t *thread, bool being_go)
+{
+	mutex_lock(&TASK->udebug.lock);
+	
 	/* thread_exists() must be called with threads_lock held */
-	ipl = interrupts_disable();
-	spinlock_lock(&threads_lock);
-
-	if (!thread_exists(t)) {
-		spinlock_unlock(&threads_lock);
-		interrupts_restore(ipl);
+	irq_spinlock_lock(&threads_lock, true);
+	
+	if (!thread_exists(thread)) {
+		irq_spinlock_unlock(&threads_lock, true);
 		mutex_unlock(&TASK->udebug.lock);
 		return ENOENT;
 	}
-
-	/* t->lock is enough to ensure the thread's existence */
-	spinlock_lock(&t->lock);
-	spinlock_unlock(&threads_lock);
-
-	/* Verify that 't' is a userspace thread. */
-	if ((t->flags & THREAD_FLAG_USPACE) == 0) {
+	
+	/* thread->lock is enough to ensure the thread's existence */
+	irq_spinlock_exchange(&threads_lock, &thread->lock);
+	
+	/* Verify that 'thread' is a userspace thread. */
+	if ((thread->flags & THREAD_FLAG_USPACE) == 0) {
 		/* It's not, deny its existence */
-		spinlock_unlock(&t->lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&thread->lock, true);
 		mutex_unlock(&TASK->udebug.lock);
 		return ENOENT;
 	}
-
+	
 	/* Verify debugging state. */
-	if (t->udebug.active != true) {
+	if (thread->udebug.active != true) {
 		/* Not in debugging session or undesired GO state */
-		spinlock_unlock(&t->lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&thread->lock, true);
 		mutex_unlock(&TASK->udebug.lock);
 		return ENOENT;
 	}
-
+	
 	/*
 	 * Since the thread has active == true, TASK->udebug.lock
 	 * is enough to ensure its existence and that active remains
 	 * true.
+	 *
 	 */
-	spinlock_unlock(&t->lock);
-	interrupts_restore(ipl);
-
+	irq_spinlock_unlock(&thread->lock, true);
+	
 	/* Only mutex TASK->udebug.lock left. */
 	
 	/* Now verify that the thread belongs to the current task. */
-	if (t->task != TASK) {
+	if (thread->task != TASK) {
 		/* No such thread belonging this task*/
 		mutex_unlock(&TASK->udebug.lock);
 		return ENOENT;
 	}
-
+	
 	/*
 	 * Now we need to grab the thread's debug lock for synchronization
 	 * of the threads stoppability/stop state.
+	 *
 	 */
-	mutex_lock(&t->udebug.lock);
-
+	mutex_lock(&thread->udebug.lock);
+	
 	/* The big task mutex is no longer needed. */
 	mutex_unlock(&TASK->udebug.lock);
-
-	if (t->udebug.go != being_go) {
+	
+	if (thread->udebug.go != being_go) {
 		/* Not in debugging session or undesired GO state. */
-		mutex_unlock(&t->udebug.lock);
+		mutex_unlock(&thread->udebug.lock);
 		return EINVAL;
 	}
-
-	/* Only t->udebug.lock left. */
-
-	return EOK;	/* All went well. */
+	
+	/* Only thread->udebug.lock left. */
+	
+	return EOK;  /* All went well. */
 }
 
 /** End debugging operation on a thread. */
-static void _thread_op_end(thread_t *t)
-{
-	mutex_unlock(&t->udebug.lock);
+static void _thread_op_end(thread_t *thread)
+{
+	mutex_unlock(&thread->udebug.lock);
 }
 
@@ -171,49 +165,48 @@
  * all the threads become stoppable (i.e. they can be considered stopped).
  *
- * @param call	The BEGIN call we are servicing.
- * @return 	0 (OK, but not done yet), 1 (done) or negative error code.
+ * @param call The BEGIN call we are servicing.
+ *
+ * @return 0 (OK, but not done yet), 1 (done) or negative error code.
+ *
  */
 int udebug_begin(call_t *call)
 {
-	int reply;
-
-	thread_t *t;
-	link_t *cur;
-
-	LOG("Debugging task %llu", TASK->taskid);
-	mutex_lock(&TASK->udebug.lock);
-
+	LOG("Debugging task %" PRIu64, TASK->taskid);
+	
+	mutex_lock(&TASK->udebug.lock);
+	
 	if (TASK->udebug.dt_state != UDEBUG_TS_INACTIVE) {
 		mutex_unlock(&TASK->udebug.lock);
 		return EBUSY;
 	}
-
+	
 	TASK->udebug.dt_state = UDEBUG_TS_BEGINNING;
 	TASK->udebug.begin_call = call;
 	TASK->udebug.debugger = call->sender;
-
+	
+	int reply;
+	
 	if (TASK->udebug.not_stoppable_count == 0) {
 		TASK->udebug.dt_state = UDEBUG_TS_ACTIVE;
 		TASK->udebug.begin_call = NULL;
-		reply = 1; /* immediate reply */
-	} else {
-		reply = 0; /* no reply */
-	}
+		reply = 1;  /* immediate reply */
+	} else
+		reply = 0;  /* no reply */
 	
 	/* Set udebug.active on all of the task's userspace threads. */
-
+	
+	link_t *cur;
 	for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
-		t = list_get_instance(cur, thread_t, th_link);
-
-		mutex_lock(&t->udebug.lock);
-		if ((t->flags & THREAD_FLAG_USPACE) != 0) {
-			t->udebug.active = true;
-			mutex_unlock(&t->udebug.lock);
-			condvar_broadcast(&t->udebug.active_cv);
-		} else {
-			mutex_unlock(&t->udebug.lock);
-		}
-	}
-
+		thread_t *thread = list_get_instance(cur, thread_t, th_link);
+		
+		mutex_lock(&thread->udebug.lock);
+		if ((thread->flags & THREAD_FLAG_USPACE) != 0) {
+			thread->udebug.active = true;
+			mutex_unlock(&thread->udebug.lock);
+			condvar_broadcast(&thread->udebug.active_cv);
+		} else
+			mutex_unlock(&thread->udebug.lock);
+	}
+	
 	mutex_unlock(&TASK->udebug.lock);
 	return reply;
@@ -223,16 +216,16 @@
  *
  * Closes the debugging session for the current task.
+ *
  * @return Zero on success or negative error code.
+ *
  */
 int udebug_end(void)
 {
-	int rc;
-
 	LOG("Task %" PRIu64, TASK->taskid);
-
-	mutex_lock(&TASK->udebug.lock);
-	rc = udebug_task_cleanup(TASK);
-	mutex_unlock(&TASK->udebug.lock);
-
+	
+	mutex_lock(&TASK->udebug.lock);
+	int rc = udebug_task_cleanup(TASK);
+	mutex_unlock(&TASK->udebug.lock);
+	
 	return rc;
 }
@@ -242,21 +235,23 @@
  * Sets the event mask that determines which events are enabled.
  *
- * @param mask	Or combination of events that should be enabled.
- * @return	Zero on success or negative error code.
+ * @param mask Or combination of events that should be enabled.
+ *
+ * @return Zero on success or negative error code.
+ *
  */
 int udebug_set_evmask(udebug_evmask_t mask)
 {
 	LOG("mask = 0x%x", mask);
-
-	mutex_lock(&TASK->udebug.lock);
-
+	
+	mutex_lock(&TASK->udebug.lock);
+	
 	if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
 		mutex_unlock(&TASK->udebug.lock);
 		return EINVAL;
 	}
-
+	
 	TASK->udebug.evmask = mask;
 	mutex_unlock(&TASK->udebug.lock);
-
+	
 	return 0;
 }
@@ -268,28 +263,27 @@
  * a debugging event or STOP occurs, at which point the thread loses GO.
  *
- * @param t	The thread to operate on (unlocked and need not be valid).
- * @param call	The GO call that we are servicing.
- */
-int udebug_go(thread_t *t, call_t *call)
-{
-	int rc;
-
-	/* On success, this will lock t->udebug.lock. */
-	rc = _thread_op_begin(t, false);
-	if (rc != EOK) {
+ * @param thread The thread to operate on (unlocked and need not be valid).
+ * @param call   The GO call that we are servicing.
+ *
+ */
+int udebug_go(thread_t *thread, call_t *call)
+{
+	/* On success, this will lock thread->udebug.lock. */
+	int rc = _thread_op_begin(thread, false);
+	if (rc != EOK)
 		return rc;
-	}
-
-	t->udebug.go_call = call;
-	t->udebug.go = true;
-	t->udebug.cur_event = 0;	/* none */
-
+	
+	thread->udebug.go_call = call;
+	thread->udebug.go = true;
+	thread->udebug.cur_event = 0;  /* none */
+	
 	/*
-	 * Neither t's lock nor threads_lock may be held during wakeup.
+	 * Neither thread's lock nor threads_lock may be held during wakeup.
+	 *
 	 */
-	waitq_wakeup(&t->udebug.go_wq, WAKEUP_FIRST);
-
-	_thread_op_end(t);
-
+	waitq_wakeup(&thread->udebug.go_wq, WAKEUP_FIRST);
+	
+	_thread_op_end(thread);
+	
 	return 0;
 }
@@ -300,50 +294,50 @@
  * can be considered stopped).
  *
- * @param t	The thread to operate on (unlocked and need not be valid).
- * @param call	The GO call that we are servicing.
- */
-int udebug_stop(thread_t *t, call_t *call)
-{
-	int rc;
-
+ * @param thread The thread to operate on (unlocked and need not be valid).
+ * @param call   The GO call that we are servicing.
+ *
+ */
+int udebug_stop(thread_t *thread, call_t *call)
+{
 	LOG("udebug_stop()");
-
+	
 	/*
-	 * On success, this will lock t->udebug.lock. Note that this makes sure
-	 * the thread is not stopped.
+	 * On success, this will lock thread->udebug.lock. Note that this
+	 * makes sure the thread is not stopped.
+	 *
 	 */
-	rc = _thread_op_begin(t, true);
-	if (rc != EOK) {
+	int rc = _thread_op_begin(thread, true);
+	if (rc != EOK)
 		return rc;
-	}
-
+	
 	/* Take GO away from the thread. */
-	t->udebug.go = false;
-
-	if (t->udebug.stoppable != true) {
+	thread->udebug.go = false;
+	
+	if (thread->udebug.stoppable != true) {
 		/* Answer will be sent when the thread becomes stoppable. */
-		_thread_op_end(t);
+		_thread_op_end(thread);
 		return 0;
 	}
-
+	
 	/*
 	 * Answer GO call.
+	 *
 	 */
-
+	
 	/* Make sure nobody takes this call away from us. */
-	call = t->udebug.go_call;
-	t->udebug.go_call = NULL;
-
+	call = thread->udebug.go_call;
+	thread->udebug.go_call = NULL;
+	
 	IPC_SET_RETVAL(call->data, 0);
 	IPC_SET_ARG1(call->data, UDEBUG_EVENT_STOP);
-
+	
 	THREAD->udebug.cur_event = UDEBUG_EVENT_STOP;
-
-	_thread_op_end(t);
-
+	
+	_thread_op_end(thread);
+	
 	mutex_lock(&TASK->udebug.lock);
 	ipc_answer(&TASK->answerbox, call);
 	mutex_unlock(&TASK->udebug.lock);
-
+	
 	return 0;
 }
@@ -365,29 +359,20 @@
  * a maximum size for the userspace buffer.
  *
- * @param buffer	The buffer for storing thread hashes.
- * @param buf_size	Buffer size in bytes.
- * @param stored	The actual number of bytes copied will be stored here.
- * @param needed	Total number of hashes that could have been saved.
+ * @param buffer   The buffer for storing thread hashes.
+ * @param buf_size Buffer size in bytes.
+ * @param stored   The actual number of bytes copied will be stored here.
+ * @param needed   Total number of hashes that could have been saved.
+ *
  */
 int udebug_thread_read(void **buffer, size_t buf_size, size_t *stored,
     size_t *needed)
 {
-	thread_t *t;
-	link_t *cur;
-	unative_t tid;
-	size_t copied_ids;
-	size_t extra_ids;
-	ipl_t ipl;
-	unative_t *id_buffer;
-	int flags;
-	size_t max_ids;
-
 	LOG("udebug_thread_read()");
-
+	
 	/* Allocate a buffer to hold thread IDs */
-	id_buffer = malloc(buf_size + 1, 0);
-
-	mutex_lock(&TASK->udebug.lock);
-
+	unative_t *id_buffer = malloc(buf_size + 1, 0);
+	
+	mutex_lock(&TASK->udebug.lock);
+	
 	/* Verify task state */
 	if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
@@ -395,43 +380,41 @@
 		return EINVAL;
 	}
-
-	ipl = interrupts_disable();
-	spinlock_lock(&TASK->lock);
+	
+	irq_spinlock_lock(&TASK->lock, true);
+	
 	/* Copy down the thread IDs */
-
-	max_ids = buf_size / sizeof(unative_t);
-	copied_ids = 0;
-	extra_ids = 0;
-
+	
+	size_t max_ids = buf_size / sizeof(unative_t);
+	size_t copied_ids = 0;
+	size_t extra_ids = 0;
+	
 	/* FIXME: make sure the thread isn't past debug shutdown... */
+	link_t *cur;
 	for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
-		t = list_get_instance(cur, thread_t, th_link);
-
-		spinlock_lock(&t->lock);
-		flags = t->flags;
-		spinlock_unlock(&t->lock);
-
+		thread_t *thread = list_get_instance(cur, thread_t, th_link);
+		
+		irq_spinlock_lock(&thread->lock, false);
+		int flags = thread->flags;
+		irq_spinlock_unlock(&thread->lock, false);
+		
 		/* Not interested in kernel threads. */
 		if ((flags & THREAD_FLAG_USPACE) == 0)
 			continue;
-
+		
 		if (copied_ids < max_ids) {
 			/* Using thread struct pointer as identification hash */
-			tid = (unative_t) t;
-			id_buffer[copied_ids++] = tid;
-		} else {
+			id_buffer[copied_ids++] = (unative_t) thread;
+		} else
 			extra_ids++;
-		}
-	}
-
-	spinlock_unlock(&TASK->lock);
-	interrupts_restore(ipl);
-
-	mutex_unlock(&TASK->udebug.lock);
-
+	}
+	
+	irq_spinlock_unlock(&TASK->lock, true);
+	
+	mutex_unlock(&TASK->udebug.lock);
+	
 	*buffer = id_buffer;
 	*stored = copied_ids * sizeof(unative_t);
 	*needed = (copied_ids + extra_ids) * sizeof(unative_t);
-
+	
 	return 0;
 }
@@ -442,19 +425,19 @@
  * Also returns the size of the data.
  *
- * @param data		Place to store pointer to newly allocated block.
- * @param data_size	Place to store size of the data.
- *
- * @returns		EOK.
+ * @param data      Place to store pointer to newly allocated block.
+ * @param data_size Place to store size of the data.
+ *
+ * @returns EOK.
+ *
  */
 int udebug_name_read(char **data, size_t *data_size)
 {
-	size_t name_size;
-
-	name_size = str_size(TASK->name) + 1;
+	size_t name_size = str_size(TASK->name) + 1;
+	
 	*data = malloc(name_size, 0);
 	*data_size = name_size;
-
+	
 	memcpy(*data, TASK->name, name_size);
-
+	
 	return 0;
 }
@@ -470,35 +453,33 @@
  * this function will fail with an EINVAL error code.
  *
- * @param t		Thread where call arguments are to be read.
- * @param buffer	Place to store pointer to new buffer.
- * @return		EOK on success, ENOENT if @a t is invalid, EINVAL
- *			if thread state is not valid for this operation.
- */
-int udebug_args_read(thread_t *t, void **buffer)
-{
-	int rc;
-	unative_t *arg_buffer;
-
+ * @param thread Thread where call arguments are to be read.
+ * @param buffer Place to store pointer to new buffer.
+ *
+ * @return EOK on success, ENOENT if @a thread is invalid, EINVAL
+ *         if thread state is not valid for this operation.
+ *
+ */
+int udebug_args_read(thread_t *thread, void **buffer)
+{
 	/* Prepare a buffer to hold the arguments. */
-	arg_buffer = malloc(6 * sizeof(unative_t), 0);
-
+	unative_t *arg_buffer = malloc(6 * sizeof(unative_t), 0);
+	
 	/* On success, this will lock t->udebug.lock. */
-	rc = _thread_op_begin(t, false);
-	if (rc != EOK) {
+	int rc = _thread_op_begin(thread, false);
+	if (rc != EOK)
 		return rc;
-	}
-
+	
 	/* Additionally we need to verify that we are inside a syscall. */
-	if (t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_B &&
-	    t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_E) {
-		_thread_op_end(t);
+	if ((thread->udebug.cur_event != UDEBUG_EVENT_SYSCALL_B) &&
+	    (thread->udebug.cur_event != UDEBUG_EVENT_SYSCALL_E)) {
+		_thread_op_end(thread);
 		return EINVAL;
 	}
-
+	
 	/* Copy to a local buffer before releasing the lock. */
-	memcpy(arg_buffer, t->udebug.syscall_args, 6 * sizeof(unative_t));
-
-	_thread_op_end(t);
-
+	memcpy(arg_buffer, thread->udebug.syscall_args, 6 * sizeof(unative_t));
+	
+	_thread_op_end(thread);
+	
 	*buffer = arg_buffer;
 	return 0;
@@ -514,35 +495,33 @@
  * call (as opposed to an exception). This is an implementation limit.
  *
- * @param t		Thread whose state is to be read.
- * @param buffer	Place to store pointer to new buffer.
- * @return		EOK on success, ENOENT if @a t is invalid, EINVAL
- *			if thread is not in valid state, EBUSY if istate
- *			is not available.
- */
-int udebug_regs_read(thread_t *t, void **buffer)
-{
-	istate_t *state, *state_buf;
-	int rc;
-
+ * @param thread Thread whose state is to be read.
+ * @param buffer Place to store pointer to new buffer.
+ *
+ * @return EOK on success, ENOENT if @a thread is invalid, EINVAL
+ *         if thread is not in valid state, EBUSY if istate
+ *         is not available.
+ *
+ */
+int udebug_regs_read(thread_t *thread, void **buffer)
+{
 	/* Prepare a buffer to hold the data. */
-	state_buf = malloc(sizeof(istate_t), 0);
-
+	istate_t *state_buf = malloc(sizeof(istate_t), 0);
+	
 	/* On success, this will lock t->udebug.lock */
-	rc = _thread_op_begin(t, false);
-	if (rc != EOK) {
+	int rc = _thread_op_begin(thread, false);
+	if (rc != EOK)
 		return rc;
-	}
-
-	state = t->udebug.uspace_state;
+	
+	istate_t *state = thread->udebug.uspace_state;
 	if (state == NULL) {
-		_thread_op_end(t);
+		_thread_op_end(thread);
 		return EBUSY;
 	}
-
+	
 	/* Copy to the allocated buffer */
 	memcpy(state_buf, state, sizeof(istate_t));
-
-	_thread_op_end(t);
-
+	
+	_thread_op_end(thread);
+	
 	*buffer = (void *) state_buf;
 	return 0;
@@ -555,30 +534,32 @@
  * and a pointer to it is written into @a buffer.
  *
- * @param uspace_addr	Address from where to start reading.
- * @param n		Number of bytes to read.
- * @param buffer	For storing a pointer to the allocated buffer.
+ * @param uspace_addr Address from where to start reading.
+ * @param n           Number of bytes to read.
+ * @param buffer      For storing a pointer to the allocated buffer.
+ *
  */
 int udebug_mem_read(unative_t uspace_addr, size_t n, void **buffer)
 {
-	void *data_buffer;
-	int rc;
-
 	/* Verify task state */
 	mutex_lock(&TASK->udebug.lock);
-
+	
 	if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
 		mutex_unlock(&TASK->udebug.lock);
 		return EBUSY;
 	}
-
-	data_buffer = malloc(n, 0);
-
-	/* NOTE: this is not strictly from a syscall... but that shouldn't
-	 * be a problem */
-	rc = copy_from_uspace(data_buffer, (void *)uspace_addr, n);
-	mutex_unlock(&TASK->udebug.lock);
-
-	if (rc != 0) return rc;
-
+	
+	void *data_buffer = malloc(n, 0);
+	
+	/*
+	 * NOTE: this is not strictly from a syscall... but that shouldn't
+	 * be a problem
+	 *
+	 */
+	int rc = copy_from_uspace(data_buffer, (void *) uspace_addr, n);
+	mutex_unlock(&TASK->udebug.lock);
+	
+	if (rc != 0)
+		return rc;
+	
 	*buffer = data_buffer;
 	return 0;
