Index: kernel/arch/ia32/include/asm.h
===================================================================
--- kernel/arch/ia32/include/asm.h	(revision deca67b8f66138a6a55b92cb5e6baa32bb733f52)
+++ kernel/arch/ia32/include/asm.h	(revision da58187297a4ba3471d2a800ec2cfb016c16e6ae)
@@ -28,5 +28,5 @@
  */
 
-/** @addtogroup ia32	
+/** @addtogroup ia32
  * @{
  */
@@ -57,4 +57,5 @@
  *
  * Halt the current CPU until interrupt event.
+ *
  */
 static inline void cpu_halt(void)
@@ -69,14 +70,20 @@
 
 #define GEN_READ_REG(reg) static inline unative_t read_ ##reg (void) \
-    { \
-	unative_t res; \
-	asm volatile ("movl %%" #reg ", %0" : "=r" (res) ); \
-	return res; \
-    }
+	{ \
+		unative_t res; \
+		asm volatile ( \
+			"movl %%" #reg ", %[res]" \
+			: [res] "=r" (res) \
+		); \
+		return res; \
+	}
 
 #define GEN_WRITE_REG(reg) static inline void write_ ##reg (unative_t regn) \
-    { \
-	asm volatile ("movl %0, %%" #reg : : "r" (regn)); \
-    }
+	{ \
+		asm volatile ( \
+			"movl %[regn], %%" #reg \
+			:: [regn] "r" (regn) \
+		); \
+	}
 
 GEN_READ_REG(cr0)
@@ -105,8 +112,12 @@
  * @param port Port to write to
  * @param val Value to write
+ *
  */
 static inline void pio_write_8(ioport8_t *port, uint8_t val)
 {
-	asm volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port));
+	asm volatile (
+		"outb %b[val], %w[port]\n"
+		:: [val] "a" (val), [port] "d" (port)
+	);
 }
 
@@ -117,8 +128,12 @@
  * @param port Port to write to
  * @param val Value to write
+ *
  */
 static inline void pio_write_16(ioport16_t *port, uint16_t val)
 {
-	asm volatile ("outw %w0, %w1\n" : : "a" (val), "d" (port));
+	asm volatile (
+		"outw %w[val], %w[port]\n"
+		:: [val] "a" (val), [port] "d" (port)
+	);
 }
 
@@ -129,8 +144,12 @@
  * @param port Port to write to
  * @param val Value to write
+ *
  */
 static inline void pio_write_32(ioport32_t *port, uint32_t val)
 {
-	asm volatile ("outl %0, %w1\n" : : "a" (val), "d" (port));
+	asm volatile (
+		"outl %[val], %w[port]\n"
+		:: [val] "a" (val), [port] "d" (port)
+	);
 }
 
@@ -141,4 +160,5 @@
  * @param port Port to read from
  * @return Value read
+ *
  */
 static inline uint8_t pio_read_8(ioport8_t *port)
@@ -146,5 +166,10 @@
 	uint8_t val;
 	
-	asm volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port));
+	asm volatile (
+		"inb %w[port], %b[val]\n"
+		: [val] "=a" (val)
+		: [port] "d" (port)
+	);
+	
 	return val;
 }
@@ -156,4 +181,5 @@
  * @param port Port to read from
  * @return Value read
+ *
  */
 static inline uint16_t pio_read_16(ioport16_t *port)
@@ -161,5 +187,10 @@
 	uint16_t val;
 	
-	asm volatile ("inw %w1, %w0 \n" : "=a" (val) : "d" (port));
+	asm volatile (
+		"inw %w[port], %w[val]\n"
+		: [val] "=a" (val)
+		: [port] "d" (port)
+	);
+	
 	return val;
 }
@@ -171,4 +202,5 @@
  * @param port Port to read from
  * @return Value read
+ *
  */
 static inline uint32_t pio_read_32(ioport32_t *port)
@@ -176,5 +208,10 @@
 	uint32_t val;
 	
-	asm volatile ("inl %w1, %0 \n" : "=a" (val) : "d" (port));
+	asm volatile (
+		"inl %w[port], %[val]\n"
+		: [val] "=a" (val)
+		: [port] "d" (port)
+	);
+	
 	return val;
 }
@@ -186,14 +223,17 @@
  *
  * @return Old interrupt priority level.
+ *
  */
 static inline ipl_t interrupts_enable(void)
 {
 	ipl_t v;
-	asm volatile (
-		"pushf\n\t"
-		"popl %0\n\t"
+	
+	asm volatile (
+		"pushf\n"
+		"popl %[v]\n"
 		"sti\n"
-		: "=r" (v)
-	);
+		: [v] "=r" (v)
+	);
+	
 	return v;
 }
@@ -205,14 +245,17 @@
  *
  * @return Old interrupt priority level.
+ *
  */
 static inline ipl_t interrupts_disable(void)
 {
 	ipl_t v;
-	asm volatile (
-		"pushf\n\t"
-		"popl %0\n\t"
+	
+	asm volatile (
+		"pushf\n"
+		"popl %[v]\n"
 		"cli\n"
-		: "=r" (v)
-	);
+		: [v] "=r" (v)
+	);
+	
 	return v;
 }
@@ -223,11 +266,12 @@
  *
  * @param ipl Saved interrupt priority level.
+ *
  */
 static inline void interrupts_restore(ipl_t ipl)
 {
 	asm volatile (
-		"pushl %0\n\t"
+		"pushl %[ipl]\n"
 		"popf\n"
-		: : "r" (ipl)
+		:: [ipl] "r" (ipl)
 	);
 }
@@ -236,13 +280,16 @@
  *
  * @return EFLAFS.
+ *
  */
 static inline ipl_t interrupts_read(void)
 {
 	ipl_t v;
-	asm volatile (
-		"pushf\n\t"
-		"popl %0\n"
-		: "=r" (v)
-	);
+	
+	asm volatile (
+		"pushf\n"
+		"popl %[v]\n"
+		: [v] "=r" (v)
+	);
+	
 	return v;
 }
@@ -251,6 +298,9 @@
 static inline void write_msr(uint32_t msr, uint64_t value)
 {
-	asm volatile ("wrmsr" : : "c" (msr), "a" ((uint32_t)(value)),
-	    "d" ((uint32_t)(value >> 32)));
+	asm volatile (
+		"wrmsr"
+		:: "c" (msr), "a" ((uint32_t) (value)),
+		   "d" ((uint32_t) (value >> 32))
+	);
 }
 
@@ -258,7 +308,12 @@
 {
 	uint32_t ax, dx;
-
-	asm volatile ("rdmsr" : "=a"(ax), "=d"(dx) : "c" (msr));
-	return ((uint64_t)dx << 32) | ax;
+	
+	asm volatile (
+		"rdmsr"
+		: "=a" (ax), "=d" (dx)
+		: "c" (msr)
+	);
+	
+	return ((uint64_t) dx << 32) | ax;
 }
 
@@ -269,4 +324,5 @@
  * The stack is assumed to be STACK_SIZE bytes long.
  * The stack must start on page boundary.
+ *
  */
 static inline uintptr_t get_stack_base(void)
@@ -275,6 +331,6 @@
 	
 	asm volatile (
-		"andl %%esp, %0\n"
-		: "=r" (v)
+		"andl %%esp, %[v]\n"
+		: [v] "=r" (v)
 		: "0" (~(STACK_SIZE - 1))
 	);
@@ -287,9 +343,10 @@
 {
 	uintptr_t *ip;
-
-	asm volatile (
-		"mov %%eip, %0"
-		: "=r" (ip)
-		);
+	
+	asm volatile (
+		"mov %%eip, %[ip]"
+		: [ip] "=r" (ip)
+	);
+	
 	return ip;
 }
@@ -298,8 +355,12 @@
  *
  * @param addr Address on a page whose TLB entry is to be invalidated.
+ *
  */
 static inline void invlpg(uintptr_t addr)
 {
-	asm volatile ("invlpg %0\n" :: "m" (*(unative_t *)addr));
+	asm volatile (
+		"invlpg %[addr]\n"
+		:: [addr] "m" (*(unative_t *) addr)
+	);
 }
 
@@ -307,8 +368,12 @@
  *
  * @param gdtr_reg Address of memory from where to load GDTR.
+ *
  */
 static inline void gdtr_load(ptr_16_32_t *gdtr_reg)
 {
-	asm volatile ("lgdtl %0\n" : : "m" (*gdtr_reg));
+	asm volatile (
+		"lgdtl %[gdtr_reg]\n"
+		:: [gdtr_reg] "m" (*gdtr_reg)
+	);
 }
 
@@ -316,8 +381,12 @@
  *
  * @param gdtr_reg Address of memory to where to load GDTR.
+ *
  */
 static inline void gdtr_store(ptr_16_32_t *gdtr_reg)
 {
-	asm volatile ("sgdtl %0\n" : : "m" (*gdtr_reg));
+	asm volatile (
+		"sgdtl %[gdtr_reg]\n"
+		:: [gdtr_reg] "m" (*gdtr_reg)
+	);
 }
 
@@ -325,8 +394,12 @@
  *
  * @param idtr_reg Address of memory from where to load IDTR.
+ *
  */
 static inline void idtr_load(ptr_16_32_t *idtr_reg)
 {
-	asm volatile ("lidtl %0\n" : : "m" (*idtr_reg));
+	asm volatile (
+		"lidtl %[idtr_reg]\n"
+		:: [idtr_reg] "m" (*idtr_reg)
+	);
 }
 
@@ -334,8 +407,12 @@
  *
  * @param sel Selector specifying descriptor of TSS segment.
+ *
  */
 static inline void tr_load(uint16_t sel)
 {
-	asm volatile ("ltr %0" : : "r" (sel));
+	asm volatile (
+		"ltr %[sel]"
+		:: [sel] "r" (sel)
+	);
 }
 
Index: kernel/arch/ia32/include/atomic.h
===================================================================
--- kernel/arch/ia32/include/atomic.h	(revision deca67b8f66138a6a55b92cb5e6baa32bb733f52)
+++ kernel/arch/ia32/include/atomic.h	(revision da58187297a4ba3471d2a800ec2cfb016c16e6ae)
@@ -27,5 +27,5 @@
  */
 
-/** @addtogroup ia32	
+/** @addtogroup ia32
  * @{
  */
@@ -42,7 +42,13 @@
 static inline void atomic_inc(atomic_t *val) {
 #ifdef CONFIG_SMP
-	asm volatile ("lock incl %0\n" : "+m" (val->count));
+	asm volatile (
+		"lock incl %[count]\n"
+		: [count] "+m" (val->count)
+	);
 #else
-	asm volatile ("incl %0\n" : "+m" (val->count));
+	asm volatile (
+		"incl %[count]\n"
+		: [count] "+m" (val->count)
+	);
 #endif /* CONFIG_SMP */
 }
@@ -50,7 +56,13 @@
 static inline void atomic_dec(atomic_t *val) {
 #ifdef CONFIG_SMP
-	asm volatile ("lock decl %0\n" : "+m" (val->count));
+	asm volatile (
+		"lock decl %[count]\n"
+		: [count] "+m" (val->count)
+	);
 #else
-	asm volatile ("decl %0\n" : "+m" (val->count));
+	asm volatile (
+		"decl %[count]\n"
+		: [count] "+m" (val->count)
+	);
 #endif /* CONFIG_SMP */
 }
@@ -59,10 +71,10 @@
 {
 	long r = 1;
-
+	
 	asm volatile (
-		"lock xaddl %1, %0\n"
-		: "+m" (val->count), "+r" (r)
+		"lock xaddl %[r], %[count]\n"
+		: [count] "+m" (val->count), [r] "+r" (r)
 	);
-
+	
 	return r;
 }
@@ -73,6 +85,6 @@
 	
 	asm volatile (
-		"lock xaddl %1, %0\n"
-		: "+m" (val->count), "+r"(r)
+		"lock xaddl %[r], %[count]\n"
+		: [count] "+m" (val->count), [r] "+r"(r)
 	);
 	
@@ -80,6 +92,6 @@
 }
 
-#define atomic_preinc(val) (atomic_postinc(val) + 1)
-#define atomic_predec(val) (atomic_postdec(val) - 1)
+#define atomic_preinc(val)  (atomic_postinc(val) + 1)
+#define atomic_predec(val)  (atomic_postdec(val) - 1)
 
 static inline uint32_t test_and_set(atomic_t *val) {
@@ -87,7 +99,7 @@
 	
 	asm volatile (
-		"movl $1, %0\n"
-		"xchgl %0, %1\n"
-		: "=r" (v),"+m" (val->count)
+		"movl $1, %[v]\n"
+		"xchgl %[v], %[count]\n"
+		: [v] "=r" (v), [count] "+m" (val->count)
 	);
 	
@@ -99,20 +111,20 @@
 {
 	uint32_t tmp;
-
+	
 	preemption_disable();
 	asm volatile (
 		"0:\n"
 #ifdef CONFIG_HT
-		"pause\n" /* Pentium 4's HT love this instruction */
+		"pause\n"        /* Pentium 4's HT loves this instruction */
 #endif
-		"mov %0, %1\n"
-		"testl %1, %1\n"
+		"mov %[count], %[tmp]\n"
+		"testl %[tmp], %[tmp]\n"
 		"jnz 0b\n"       /* lightweight looping on locked spinlock */
 		
-		"incl %1\n"      /* now use the atomic operation */
-		"xchgl %0, %1\n"	
-		"testl %1, %1\n"
+		"incl %[tmp]\n"  /* now use the atomic operation */
+		"xchgl %[count], %[tmp]\n"
+		"testl %[tmp], %[tmp]\n"
 		"jnz 0b\n"
-                : "+m" (val->count), "=&r"(tmp)
+		: [count] "+m" (val->count), [tmp] "=&r" (tmp)
 	);
 	/*
Index: kernel/arch/ia32/include/barrier.h
===================================================================
--- kernel/arch/ia32/include/barrier.h	(revision deca67b8f66138a6a55b92cb5e6baa32bb733f52)
+++ kernel/arch/ia32/include/barrier.h	(revision da58187297a4ba3471d2a800ec2cfb016c16e6ae)
@@ -27,5 +27,5 @@
  */
 
-/** @addtogroup ia32	
+/** @addtogroup ia32
  * @{
  */
@@ -47,6 +47,6 @@
  */
 
-#define CS_ENTER_BARRIER()	asm volatile ("" ::: "memory")
-#define CS_LEAVE_BARRIER()	asm volatile ("" ::: "memory")
+#define CS_ENTER_BARRIER()  asm volatile ("" ::: "memory")
+#define CS_LEAVE_BARRIER()  asm volatile ("" ::: "memory")
 
 static inline void cpuid_serialization(void)
@@ -60,27 +60,27 @@
 
 #if defined(CONFIG_FENCES_P4)
-#	define memory_barrier()		asm volatile ("mfence\n" ::: "memory")
-#	define read_barrier()		asm volatile ("lfence\n" ::: "memory")
-#	ifdef CONFIG_WEAK_MEMORY
-#		define write_barrier()	asm volatile ("sfence\n" ::: "memory")
-#	else
-#		define write_barrier()  asm volatile( "" ::: "memory");
-#	endif
+	#define memory_barrier()  asm volatile ("mfence\n" ::: "memory")
+	#define read_barrier()    asm volatile ("lfence\n" ::: "memory")
+	#ifdef CONFIG_WEAK_MEMORY
+		#define write_barrier()  asm volatile ("sfence\n" ::: "memory")
+	#else
+		#define write_barrier()  asm volatile ("" ::: "memory");
+	#endif
 #elif defined(CONFIG_FENCES_P3)
-#	define memory_barrier()		cpuid_serialization()
-#	define read_barrier()		cpuid_serialization()
-#	ifdef CONFIG_WEAK_MEMORY
-#		define write_barrier()	asm volatile ("sfence\n" ::: "memory")
-#	else
-#		define write_barrier()  asm volatile( "" ::: "memory");
-#	endif
+	#define memory_barrier()  cpuid_serialization()
+	#define read_barrier()    cpuid_serialization()
+	#ifdef CONFIG_WEAK_MEMORY
+		#define write_barrier()  asm volatile ("sfence\n" ::: "memory")
+	#else
+		#define write_barrier()  asm volatile ("" ::: "memory");
+	#endif
 #else
-#	define memory_barrier()		cpuid_serialization()
-#	define read_barrier()		cpuid_serialization()
-#	ifdef CONFIG_WEAK_MEMORY
-#		define write_barrier()	cpuid_serialization()
-#	else
-#		define write_barrier()  asm volatile( "" ::: "memory");
-#	endif
+	#define memory_barrier()  cpuid_serialization()
+	#define read_barrier()    cpuid_serialization()
+	#ifdef CONFIG_WEAK_MEMORY
+		#define write_barrier()  cpuid_serialization()
+	#else
+		#define write_barrier()  asm volatile ("" ::: "memory");
+	#endif
 #endif
 
@@ -91,6 +91,6 @@
  * sufficient for them to drain to the D-cache).
  */
-#define smc_coherence(a)		write_barrier()
-#define smc_coherence_block(a, l)	write_barrier()
+#define smc_coherence(a)           write_barrier()
+#define smc_coherence_block(a, l)  write_barrier()
 
 #endif
Index: kernel/arch/ia32/include/cpuid.h
===================================================================
--- kernel/arch/ia32/include/cpuid.h	(revision deca67b8f66138a6a55b92cb5e6baa32bb733f52)
+++ kernel/arch/ia32/include/cpuid.h	(revision da58187297a4ba3471d2a800ec2cfb016c16e6ae)
@@ -75,19 +75,19 @@
 	
 	asm volatile (
-		"pushf\n"               /* read flags */
-		"popl %0\n"
-		"movl %0, %1\n"
+		"pushf\n"                    /* read flags */
+		"popl %[ret]\n"
+		"movl %[ret], %[val]\n"
 		
-		"btcl $21, %1\n"        /* swap the ID bit */
+		"btcl $21, %[val]\n"         /* swap the ID bit */
 		
-		"pushl %1\n"            /* propagate the change into flags */
+		"pushl %[val]\n"             /* propagate the change into flags */
 		"popf\n"
 		"pushf\n"
-		"popl %1\n"
+		"popl %[val]\n"
 		
-		"andl $(1 << 21), %0\n" /* interrested only in ID bit */
-		"andl $(1 << 21), %1\n"
-		"xorl %1, %0\n"
-		: "=r" (ret), "=r" (val)
+		"andl $(1 << 21), %[ret]\n"  /* interested only in ID bit */
+		"andl $(1 << 21), %[val]\n"
+		"xorl %[val], %[ret]\n"
+		: [ret] "=r" (ret), [val] "=r" (val)
 	);
 	
@@ -99,5 +99,6 @@
 	asm volatile (
 		"cpuid\n"
-		: "=a" (info->cpuid_eax), "=b" (info->cpuid_ebx), "=c" (info->cpuid_ecx), "=d" (info->cpuid_edx)
+		: "=a" (info->cpuid_eax), "=b" (info->cpuid_ebx),
+		  "=c" (info->cpuid_ecx), "=d" (info->cpuid_edx)
 		: "a" (cmd)
 	);
Index: kernel/arch/ia32/src/cpu/cpu.c
===================================================================
--- kernel/arch/ia32/src/cpu/cpu.c	(revision deca67b8f66138a6a55b92cb5e6baa32bb733f52)
+++ kernel/arch/ia32/src/cpu/cpu.c	(revision da58187297a4ba3471d2a800ec2cfb016c16e6ae)
@@ -49,15 +49,15 @@
  * Contains only non-MP-Specification specific SMP code.
  */
-#define AMD_CPUID_EBX	0x68747541
-#define AMD_CPUID_ECX 	0x444d4163
-#define AMD_CPUID_EDX 	0x69746e65
+#define AMD_CPUID_EBX  0x68747541
+#define AMD_CPUID_ECX  0x444d4163
+#define AMD_CPUID_EDX  0x69746e65
 
-#define INTEL_CPUID_EBX	0x756e6547
-#define INTEL_CPUID_ECX 0x6c65746e
-#define INTEL_CPUID_EDX 0x49656e69
+#define INTEL_CPUID_EBX  0x756e6547
+#define INTEL_CPUID_ECX  0x6c65746e
+#define INTEL_CPUID_EDX  0x49656e69
 
 
 enum vendor {
-	VendorUnknown=0,
+	VendorUnknown = 0,
 	VendorAMD,
 	VendorIntel
@@ -73,10 +73,8 @@
 {
 	asm volatile (
-		"mov %%cr0,%%eax;"
-		"or $8,%%eax;"
-		"mov %%eax,%%cr0;"
-		:
-		:
-		: "%eax"
+		"mov %%cr0, %%eax\n"
+		"or $8, %%eax\n"
+		"mov %%eax, %%cr0\n"
+		::: "%eax"
 	);
 }
@@ -85,11 +83,9 @@
 {
 	asm volatile (
-		"mov %%cr0,%%eax;"
-		"and $0xffFFffF7,%%eax;"
-		"mov %%eax,%%cr0;"
-		:
-		:
-		: "%eax"
-	);	
+		"mov %%cr0, %%eax\n"
+		"and $0xffFFffF7, %%eax\n"
+		"mov %%eax,%%cr0\n"
+		::: "%eax"
+	);
 }
 
@@ -103,9 +99,9 @@
 	CPU->arch.tss = tss_p;
 	CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - ((uint8_t *) CPU->arch.tss);
-
+	
 	CPU->fpu_owner = NULL;
-
+	
 	cpuid(1, &info);
-
+	
 	fi.word = info.cpuid_edx;
 	efi.word = info.cpuid_ecx;
@@ -114,13 +110,13 @@
 		fpu_fxsr();
 	else
-		fpu_fsr();	
+		fpu_fsr();
 	
 	if (fi.bits.sse) {
 		asm volatile (
-			"mov %%cr4,%0\n"
-			"or %1,%0\n"
-			"mov %0,%%cr4\n"
-			: "+r" (help)
-			: "i" (CR4_OSFXSR_MASK|(1<<10)) 
+			"mov %%cr4, %[help]\n"
+			"or %[mask], %[help]\n"
+			"mov %[help], %%cr4\n"
+			: [help] "+r" (help)
+			: [mask] "i" (CR4_OSFXSR_MASK | (1 << 10))
 		);
 	}
Index: kernel/arch/ia32/src/fpu_context.c
===================================================================
--- kernel/arch/ia32/src/fpu_context.c	(revision deca67b8f66138a6a55b92cb5e6baa32bb733f52)
+++ kernel/arch/ia32/src/fpu_context.c	(revision da58187297a4ba3471d2a800ec2cfb016c16e6ae)
@@ -45,7 +45,7 @@
 {
 	asm volatile (
-		"fnsave %0"
-		: "=m"(*fctx)
-		);
+		"fnsave %[fctx]"
+		: [fctx] "=m" (*fctx)
+	);
 }
 
@@ -53,7 +53,7 @@
 {
 	asm volatile (
-		"frstor %0"
-		: "=m"(*fctx)
-		);
+		"frstor %[fctx]"
+		: [fctx] "=m" (*fctx)
+	);
 }
 
@@ -61,7 +61,7 @@
 {
 	asm volatile (
-		"fxsave %0"
-		: "=m"(*fctx)
-		);
+		"fxsave %[fctx]"
+		: [fctx] "=m" (*fctx)
+	);
 }
 
@@ -69,12 +69,10 @@
 {
 	asm volatile (
-		"fxrstor %0"
-		: "=m"(*fctx)
-		);
+		"fxrstor %[fctx]"
+		: [fctx] "=m" (*fctx)
+	);
 }
 
-/*
-	Setup using fxsr instruction
-*/
+/* Setup using fxsr instruction */
 void fpu_fxsr(void)
 {
@@ -82,7 +80,6 @@
 	fpu_restore=fpu_context_fx_restore;
 }
-/*
-	Setup using not fxsr instruction
-*/
+
+/* Setup using not fxsr instruction */
 void fpu_fsr(void)
 {
@@ -103,14 +100,16 @@
 void fpu_init()
 {
-	uint32_t help0 = 0, help1 = 0;
+	uint32_t help0 = 0;
+	uint32_t help1 = 0;
+	
 	asm volatile (
-		"fninit;\n"
-		"stmxcsr %0\n"
-		"mov %0,%1;\n"
-		"or %2,%1;\n"
-		"mov %1,%0;\n"
-		"ldmxcsr %0;\n"
-		: "+m" (help0), "+r" (help1)
-		: "i" (0x1f80)
+		"fninit\n"
+		"stmxcsr %[help0]\n"
+		"mov %[help0], %[help1]\n"
+		"or %[magic], %[help1]\n"
+		"mov %[help1], %[help0]\n"
+		"ldmxcsr %[help0]\n"
+		: [help0] "+m" (help0), [help1] "+r" (help1)
+		: [magic] "i" (0x1f80)
 	);
 }
Index: kernel/arch/ia32/src/interrupt.c
===================================================================
--- kernel/arch/ia32/src/interrupt.c	(revision deca67b8f66138a6a55b92cb5e6baa32bb733f52)
+++ kernel/arch/ia32/src/interrupt.c	(revision da58187297a4ba3471d2a800ec2cfb016c16e6ae)
@@ -139,10 +139,10 @@
 	uint32_t mxcsr;
 	asm (
-		"stmxcsr %0;\n"
-		: "=m" (mxcsr)
+		"stmxcsr %[mxcsr]\n"
+		: [mxcsr] "=m" (mxcsr)
 	);
 	fault_if_from_uspace(istate, "SIMD FP exception(19), MXCSR: %#zx.",
 	    (unative_t) mxcsr);
-
+	
 	decode_istate(istate);
 	printf("MXCSR: %#lx\n", mxcsr);
Index: kernel/arch/ia32/src/pm.c
===================================================================
--- kernel/arch/ia32/src/pm.c	(revision deca67b8f66138a6a55b92cb5e6baa32bb733f52)
+++ kernel/arch/ia32/src/pm.c	(revision da58187297a4ba3471d2a800ec2cfb016c16e6ae)
@@ -27,5 +27,5 @@
  */
 
-/** @addtogroup ia32	
+/** @addtogroup ia32
  * @{
  */
@@ -155,5 +155,5 @@
 		"push %%eax\n"
 		"popfl\n"
-		: : : "eax"
+		::: "eax"
 	);
 }
@@ -166,5 +166,5 @@
 		"and $0xfffbffff, %%eax\n"
 		"mov %%eax, %%cr0\n"
-		: : : "eax"
+		::: "eax"
 	);
 }
Index: kernel/arch/ia32/src/userspace.c
===================================================================
--- kernel/arch/ia32/src/userspace.c	(revision deca67b8f66138a6a55b92cb5e6baa32bb733f52)
+++ kernel/arch/ia32/src/userspace.c	(revision da58187297a4ba3471d2a800ec2cfb016c16e6ae)
@@ -27,5 +27,5 @@
  */
 
-/** @addtogroup ia32	
+/** @addtogroup ia32
  * @{
  */
@@ -48,8 +48,6 @@
 void userspace(uspace_arg_t *kernel_uarg)
 {
-	ipl_t ipl;
-
-	ipl = interrupts_disable();
-
+	ipl_t ipl = interrupts_disable();
+	
 	asm volatile (
 		/*
@@ -61,33 +59,31 @@
 		"push %%eax\n"
 		"popfl\n"
-
+		
 		/* Set up GS register (TLS) */
-		"movl %6, %%gs\n"
-
-		"pushl %0\n"
-		"pushl %1\n"
-		"pushl %2\n"
-		"pushl %3\n"
-		"pushl %4\n"
-		"movl %5, %%eax\n"
-
+		"movl %[tls_des], %%gs\n"
+		
+		"pushl %[udata_des]\n"
+		"pushl %[stack_size]\n"
+		"pushl %[ipl]\n"
+		"pushl %[utext_des]\n"
+		"pushl %[entry]\n"
+		"movl %[uarg], %%eax\n"
+		
 		/* %ebx is defined to hold pcb_ptr - set it to 0 */
-		"xorl %%ebx, %%ebx\n"	
-
+		"xorl %%ebx, %%ebx\n"
+		
 		"iret\n"
-		: 
-		: "i" (selector(UDATA_DES) | PL_USER),
-		  "r" ((uint8_t *) kernel_uarg->uspace_stack +
-		      THREAD_STACK_SIZE),
-		  "r" (ipl),
-		  "i" (selector(UTEXT_DES) | PL_USER),
-		  "r" (kernel_uarg->uspace_entry),
-		  "r" (kernel_uarg->uspace_uarg),
-		  "r" (selector(TLS_DES))
+		:
+		: [udata_des] "i" (selector(UDATA_DES) | PL_USER),
+		  [stack_size] "r" ((uint8_t *) kernel_uarg->uspace_stack + THREAD_STACK_SIZE),
+		  [ipl] "r" (ipl),
+		  [utext_des] "i" (selector(UTEXT_DES) | PL_USER),
+		  [entry] "r" (kernel_uarg->uspace_entry),
+		  [uarg] "r" (kernel_uarg->uspace_uarg),
+		  [tls_des] "r" (selector(TLS_DES))
 		: "eax");
 	
 	/* Unreachable */
-	for(;;)
-		;
+	while (1);
 }
 
