Index: kernel/arch/amd64/include/asm.h
===================================================================
--- kernel/arch/amd64/include/asm.h	(revision 0f94c3d5de72d78a731147c9a1063270222411f2)
+++ kernel/arch/amd64/include/asm.h	(revision 02fd705004996ffbc41a97f2ce2660ad1934d2ae)
@@ -27,5 +27,5 @@
  */
 
-/** @addtogroup amd64	
+/** @addtogroup amd64
  * @{
  */
@@ -46,4 +46,5 @@
  * The stack is assumed to be STACK_SIZE bytes long.
  * The stack must start on page boundary.
+ *
  */
 static inline uintptr_t get_stack_base(void)
@@ -51,5 +52,9 @@
 	uintptr_t v;
 	
-	asm volatile ("andq %%rsp, %0\n" : "=r" (v) : "0" (~((uint64_t)STACK_SIZE-1)));
+	asm volatile (
+		"andq %%rsp, %[v]\n"
+		: [v] "=r" (v)
+		: "0" (~((uint64_t) STACK_SIZE-1))
+	);
 	
 	return v;
@@ -73,10 +78,16 @@
  * @param port Port to read from
  * @return Value read
+ *
  */
 static inline uint8_t pio_read_8(ioport8_t *port)
 {
 	uint8_t val;
-
-	asm volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port));
+	
+	asm volatile (
+		"inb %w[port], %b[val]\n"
+		: [val] "=a" (val)
+		: [port] "d" (port)
+	);
+	
 	return val;
 }
@@ -88,4 +99,5 @@
  * @param port Port to read from
  * @return Value read
+ *
  */
 static inline uint16_t pio_read_16(ioport16_t *port)
@@ -93,5 +105,10 @@
 	uint16_t val;
 	
-	asm volatile ("inw %w1, %w0 \n" : "=a" (val) : "d" (port));
+	asm volatile (
+		"inw %w[port], %w[val]\n"
+		: [val] "=a" (val)
+		: [port] "d" (port)
+	);
+	
 	return val;
 }
@@ -103,4 +120,5 @@
  * @param port Port to read from
  * @return Value read
+ *
  */
 static inline uint32_t pio_read_32(ioport32_t *port)
@@ -108,5 +126,10 @@
 	uint32_t val;
 	
-	asm volatile ("inl %w1, %0 \n" : "=a" (val) : "d" (port));
+	asm volatile (
+		"inl %w[port], %[val]\n"
+		: [val] "=a" (val)
+		: [port] "d" (port)
+	);
+	
 	return val;
 }
@@ -118,8 +141,12 @@
  * @param port Port to write to
  * @param val Value to write
+ *
  */
 static inline void pio_write_8(ioport8_t *port, uint8_t val)
 {
-	asm volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port));
+	asm volatile (
+		"outb %b[val], %w[port]\n"
+		:: [val] "a" (val), [port] "d" (port)
+	);
 }
 
@@ -130,8 +157,12 @@
  * @param port Port to write to
  * @param val Value to write
+ *
  */
 static inline void pio_write_16(ioport16_t *port, uint16_t val)
 {
-	asm volatile ("outw %w0, %w1\n" : : "a" (val), "d" (port));
+	asm volatile (
+		"outw %w[val], %w[port]\n"
+		:: [val] "a" (val), [port] "d" (port)
+	);
 }
 
@@ -142,8 +173,12 @@
  * @param port Port to write to
  * @param val Value to write
+ *
  */
 static inline void pio_write_32(ioport32_t *port, uint32_t val)
 {
-	asm volatile ("outl %0, %w1\n" : : "a" (val), "d" (port));
+	asm volatile (
+		"outl %[val], %w[port]\n"
+		:: [val] "a" (val), [port] "d" (port)
+	);
 }
 
@@ -160,13 +195,16 @@
  *
  * @return Old interrupt priority level.
+ *
  */
 static inline ipl_t interrupts_enable(void) {
 	ipl_t v;
-	__asm__ volatile (
+	
+	asm volatile (
 		"pushfq\n"
-		"popq %0\n"
+		"popq %[v]\n"
 		"sti\n"
-		: "=r" (v)
-	);
+		: [v] "=r" (v)
+	);
+	
 	return v;
 }
@@ -178,13 +216,16 @@
  *
  * @return Old interrupt priority level.
+ *
  */
 static inline ipl_t interrupts_disable(void) {
 	ipl_t v;
-	__asm__ volatile (
+	
+	asm volatile (
 		"pushfq\n"
-		"popq %0\n"
+		"popq %[v]\n"
 		"cli\n"
-		: "=r" (v)
-		);
+		: [v] "=r" (v)
+	);
+	
 	return v;
 }
@@ -195,11 +236,12 @@
  *
  * @param ipl Saved interrupt priority level.
+ *
  */
 static inline void interrupts_restore(ipl_t ipl) {
-	__asm__ volatile (
-		"pushq %0\n"
+	asm volatile (
+		"pushq %[ipl]\n"
 		"popfq\n"
-		: : "r" (ipl)
-		);
+		:: [ipl] "r" (ipl)
+	);
 }
 
@@ -209,12 +251,15 @@
  *
  * @return Current interrupt priority level.
+ *
  */
 static inline ipl_t interrupts_read(void) {
 	ipl_t v;
-	__asm__ volatile (
+	
+	asm volatile (
 		"pushfq\n"
-		"popq %0\n"
-		: "=r" (v)
-	);
+		"popq %[v]\n"
+		: [v] "=r" (v)
+	);
+	
 	return v;
 }
@@ -223,9 +268,10 @@
 static inline void write_msr(uint32_t msr, uint64_t value)
 {
-	__asm__ volatile (
-		"wrmsr;" : : "c" (msr), 
-		"a" ((uint32_t)(value)),
-		"d" ((uint32_t)(value >> 32))
-		);
+	asm volatile (
+		"wrmsr\n"
+		:: "c" (msr),
+		   "a" ((uint32_t) (value)),
+		   "d" ((uint32_t) (value >> 32))
+	);
 }
 
@@ -233,9 +279,12 @@
 {
 	uint32_t ax, dx;
-
-	__asm__ volatile (
-		"rdmsr;" : "=a"(ax), "=d"(dx) : "c" (msr)
-		);
-	return ((uint64_t)dx << 32) | ax;
+	
+	asm volatile (
+		"rdmsr\n"
+		: "=a" (ax), "=d" (dx)
+		: "c" (msr)
+	);
+	
+	return ((uint64_t) dx << 32) | ax;
 }
 
@@ -244,17 +293,16 @@
  *
  * Enable local APIC in MSR.
+ *
  */
 static inline void enable_l_apic_in_msr()
 {
-	__asm__ volatile (
+	asm volatile (
 		"movl $0x1b, %%ecx\n"
 		"rdmsr\n"
-		"orl $(1<<11),%%eax\n"
+		"orl $(1 << 11),%%eax\n"
 		"orl $(0xfee00000),%%eax\n"
 		"wrmsr\n"
-		:
-		:
-		:"%eax","%ecx","%edx"
-		);
+		::: "%eax", "%ecx", "%edx"
+	);
 }
 
@@ -262,9 +310,10 @@
 {
 	uintptr_t *ip;
-
-	__asm__ volatile (
-		"mov %%rip, %0"
-		: "=r" (ip)
-		);
+	
+	asm volatile (
+		"mov %%rip, %[ip]"
+		: [ip] "=r" (ip)
+	);
+	
 	return ip;
 }
@@ -273,8 +322,12 @@
  *
  * @param addr Address on a page whose TLB entry is to be invalidated.
+ *
  */
 static inline void invlpg(uintptr_t addr)
 {
-	__asm__ volatile ("invlpg %0\n" :: "m" (*((unative_t *)addr)));
+	asm volatile (
+		"invlpg %[addr]\n"
+		:: [addr] "m" (*((unative_t *) addr))
+	);
 }
 
@@ -282,8 +335,12 @@
  *
  * @param gdtr_reg Address of memory from where to load GDTR.
+ *
  */
 static inline void gdtr_load(struct ptr_16_64 *gdtr_reg)
 {
-	__asm__ volatile ("lgdtq %0\n" : : "m" (*gdtr_reg));
+	asm volatile (
+		"lgdtq %[gdtr_reg]\n"
+		:: [gdtr_reg] "m" (*gdtr_reg)
+	);
 }
 
@@ -291,8 +348,12 @@
  *
  * @param gdtr_reg Address of memory to where to load GDTR.
+ *
  */
 static inline void gdtr_store(struct ptr_16_64 *gdtr_reg)
 {
-	__asm__ volatile ("sgdtq %0\n" : : "m" (*gdtr_reg));
+	asm volatile (
+		"sgdtq %[gdtr_reg]\n"
+		:: [gdtr_reg] "m" (*gdtr_reg)
+	);
 }
 
@@ -300,8 +361,11 @@
  *
  * @param idtr_reg Address of memory from where to load IDTR.
+ *
  */
 static inline void idtr_load(struct ptr_16_64 *idtr_reg)
 {
-	__asm__ volatile ("lidtq %0\n" : : "m" (*idtr_reg));
+	asm volatile ("lidtq %[idtr_reg]\n"
+		:: [idtr_reg] "m" (*idtr_reg)
+	);
 }
 
@@ -309,21 +373,31 @@
  *
  * @param sel Selector specifying descriptor of TSS segment.
+ *
  */
 static inline void tr_load(uint16_t sel)
 {
-	__asm__ volatile ("ltr %0" : : "r" (sel));
+	asm volatile (
+		"ltr %[sel]"
+		:: [sel] "r" (sel)
+	);
 }
 
 #define GEN_READ_REG(reg) static inline unative_t read_ ##reg (void) \
-    { \
-	unative_t res; \
-	__asm__ volatile ("movq %%" #reg ", %0" : "=r" (res) ); \
-	return res; \
-    }
+	{ \
+		unative_t res; \
+		asm volatile ( \
+			"movq %%" #reg ", %[res]" \
+			: [res] "=r" (res) \
+		); \
+		return res; \
+	}
 
 #define GEN_WRITE_REG(reg) static inline void write_ ##reg (unative_t regn) \
-    { \
-	__asm__ volatile ("movq %0, %%" #reg : : "r" (regn)); \
-    }
+	{ \
+		asm volatile ( \
+			"movq %[regn], %%" #reg \
+			:: [regn] "r" (regn) \
+		); \
+	}
 
 GEN_READ_REG(cr0)
Index: kernel/arch/amd64/include/atomic.h
===================================================================
--- kernel/arch/amd64/include/atomic.h	(revision 0f94c3d5de72d78a731147c9a1063270222411f2)
+++ kernel/arch/amd64/include/atomic.h	(revision 02fd705004996ffbc41a97f2ce2660ad1934d2ae)
@@ -27,5 +27,5 @@
  */
 
-/** @addtogroup amd64	
+/** @addtogroup amd64
  * @{
  */
@@ -42,7 +42,13 @@
 static inline void atomic_inc(atomic_t *val) {
 #ifdef CONFIG_SMP
-	asm volatile ("lock incq %0\n" : "+m" (val->count));
+	asm volatile (
+		"lock incq %[count]\n"
+		: [count] "+m" (val->count)
+	);
 #else
-	asm volatile ("incq %0\n" : "+m" (val->count));
+	asm volatile (
+		"incq %[count]\n"
+		: [count] "+m" (val->count)
+	);
 #endif /* CONFIG_SMP */
 }
@@ -50,7 +56,13 @@
 static inline void atomic_dec(atomic_t *val) {
 #ifdef CONFIG_SMP
-	asm volatile ("lock decq %0\n" : "+m" (val->count));
+	asm volatile (
+		"lock decq %[count]\n"
+		: [count] "+m" (val->count)
+	);
 #else
-	asm volatile ("decq %0\n" : "+m" (val->count));
+	asm volatile (
+		"decq %[count]\n"
+		: [count] "+m" (val->count)
+	);
 #endif /* CONFIG_SMP */
 }
@@ -59,10 +71,10 @@
 {
 	long r = 1;
-
+	
 	asm volatile (
-		"lock xaddq %1, %0\n"
-		: "+m" (val->count), "+r" (r)
+		"lock xaddq %[r], %[count]\n"
+		: [count] "+m" (val->count), [r] "+r" (r)
 	);
-
+	
 	return r;
 }
@@ -73,6 +85,6 @@
 	
 	asm volatile (
-		"lock xaddq %1, %0\n"
-		: "+m" (val->count), "+r" (r)
+		"lock xaddq %[r], %[count]\n"
+		: [count] "+m" (val->count), [r] "+r" (r)
 	);
 	
@@ -80,6 +92,6 @@
 }
 
-#define atomic_preinc(val) (atomic_postinc(val) + 1)
-#define atomic_predec(val) (atomic_postdec(val) - 1)
+#define atomic_preinc(val)  (atomic_postinc(val) + 1)
+#define atomic_predec(val)  (atomic_postdec(val) - 1)
 
 static inline uint64_t test_and_set(atomic_t *val) {
@@ -87,7 +99,7 @@
 	
 	asm volatile (
-		"movq $1, %0\n"
-		"xchgq %0, %1\n"
-		: "=r" (v), "+m" (val->count)
+		"movq $1, %[v]\n"
+		"xchgq %[v], %[count]\n"
+		: [v] "=r" (v), [count] "+m" (val->count)
 	);
 	
@@ -100,5 +112,5 @@
 {
 	uint64_t tmp;
-
+	
 	preemption_disable();
 	asm volatile (
@@ -107,13 +119,13 @@
 		"pause\n"
 #endif
-		"mov %0, %1\n"
-		"testq %1, %1\n"
+		"mov %[count], %[tmp]\n"
+		"testq %[tmp], %[tmp]\n"
 		"jnz 0b\n"       /* lightweight looping on locked spinlock */
 		
-		"incq %1\n"      /* now use the atomic operation */
-		"xchgq %0, %1\n"
-		"testq %1, %1\n"
+		"incq %[tmp]\n"  /* now use the atomic operation */
+		"xchgq %[count], %[tmp]\n"
+		"testq %[tmp], %[tmp]\n"
 		"jnz 0b\n"
-                : "+m" (val->count), "=&r" (tmp)
+		: [count] "+m" (val->count), [tmp] "=&r" (tmp)
 	);
 	/*
Index: kernel/arch/amd64/src/amd64.c
===================================================================
--- kernel/arch/amd64/src/amd64.c	(revision 0f94c3d5de72d78a731147c9a1063270222411f2)
+++ kernel/arch/amd64/src/amd64.c	(revision 02fd705004996ffbc41a97f2ce2660ad1934d2ae)
@@ -73,5 +73,5 @@
 static void clean_IOPL_NT_flags(void)
 {
-	asm (
+	asm volatile (
 		"pushfq\n"
 		"pop %%rax\n"
@@ -79,7 +79,5 @@
 		"pushq %%rax\n"
 		"popfq\n"
-		:
-		:
-		: "%rax"
+		::: "%rax"
 	);
 }
@@ -91,11 +89,9 @@
 static void clean_AM_flag(void)
 {
-	asm (
+	asm volatile (
 		"mov %%cr0, %%rax\n"
 		"and $~(0x40000), %%rax\n"
 		"mov %%rax, %%cr0\n"
-		:
-		:
-		: "%rax"
+		::: "%rax"
 	);
 }
Index: kernel/arch/amd64/src/cpu/cpu.c
===================================================================
--- kernel/arch/amd64/src/cpu/cpu.c	(revision 0f94c3d5de72d78a731147c9a1063270222411f2)
+++ kernel/arch/amd64/src/cpu/cpu.c	(revision 02fd705004996ffbc41a97f2ce2660ad1934d2ae)
@@ -78,19 +78,17 @@
 {
 	asm volatile (
-		"movq %%cr0, %%rax;"
-		"btsq $1, %%rax;" /* cr0.mp */
-		"btrq $2, %%rax;"  /* cr0.em */
-		"movq %%rax, %%cr0;"
-
-		"movq %%cr4, %%rax;"
-		"bts $9, %%rax;" /* cr4.osfxsr */
-		"movq %%rax, %%cr4;"
-		:
-		:
-		:"%rax"
-		);
+		"movq %%cr0, %%rax\n"
+		"btsq $1, %%rax\n"  /* cr0.mp */
+		"btrq $2, %%rax\n"  /* cr0.em */
+		"movq %%rax, %%cr0\n"
+		
+		"movq %%cr4, %%rax\n"
+		"bts $9, %%rax\n"   /* cr4.osfxsr */
+		"movq %%rax, %%cr4\n"
+		::: "%rax"
+	);
 }
 
-/** Set the TS flag to 1. 
+/** Set the TS flag to 1.
  *
  * If a thread accesses coprocessor, exception is run, which 
@@ -100,24 +98,20 @@
 void fpu_disable(void)
 {
-	asm	volatile (
-		"mov %%cr0,%%rax;"
-		"bts $3,%%rax;"
-		"mov %%rax,%%cr0;"
-		:
-		:
-		:"%rax"
-		);
+	asm volatile (
+		"mov %%cr0, %%rax\n"
+		"bts $3, %%rax\n"
+		"mov %%rax, %%cr0\n"
+		::: "%rax"
+	);
 }
 
 void fpu_enable(void)
 {
-	asm	volatile (
-		"mov %%cr0,%%rax;"
-		"btr $3,%%rax;"
-		"mov %%rax,%%cr0;"
-		:
-		:
-		:"%rax"
-		);	
+	asm volatile (
+		"mov %%cr0, %%rax\n"
+		"btr $3, %%rax\n"
+		"mov %%rax, %%cr0\n"
+		::: "%rax"
+	);
 }
 
Index: kernel/arch/amd64/src/fpu_context.c
===================================================================
--- kernel/arch/amd64/src/fpu_context.c	(revision 0f94c3d5de72d78a731147c9a1063270222411f2)
+++ kernel/arch/amd64/src/fpu_context.c	(revision 02fd705004996ffbc41a97f2ce2660ad1934d2ae)
@@ -40,7 +40,7 @@
 {
 	asm volatile (
-		"fxsave %0"
-		: "=m"(*fctx)
-		);
+		"fxsave %[fctx]\n"
+		: [fctx] "=m" (*fctx)
+	);
 }
 
@@ -49,7 +49,7 @@
 {
 	asm volatile (
-		"fxrstor %0"
-		: "=m"(*fctx)
-		);
+		"fxrstor %[fctx]\n"
+		:: [fctx] "m" (*fctx)
+	);
 }
 
@@ -58,5 +58,5 @@
 	/* TODO: Zero all SSE, MMX etc. registers */
 	asm volatile (
-		"fninit;"
+		"fninit\n"
 	);
 }
Index: kernel/arch/amd64/src/userspace.c
===================================================================
--- kernel/arch/amd64/src/userspace.c	(revision 0f94c3d5de72d78a731147c9a1063270222411f2)
+++ kernel/arch/amd64/src/userspace.c	(revision 02fd705004996ffbc41a97f2ce2660ad1934d2ae)
@@ -27,5 +27,5 @@
  */
 
-/** @addtogroup amd64	
+/** @addtogroup amd64
  * @{
  */
@@ -48,34 +48,31 @@
 void userspace(uspace_arg_t *kernel_uarg)
 {
-	ipl_t ipl;
+	ipl_t ipl = interrupts_disable();
 	
-	ipl = interrupts_disable();
-
-	/* Clear CF,PF,AF,ZF,SF,DF,OF */
+	/* Clear CF, PF, AF, ZF, SF, DF, OF */
 	ipl &= ~(0xcd4);
-
-	asm volatile (""
-			  "pushq %0\n"
-			  "pushq %1\n"
-			  "pushq %2\n"
-			  "pushq %3\n"
-			  "pushq %4\n"
-			  "movq %5, %%rax\n"
-			  /* %rdi is defined to hold pcb_ptr - set it to 0 */
-			  "xorq %%rdi, %%rdi\n"
-			  "iretq\n"
-			  : : 
-			  "i" (gdtselector(UDATA_DES) | PL_USER), 
-			  "r" (kernel_uarg->uspace_stack+THREAD_STACK_SIZE), 
-			  "r" (ipl), 
-			  "i" (gdtselector(UTEXT_DES) | PL_USER), 
-			  "r" (kernel_uarg->uspace_entry),
-			  "r" (kernel_uarg->uspace_uarg)
-			  : "rax"
-			  );
+	
+	asm volatile (
+			"pushq %[udata_des]\n"
+			"pushq %[stack_top]\n"
+			"pushq %[ipl]\n"
+			"pushq %[utext_des]\n"
+			"pushq %[entry]\n"
+			"movq %[uarg], %%rax\n"
+			
+			/* %rdi is defined to hold pcb_ptr - set it to 0 */
+			"xorq %%rdi, %%rdi\n"
+			"iretq\n"
+			:: [udata_des] "i" (gdtselector(UDATA_DES) | PL_USER),
+			   [stack_top] "r" (kernel_uarg->uspace_stack + THREAD_STACK_SIZE),
+			   [ipl] "r" (ipl),
+			   [utext_des] "i" (gdtselector(UTEXT_DES) | PL_USER),
+			   [entry] "r" (kernel_uarg->uspace_entry),
+			   [uarg] "r" (kernel_uarg->uspace_uarg)
+			: "rax"
+		);
 	
 	/* Unreachable */
-	for(;;)
-		;
+	while (1);
 }
 
