Index: kernel/arch/amd64/include/atomic.h
===================================================================
--- kernel/arch/amd64/include/atomic.h	(revision d7c9fcbada5f87468ffaff65b3781cb5cfe0f582)
+++ kernel/arch/amd64/include/atomic.h	(revision 2a3124c420b0892b8e7da990c72b12a6d7b06a9c)
@@ -42,7 +42,7 @@
 static inline void atomic_inc(atomic_t *val) {
 #ifdef CONFIG_SMP
-	asm volatile ("lock incq %0\n" : "=m" (val->count));
+	asm volatile ("lock incq %0\n" : "+m" (val->count));
 #else
-	asm volatile ("incq %0\n" : "=m" (val->count));
+	asm volatile ("incq %0\n" : "+m" (val->count));
 #endif /* CONFIG_SMP */
 }
@@ -50,7 +50,7 @@
 static inline void atomic_dec(atomic_t *val) {
 #ifdef CONFIG_SMP
-	asm volatile ("lock decq %0\n" : "=m" (val->count));
+	asm volatile ("lock decq %0\n" : "+m" (val->count));
 #else
-	asm volatile ("decq %0\n" : "=m" (val->count));
+	asm volatile ("decq %0\n" : "+m" (val->count));
 #endif /* CONFIG_SMP */
 }
@@ -62,5 +62,5 @@
 	asm volatile (
 		"lock xaddq %1, %0\n"
-		: "=m" (val->count), "+r" (r)
+		: "+m" (val->count), "+r" (r)
 	);
 
@@ -74,5 +74,5 @@
 	asm volatile (
 		"lock xaddq %1, %0\n"
-		: "=m" (val->count), "+r" (r)
+		: "+m" (val->count), "+r" (r)
 	);
 	
@@ -80,6 +80,6 @@
 }
 
-#define atomic_preinc(val) (atomic_postinc(val)+1)
-#define atomic_predec(val) (atomic_postdec(val)-1)
+#define atomic_preinc(val) (atomic_postinc(val) + 1)
+#define atomic_predec(val) (atomic_postdec(val) - 1)
 
 static inline uint64_t test_and_set(atomic_t *val) {
@@ -89,5 +89,5 @@
 		"movq $1, %0\n"
 		"xchgq %0, %1\n"
-		: "=r" (v),"=m" (val->count)
+		: "=r" (v), "+m" (val->count)
 	);
 	
@@ -103,18 +103,18 @@
 	preemption_disable();
 	asm volatile (
-		"0:;"
+		"0:\n"
 #ifdef CONFIG_HT
-		"pause;"
+		"pause\n"
 #endif
-		"mov %0, %1;"
-		"testq %1, %1;"
-		"jnz 0b;"       /* Lightweight looping on locked spinlock */
+		"mov %0, %1\n"
+		"testq %1, %1\n"
+		"jnz 0b\n"       /* Lightweight looping on locked spinlock */
 		
-		"incq %1;"      /* now use the atomic operation */
-		"xchgq %0, %1;"
-		"testq %1, %1;"
-		"jnz 0b;"
-                : "=m"(val->count),"=r"(tmp)
-		);
+		"incq %1\n"      /* now use the atomic operation */
+		"xchgq %0, %1\n"
+		"testq %1, %1\n"
+		"jnz 0b\n"
+		: "+m" (val->count), "=r" (tmp)
+	);
 	/*
 	 * Prevent critical section code from bleeding out this way up.
