Index: kernel/arch/ia32/include/atomic.h
===================================================================
--- kernel/arch/ia32/include/atomic.h	(revision ad2e39b0d2314844ed6e863c4cb255155c40ad79)
+++ kernel/arch/ia32/include/atomic.h	(revision 9f491d755d1e213ec6dc6a92dfdd6a178e5971cc)
@@ -42,7 +42,7 @@
 static inline void atomic_inc(atomic_t *val) {
 #ifdef CONFIG_SMP
-	asm volatile ("lock incl %0\n" : "=m" (val->count));
+	asm volatile ("lock incl %0\n" : "+m" (val->count));
 #else
-	asm volatile ("incl %0\n" : "=m" (val->count));
+	asm volatile ("incl %0\n" : "+m" (val->count));
 #endif /* CONFIG_SMP */
 }
@@ -50,7 +50,7 @@
 static inline void atomic_dec(atomic_t *val) {
 #ifdef CONFIG_SMP
-	asm volatile ("lock decl %0\n" : "=m" (val->count));
+	asm volatile ("lock decl %0\n" : "+m" (val->count));
 #else
-	asm volatile ("decl %0\n" : "=m" (val->count));
+	asm volatile ("decl %0\n" : "+m" (val->count));
 #endif /* CONFIG_SMP */
 }
@@ -62,5 +62,5 @@
 	asm volatile (
 		"lock xaddl %1, %0\n"
-		: "=m" (val->count), "+r" (r)
+		: "+m" (val->count), "+r" (r)
 	);
 
@@ -74,5 +74,5 @@
 	asm volatile (
 		"lock xaddl %1, %0\n"
-		: "=m" (val->count), "+r"(r)
+		: "+m" (val->count), "+r"(r)
 	);
 	
@@ -80,6 +80,6 @@
 }
 
-#define atomic_preinc(val) (atomic_postinc(val)+1)
-#define atomic_predec(val) (atomic_postdec(val)-1)
+#define atomic_preinc(val) (atomic_postinc(val) + 1)
+#define atomic_predec(val) (atomic_postdec(val) - 1)
 
 static inline uint32_t test_and_set(atomic_t *val) {
@@ -89,5 +89,5 @@
 		"movl $1, %0\n"
 		"xchgl %0, %1\n"
-		: "=r" (v),"=m" (val->count)
+		: "=r" (v),"+m" (val->count)
 	);
 	
@@ -102,18 +102,18 @@
 	preemption_disable();
 	asm volatile (
-		"0:;"
+		"0:\n"
 #ifdef CONFIG_HT
-		"pause;" /* Pentium 4's HT love this instruction */
+		"pause\n" /* Pentium 4's HT loves this instruction */
 #endif
-		"mov %0, %1;"
-		"testl %1, %1;"
-		"jnz 0b;"       /* Lightweight looping on locked spinlock */
+		"mov %0, %1\n"
+		"testl %1, %1\n"
+		"jnz 0b\n"       /* lightweight looping on locked spinlock */
 		
-		"incl %1;"      /* now use the atomic operation */
-		"xchgl %0, %1;"
-		"testl %1, %1;"
-		"jnz 0b;"
-                : "=m"(val->count),"=r"(tmp)
-		);
+		"incl %1\n"      /* now use the atomic operation */
+		"xchgl %0, %1\n"
+		"testl %1, %1\n"
+		"jnz 0b\n"
+		: "+m" (val->count), "=r" (tmp)
+	);
 	/*
 	 * Prevent critical section code from bleeding out this way up.
