Index: uspace/libc/arch/amd64/include/atomic.h
===================================================================
--- uspace/libc/arch/amd64/include/atomic.h	(revision 81cfefaee34c6617759e2519a2dc4612cb7ba842)
+++ uspace/libc/arch/amd64/include/atomic.h	(revision 726e1043bcec8878ef81123f08f4dd7dc7d8e58f)
@@ -39,9 +39,9 @@
 
 static inline void atomic_inc(atomic_t *val) {
-	__asm__ volatile ("lock incq %0\n" : "=m" (val->count));
+	asm volatile ("lock incq %0\n" : "=m" (val->count));
 }
 
 static inline void atomic_dec(atomic_t *val) {
-	__asm__ volatile ("lock decq %0\n" : "=m" (val->count));
+	asm volatile ("lock decq %0\n" : "=m" (val->count));
 }
 
@@ -50,5 +50,5 @@
 	long r;
 
-	__asm__ volatile (
+	asm volatile (
 		"movq $1, %0\n"
 		"lock xaddq %0, %1\n"
@@ -63,5 +63,5 @@
 	long r;
 	
-	__asm__ volatile (
+	asm volatile (
 		"movq $-1, %0\n"
 		"lock xaddq %0, %1\n"
Index: uspace/libc/arch/amd64/include/thread.h
===================================================================
--- uspace/libc/arch/amd64/include/thread.h	(revision 81cfefaee34c6617759e2519a2dc4612cb7ba842)
+++ uspace/libc/arch/amd64/include/thread.h	(revision 726e1043bcec8878ef81123f08f4dd7dc7d8e58f)
@@ -52,5 +52,5 @@
 	void * retval;
 
-	__asm__ ("movq %%fs:0, %0" : "=r"(retval));
+	asm ("movq %%fs:0, %0" : "=r"(retval));
 	return retval;
 }
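
The %fs:0 load above works because the TCB keeps a pointer to itself at
offset zero; a sketch of that assumed layout (the real tcb_t is
declared elsewhere in libc):

	typedef struct tcb_sketch {
		struct tcb_sketch *self;	/* must stay at offset 0 */
	} tcb_sketch_t;

	static inline tcb_sketch_t *tcb_get_sketch(void)
	{
		tcb_sketch_t *tcb;
		/* %fs points at the TCB; its first word points back at
		 * the TCB itself, so a single load recovers it. */
		asm ("movq %%fs:0, %0" : "=r" (tcb));
		return tcb;
	}

The ia32 getter below differs only in operand size and in using %gs
instead of %fs.
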
Index: uspace/libc/arch/ia32/include/atomic.h
===================================================================
--- uspace/libc/arch/ia32/include/atomic.h	(revision 81cfefaee34c6617759e2519a2dc4612cb7ba842)
+++ uspace/libc/arch/ia32/include/atomic.h	(revision 726e1043bcec8878ef81123f08f4dd7dc7d8e58f)
@@ -37,9 +37,9 @@
 
 static inline void atomic_inc(atomic_t *val) {
-	__asm__ volatile ("lock incl %0\n" : "=m" (val->count));
+	asm volatile ("lock incl %0\n" : "=m" (val->count));
 }
 
 static inline void atomic_dec(atomic_t *val) {
-	__asm__ volatile ("lock decl %0\n" : "=m" (val->count));
+	asm volatile ("lock decl %0\n" : "=m" (val->count));
 }
 
@@ -48,5 +48,5 @@
 	long r;
 
-	__asm__ volatile (
+	asm volatile (
 		"movl $1, %0\n"
 		"lock xaddl %0, %1\n"
@@ -61,5 +61,5 @@
 	long r;
 	
-	__asm__ volatile (
+	asm volatile (
 		"movl $-1, %0\n"
 		"lock xaddl %0, %1\n"
Index: uspace/libc/arch/ia32/include/thread.h
===================================================================
--- uspace/libc/arch/ia32/include/thread.h	(revision 81cfefaee34c6617759e2519a2dc4612cb7ba842)
+++ uspace/libc/arch/ia32/include/thread.h	(revision 726e1043bcec8878ef81123f08f4dd7dc7d8e58f)
@@ -52,5 +52,5 @@
 	void * retval;
 
-	__asm__ ("movl %%gs:0, %0" : "=r"(retval));
+	asm ("movl %%gs:0, %0" : "=r"(retval));
 	return retval;
 }
Index: uspace/libc/arch/ia64/include/atomic.h
===================================================================
--- uspace/libc/arch/ia64/include/atomic.h	(revision 81cfefaee34c6617759e2519a2dc4612cb7ba842)
+++ uspace/libc/arch/ia64/include/atomic.h	(revision 726e1043bcec8878ef81123f08f4dd7dc7d8e58f)
@@ -47,5 +47,5 @@
 	long v;
 
- 	__asm__ volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (val->count) : "i" (imm));
+ 	asm volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (val->count) : "i" (imm));
  
 	return v;
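
Two constraints on the hunk above worth keeping in mind: ia64 fetchadd
encodes only the immediates -16, -8, -4, -1, 1, 4, 8 and 16, and the
"i" constraint requires imm to fold to a compile-time constant at every
inlined call site. A sketch of the enclosing inline under those
assumptions (the names are illustrative):

	static inline long atomic_add_sketch(volatile long *count, int imm)
	{
		long v;
		/* v receives the value *count held before the add. */
		asm volatile ("fetchadd8.rel %0 = %1, %2\n"
		    : "=r" (v), "+m" (*count) : "i" (imm));
		return v;
	}
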
Index: uspace/libc/arch/ia64/include/thread.h
===================================================================
--- uspace/libc/arch/ia64/include/thread.h	(revision 81cfefaee34c6617759e2519a2dc4612cb7ba842)
+++ uspace/libc/arch/ia64/include/thread.h	(revision 726e1043bcec8878ef81123f08f4dd7dc7d8e58f)
@@ -46,5 +46,5 @@
 static inline void __tcb_set(tcb_t *tcb)
 {
-	__asm__ volatile ("mov r13 = %0\n" : : "r" (tcb) : "r13");
+	asm volatile ("mov r13 = %0\n" : : "r" (tcb) : "r13");
 }
 
@@ -53,5 +53,5 @@
 	void *retval;
 
-	__asm__ volatile ("mov %0 = r13\n" : "=r" (retval));
+	asm volatile ("mov %0 = r13\n" : "=r" (retval));
 
 	return retval;
Index: uspace/libc/arch/mips32/include/atomic.h
===================================================================
--- uspace/libc/arch/mips32/include/atomic.h	(revision 81cfefaee34c6617759e2519a2dc4612cb7ba842)
+++ uspace/libc/arch/mips32/include/atomic.h	(revision 726e1043bcec8878ef81123f08f4dd7dc7d8e58f)
@@ -57,5 +57,5 @@
 	long tmp, v;
 
-	__asm__ volatile (
+	asm volatile (
 		"1:\n"
 		"	ll %0, %1\n"
Index: uspace/libc/arch/mips32/include/thread.h
===================================================================
--- uspace/libc/arch/mips32/include/thread.h	(revision 81cfefaee34c6617759e2519a2dc4612cb7ba842)
+++ uspace/libc/arch/mips32/include/thread.h	(revision 726e1043bcec8878ef81123f08f4dd7dc7d8e58f)
@@ -62,5 +62,5 @@
 	tp += MIPS_TP_OFFSET + sizeof(tcb_t);
 
-	__asm__ volatile ("add $27, %0, $0" : : "r"(tp)); /* Move tls to K1 */
+	asm volatile ("add $27, %0, $0" : : "r"(tp)); /* Move tls to K1 */
 }
 
@@ -69,5 +69,5 @@
 	void * retval;
 
-	__asm__ volatile("add %0, $27, $0" : "=r"(retval));
+	asm volatile("add %0, $27, $0" : "=r"(retval));
 
 	return (tcb_t *)(retval - MIPS_TP_OFFSET - sizeof(tcb_t));
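
One portability note on the getter above: retval is a void *, so the
subtraction relies on GCC's extension giving void pointer arithmetic
byte granularity. The same bias arithmetic in plain C, as a round-trip
sketch of what the two hunks establish between them:

	/* The setter biases the pointer up into $k1 ($27);
	 * the getter biases it back down. */
	static inline tcb_t *tp_bias_sketch(tcb_t *tcb)
	{
		char *tp = (char *) tcb + MIPS_TP_OFFSET + sizeof(tcb_t);
		return (tcb_t *) (tp - MIPS_TP_OFFSET - sizeof(tcb_t));
	}
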
Index: uspace/libc/arch/ppc32/include/atomic.h
===================================================================
--- uspace/libc/arch/ppc32/include/atomic.h	(revision 81cfefaee34c6617759e2519a2dc4612cb7ba842)
+++ uspace/libc/arch/ppc32/include/atomic.h	(revision 726e1043bcec8878ef81123f08f4dd7dc7d8e58f)
@@ -40,5 +40,5 @@
 	long tmp;
 
-	asm __volatile__ (
+	asm volatile (
 		"1:\n"
 		"lwarx %0, 0, %2\n"
@@ -55,5 +55,5 @@
 	long tmp;
 
-	asm __volatile__(
+	asm volatile (
 		"1:\n"
 		"lwarx %0, 0, %2\n"
Index: uspace/libc/arch/ppc64/include/atomic.h
===================================================================
--- uspace/libc/arch/ppc64/include/atomic.h	(revision 81cfefaee34c6617759e2519a2dc4612cb7ba842)
+++ uspace/libc/arch/ppc64/include/atomic.h	(revision 726e1043bcec8878ef81123f08f4dd7dc7d8e58f)
@@ -40,5 +40,5 @@
 	long tmp;
 
-	asm __volatile__ (
+	asm volatile (
 		"1:\n"
 		"lwarx %0, 0, %2\n"
@@ -55,5 +55,5 @@
 	long tmp;
 
-	asm __volatile__(
+	asm volatile (
 		"1:\n"
 		"lwarx %0, 0, %2\n"
Index: uspace/libc/arch/sparc64/include/atomic.h
===================================================================
--- uspace/libc/arch/sparc64/include/atomic.h	(revision 81cfefaee34c6617759e2519a2dc4612cb7ba842)
+++ uspace/libc/arch/sparc64/include/atomic.h	(revision 726e1043bcec8878ef81123f08f4dd7dc7d8e58f)
@@ -54,5 +54,5 @@
 		a = val->count;
 		b = a + i;
-		__asm__ volatile ("casx %0, %2, %1\n" : "+m" (*val), "+r" (b) : "r" (a));
+		asm volatile ("casx %0, %2, %1\n" : "+m" (*val), "+r" (b) : "r" (a));
 	} while (a != b);
 
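
Why the `while (a != b)` exit works: on success casx writes the
expected old value a back into b, so a == b signals that the swap took
place. A sketch of the enclosing loop (the declarations are assumed;
the names mirror the hunk):

	static inline long atomic_add_sketch(atomic_t *val, long i)
	{
		long a, b;
		do {
			a = val->count;
			b = a + i;
			/* if val->count == a: store b and return the
			 * old value (a) in b; else b = current value. */
			asm volatile ("casx %0, %2, %1\n"
			    : "+m" (*val), "+r" (b) : "r" (a));
		} while (a != b);
		return a + i;
	}
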
Index: uspace/libc/arch/sparc64/include/syscall.h
===================================================================
--- uspace/libc/arch/sparc64/include/syscall.h	(revision 81cfefaee34c6617759e2519a2dc4612cb7ba842)
+++ uspace/libc/arch/sparc64/include/syscall.h	(revision 726e1043bcec8878ef81123f08f4dd7dc7d8e58f)
@@ -47,5 +47,5 @@
 	register uint64_t a4 asm("o3") = p4;
 
-	__asm__ volatile (
+	asm volatile (
 		"ta %5\n"
 		: "=r" (a1)
Index: uspace/libc/arch/sparc64/include/thread.h
===================================================================
--- uspace/libc/arch/sparc64/include/thread.h	(revision 81cfefaee34c6617759e2519a2dc4612cb7ba842)
+++ uspace/libc/arch/sparc64/include/thread.h	(revision 726e1043bcec8878ef81123f08f4dd7dc7d8e58f)
@@ -46,5 +46,5 @@
 static inline void __tcb_set(tcb_t *tcb)
 {
-	__asm__ volatile ("mov %0, %%g7\n" : : "r" (tcb) : "g7");
+	asm volatile ("mov %0, %%g7\n" : : "r" (tcb) : "g7");
 }
 
@@ -53,5 +53,5 @@
 	void *retval;
 
-	__asm__ volatile ("mov %%g7, %0\n" : "=r" (retval));
+	asm volatile ("mov %%g7, %0\n" : "=r" (retval));
 
 	return retval;
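
Taken together, the thread.h hunks in this patch place the TCB pointer
in whatever home each ABI reserves for it: a self-pointer reachable
through %fs:0 or %gs:0 on amd64/ia32, r13 on ia64, a biased $k1 on
mips32, and %g7 here. A round-trip sketch pairing the two sparc64
inlines above (it assumes __tcb_get() is declared to return tcb_t *):

	#include <assert.h>

	static void tls_round_trip_sketch(tcb_t *tcb)
	{
		__tcb_set(tcb);			/* %g7 now holds tcb */
		assert(__tcb_get() == tcb);	/* one register read */
	}
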
Index: uspace/libc/malloc/malloc.c
===================================================================
--- uspace/libc/malloc/malloc.c	(revision 81cfefaee34c6617759e2519a2dc4612cb7ba842)
+++ uspace/libc/malloc/malloc.c	(revision 726e1043bcec8878ef81123f08f4dd7dc7d8e58f)
@@ -1570,5 +1570,5 @@
   else {\
     unsigned int K;\
-    __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm"  (X));\
+    asm("bsrl %1,%0\n\t" : "=r" (K) : "rm"  (X));\
     I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
   }\
@@ -1629,5 +1629,5 @@
 {\
   unsigned int J;\
-  __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
+  asm("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
   I = (bindex_t)J;\
 }
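
The two macros above compute the index of the most and the least
significant set bit of X. For orientation, the same computations via
GCC builtins (available since GCC 3.4); like bsr/bsf, both are
undefined for x == 0:

	#include <limits.h>

	static inline unsigned int msb_index(unsigned int x)	/* bsrl */
	{
		return (unsigned int) (sizeof(x) * CHAR_BIT - 1)
		    - (unsigned int) __builtin_clz(x);
	}

	static inline unsigned int lsb_index(unsigned int x)	/* bsfl */
	{
		return (unsigned int) __builtin_ctz(x);
	}
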
