Index: uspace/lib/c/Makefile
===================================================================
--- uspace/lib/c/Makefile	(revision d51cca81f54dcd276fed68e8d36c097fe888f846)
+++ uspace/lib/c/Makefile	(revision ffa73c60dc2eba8ce632c52a0be8bbf4ecd562d9)
@@ -129,5 +129,4 @@
 	generic/stdio/sstream.c \
 	generic/stdio/vsprintf.c \
-	generic/thread/atomic.c \
 	generic/thread/fibril.c \
 	generic/thread/fibril_synch.c \
Index: uspace/lib/c/arch/abs32le/include/libarch/atomic.h
===================================================================
--- uspace/lib/c/arch/abs32le/include/libarch/atomic.h	(revision d51cca81f54dcd276fed68e8d36c097fe888f846)
+++ 	(revision )
@@ -1,109 +1,0 @@
-/*
- * Copyright (c) 2010 Martin Decky
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup libcabs32le
- * @{
- */
-/** @file
- */
-
-#ifndef LIBC_abs32le_ATOMIC_H_
-#define LIBC_abs32le_ATOMIC_H_
-
-#include <stdbool.h>
-
-#define LIBC_ARCH_ATOMIC_H_
-#define CAS
-
-#include <atomicdflt.h>
-
-static inline bool cas(atomic_t *val, atomic_count_t ov, atomic_count_t nv)
-{
-	if (val->count == ov) {
-		val->count = nv;
-		return true;
-	}
-
-	return false;
-}
-
-static inline void atomic_inc(atomic_t *val)
-{
-	/*
-	 * On real hardware the increment has to be done
-	 * as an atomic action.
-	 */
-
-	val->count++;
-}
-
-static inline void atomic_dec(atomic_t *val)
-{
-	/*
-	 * On real hardware the decrement has to be done
-	 * as an atomic action.
-	 */
-
-	val->count++;
-}
-
-static inline atomic_count_t atomic_postinc(atomic_t *val)
-{
-	/*
-	 * On real hardware both the storing of the previous
-	 * value and the increment have to be done as a single
-	 * atomic action.
-	 */
-
-	atomic_count_t prev = val->count;
-
-	val->count++;
-	return prev;
-}
-
-static inline atomic_count_t atomic_postdec(atomic_t *val)
-{
-	/*
-	 * On real hardware both the storing of the previous
-	 * value and the decrement have to be done as a single
-	 * atomic action.
-	 */
-
-	atomic_count_t prev = val->count;
-
-	val->count--;
-	return prev;
-}
-
-#define atomic_preinc(val) (atomic_postinc(val) + 1)
-#define atomic_predec(val) (atomic_postdec(val) - 1)
-
-#endif
-
-/** @}
- */
Index: uspace/lib/c/arch/amd64/include/libarch/atomic.h
===================================================================
--- uspace/lib/c/arch/amd64/include/libarch/atomic.h	(revision d51cca81f54dcd276fed68e8d36c097fe888f846)
+++ 	(revision )
@@ -1,122 +1,0 @@
-/*
- * Copyright (c) 2001-2004 Jakub Jermar
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup libcamd64 amd64
- * @ingroup lc
- * @brief	amd64 architecture dependent parts of libc
- * @{
- */
-/** @file
- */
-
-#ifndef LIBC_amd64_ATOMIC_H_
-#define LIBC_amd64_ATOMIC_H_
-
-#define LIBC_ARCH_ATOMIC_H_
-
-#include <atomicdflt.h>
-
-static inline void atomic_inc(atomic_t *val)
-{
-#ifdef __PCC__
-	asm volatile (
-	    "lock incq %0\n"
-	    : "+m" (val->count)
-	);
-#else
-	asm volatile (
-	    "lock incq %[count]\n"
-	    : [count] "+m" (val->count)
-	);
-#endif
-}
-
-static inline void atomic_dec(atomic_t *val)
-{
-#ifdef __PCC__
-	asm volatile (
-	    "lock decq %0\n"
-	    : "+m" (val->count)
-	);
-#else
-	asm volatile (
-	    "lock decq %[count]\n"
-	    : [count] "+m" (val->count)
-	);
-#endif
-}
-
-static inline atomic_count_t atomic_postinc(atomic_t *val)
-{
-	atomic_count_t r = 1;
-
-#ifdef __PCC__
-	asm volatile (
-	    "lock xaddq %1, %0\n"
-	    : "+m" (val->count),
-	      "+r" (r)
-	);
-#else
-	asm volatile (
-	    "lock xaddq %[r], %[count]\n"
-	    : [count] "+m" (val->count),
-	      [r] "+r" (r)
-	);
-#endif
-
-	return r;
-}
-
-static inline atomic_count_t atomic_postdec(atomic_t *val)
-{
-	atomic_count_t r = -1;
-
-#ifdef __PCC__
-	asm volatile (
-	    "lock xaddq %1, %0\n"
-	    : "+m" (val->count),
-	      "+r" (r)
-	);
-#else
-	asm volatile (
-	    "lock xaddq %[r], %[count]\n"
-	    : [count] "+m" (val->count),
-	      [r] "+r" (r)
-	);
-#endif
-
-	return r;
-}
-
-#define atomic_preinc(val)  (atomic_postinc(val) + 1)
-#define atomic_predec(val)  (atomic_postdec(val) - 1)
-
-#endif
-
-/** @}
- */
Index: uspace/lib/c/arch/arm32/Makefile.inc
===================================================================
--- uspace/lib/c/arch/arm32/Makefile.inc	(revision d51cca81f54dcd276fed68e8d36c097fe888f846)
+++ uspace/lib/c/arch/arm32/Makefile.inc	(revision ffa73c60dc2eba8ce632c52a0be8bbf4ecd562d9)
@@ -29,4 +29,5 @@
 
 ARCH_SOURCES = \
+	arch/$(UARCH)/src/atomic.c \
 	arch/$(UARCH)/src/entryjmp.S \
 	arch/$(UARCH)/src/thread_entry.S \
Index: uspace/lib/c/arch/arm32/include/libarch/atomic.h
===================================================================
--- uspace/lib/c/arch/arm32/include/libarch/atomic.h	(revision d51cca81f54dcd276fed68e8d36c097fe888f846)
+++ 	(revision )
@@ -1,204 +1,0 @@
-/*
- * Copyright (c) 2007 Michal Kebrt
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup libcarm32
- * @{
- */
-/** @file
- *  @brief Atomic operations.
- */
-
-#ifndef LIBC_arm32_ATOMIC_H_
-#define LIBC_arm32_ATOMIC_H_
-
-#define LIBC_ARCH_ATOMIC_H_
-#define CAS
-
-#include <atomicdflt.h>
-#include <stdbool.h>
-#include <stdint.h>
-
-extern uintptr_t *ras_page;
-
-static inline bool cas(atomic_t *val, atomic_count_t ov, atomic_count_t nv)
-{
-	atomic_count_t ret = 0;
-
-	/*
-	 * The following instructions between labels 1 and 2 constitute a
-	 * Restartable Atomic Sequence. Should the sequence be non-atomic,
-	 * the kernel will restart it.
-	 */
-	asm volatile (
-	    "1:\n"
-	    "	adr %[ret], 1b\n"
-	    "	str %[ret], %[rp0]\n"
-	    "	adr %[ret], 2f\n"
-	    "	str %[ret], %[rp1]\n"
-	    "	ldr %[ret], %[addr]\n"
-	    "	cmp %[ret], %[ov]\n"
-	    "	streq %[nv], %[addr]\n"
-	    "2:\n"
-	    "	moveq %[ret], #1\n"
-	    "	movne %[ret], #0\n"
-	    : [ret] "+&r" (ret),
-	      [rp0] "=m" (ras_page[0]),
-	      [rp1] "=m" (ras_page[1]),
-	      [addr] "+m" (val->count)
-	    : [ov] "r" (ov),
-	      [nv] "r" (nv)
-	    : "memory"
-	);
-
-	ras_page[0] = 0;
-	asm volatile (
-	    "" ::: "memory"
-	);
-	ras_page[1] = 0xffffffff;
-
-	return ret != 0;
-}
-
-/** Atomic addition.
- *
- * @param val Where to add.
- * @param i   Value to be added.
- *
- * @return Value after addition.
- *
- */
-static inline atomic_count_t atomic_add(atomic_t *val, atomic_count_t i)
-{
-	atomic_count_t ret = 0;
-
-	/*
-	 * The following instructions between labels 1 and 2 constitute a
-	 * Restartable Atomic Seqeunce. Should the sequence be non-atomic,
-	 * the kernel will restart it.
-	 */
-	asm volatile (
-	    "1:\n"
-	    "	adr %[ret], 1b\n"
-	    "	str %[ret], %[rp0]\n"
-	    "	adr %[ret], 2f\n"
-	    "	str %[ret], %[rp1]\n"
-	    "	ldr %[ret], %[addr]\n"
-	    "	add %[ret], %[ret], %[imm]\n"
-	    "	str %[ret], %[addr]\n"
-	    "2:\n"
-	    : [ret] "+&r" (ret),
-	      [rp0] "=m" (ras_page[0]),
-	      [rp1] "=m" (ras_page[1]),
-	      [addr] "+m" (val->count)
-	    : [imm] "r" (i)
-	);
-
-	ras_page[0] = 0;
-	asm volatile (
-	    "" ::: "memory"
-	);
-	ras_page[1] = 0xffffffff;
-
-	return ret;
-}
-
-
-/** Atomic increment.
- *
- * @param val Variable to be incremented.
- *
- */
-static inline void atomic_inc(atomic_t *val)
-{
-	atomic_add(val, 1);
-}
-
-
-/** Atomic decrement.
- *
- * @param val Variable to be decremented.
- *
- */
-static inline void atomic_dec(atomic_t *val)
-{
-	atomic_add(val, -1);
-}
-
-
-/** Atomic pre-increment.
- *
- * @param val Variable to be incremented.
- * @return    Value after incrementation.
- *
- */
-static inline atomic_count_t atomic_preinc(atomic_t *val)
-{
-	return atomic_add(val, 1);
-}
-
-
-/** Atomic pre-decrement.
- *
- * @param val Variable to be decremented.
- * @return    Value after decrementation.
- *
- */
-static inline atomic_count_t atomic_predec(atomic_t *val)
-{
-	return atomic_add(val, -1);
-}
-
-
-/** Atomic post-increment.
- *
- * @param val Variable to be incremented.
- * @return    Value before incrementation.
- *
- */
-static inline atomic_count_t atomic_postinc(atomic_t *val)
-{
-	return atomic_add(val, 1) - 1;
-}
-
-
-/** Atomic post-decrement.
- *
- * @param val Variable to be decremented.
- * @return    Value before decrementation.
- *
- */
-static inline atomic_count_t atomic_postdec(atomic_t *val)
-{
-	return atomic_add(val, -1) + 1;
-}
-
-
-#endif
-
-/** @}
- */
Index: uspace/lib/c/arch/arm32/src/atomic.c
===================================================================
--- uspace/lib/c/arch/arm32/src/atomic.c	(revision ffa73c60dc2eba8ce632c52a0be8bbf4ecd562d9)
+++ uspace/lib/c/arch/arm32/src/atomic.c	(revision ffa73c60dc2eba8ce632c52a0be8bbf4ecd562d9)
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2007 Michal Kebrt
+ * Copyright (c) 2018 CZ.NIC, z.s.p.o.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Older ARMs don't have atomic instructions, so we need to define a bunch
+ * of symbols for GCC to use.
+ */
+
+#include <stdbool.h>
+
+extern volatile unsigned *ras_page;
+
+bool __atomic_compare_exchange_4(volatile unsigned *mem, unsigned *expected, unsigned desired, bool weak, int success, int failure)
+{
+	(void) success;
+	(void) failure;
+	(void) weak;
+
+	unsigned ov = *expected;
+	unsigned ret;
+
+	/*
+	 * The following instructions between labels 1 and 2 constitute a
+	 * Restartable Atomic Sequence. Should the sequence be non-atomic,
+	 * the kernel will restart it.
+	 */
+	asm volatile (
+	    "1:\n"
+	    "	adr %[ret], 1b\n"
+	    "	str %[ret], %[rp0]\n"
+	    "	adr %[ret], 2f\n"
+	    "	str %[ret], %[rp1]\n"
+
+	    "	ldr %[ret], %[addr]\n"
+	    "	cmp %[ret], %[ov]\n"
+	    "	streq %[nv], %[addr]\n"
+	    "2:\n"
+	    : [ret] "=&r" (ret),
+	      [rp0] "=m" (ras_page[0]),
+	      [rp1] "=m" (ras_page[1]),
+	      [addr] "+m" (*mem)
+	    : [ov] "r" (ov),
+	      [nv] "r" (desired)
+	    : "memory"
+	);
+
+	ras_page[0] = 0;
+	ras_page[1] = 0xffffffff;
+
+	if (ret == ov)
+		return true;
+
+	*expected = ret;
+	return false;
+}
+
+unsigned __atomic_fetch_add_4(volatile unsigned *mem, unsigned val, int model)
+{
+	(void) model;
+
+	unsigned ret;
+
+	/*
+	 * The following instructions between labels 1 and 2 constitute a
+	 * Restartable Atomic Sequence. Should the sequence be non-atomic,
+	 * the kernel will restart it.
+	 */
+	asm volatile (
+	    "1:\n"
+	    "	adr %[ret], 1b\n"
+	    "	str %[ret], %[rp0]\n"
+	    "	adr %[ret], 2f\n"
+	    "	str %[ret], %[rp1]\n"
+	    "	ldr %[ret], %[addr]\n"
+	    "	add %[ret], %[ret], %[imm]\n"
+	    "	str %[ret], %[addr]\n"
+	    "2:\n"
+	    : [ret] "=&r" (ret),
+	      [rp0] "=m" (ras_page[0]),
+	      [rp1] "=m" (ras_page[1]),
+	      [addr] "+m" (*mem)
+	    : [imm] "r" (val)
+	);
+
+	ras_page[0] = 0;
+	ras_page[1] = 0xffffffff;
+
+	return ret - val;
+}
+
+unsigned __atomic_fetch_sub_4(volatile unsigned *mem, unsigned val, int model)
+{
+	return __atomic_fetch_add_4(mem, -val, model);
+}
+
+void __sync_synchronize(void)
+{
+	// FIXME: Full memory barrier. We might need a syscall for this.
+}
+
+unsigned __sync_add_and_fetch_4(volatile void *vptr, unsigned val)
+{
+	return __atomic_fetch_add_4(vptr, val, __ATOMIC_SEQ_CST) + val;
+}
+
+unsigned __sync_sub_and_fetch_4(volatile void *vptr, unsigned val)
+{
+	return __atomic_fetch_sub_4(vptr, val, __ATOMIC_SEQ_CST) - val;
+}
+
+bool __sync_bool_compare_and_swap_4(volatile void *ptr, unsigned old_val, unsigned new_val)
+{
+	return __atomic_compare_exchange_4(ptr, &old_val, new_val, false,
+	    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+}
+
+unsigned __sync_val_compare_and_swap_4(volatile void *ptr, unsigned old_val, unsigned new_val)
+{
+	__atomic_compare_exchange_4(ptr, &old_val, new_val, false,
+	    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+	return old_val;
+}
+
Index: uspace/lib/c/arch/ia32/include/libarch/atomic.h
===================================================================
--- uspace/lib/c/arch/ia32/include/libarch/atomic.h	(revision d51cca81f54dcd276fed68e8d36c097fe888f846)
+++ 	(revision )
@@ -1,120 +1,0 @@
-/*
- * Copyright (c) 2001-2004 Jakub Jermar
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup libcia32
- * @{
- */
-/** @file
- */
-
-#ifndef LIBC_ia32_ATOMIC_H_
-#define LIBC_ia32_ATOMIC_H_
-
-#define LIBC_ARCH_ATOMIC_H_
-
-#include <atomicdflt.h>
-
-static inline void atomic_inc(atomic_t *val)
-{
-#ifdef __PCC__
-	asm volatile (
-	    "lock incl %0\n"
-	    : "+m" (val->count)
-	);
-#else
-	asm volatile (
-	    "lock incl %[count]\n"
-	    : [count] "+m" (val->count)
-	);
-#endif
-}
-
-static inline void atomic_dec(atomic_t *val)
-{
-#ifdef __PCC__
-	asm volatile (
-	    "lock decl %0\n"
-	    : "+m" (val->count)
-	);
-#else
-	asm volatile (
-	    "lock decl %[count]\n"
-	    : [count] "+m" (val->count)
-	);
-#endif
-}
-
-static inline atomic_count_t atomic_postinc(atomic_t *val)
-{
-	atomic_count_t r = 1;
-
-#ifdef __PCC__
-	asm volatile (
-	    "lock xaddl %1, %0\n"
-	    : "+m" (val->count),
-	      "+r" (r)
-	);
-#else
-	asm volatile (
-	    "lock xaddl %[r], %[count]\n"
-	    : [count] "+m" (val->count),
-	      [r] "+r" (r)
-	);
-#endif
-
-	return r;
-}
-
-static inline atomic_count_t atomic_postdec(atomic_t *val)
-{
-	atomic_count_t r = -1;
-
-#ifdef __PCC__
-	asm volatile (
-	    "lock xaddl %1, %0\n"
-	    : "+m" (val->count),
-	      "+r" (r)
-	);
-#else
-	asm volatile (
-	    "lock xaddl %[r], %[count]\n"
-	    : [count] "+m" (val->count),
-	      [r] "+r" (r)
-	);
-#endif
-
-	return r;
-}
-
-#define atomic_preinc(val)  (atomic_postinc(val) + 1)
-#define atomic_predec(val)  (atomic_postdec(val) - 1)
-
-#endif
-
-/** @}
- */
Index: uspace/lib/c/arch/ia64/include/libarch/atomic.h
===================================================================
--- uspace/lib/c/arch/ia64/include/libarch/atomic.h	(revision d51cca81f54dcd276fed68e8d36c097fe888f846)
+++ 	(revision )
@@ -1,119 +1,0 @@
-/*
- * Copyright (c) 2005 Jakub Jermar
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup libcia64
- * @{
- */
-/** @file
- */
-
-#ifndef LIBC_ia64_ATOMIC_H_
-#define LIBC_ia64_ATOMIC_H_
-
-#define LIBC_ARCH_ATOMIC_H_
-
-#include <atomicdflt.h>
-
-static inline void atomic_inc(atomic_t *val)
-{
-	atomic_count_t v;
-
-	asm volatile (
-	    "fetchadd8.rel %[v] = %[count], 1\n"
-	    : [v] "=r" (v),
-	      [count] "+m" (val->count)
-	);
-}
-
-static inline void atomic_dec(atomic_t *val)
-{
-	atomic_count_t v;
-
-	asm volatile (
-	    "fetchadd8.rel %[v] = %[count], -1\n"
-	    : [v] "=r" (v),
-	      [count] "+m" (val->count)
-	);
-}
-
-static inline atomic_count_t atomic_preinc(atomic_t *val)
-{
-	atomic_count_t v;
-
-	asm volatile (
-	    "fetchadd8.rel %[v] = %[count], 1\n"
-	    : [v] "=r" (v),
-	      [count] "+m" (val->count)
-	);
-
-	return (v + 1);
-}
-
-static inline atomic_count_t atomic_predec(atomic_t *val)
-{
-	atomic_count_t v;
-
-	asm volatile (
-	    "fetchadd8.rel %[v] = %[count], -1\n"
-	    : [v] "=r" (v),
-	      [count] "+m" (val->count)
-	);
-
-	return (v - 1);
-}
-
-static inline atomic_count_t atomic_postinc(atomic_t *val)
-{
-	atomic_count_t v;
-
-	asm volatile (
-	    "fetchadd8.rel %[v] = %[count], 1\n"
-	    : [v] "=r" (v),
-	      [count] "+m" (val->count)
-	);
-
-	return v;
-}
-
-static inline atomic_count_t atomic_postdec(atomic_t *val)
-{
-	atomic_count_t v;
-
-	asm volatile (
-	    "fetchadd8.rel %[v] = %[count], -1\n"
-	    : [v] "=r" (v),
-	      [count] "+m" (val->count)
-	);
-
-	return v;
-}
-
-#endif
-
-/** @}
- */
Index: uspace/lib/c/arch/mips32/include/libarch/atomic.h
===================================================================
--- uspace/lib/c/arch/mips32/include/libarch/atomic.h	(revision d51cca81f54dcd276fed68e8d36c097fe888f846)
+++ 	(revision )
@@ -1,86 +1,0 @@
-/*
- * Copyright (c) 2005 Ondrej Palkovsky
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup libcmips32
- * @{
- */
-/** @file
- * @ingroup libcmips32
- */
-
-#ifndef LIBC_mips32_ATOMIC_H_
-#define LIBC_mips32_ATOMIC_H_
-
-#define LIBC_ARCH_ATOMIC_H_
-
-#include <atomicdflt.h>
-
-#define atomic_inc(x)  ((void) atomic_add(x, 1))
-#define atomic_dec(x)  ((void) atomic_add(x, -1))
-
-#define atomic_postinc(x)  (atomic_add(x, 1) - 1)
-#define atomic_postdec(x)  (atomic_add(x, -1) + 1)
-
-#define atomic_preinc(x)  atomic_add(x, 1)
-#define atomic_predec(x)  atomic_add(x, -1)
-
-/** Atomic addition of immediate value.
- *
- * @param val Memory location to which will be the immediate value added.
- * @param i   Signed immediate that will be added to *val.
- *
- * @return Value after addition.
- *
- */
-static inline atomic_count_t atomic_add(atomic_t *val, atomic_count_t i)
-{
-	atomic_count_t tmp;
-	atomic_count_t v;
-
-	asm volatile (
-	    "1:\n"
-	    "	ll %0, %1\n"
-	    "	addu %0, %0, %3\n"	/* same as add, but never traps on overflow */
-	    "	move %2, %0\n"
-	    "	sc %0, %1\n"
-	    "	beq %0, %4, 1b\n"	/* if the atomic operation failed, try again */
-	    "	nop\n"
-	    : "=&r" (tmp),
-	      "+m" (val->count),
-	      "=&r" (v)
-	    : "r" (i),
-	      "i" (0)
-	);
-
-	return v;
-}
-
-#endif
-
-/** @}
- */
Index: uspace/lib/c/arch/mips32eb/include/libarch/atomic.h
===================================================================
--- uspace/lib/c/arch/mips32eb/include/libarch/atomic.h	(revision d51cca81f54dcd276fed68e8d36c097fe888f846)
+++ 	(revision )
@@ -1,1 +1,0 @@
-../../../mips32/include/libarch/atomic.h
Index: uspace/lib/c/arch/ppc32/include/libarch/atomic.h
===================================================================
--- uspace/lib/c/arch/ppc32/include/libarch/atomic.h	(revision d51cca81f54dcd276fed68e8d36c097fe888f846)
+++ 	(revision )
@@ -1,105 +1,0 @@
-/*
- * Copyright (c) 2005 Martin Decky
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup libcppc32
- * @{
- */
-/** @file
- */
-
-#ifndef LIBC_ppc32_ATOMIC_H_
-#define LIBC_ppc32_ATOMIC_H_
-
-#define LIBC_ARCH_ATOMIC_H_
-
-#include <atomicdflt.h>
-
-static inline void atomic_inc(atomic_t *val)
-{
-	atomic_count_t tmp;
-
-	asm volatile (
-	    "1:\n"
-	    "lwarx %0, 0, %2\n"
-	    "addic %0, %0, 1\n"
-	    "stwcx. %0, 0, %2\n"
-	    "bne- 1b"
-	    : "=&r" (tmp),
-	      "=m" (val->count)
-	    : "r" (&val->count),
-	      "m" (val->count)
-	    : "cc"
-	);
-}
-
-static inline void atomic_dec(atomic_t *val)
-{
-	atomic_count_t tmp;
-
-	asm volatile (
-	    "1:\n"
-	    "lwarx %0, 0, %2\n"
-	    "addic %0, %0, -1\n"
-	    "stwcx. %0, 0, %2\n"
-	    "bne- 1b"
-	    : "=&r" (tmp),
-	      "=m" (val->count)
-	    : "r" (&val->count),
-	      "m" (val->count)
-	    : "cc"
-	);
-}
-
-static inline atomic_count_t atomic_postinc(atomic_t *val)
-{
-	atomic_inc(val);
-	return val->count - 1;
-}
-
-static inline atomic_count_t atomic_postdec(atomic_t *val)
-{
-	atomic_dec(val);
-	return val->count + 1;
-}
-
-static inline atomic_count_t atomic_preinc(atomic_t *val)
-{
-	atomic_inc(val);
-	return val->count;
-}
-
-static inline atomic_count_t atomic_predec(atomic_t *val)
-{
-	atomic_dec(val);
-	return val->count;
-}
-
-#endif
-
-/** @}
- */
Index: uspace/lib/c/arch/riscv64/include/libarch/atomic.h
===================================================================
--- uspace/lib/c/arch/riscv64/include/libarch/atomic.h	(revision d51cca81f54dcd276fed68e8d36c097fe888f846)
+++ 	(revision )
@@ -1,111 +1,0 @@
-/*
- * Copyright (c) 2016 Martin Decky
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup libcriscv64
- * @{
- */
-/** @file
- */
-
-#ifndef LIBC_riscv64_ATOMIC_H_
-#define LIBC_riscv64_ATOMIC_H_
-
-#include <stdbool.h>
-
-#define LIBC_ARCH_ATOMIC_H_
-#define CAS
-
-#include <atomicdflt.h>
-
-// FIXME
-
-static inline bool cas(atomic_t *val, atomic_count_t ov, atomic_count_t nv)
-{
-	if (val->count == ov) {
-		val->count = nv;
-		return true;
-	}
-
-	return false;
-}
-
-static inline void atomic_inc(atomic_t *val)
-{
-	/*
-	 * On real hardware the increment has to be done
-	 * as an atomic action.
-	 */
-
-	val->count++;
-}
-
-static inline void atomic_dec(atomic_t *val)
-{
-	/*
-	 * On real hardware the decrement has to be done
-	 * as an atomic action.
-	 */
-
-	val->count++;
-}
-
-static inline atomic_count_t atomic_postinc(atomic_t *val)
-{
-	/*
-	 * On real hardware both the storing of the previous
-	 * value and the increment have to be done as a single
-	 * atomic action.
-	 */
-
-	atomic_count_t prev = val->count;
-
-	val->count++;
-	return prev;
-}
-
-static inline atomic_count_t atomic_postdec(atomic_t *val)
-{
-	/*
-	 * On real hardware both the storing of the previous
-	 * value and the decrement have to be done as a single
-	 * atomic action.
-	 */
-
-	atomic_count_t prev = val->count;
-
-	val->count--;
-	return prev;
-}
-
-#define atomic_preinc(val) (atomic_postinc(val) + 1)
-#define atomic_predec(val) (atomic_postdec(val) - 1)
-
-#endif
-
-/** @}
- */
Index: uspace/lib/c/arch/sparc64/include/libarch/atomic.h
===================================================================
--- uspace/lib/c/arch/sparc64/include/libarch/atomic.h	(revision d51cca81f54dcd276fed68e8d36c097fe888f846)
+++ 	(revision )
@@ -1,108 +1,0 @@
-/*
- * Copyright (c) 2005 Jakub Jermar
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup libcsparc64
- * @{
- */
-/** @file
- */
-
-#ifndef LIBC_sparc64_ATOMIC_H_
-#define LIBC_sparc64_ATOMIC_H_
-
-#define LIBC_ARCH_ATOMIC_H_
-
-#include <atomicdflt.h>
-#include <stdint.h>
-
-/** Atomic add operation.
- *
- * Use atomic compare and swap operation to atomically add signed value.
- *
- * @param val Atomic variable.
- * @param i   Signed value to be added.
- *
- * @return Value of the atomic variable as it existed before addition.
- *
- */
-static inline atomic_count_t atomic_add(atomic_t *val, atomic_count_t i)
-{
-	atomic_count_t a;
-	atomic_count_t b;
-
-	do {
-		volatile uintptr_t ptr = (uintptr_t) &val->count;
-
-		a = *((atomic_count_t *) ptr);
-		b = a + i;
-
-		asm volatile (
-		    "casx %0, %2, %1\n"
-		    : "+m" (*((atomic_count_t *) ptr)),
-		      "+r" (b)
-		    : "r" (a)
-		);
-	} while (a != b);
-
-	return a;
-}
-
-static inline atomic_count_t atomic_preinc(atomic_t *val)
-{
-	return atomic_add(val, 1) + 1;
-}
-
-static inline atomic_count_t atomic_postinc(atomic_t *val)
-{
-	return atomic_add(val, 1);
-}
-
-static inline atomic_count_t atomic_predec(atomic_t *val)
-{
-	return atomic_add(val, -1) - 1;
-}
-
-static inline atomic_count_t atomic_postdec(atomic_t *val)
-{
-	return atomic_add(val, -1);
-}
-
-static inline void atomic_inc(atomic_t *val)
-{
-	(void) atomic_add(val, 1);
-}
-
-static inline void atomic_dec(atomic_t *val)
-{
-	(void) atomic_add(val, -1);
-}
-
-#endif
-
-/** @}
- */
Index: uspace/lib/c/generic/thread/atomic.c
===================================================================
--- uspace/lib/c/generic/thread/atomic.c	(revision d51cca81f54dcd276fed68e8d36c097fe888f846)
+++ 	(revision )
@@ -1,105 +1,0 @@
-/*
- * Copyright (c) 2018 CZ.NIC, z.s.p.o.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <atomic.h>
-
-#ifdef PLATFORM_arm32
-
-/*
- * Older ARMs don't have atomic instructions, so we need to define a bunch
- * of symbols for GCC to use.
- */
-
-void __sync_synchronize(void)
-{
-	// FIXME: Full memory barrier. We need a syscall for this.
-	// Should we implement this or is empty definition ok here?
-}
-
-unsigned __sync_add_and_fetch_4(volatile void *vptr, unsigned val)
-{
-	return atomic_add((atomic_t *)vptr, val);
-}
-
-unsigned __sync_sub_and_fetch_4(volatile void *vptr, unsigned val)
-{
-	return atomic_add((atomic_t *)vptr, -(atomic_signed_t)val);
-}
-
-bool __sync_bool_compare_and_swap_4(volatile void *ptr, unsigned old_val, unsigned new_val)
-{
-	return cas((atomic_t *)ptr, old_val, new_val);
-}
-
-unsigned __sync_val_compare_and_swap_4(volatile void *ptr, unsigned old_val, unsigned new_val)
-{
-	while (true) {
-		if (__sync_bool_compare_and_swap_4(ptr, old_val, new_val)) {
-			return old_val;
-		}
-
-		unsigned current = *(volatile unsigned *)ptr;
-		if (current != old_val)
-			return current;
-
-		/* If the current value is the same as old_val, retry. */
-	}
-}
-
-unsigned __atomic_fetch_add_4(unsigned *mem, unsigned val, int model)
-{
-	// TODO
-	(void) model;
-
-	return __sync_add_and_fetch_4(mem, val) - val;
-}
-
-unsigned __atomic_fetch_sub_4(unsigned *mem, unsigned val, int model)
-{
-	// TODO
-	(void) model;
-
-	return __sync_sub_and_fetch_4(mem, val) + val;
-}
-
-bool __atomic_compare_exchange_4(unsigned *mem, unsigned *expected, unsigned desired, bool weak, int success, int failure)
-{
-	// TODO
-	(void) success;
-	(void) failure;
-	(void) weak;
-
-	unsigned old = __sync_val_compare_and_swap_4(mem, *expected, desired);
-	if (old == *expected)
-		return true;
-
-	*expected = old;
-	return false;
-}
-
-#endif
Index: uspace/lib/c/include/atomic.h
===================================================================
--- uspace/lib/c/include/atomic.h	(revision d51cca81f54dcd276fed68e8d36c097fe888f846)
+++ uspace/lib/c/include/atomic.h	(revision ffa73c60dc2eba8ce632c52a0be8bbf4ecd562d9)
@@ -36,5 +36,59 @@
 #define LIBC_ATOMIC_H_
 
-#include <libarch/atomic.h>
+#include <stdatomic.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+typedef size_t atomic_count_t;
+typedef ssize_t atomic_signed_t;
+
+typedef struct atomic {
+	volatile atomic_size_t count;
+} atomic_t;
+
+static inline void atomic_set(atomic_t *val, atomic_count_t i)
+{
+	atomic_store(&val->count, i);
+}
+
+static inline atomic_count_t atomic_get(atomic_t *val)
+{
+	return atomic_load(&val->count);
+}
+
+static inline bool cas(atomic_t *val, atomic_count_t ov, atomic_count_t nv)
+{
+	return atomic_compare_exchange_strong(&val->count, &ov, nv);
+}
+
+static inline atomic_count_t atomic_postinc(atomic_t *val)
+{
+	return atomic_fetch_add(&val->count, 1);
+}
+
+static inline atomic_count_t atomic_postdec(atomic_t *val)
+{
+	return atomic_fetch_sub(&val->count, 1);
+}
+
+static inline atomic_count_t atomic_preinc(atomic_t *val)
+{
+	return atomic_postinc(val) + 1;
+}
+
+static inline atomic_count_t atomic_predec(atomic_t *val)
+{
+	return atomic_postdec(val) - 1;
+}
+
+static inline void atomic_inc(atomic_t *val)
+{
+	atomic_postinc(val);
+}
+
+static inline void atomic_dec(atomic_t *val)
+{
+	atomic_postdec(val);
+}
 
 #endif
