Index: uspace/Makefile
===================================================================
--- uspace/Makefile	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ uspace/Makefile	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -67,5 +67,4 @@
 	app/redir \
 	app/rcutest \
-	app/rcubench \
 	app/sbi \
 	app/sportdmp \
Index: uspace/Makefile.common
===================================================================
--- uspace/Makefile.common	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ uspace/Makefile.common	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -35,5 +35,4 @@
 #   DEFS               compiler defines
 #   EXTRA_CFLAGS       additional flags to pass to C compiler
-#   LINKER_SCRIPT      linker script
 #   PRE_DEPEND         targets required for dependency check
 #
@@ -153,12 +152,7 @@
 BASE_LIBS += $(LIBSOFTFLOAT_PREFIX)/libsoftfloat.a $(LIBSOFTINT_PREFIX)/libsoftint.a
 
-ifeq ($(LINK_DYNAMIC),y)
-	LINKER_SCRIPT ?= $(LIBC_PREFIX)/arch/$(UARCH)/_link-dlexe.ld
-else
+ifneq ($(LINK_DYNAMIC),y)
 	LDFLAGS += -static
-	LINKER_SCRIPT ?= $(LIBC_PREFIX)/arch/$(UARCH)/_link.ld
-endif
-
-LIB_LINKER_SCRIPT = $(LIBC_PREFIX)/arch/$(UARCH)/_link-shlib.ld
+endif
 
 INCLUDES_FLAGS = $(LIBC_INCLUDES_FLAGS)
@@ -261,5 +255,5 @@
 endif
 
-COMMON_CXXFLAGS = $(COMMON_CFLAGS)
+COMMON_CXXFLAGS = $(COMMON_CFLAGS) -fno-exceptions
 HELENOS_CXXFLAGS = \
 	-std=c++17 -frtti \
@@ -351,9 +345,9 @@
 
 ifneq ($(filter %.cpp %.cc %.cxx, $(SOURCES)),)
-$(BINARY): $(LINKER_SCRIPT) $(OBJECTS) $(LIBTAGS)
-	$(CXX) $(CXXFLAGS) $(LDFLAGS) $(EXTRA_LDFLAGS) -T $(LINKER_SCRIPT) -Wl,-Map,$@.map -o $@ $(START_FILES) $(OBJECTS) $(LIBARGS) $(CXX_BASE_LIBS)
+$(BINARY): $(OBJECTS) $(LIBTAGS)
+	$(CXX) $(CXXFLAGS) $(LDFLAGS) $(EXTRA_LDFLAGS) -Wl,-Map,$@.map -o $@ $(START_FILES) $(OBJECTS) $(LIBARGS) $(CXX_BASE_LIBS)
 else
-$(BINARY): $(LINKER_SCRIPT) $(OBJECTS) $(LIBTAGS)
-	$(CC) $(CFLAGS) $(LDFLAGS) $(EXTRA_LDFLAGS) -T $(LINKER_SCRIPT) -Wl,-Map,$@.map -o $@ $(START_FILES) $(OBJECTS) $(LIBARGS) $(BASE_LIBS)
+$(BINARY): $(OBJECTS) $(LIBTAGS)
+	$(CC) $(CFLAGS) $(LDFLAGS) $(EXTRA_LDFLAGS) -Wl,-Map,$@.map -o $@ $(START_FILES) $(OBJECTS) $(LIBARGS) $(BASE_LIBS)
 endif
 
@@ -361,6 +355,6 @@
 
 ifneq ($(TEST_BINARY),)
-$(TEST_BINARY): $(LINKER_SCRIPT) $(TEST_OBJECTS) $(TEST_BINARY_LIBS) $(LIBTAGS)
-	$(CC) $(CFLAGS) $(LDFLAGS) $(EXTRA_LDFLAGS) -T $(LINKER_SCRIPT) -Wl,-Map,$@.map -o $@ $(START_FILES) $(TEST_OBJECTS) $(TEST_BINARY_LIBS) $(LIBARGS) $(BASE_LIBS)
+$(TEST_BINARY): $(TEST_OBJECTS) $(TEST_BINARY_LIBS) $(LIBTAGS)
+	$(CC) $(CFLAGS) $(LDFLAGS) $(EXTRA_LDFLAGS) -Wl,-Map,$@.map -o $@ $(START_FILES) $(TEST_OBJECTS) $(TEST_BINARY_LIBS) $(LIBARGS) $(BASE_LIBS)
 endif
 
@@ -378,6 +372,6 @@
 	$(AR) rc $@ $(LOBJECTS)
 
-$(SLIBRARY): $(LIB_LINKER_SCRIPT) $(LIBRARY).la
-	$(CC) $(CFLAGS) $(LIB_LDFLAGS) $(EXTRA_LDFLAGS) -T $(LIB_LINKER_SCRIPT) -Wl,-Map,$@.map -o $@ -Wl,--whole-archive $(LIBRARY).la -Wl,--no-whole-archive $(LIBARGS) $(BASE_LIBS)
+$(SLIBRARY): $(LIBRARY).la
+	$(CC) $(CFLAGS) $(LIB_LDFLAGS) $(EXTRA_LDFLAGS) -Wl,-Map,$@.map -o $@ -Wl,--whole-archive $(LIBRARY).la -Wl,--no-whole-archive $(LIBARGS) $(BASE_LIBS)
 
 $(LSONAME):
Index: uspace/app/rcubench/Makefile
===================================================================
--- uspace/app/rcubench/Makefile	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ 	(revision )
@@ -1,37 +1,0 @@
-#
-# Copyright (c) 2012 Adam Hraska
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# - Redistributions of source code must retain the above copyright
-#   notice, this list of conditions and the following disclaimer.
-# - Redistributions in binary form must reproduce the above copyright
-#   notice, this list of conditions and the following disclaimer in the
-#   documentation and/or other materials provided with the distribution.
-# - The name of the author may not be used to endorse or promote products
-#   derived from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-USPACE_PREFIX = ../..
-
-BINARY = rcubench
-
-SOURCES = \
-	rcubench.c
-
-include $(USPACE_PREFIX)/Makefile.common
-
Index: uspace/app/rcubench/rcubench.c
===================================================================
--- uspace/app/rcubench/rcubench.c	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ 	(revision )
@@ -1,286 +1,0 @@
-/*
- * Copyright (c) 2012 Adam Hraska
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup test
- * @{
- */
-
-/**
- * @file rcubench.c
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <mem.h>
-#include <errno.h>
-#include <assert.h>
-#include <async.h>
-#include <fibril.h>
-#include <fibril_synch.h>
-#include <compiler/barrier.h>
-#include <futex.h>
-#include <str.h>
-
-#include <rcu.h>
-
-
-/* Results are printed to this file in addition to stdout. */
-static FILE *results_fd = NULL;
-
-typedef struct bench {
-	const char *name;
-	void (*func)(struct bench *);
-	size_t iters;
-	size_t nthreads;
-	fibril_semaphore_t done_threads;
-} bench_t;
-
-
-
-
-static void  kernel_futex_bench(bench_t *bench)
-{
-	const size_t iters = bench->iters;
-	int val = 0;
-
-	for (size_t i = 0; i < iters; ++i) {
-		__SYSCALL1(SYS_FUTEX_WAKEUP, (sysarg_t) &val);
-		__SYSCALL1(SYS_FUTEX_SLEEP, (sysarg_t) &val);
-	}
-}
-
-static void libc_futex_lock_bench(bench_t *bench)
-{
-	const size_t iters = bench->iters;
-	futex_t loc_fut = FUTEX_INITIALIZER;
-
-	for (size_t i = 0; i < iters; ++i) {
-		futex_lock(&loc_fut);
-		/* no-op */
-		compiler_barrier();
-		futex_unlock(&loc_fut);
-	}
-}
-
-static void libc_futex_sema_bench(bench_t *bench)
-{
-	const size_t iters = bench->iters;
-	futex_t loc_fut = FUTEX_INITIALIZER;
-
-	for (size_t i = 0; i < iters; ++i) {
-		futex_down(&loc_fut);
-		/* no-op */
-		compiler_barrier();
-		futex_up(&loc_fut);
-	}
-}
-
-static errno_t thread_func(void *arg)
-{
-	bench_t *bench = (bench_t *)arg;
-
-	bench->func(bench);
-
-	/* Signal another thread completed. */
-	fibril_semaphore_up(&bench->done_threads);
-	return EOK;
-}
-
-static void run_threads_and_wait(bench_t *bench)
-{
-	assert(1 <= bench->nthreads);
-
-	if (2 <= bench->nthreads) {
-		printf("Creating %zu additional threads...\n", bench->nthreads - 1);
-	}
-
-	fibril_test_spawn_runners(bench->nthreads - 1);
-
-	/* Create and run the first nthreads - 1 threads.*/
-	for (size_t k = 1; k < bench->nthreads; ++k) {
-		fid_t f = fibril_create(thread_func, bench);
-		if (!f) {
-			printf("Error: Failed to create benchmark thread.\n");
-			abort();
-		}
-		fibril_detach(f);
-		fibril_add_ready(f);
-	}
-
-	/*
-	 * Run the last thread in place so that we create multiple threads
-	 * only when needed. Otherwise libc would immediately upgrade
-	 * single-threaded futexes to proper multithreaded futexes
-	 */
-	thread_func(bench);
-
-	printf("Waiting for remaining threads to complete.\n");
-
-	/* Wait for threads to complete. */
-	for (size_t k = 0; k < bench->nthreads; ++k) {
-		fibril_semaphore_down(&bench->done_threads);
-	}
-}
-
-static const char *results_txt = "/tmp/urcu-bench-results.txt";
-
-static bool open_results(void)
-{
-	results_fd = fopen(results_txt, "a");
-	return NULL != results_fd;
-}
-
-static void close_results(void)
-{
-	if (results_fd) {
-		fclose(results_fd);
-	}
-}
-
-static void print_res(const char *fmt, ...)
-{
-	va_list args;
-
-	va_start(args, fmt);
-	vfprintf(results_fd, fmt, args);
-	va_end(args);
-
-	va_start(args, fmt);
-	vprintf(fmt, args);
-	va_end(args);
-}
-
-static void print_usage(void)
-{
-	printf("rcubench [test-name] [k-iterations] [n-threads]\n");
-	printf("Available tests: \n");
-	printf("  sys-futex.. threads make wakeup/sleepdown futex syscalls in a loop\n");
-	printf("              but for separate variables/futex kernel objects.\n");
-	printf("  lock     .. threads lock/unlock separate futexes.\n");
-	printf("  sema     .. threads down/up separate futexes.\n");
-	printf("eg:\n");
-	printf("  rcubench sys-futex  100000 3\n");
-	printf("  rcubench lock 100000 2 ..runs futex_lock/unlock in a loop\n");
-	printf("  rcubench sema 100000 2 ..runs futex_down/up in a loop\n");
-	printf("Results are stored in %s\n", results_txt);
-}
-
-static bool parse_cmd_line(int argc, char **argv, bench_t *bench,
-    const char **err)
-{
-	if (argc < 4) {
-		*err = "Not enough parameters";
-		return false;
-	}
-
-	if (0 == str_cmp(argv[1], "sys-futex")) {
-		bench->func = kernel_futex_bench;
-	} else if (0 == str_cmp(argv[1], "lock")) {
-		bench->func = libc_futex_lock_bench;
-	} else if (0 == str_cmp(argv[1], "sema")) {
-		bench->func = libc_futex_sema_bench;
-	} else {
-		*err = "Unknown test name";
-		return false;
-	}
-
-	bench->name = argv[1];
-
-	/* Determine iteration count. */
-	uint32_t iter_cnt = 0;
-	errno_t ret = str_uint32_t(argv[2], NULL, 0, true, &iter_cnt);
-
-	if (ret == EOK && 1 <= iter_cnt) {
-		bench->iters = iter_cnt;
-	} else {
-		*err = "Err: Invalid number of iterations";
-		return false;
-	}
-
-	/* Determine thread count. */
-	uint32_t thread_cnt = 0;
-	ret = str_uint32_t(argv[3], NULL, 0, true, &thread_cnt);
-
-	if (ret == EOK && 1 <= thread_cnt && thread_cnt <= 64) {
-		bench->nthreads = thread_cnt;
-	} else {
-		*err = "Err: Invalid number of threads";
-		return false;
-	}
-
-	return true;
-}
-
-int main(int argc, char **argv)
-{
-	const char *err = "(error)";
-	bench_t bench;
-
-	fibril_semaphore_initialize(&bench.done_threads, 0);
-
-	if (!parse_cmd_line(argc, argv, &bench, &err)) {
-		printf("%s\n", err);
-		print_usage();
-		return -1;
-	}
-
-	open_results();
-
-	print_res("Running '%s' futex bench in '%zu' threads with '%zu' iterations.\n",
-	    bench.name, bench.nthreads, bench.iters);
-
-	struct timeval start, end;
-	getuptime(&start);
-
-	run_threads_and_wait(&bench);
-
-	getuptime(&end);
-	int64_t duration = tv_sub_diff(&end, &start);
-
-	uint64_t secs = (uint64_t)duration / 1000 / 1000;
-	uint64_t total_iters = (uint64_t)bench.iters * bench.nthreads;
-	uint64_t iters_per_sec = 0;
-
-	if (0 < duration) {
-		iters_per_sec = total_iters * 1000 * 1000 / duration;
-	}
-
-	print_res("Completed %" PRIu64 " iterations in %" PRId64  " usecs (%" PRIu64
-	    " secs); %" PRIu64 " iters/sec\n",
-	    total_iters, duration, secs, iters_per_sec);
-
-	close_results();
-
-	return 0;
-}
-
-
-/**
- * @}
- */
Index: uspace/app/rcutest/rcutest.c
===================================================================
--- uspace/app/rcutest/rcutest.c	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ uspace/app/rcutest/rcutest.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -46,5 +46,4 @@
 #include <fibril_synch.h>
 #include <compiler/barrier.h>
-#include <futex.h>
 #include <str.h>
 
Index: uspace/lib/c/Makefile
===================================================================
--- uspace/lib/c/Makefile	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ uspace/lib/c/Makefile	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -33,12 +33,5 @@
 CONFIG_MAKEFILE = $(ROOT_PATH)/Makefile.config
 
-LINKER_SCRIPTS = \
-	$(LIBC_PREFIX)/arch/$(UARCH)/_link.ld \
-	$(LIBC_PREFIX)/arch/$(UARCH)/_link-shlib.ld \
-	$(LIBC_PREFIX)/arch/$(UARCH)/_link-dlexe.ld
-
-PRE_DEPEND =
-EXTRA_OUTPUT = $(LINKER_SCRIPTS) $(START_FILES)
-EXTRA_CLEAN = $(LINKER_SCRIPTS)
+EXTRA_OUTPUT = $(START_FILES)
 EXTRA_TEST_CFLAGS = -Wno-deprecated-declarations
 LIBRARY = libc
@@ -51,5 +44,4 @@
 	generic/libc.c \
 	generic/ddi.c \
-	generic/atomic.c \
 	generic/as.c \
 	generic/bd.c \
@@ -85,13 +77,8 @@
 	generic/strtol.c \
 	generic/l18n/langs.c \
-	generic/fibril.c \
-	generic/fibril_synch.c \
 	generic/pcb.c \
 	generic/smc.c \
 	generic/smp_memory_barrier.c \
-	generic/thread.c \
-	generic/tls.c \
 	generic/task.c \
-	generic/futex.c \
 	generic/imath.c \
 	generic/inet/addr.c \
@@ -142,4 +129,12 @@
 	generic/stdio/sstream.c \
 	generic/stdio/vsprintf.c \
+	generic/thread/atomic.c \
+	generic/thread/fibril.c \
+	generic/thread/fibril_synch.c \
+	generic/thread/thread.c \
+	generic/thread/tls.c \
+	generic/thread/futex.c \
+	generic/thread/rcu.c \
+	generic/thread/mpsc.c \
 	generic/sysinfo.c \
 	generic/ipc.c \
@@ -165,5 +160,4 @@
 	generic/vfs/mtab.c \
 	generic/vfs/vfs.c \
-	generic/rcu.c \
 	generic/setjmp.c \
 	generic/stack.c \
@@ -218,13 +212,4 @@
 	cp $< $@
 
-$(LIBC_PREFIX)/arch/$(UARCH)/_link.ld: $(LIBC_PREFIX)/arch/$(UARCH)/_link.ld.in
-	$(CC) $(DEFS) $(CFLAGS) -DLIBC_PATH=$(CURDIR) -E -x c $< | grep -v "^\#" > $@
-
-$(LIBC_PREFIX)/arch/$(UARCH)/_link-shlib.ld: $(LIBC_PREFIX)/arch/$(UARCH)/_link.ld.in
-	$(CC) $(DEFS) $(CFLAGS) -DLIBC_PATH=$(CURDIR) -DSHLIB -E -x c $< | grep -v "^\#" > $@
-
-$(LIBC_PREFIX)/arch/$(UARCH)/_link-dlexe.ld: $(LIBC_PREFIX)/arch/$(UARCH)/_link.ld.in
-	$(CC) $(DEFS) $(CFLAGS) -DLIBC_PATH=$(CURDIR) -DDLEXE -E -x c $< | grep -v "^\#" > $@
-
 AUTOCHECK = $(realpath $(ROOT_PATH)/tools/autocheck.awk)
 
Index: uspace/lib/c/arch/abs32le/_link.ld.in
===================================================================
--- uspace/lib/c/arch/abs32le/_link.ld.in	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ 	(revision )
@@ -1,85 +1,0 @@
-ENTRY(_start)
-
-PHDRS {
-	text PT_LOAD FILEHDR PHDRS FLAGS(5);
-	data PT_LOAD FLAGS(6);
-	tls PT_TLS;
-}
-
-SECTIONS {
-#ifdef SHLIB
-	. = SEGMENT_START("text-segment", 0);
-#else
-	. = SEGMENT_START("text-segment", 0x400000);
-	PROVIDE (__executable_start = .);
-#endif
-	. = . + SIZEOF_HEADERS;
-
-	.text : {
-		*(.text .text.*);
-		*(.rodata .rodata.*);
-	} :text
-
-	. = . + 0x1000;
-
-	.data : {
-		*(.data);
-		*(.data.rel*);
-	} :data
-
-	.got.plt : {
-		*(.got.plt);
-	} :data
-
-	.tdata : {
-		*(.tdata);
-		*(.tdata.*);
-		*(.gnu.linkonce.td.*);
-	} :data :tls
-
-	.tbss : {
-		*(.tbss);
-		*(.tbss.*);
-		*(.gnu.linkonce.tb.*);
-	} :data :tls
-
-	.sbss : {
-		*(.scommon);
-		*(.sbss);
-	}
-
-	.bss : {
-		*(COMMON);
-		*(.bss);
-	} :data
-
-	__dso_handle = .;
-
-	.init_array : {
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__init_array_start = .);
-#endif
-		KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
-		KEEP (*(.init_array .ctors))
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__init_array_end = .);
-#endif
-	}
-
-	.fini_array : {
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__fini_array_start = .);
-#endif
-		KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
-		KEEP (*(.fini_array .dtors))
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__fini_array_end = .);
-#endif
-	}
-
-	_end = .;
-
-	/DISCARD/ : {
-		*(*);
-	}
-}
Index: uspace/lib/c/arch/amd64/_link.ld.in
===================================================================
--- uspace/lib/c/arch/amd64/_link.ld.in	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ 	(revision )
@@ -1,98 +1,0 @@
-ENTRY(_start)
-
-PHDRS {
-	text PT_LOAD FILEHDR PHDRS FLAGS(5);
-	data PT_LOAD FLAGS(6);
-	tls PT_TLS;
-	debug PT_NOTE;
-}
-
-SECTIONS {
-#ifdef SHLIB
-	. = SEGMENT_START("text-segment", 0);
-#else
-	. = SEGMENT_START("text-segment", 0x400000);
-	PROVIDE (__executable_start = .);
-#endif
-	. = . + SIZEOF_HEADERS;
-
-	.init : {
-		*(.init);
-	} :text
-
-	.text : {
-		*(.text .text.*);
-		*(.rodata .rodata.*);
-	} :text
-
-	. = . + 0x1000;
-
-	.data : {
-		*(.data);
-		*(.data.rel*);
-	} :data
-
-	.got.plt : {
-		*(.got.plt);
-	} :data
-
-	.tdata : {
-		*(.tdata);
-		*(.tdata.*);
-		*(.gnu.linkonce.td.*);
-	} :data :tls
-
-	.tbss : {
-		*(.tbss);
-		*(.tbss.*);
-		*(.gnu.linkonce.tb.*);
-	} :data :tls
-
-	.bss : {
-		*(COMMON);
-		*(.bss);
-	} :data
-
-	__dso_handle = .;
-
-	.init_array : {
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__init_array_start = .);
-#endif
-		KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
-		KEEP (*(.init_array .ctors))
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__init_array_end = .);
-#endif
-	}
-
-	.fini_array : {
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__fini_array_start = .);
-#endif
-		KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
-		KEEP (*(.fini_array .dtors))
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__fini_array_end = .);
-#endif
-	}
-
-	_end = .;
-
-#ifdef CONFIG_LINE_DEBUG
-	.comment 0 : { *(.comment); } :debug
-	.debug_abbrev 0 : { *(.debug_abbrev); } :debug
-	.debug_aranges 0 : { *(.debug_aranges); } :debug
-	.debug_info 0 : { *(.debug_info); } :debug
-	.debug_line 0 : { *(.debug_line); } :debug
-	.debug_loc 0 : { *(.debug_loc); } :debug
-	.debug_pubnames 0 : { *(.debug_pubnames); } :debug
-	.debug_pubtypes 0 : { *(.debug_pubtypes); } :debug
-	.debug_ranges 0 : { *(.debug_ranges); } :debug
-	.debug_str 0 : { *(.debug_str); } :debug
-#endif
-
-	/DISCARD/ : {
-		*(*);
-	}
-}
Index: uspace/lib/c/arch/arm32/_link.ld.in
===================================================================
--- uspace/lib/c/arch/arm32/_link.ld.in	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ 	(revision )
@@ -1,83 +1,0 @@
-ENTRY(_start)
-
-PHDRS {
-	text PT_LOAD FILEHDR PHDRS FLAGS(5);
-	data PT_LOAD FLAGS(6);
-	tls PT_TLS;
-}
-
-SECTIONS {
-#ifdef SHLIB
-	. = SEGMENT_START("text-segment", 0);
-#else
-	. = SEGMENT_START("text-segment", 0x400000);
-	PROVIDE (__executable_start = .);
-#endif
-	. = . + SIZEOF_HEADERS;
-
-	.init : {
-		*(.init);
-	} :text
-
-	.text : {
-		*(.text .text.*);
-		*(.rodata .rodata.*);
-	} :text
-
-	. = . + 0x1000;
-
-	.data : {
-		*(.opd);
-		*(.data .data.*);
-		*(.sdata);
-	} :data
-
-	.tdata : {
-		*(.tdata);
-		*(.tdata.*);
-		*(.gnu.linkonce.td.*);
-	} :data :tls
-
-	.tbss : {
-		*(.tbss);
-		*(.tbss.*);
-		*(.gnu.linkonce.tb.*);
-	} :data :tls
-
-	.bss : {
-		*(.sbss);
-		*(.scommon);
-		*(COMMON);
-		*(.bss);
-	} :data
-
-	__dso_handle = .;
-
-	.init_array : {
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__init_array_start = .);
-#endif
-		KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
-		KEEP (*(.init_array .ctors))
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__init_array_end = .);
-#endif
-	}
-
-	.fini_array : {
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__fini_array_start = .);
-#endif
-		KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
-		KEEP (*(.fini_array .dtors))
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__fini_array_end = .);
-#endif
-	}
-
-	_end = .;
-
-	/DISCARD/ : {
-		*(*);
-	}
-}
Index: uspace/lib/c/arch/ia32/_link.ld.in
===================================================================
--- uspace/lib/c/arch/ia32/_link.ld.in	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ 	(revision )
@@ -1,157 +1,0 @@
-#ifndef SHLIB
-ENTRY(_start)
-#endif
-
-PHDRS {
-#if defined(DLEXE)
-	interp PT_INTERP;
-#endif
-	text PT_LOAD FILEHDR PHDRS FLAGS(5);
-	data PT_LOAD FLAGS(6);
-	tls PT_TLS;
-#if defined(SHLIB) || defined(DLEXE)
-	dynamic PT_DYNAMIC;
-#endif
-	debug PT_NOTE;
-}
-
-SECTIONS {
-#ifdef SHLIB
-	. = SEGMENT_START("text-segment", 0);
-#else
-	. = SEGMENT_START("text-segment", 0x400000);
-	PROVIDE (__executable_start = .);
-#endif
-	. = . + SIZEOF_HEADERS;
-
-	.init : {
-		*(.init);
-	} :text
-
-	.text : {
-		*(.text .text.*);
-		*(.rodata .rodata.*);
-	} :text
-
-#if defined(SHLIB) || defined(DLEXE)
-	.rel.plt : {
-		*(.rel.plt);
-	}
-	/*
-	 *.rel.dyn MUST FOLLOW IMMEDIATELY after .rel.plt
-	 * without alignment gap or DT_REL will be broken
-	 */
-	.rel.dyn : {
-		*(.rel.*);
-	} :text
-
-	.plt : {
-		*(.plt);
-	} :text
-
-	.dynsym : {
-		*(.dynsym);
-	} :text
-
-	.dynstr : {
-		*(.dynstr);
-	} :text
-
-	.hash : {
-		*(.hash .gnu.hash);
-	} :text
-#endif
-
-#if defined(DLEXE)
-	.interp : {
-		*(.interp);
-	} :interp :text
-#endif
-
-	. = . + 0x1000;
-
-#if defined(SHLIB) || defined(DLEXE)
-	.dynamic : {
-		*(.dynamic);
-	} :data :dynamic
-#endif
-
-	.data : {
-		*(.data);
-	} :data
-
-#if defined(SHLIB) || defined(DLEXE)
-	.data.rel : {
-		*(.data.rel .data.rel.*);
-	} :data
-
-	.got : {
-		*(.got);
-	} :data
-#endif
-
-	.got.plt : {
-		*(.got.plt);
-	} :data
-
-	.tdata : {
-		*(.tdata);
-		*(.tdata.*);
-		*(.gnu.linkonce.td.*);
-	} :data :tls
-
-	.tbss : {
-		*(.tbss);
-		*(.tbss.*);
-		*(.gnu.linkonce.tb.*);
-	} :data :tls
-
-	.bss : {
-		*(.dynbss);
-		*(COMMON);
-		*(.bss);
-	} :data
-
-	__dso_handle = .;
-
-	.init_array : {
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__init_array_start = .);
-#endif
-		KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
-		KEEP (*(.init_array .ctors))
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__init_array_end = .);
-#endif
-	}
-
-	.fini_array : {
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__fini_array_start = .);
-#endif
-		KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
-		KEEP (*(.fini_array .dtors))
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__fini_array_end = .);
-#endif
-	}
-
-	_end = .;
-
-#ifdef CONFIG_LINE_DEBUG
-	.comment 0 : { *(.comment); } :debug
-	.debug_abbrev 0 : { *(.debug_abbrev); } :debug
-	.debug_aranges 0 : { *(.debug_aranges); } :debug
-	.debug_info 0 : { *(.debug_info); } :debug
-	.debug_line 0 : { *(.debug_line); } :debug
-	.debug_loc 0 : { *(.debug_loc); } :debug
-	.debug_pubnames 0 : { *(.debug_pubnames); } :debug
-	.debug_pubtypes 0 : { *(.debug_pubtypes); } :debug
-	.debug_ranges 0 : { *(.debug_ranges); } :debug
-	.debug_str 0 : { *(.debug_str); } :debug
-#endif
-
-	/DISCARD/ : {
-		*(*);
-	}
-}
Index: uspace/lib/c/arch/ia64/_link.ld.in
===================================================================
--- uspace/lib/c/arch/ia64/_link.ld.in	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ 	(revision )
@@ -1,93 +1,0 @@
-ENTRY(_start)
-
-PHDRS {
-	text PT_LOAD FILEHDR PHDRS FLAGS(5);
-	data PT_LOAD FLAGS(6);
-	tls PT_TLS;
-}
-
-SECTIONS {
-#ifdef SHLIB
-	. = SEGMENT_START("text-segment", 0);
-#else
-	. = SEGMENT_START("text-segment", 0x400000);
-	PROVIDE (__executable_start = .);
-#endif
-	. = . + SIZEOF_HEADERS;
-
-	/* Workaround proper alignment of the .init section */
-	. = ALIGN(., 16);
-
-	.init : {
-		*(.init);
-	} :text
-
-	.text : {
-		*(.text .text.*);
-		*(.rodata .rodata.*);
-	} :text
-
-	. = . + 0x4000;
-
-	.got : {
-		/* Tell the linker where we expect GP to point. */
-		__gp = .;
-		*(.got .got.*);
-	} :data
-
-	.data : {
-		*(.opd);
-		*(.data .data.*);
-		*(.sdata);
-		*(.sdata.*);
-	} :data
-
-	.tdata : {
-		*(.tdata);
-		*(.tdata.*);
-		*(.gnu.linkonce.td.*);
-	} :data :tls
-
-	.tbss : {
-		*(.tbss);
-		*(.tbss.*);
-		*(.gnu.linkonce.tb.*);
-	} :data :tls
-
-	.bss : {
-		*(.sbss);
-		*(.scommon);
-		*(COMMON);
-		*(.bss);
-	} :data
-
-	__dso_handle = .;
-
-	.init_array : {
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__init_array_start = .);
-#endif
-		KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
-		KEEP (*(.init_array .ctors))
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__init_array_end = .);
-#endif
-	}
-
-	.fini_array : {
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__fini_array_start = .);
-#endif
-		KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
-		KEEP (*(.fini_array .dtors))
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__fini_array_end = .);
-#endif
-	}
-
-	_end = .;
-
-	/DISCARD/ : {
-		*(*);
-	}
-}
Index: uspace/lib/c/arch/mips32/_link.ld.in
===================================================================
--- uspace/lib/c/arch/mips32/_link.ld.in	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ 	(revision )
@@ -1,90 +1,0 @@
-ENTRY(__start)
-
-PHDRS {
-	text PT_LOAD FILEHDR PHDRS FLAGS(5);
-	data PT_LOAD FLAGS(6);
-	tls PT_TLS;
-}
-
-SECTIONS {
-#ifdef SHLIB
-	. = SEGMENT_START("text-segment", 0);
-#else
-	. = SEGMENT_START("text-segment", 0x400000);
-	PROVIDE (__executable_start = .);
-#endif
-	. = . + SIZEOF_HEADERS;
-
-	.init : {
-		*(.init);
-	} :text
-
-	.text : {
-		*(.text .text.*);
-		*(.rodata .rodata.*);
-	} :text
-
-	. = . + 0x4000;
-
-	.data : {
-		*(.data);
-		*(.data.rel*);
-	} :data
-
-	.got : {
-		_gp = .;
-		*(.got);
-	} :data
-
-	.tdata : {
-		*(.tdata);
-		*(.tdata.*);
-		*(.gnu.linkonce.td.*);
-	} :data :tls
-
-	.tbss : {
-		*(.tbss);
-		*(.tbss.*);
-		*(.gnu.linkonce.tb.*);
-	} :data :tls
-
-	.sbss : {
-		*(.scommon);
-		*(.sbss);
-	}
-
-	.bss : {
-		*(.bss);
-		*(COMMON);
-	} :data
-
-	__dso_handle = .;
-
-	.init_array : {
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__init_array_start = .);
-#endif
-		KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
-		KEEP (*(.init_array .ctors))
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__init_array_end = .);
-#endif
-	}
-
-	.fini_array : {
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__fini_array_start = .);
-#endif
-		KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
-		KEEP (*(.fini_array .dtors))
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__fini_array_end = .);
-#endif
-	}
-
-	_end = .;
-
-	/DISCARD/ : {
-		*(*);
-	}
-}
Index: uspace/lib/c/arch/mips32eb/_link.ld.in
===================================================================
--- uspace/lib/c/arch/mips32eb/_link.ld.in	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ 	(revision )
@@ -1,1 +1,0 @@
-../mips32/_link.ld.in
Index: uspace/lib/c/arch/ppc32/_link.ld.in
===================================================================
--- uspace/lib/c/arch/ppc32/_link.ld.in	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ 	(revision )
@@ -1,96 +1,0 @@
-ENTRY(_start)
-
-PHDRS {
-	text PT_LOAD FILEHDR PHDRS FLAGS(5);
-	data PT_LOAD FLAGS(6);
-	tls PT_TLS;
-	debug PT_NOTE;
-}
-
-SECTIONS {
-#ifdef SHLIB
-	. = SEGMENT_START("text-segment", 0);
-#else
-	. = SEGMENT_START("text-segment", 0x400000);
-	PROVIDE (__executable_start = .);
-#endif
-	. = . + SIZEOF_HEADERS;
-
-	.init : {
-		*(.init);
-	} :text
-
-	.text : {
-		*(.text .text.*);
-		*(.rodata .rodata.*);
-	} :text
-
-	. = . + 0x1000;
-
-	.data : {
-		*(.data);
-		*(.sdata);
-		*(.sdata.*);
-	} :data
-
-	.tdata : {
-		*(.tdata);
-		*(.tdata.*);
-		*(.gnu.linkonce.td.*);
-	} :data :tls
-
-	.tbss : {
-		*(.tbss);
-		*(.tbss.*);
-		*(.gnu.linkonce.tb.*);
-	} :data :tls
-
-	.bss : {
-		*(.sbss);
-		*(COMMON);
-		*(.bss);
-	} :data
-
-	__dso_handle = .;
-
-	.init_array : {
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__init_array_start = .);
-#endif
-		KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
-		KEEP (*(.init_array .ctors))
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__init_array_end = .);
-#endif
-	}
-
-	.fini_array : {
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__fini_array_start = .);
-#endif
-		KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
-		KEEP (*(.fini_array .dtors))
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__fini_array_end = .);
-#endif
-	}
-
-	_end = .;
-
-#ifdef CONFIG_LINE_DEBUG
-	.comment 0 : { *(.comment); } :debug
-	.debug_abbrev 0 : { *(.debug_abbrev); } :debug
-	.debug_aranges 0 : { *(.debug_aranges); } :debug
-	.debug_info 0 : { *(.debug_info); } :debug
-	.debug_line 0 : { *(.debug_line); } :debug
-	.debug_loc 0 : { *(.debug_loc); } :debug
-	.debug_pubnames 0 : { *(.debug_pubnames); } :debug
-	.debug_pubtypes 0 : { *(.debug_pubtypes); } :debug
-	.debug_ranges 0 : { *(.debug_ranges); } :debug
-	.debug_str 0 : { *(.debug_str); } :debug
-#endif
-
-	/DISCARD/ : {
-		*(*);
-	}
-}
Index: uspace/lib/c/arch/riscv64/_link.ld.in
===================================================================
--- uspace/lib/c/arch/riscv64/_link.ld.in	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ 	(revision )
@@ -1,95 +1,0 @@
-ENTRY(_start)
-
-PHDRS {
-	text PT_LOAD FILEHDR PHDRS FLAGS(5);
-	data PT_LOAD FLAGS(6);
-	tls PT_TLS;
-	debug PT_NOTE;
-}
-
-SECTIONS {
-#ifdef SHLIB
-	. = SEGMENT_START("text-segment", 0);
-#else
-	. = SEGMENT_START("text-segment", 0x400000);
-	PROVIDE (__executable_start = .);
-#endif
-	. = . + SIZEOF_HEADERS;
-
-	.text : {
-		*(.text .text.*);
-		*(.rodata .rodata.*);
-		*(.srodata .srodata.*);
-	} :text
-
-	. = . + 0x1000;
-
-	.data : {
-		*(.data);
-		*(.sdata);
-		*(.sdata.*);
-		*(.data.rel*);
-	} :data
-
-	.tdata : {
-		*(.tdata);
-		*(.tdata.*);
-		*(.gnu.linkonce.td.*);
-	} :data :tls
-
-	.tbss : {
-		*(.tbss);
-		*(.tbss.*);
-		*(.gnu.linkonce.tb.*);
-	} :data :tls
-
-	.bss : {
-		*(.scommon);
-		*(.sbss);
-		*(COMMON);
-		*(.bss);
-	} :data
-
-	__dso_handle = .;
-
-	.init_array : {
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__init_array_start = .);
-#endif
-		KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
-		KEEP (*(.init_array .ctors))
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__init_array_end = .);
-#endif
-	}
-
-	.fini_array : {
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__fini_array_start = .);
-#endif
-		KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
-		KEEP (*(.fini_array .dtors))
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__fini_array_end = .);
-#endif
-	}
-
-	_end = .;
-
-#ifdef CONFIG_LINE_DEBUG
-	.comment 0 : { *(.comment); } :debug
-	.debug_abbrev 0 : { *(.debug_abbrev); } :debug
-	.debug_aranges 0 : { *(.debug_aranges); } :debug
-	.debug_info 0 : { *(.debug_info); } :debug
-	.debug_line 0 : { *(.debug_line); } :debug
-	.debug_loc 0 : { *(.debug_loc); } :debug
-	.debug_pubnames 0 : { *(.debug_pubnames); } :debug
-	.debug_pubtypes 0 : { *(.debug_pubtypes); } :debug
-	.debug_ranges 0 : { *(.debug_ranges); } :debug
-	.debug_str 0 : { *(.debug_str); } :debug
-#endif
-
-	/DISCARD/ : {
-		*(*);
-	}
-}
Index: uspace/lib/c/arch/sparc64/_link.ld.in
===================================================================
--- uspace/lib/c/arch/sparc64/_link.ld.in	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ 	(revision )
@@ -1,99 +1,0 @@
-ENTRY(_start)
-
-PHDRS {
-	text PT_LOAD FILEHDR PHDRS FLAGS(5);
-	data PT_LOAD FLAGS(6);
-	tls PT_TLS;
-	debug PT_NOTE;
-}
-
-SECTIONS {
-#ifdef SHLIB
-	. = SEGMENT_START("text-segment", 0);
-#else
-	. = SEGMENT_START("text-segment", 0x400000);
-	PROVIDE (__executable_start = .);
-#endif
-	. = . + SIZEOF_HEADERS;
-
-	.init : {
-		*(.init);
-	} :text
-
-	.text : {
-		*(.text .text.*);
-		*(.rodata .rodata.*);
-	} :text
-
-	. = . + 0x4000;
-
-	.got : {
-		 *(.got*);
-	} :data
-
-	.data : {
-		*(.data);
-		*(.sdata);
-	} :data
-
-	.tdata : {
-		*(.tdata);
-		*(.tdata.*);
-		*(.gnu.linkonce.td.*);
-	} :data :tls
-
-	.tbss : {
-		*(.tbss);
-		*(.tbss.*);
-		*(.gnu.linkonce.tb.*);
-	} :data :tls
-
-	.bss : {
-		*(.sbss);
-		*(COMMON);
-		*(.bss);
-	} :data
-
-	__dso_handle = .;
-
-	.init_array : {
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__init_array_start = .);
-#endif
-		KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
-		KEEP (*(.init_array .ctors))
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__init_array_end = .);
-#endif
-	}
-
-	.fini_array : {
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__fini_array_start = .);
-#endif
-		KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
-		KEEP (*(.fini_array .dtors))
-#ifndef SHLIB
-		PROVIDE_HIDDEN (__fini_array_end = .);
-#endif
-	}
-
-	_end = .;
-
-#ifdef CONFIG_LINE_DEBUG
-	.comment 0 : { *(.comment); } :debug
-	.debug_abbrev 0 : { *(.debug_abbrev); } :debug
-	.debug_aranges 0 : { *(.debug_aranges); } :debug
-	.debug_info 0 : { *(.debug_info); } :debug
-	.debug_line 0 : { *(.debug_line); } :debug
-	.debug_loc 0 : { *(.debug_loc); } :debug
-	.debug_pubnames 0 : { *(.debug_pubnames); } :debug
-	.debug_pubtypes 0 : { *(.debug_pubtypes); } :debug
-	.debug_ranges 0 : { *(.debug_ranges); } :debug
-	.debug_str 0 : { *(.debug_str); } :debug
-#endif
-
-	/DISCARD/ : {
-		*(*);
-	}
-}
Index: uspace/lib/c/generic/async/ports.c
===================================================================
--- uspace/lib/c/generic/async/ports.c	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ uspace/lib/c/generic/async/ports.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -51,4 +51,5 @@
 #include <abi/mm/as.h>
 #include "../private/libc.h"
+#include "../private/fibril.h"
 
 /** Interface data */
Index: uspace/lib/c/generic/async/server.c
===================================================================
--- uspace/lib/c/generic/async/server.c	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ uspace/lib/c/generic/async/server.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -124,10 +124,4 @@
 #define DPRINTF(...)  ((void) 0)
 
-/** Call data */
-typedef struct {
-	link_t link;
-	ipc_call_t call;
-} msg_t;
-
 /* Client connection data */
 typedef struct {
@@ -156,15 +150,9 @@
 	client_t *client;
 
-	/** Message event. */
-	fibril_event_t msg_arrived;
-
-	/** Messages that should be delivered to this fibril. */
-	list_t msg_queue;
+	/** Channel for messages that should be delivered to this fibril. */
+	mpsc_t *msg_channel;
 
 	/** Call data of the opening call. */
 	ipc_call_t call;
-
-	/** Identification of the closing call. */
-	cap_call_handle_t close_chandle;
 
 	/** Fibril function that will be used to handle the connection. */
@@ -423,12 +411,20 @@
 	async_client_put(client);
 
+	fibril_rmutex_lock(&conn_mutex);
+
 	/*
 	 * Remove myself from the connection hash table.
 	 */
-	fibril_rmutex_lock(&conn_mutex);
 	hash_table_remove(&conn_hash_table, &(conn_key_t){
 		.task_id = fibril_connection->in_task_id,
 		.phone_hash = fibril_connection->in_phone_hash
 	});
+
+	/*
+	 * Close the channel, if it isn't closed already.
+	 */
+	mpsc_t *c = fibril_connection->msg_channel;
+	mpsc_close(c);
+
 	fibril_rmutex_unlock(&conn_mutex);
 
@@ -436,21 +432,12 @@
 	 * Answer all remaining messages with EHANGUP.
 	 */
-	while (!list_empty(&fibril_connection->msg_queue)) {
-		msg_t *msg =
-		    list_get_instance(list_first(&fibril_connection->msg_queue),
-		    msg_t, link);
-
-		list_remove(&msg->link);
-		ipc_answer_0(msg->call.cap_handle, EHANGUP);
-		free(msg);
-	}
+	ipc_call_t call;
+	while (mpsc_receive(c, &call, NULL) == EOK)
+		ipc_answer_0(call.cap_handle, EHANGUP);
 
 	/*
-	 * If the connection was hung-up, answer the last call,
-	 * i.e. IPC_M_PHONE_HUNGUP.
+	 * Clean up memory.
 	 */
-	if (fibril_connection->close_chandle)
-		ipc_answer_0(fibril_connection->close_chandle, EOK);
-
+	mpsc_destroy(c);
 	free(fibril_connection);
 	return EOK;
@@ -488,7 +475,5 @@
 	conn->in_task_id = in_task_id;
 	conn->in_phone_hash = in_phone_hash;
-	conn->msg_arrived = FIBRIL_EVENT_INIT;
-	list_initialize(&conn->msg_queue);
-	conn->close_chandle = CAP_NIL;
+	conn->msg_channel = mpsc_create(sizeof(ipc_call_t));
 	conn->handler = handler;
 	conn->data = data;
@@ -503,4 +488,5 @@
 
 	if (conn->fid == 0) {
+		mpsc_destroy(conn->msg_channel);
 		free(conn);
 
@@ -606,9 +592,10 @@
  * @param call Data of the incoming call.
  *
- * @return False if the call doesn't match any connection.
- * @return True if the call was passed to the respective connection fibril.
- *
- */
-static bool route_call(ipc_call_t *call)
+ * @return EOK if the call was successfully passed to the respective fibril.
+ * @return ENOENT if the call doesn't match any connection.
+ * @return Other error code if routing failed for other reasons.
+ *
+ */
+static errno_t route_call(ipc_call_t *call)
 {
 	assert(call);
@@ -622,27 +609,22 @@
 	if (!link) {
 		fibril_rmutex_unlock(&conn_mutex);
-		return false;
+		return ENOENT;
 	}
 
 	connection_t *conn = hash_table_get_inst(link, connection_t, link);
 
-	// FIXME: malloc in critical section
-	msg_t *msg = malloc(sizeof(*msg));
-	if (!msg) {
-		fibril_rmutex_unlock(&conn_mutex);
-		return false;
-	}
-
-	msg->call = *call;
-	list_append(&msg->link, &conn->msg_queue);
-
-	if (IPC_GET_IMETHOD(*call) == IPC_M_PHONE_HUNGUP)
-		conn->close_chandle = call->cap_handle;
+	errno_t rc = mpsc_send(conn->msg_channel, call);
+
+	if (IPC_GET_IMETHOD(*call) == IPC_M_PHONE_HUNGUP) {
+		/* Close the channel, but let the connection fibril answer. */
+		mpsc_close(conn->msg_channel);
+		// FIXME: Ideally, we should be able to discard/answer the
+		//        hungup message here and just close the channel without
+		//        passing it out. Unfortunatelly, somehow that breaks
+		//        handling of CPU exceptions.
+	}
 
 	fibril_rmutex_unlock(&conn_mutex);
-
-	/* If the connection fibril is waiting for an event, activate it */
-	fibril_notify(&conn->msg_arrived);
-	return true;
+	return rc;
 }
 
@@ -939,13 +921,4 @@
 	assert(fibril_connection);
 
-	/*
-	 * Why doing this?
-	 * GCC 4.1.0 coughs on fibril_connection-> dereference.
-	 * GCC 4.1.1 happilly puts the rdhwr instruction in delay slot.
-	 *           I would never expect to find so many errors in
-	 *           a compiler.
-	 */
-	connection_t *conn = fibril_connection;
-
 	struct timeval tv;
 	struct timeval *expires = NULL;
@@ -956,40 +929,19 @@
 	}
 
-	fibril_rmutex_lock(&conn_mutex);
-
-	/* If nothing in queue, wait until something arrives */
-	while (list_empty(&conn->msg_queue)) {
-		if (conn->close_chandle) {
-			/*
-			 * Handle the case when the connection was already
-			 * closed by the client but the server did not notice
-			 * the first IPC_M_PHONE_HUNGUP call and continues to
-			 * call async_get_call_timeout(). Repeat
-			 * IPC_M_PHONE_HUNGUP until the caller notices.
-			 */
-			memset(call, 0, sizeof(ipc_call_t));
-			IPC_SET_IMETHOD(*call, IPC_M_PHONE_HUNGUP);
-			fibril_rmutex_unlock(&conn_mutex);
-			return true;
-		}
-
-		// TODO: replace with cvar
-		fibril_rmutex_unlock(&conn_mutex);
-
-		errno_t rc = fibril_wait_timeout(&conn->msg_arrived, expires);
-		if (rc == ETIMEOUT)
-			return false;
-
-		fibril_rmutex_lock(&conn_mutex);
-	}
-
-	msg_t *msg = list_get_instance(list_first(&conn->msg_queue),
-	    msg_t, link);
-	list_remove(&msg->link);
-
-	*call = msg->call;
-	free(msg);
-
-	fibril_rmutex_unlock(&conn_mutex);
+	errno_t rc = mpsc_receive(fibril_connection->msg_channel,
+	    call, expires);
+
+	if (rc == ETIMEOUT)
+		return false;
+
+	if (rc != EOK) {
+		/*
+		 * The async_get_call_timeout() interface doesn't support
+		 * propagating errors. Return a null call instead.
+		 */
+
+		memset(call, 0, sizeof(ipc_call_t));
+	}
+
 	return true;
 }
@@ -1071,9 +1023,13 @@
 
 	/* Try to route the call through the connection hash table */
-	if (route_call(call))
+	errno_t rc = route_call(call);
+	if (rc == EOK)
 		return;
 
-	/* Unknown call from unknown phone - hang it up */
-	ipc_answer_0(call->cap_handle, EHANGUP);
+	// TODO: Log the error.
+
+	if (call->cap_handle != CAP_NIL)
+		/* Unknown call from unknown phone - hang it up */
+		ipc_answer_0(call->cap_handle, EHANGUP);
 }
 
Index: uspace/lib/c/generic/atomic.c
===================================================================
--- uspace/lib/c/generic/atomic.c	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ 	(revision )
@@ -1,68 +1,0 @@
-/*
- * Copyright (c) 2018 CZ.NIC, z.s.p.o.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <atomic.h>
-
-#ifdef PLATFORM_arm32
-
-/*
- * Older ARMs don't have atomic instructions, so we need to define a bunch
- * of symbols for GCC to use.
- */
-
-unsigned __sync_add_and_fetch_4(volatile void *vptr, unsigned val)
-{
-	return atomic_add((atomic_t *)vptr, val);
-}
-
-unsigned __sync_sub_and_fetch_4(volatile void *vptr, unsigned val)
-{
-	return atomic_add((atomic_t *)vptr, -(atomic_signed_t)val);
-}
-
-bool __sync_bool_compare_and_swap_4(volatile void *ptr, unsigned old_val, unsigned new_val)
-{
-	return cas((atomic_t *)ptr, old_val, new_val);
-}
-
-unsigned __sync_val_compare_and_swap_4(volatile void *ptr, unsigned old_val, unsigned new_val)
-{
-	while (true) {
-		if (__sync_bool_compare_and_swap_4(ptr, old_val, new_val)) {
-			return old_val;
-		}
-
-		unsigned current = *(volatile unsigned *)ptr;
-		if (current != old_val)
-			return current;
-
-		/* If the current value is the same as old_val, retry. */
-	}
-}
-
-#endif
Index: uspace/lib/c/generic/fibril.c
===================================================================
--- uspace/lib/c/generic/fibril.c	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ 	(revision )
@@ -1,917 +1,0 @@
-/*
- * Copyright (c) 2006 Ondrej Palkovsky
- * Copyright (c) 2007 Jakub Jermar
- * Copyright (c) 2018 CZ.NIC, z.s.p.o.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup libc
- * @{
- */
-/** @file
- */
-
-#include <adt/list.h>
-#include <fibril.h>
-#include <stack.h>
-#include <tls.h>
-#include <stdlib.h>
-#include <as.h>
-#include <context.h>
-#include <futex.h>
-#include <assert.h>
-
-#include <mem.h>
-#include <str.h>
-#include <ipc/ipc.h>
-#include <libarch/faddr.h>
-#include "private/thread.h"
-#include "private/fibril.h"
-#include "private/libc.h"
-
-#define DPRINTF(...) ((void)0)
-#undef READY_DEBUG
-
-/** Member of timeout_list. */
-typedef struct {
-	link_t link;
-	struct timeval expires;
-	fibril_event_t *event;
-} _timeout_t;
-
-typedef struct {
-	errno_t rc;
-	link_t link;
-	ipc_call_t *call;
-	fibril_event_t event;
-} _ipc_waiter_t;
-
-typedef struct {
-	errno_t rc;
-	link_t link;
-	ipc_call_t call;
-} _ipc_buffer_t;
-
-typedef enum {
-	SWITCH_FROM_DEAD,
-	SWITCH_FROM_HELPER,
-	SWITCH_FROM_YIELD,
-	SWITCH_FROM_BLOCKED,
-} _switch_type_t;
-
-static bool multithreaded = false;
-
-/* This futex serializes access to global data. */
-static futex_t fibril_futex = FUTEX_INITIALIZER;
-static futex_t ready_semaphore = FUTEX_INITIALIZE(0);
-static long ready_st_count;
-
-static LIST_INITIALIZE(ready_list);
-static LIST_INITIALIZE(fibril_list);
-static LIST_INITIALIZE(timeout_list);
-
-static futex_t ipc_lists_futex = FUTEX_INITIALIZER;
-static LIST_INITIALIZE(ipc_waiter_list);
-static LIST_INITIALIZE(ipc_buffer_list);
-static LIST_INITIALIZE(ipc_buffer_free_list);
-
-/* Only used as unique markers for triggered events. */
-static fibril_t _fibril_event_triggered;
-static fibril_t _fibril_event_timed_out;
-#define _EVENT_INITIAL   (NULL)
-#define _EVENT_TRIGGERED (&_fibril_event_triggered)
-#define _EVENT_TIMED_OUT (&_fibril_event_timed_out)
-
-static inline void _ready_debug_check(void)
-{
-#ifdef READY_DEBUG
-	assert(!multithreaded);
-	long count = (long) list_count(&ready_list) +
-	    (long) list_count(&ipc_buffer_free_list);
-	assert(ready_st_count == count);
-#endif
-}
-
-static inline long _ready_count(void)
-{
-	/*
-	 * The number of available tokens is always equal to the number
-	 * of fibrils in the ready list + the number of free IPC buffer
-	 * buckets.
-	 */
-
-	if (multithreaded)
-		return atomic_get(&ready_semaphore.val);
-
-	_ready_debug_check();
-	return ready_st_count;
-}
-
-static inline void _ready_up(void)
-{
-	if (multithreaded) {
-		futex_up(&ready_semaphore);
-	} else {
-		ready_st_count++;
-		_ready_debug_check();
-	}
-}
-
-static inline errno_t _ready_down(const struct timeval *expires)
-{
-	if (multithreaded)
-		return futex_down_timeout(&ready_semaphore, expires);
-
-	_ready_debug_check();
-	ready_st_count--;
-	return EOK;
-}
-
-static atomic_t threads_in_ipc_wait = { 0 };
-
-/** Function that spans the whole life-cycle of a fibril.
- *
- * Each fibril begins execution in this function. Then the function implementing
- * the fibril logic is called.  After its return, the return value is saved.
- * The fibril then switches to another fibril, which cleans up after it.
- *
- */
-static void _fibril_main(void)
-{
-	/* fibril_futex is locked when a fibril is started. */
-	futex_unlock(&fibril_futex);
-
-	fibril_t *fibril = fibril_self();
-
-	/* Call the implementing function. */
-	fibril_exit(fibril->func(fibril->arg));
-
-	/* Not reached */
-}
-
-/** Allocate a fibril structure and TCB, but don't do anything else with it. */
-fibril_t *fibril_alloc(void)
-{
-	tcb_t *tcb = tls_make(__progsymbols.elfstart);
-	if (!tcb)
-		return NULL;
-
-	fibril_t *fibril = calloc(1, sizeof(fibril_t));
-	if (!fibril) {
-		tls_free(tcb);
-		return NULL;
-	}
-
-	tcb->fibril_data = fibril;
-	fibril->tcb = tcb;
-	fibril->is_freeable = true;
-
-	fibril_setup(fibril);
-	return fibril;
-}
-
-/**
- * Put the fibril into fibril_list.
- */
-void fibril_setup(fibril_t *f)
-{
-	futex_lock(&fibril_futex);
-	list_append(&f->all_link, &fibril_list);
-	futex_unlock(&fibril_futex);
-}
-
-void fibril_teardown(fibril_t *fibril)
-{
-	futex_lock(&fibril_futex);
-	list_remove(&fibril->all_link);
-	futex_unlock(&fibril_futex);
-
-	if (fibril->is_freeable) {
-		tls_free(fibril->tcb);
-		free(fibril);
-	}
-}
-
-/**
- * Event notification with a given reason.
- *
- * @param reason  Reason of the notification.
- *                Can be either _EVENT_TRIGGERED or _EVENT_TIMED_OUT.
- */
-static fibril_t *_fibril_trigger_internal(fibril_event_t *event, fibril_t *reason)
-{
-	assert(reason != _EVENT_INITIAL);
-	assert(reason == _EVENT_TIMED_OUT || reason == _EVENT_TRIGGERED);
-
-	futex_assert_is_locked(&fibril_futex);
-
-	if (event->fibril == _EVENT_INITIAL) {
-		event->fibril = reason;
-		return NULL;
-	}
-
-	if (event->fibril == _EVENT_TIMED_OUT) {
-		assert(reason == _EVENT_TRIGGERED);
-		event->fibril = reason;
-		return NULL;
-	}
-
-	if (event->fibril == _EVENT_TRIGGERED) {
-		/* Already triggered. Nothing to do. */
-		return NULL;
-	}
-
-	fibril_t *f = event->fibril;
-	event->fibril = reason;
-
-	assert(f->sleep_event == event);
-	return f;
-}
-
-static errno_t _ipc_wait(ipc_call_t *call, const struct timeval *expires)
-{
-	if (!expires)
-		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
-
-	if (expires->tv_sec == 0)
-		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);
-
-	struct timeval now;
-	getuptime(&now);
-
-	if (tv_gteq(&now, expires))
-		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);
-
-	return ipc_wait(call, tv_sub_diff(expires, &now), SYNCH_FLAGS_NONE);
-}
-
-/*
- * Waits until a ready fibril is added to the list, or an IPC message arrives.
- * Returns NULL on timeout and may also return NULL if returning from IPC
- * wait after new ready fibrils are added.
- */
-static fibril_t *_ready_list_pop(const struct timeval *expires, bool locked)
-{
-	if (locked) {
-		futex_assert_is_locked(&fibril_futex);
-		assert(expires);
-		/* Must be nonblocking. */
-		assert(expires->tv_sec == 0);
-	} else {
-		futex_assert_is_not_locked(&fibril_futex);
-	}
-
-	errno_t rc = _ready_down(expires);
-	if (rc != EOK)
-		return NULL;
-
-	/*
-	 * Once we acquire a token from ready_semaphore, there are two options.
-	 * Either there is a ready fibril in the list, or it's our turn to
-	 * call `ipc_wait_cycle()`. There is one extra token on the semaphore
-	 * for each entry of the call buffer.
-	 */
-
-
-	if (!locked)
-		futex_lock(&fibril_futex);
-	fibril_t *f = list_pop(&ready_list, fibril_t, link);
-	if (!f)
-		atomic_inc(&threads_in_ipc_wait);
-	if (!locked)
-		futex_unlock(&fibril_futex);
-
-	if (f)
-		return f;
-
-	if (!multithreaded)
-		assert(list_empty(&ipc_buffer_list));
-
-	/* No fibril is ready, IPC wait it is. */
-	ipc_call_t call = { 0 };
-	rc = _ipc_wait(&call, expires);
-
-	atomic_dec(&threads_in_ipc_wait);
-
-	if (rc != EOK && rc != ENOENT) {
-		/* Return token. */
-		_ready_up();
-		return NULL;
-	}
-
-	/*
-	 * We might get ENOENT due to a poke.
-	 * In that case, we propagate the null call out of fibril_ipc_wait(),
-	 * because poke must result in that call returning.
-	 */
-
-	/*
-	 * If a fibril is already waiting for IPC, we wake up the fibril,
-	 * and return the token to ready_semaphore.
-	 * If there is no fibril waiting, we pop a buffer bucket and
-	 * put our call there. The token then returns when the bucket is
-	 * returned.
-	 */
-
-	if (!locked)
-		futex_lock(&fibril_futex);
-
-	futex_lock(&ipc_lists_futex);
-
-
-	_ipc_waiter_t *w = list_pop(&ipc_waiter_list, _ipc_waiter_t, link);
-	if (w) {
-		*w->call = call;
-		w->rc = rc;
-		/* We switch to the woken up fibril immediately if possible. */
-		f = _fibril_trigger_internal(&w->event, _EVENT_TRIGGERED);
-
-		/* Return token. */
-		_ready_up();
-	} else {
-		_ipc_buffer_t *buf = list_pop(&ipc_buffer_free_list, _ipc_buffer_t, link);
-		assert(buf);
-		*buf = (_ipc_buffer_t) { .call = call, .rc = rc };
-		list_append(&buf->link, &ipc_buffer_list);
-	}
-
-	futex_unlock(&ipc_lists_futex);
-
-	if (!locked)
-		futex_unlock(&fibril_futex);
-
-	return f;
-}
-
-static fibril_t *_ready_list_pop_nonblocking(bool locked)
-{
-	struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
-	return _ready_list_pop(&tv, locked);
-}
-
-static void _ready_list_push(fibril_t *f)
-{
-	if (!f)
-		return;
-
-	futex_assert_is_locked(&fibril_futex);
-
-	/* Enqueue in ready_list. */
-	list_append(&f->link, &ready_list);
-	_ready_up();
-
-	if (atomic_get(&threads_in_ipc_wait)) {
-		DPRINTF("Poking.\n");
-		/* Wakeup one thread sleeping in SYS_IPC_WAIT. */
-		ipc_poke();
-	}
-}
-
-/* Blocks the current fibril until an IPC call arrives. */
-static errno_t _wait_ipc(ipc_call_t *call, const struct timeval *expires)
-{
-	futex_assert_is_not_locked(&fibril_futex);
-
-	futex_lock(&ipc_lists_futex);
-	_ipc_buffer_t *buf = list_pop(&ipc_buffer_list, _ipc_buffer_t, link);
-	if (buf) {
-		*call = buf->call;
-		errno_t rc = buf->rc;
-
-		/* Return to freelist. */
-		list_append(&buf->link, &ipc_buffer_free_list);
-		/* Return IPC wait token. */
-		_ready_up();
-
-		futex_unlock(&ipc_lists_futex);
-		return rc;
-	}
-
-	_ipc_waiter_t w = { .call = call };
-	list_append(&w.link, &ipc_waiter_list);
-	futex_unlock(&ipc_lists_futex);
-
-	errno_t rc = fibril_wait_timeout(&w.event, expires);
-	if (rc == EOK)
-		return w.rc;
-
-	futex_lock(&ipc_lists_futex);
-	if (link_in_use(&w.link))
-		list_remove(&w.link);
-	else
-		rc = w.rc;
-	futex_unlock(&ipc_lists_futex);
-	return rc;
-}
-
-/** Fire all timeouts that expired. */
-static struct timeval *_handle_expired_timeouts(struct timeval *next_timeout)
-{
-	struct timeval tv;
-	getuptime(&tv);
-
-	futex_lock(&fibril_futex);
-
-	while (!list_empty(&timeout_list)) {
-		link_t *cur = list_first(&timeout_list);
-		_timeout_t *to = list_get_instance(cur, _timeout_t, link);
-
-		if (tv_gt(&to->expires, &tv)) {
-			*next_timeout = to->expires;
-			futex_unlock(&fibril_futex);
-			return next_timeout;
-		}
-
-		list_remove(&to->link);
-
-		_ready_list_push(_fibril_trigger_internal(
-		    to->event, _EVENT_TIMED_OUT));
-	}
-
-	futex_unlock(&fibril_futex);
-	return NULL;
-}
-
-/**
- * Clean up after a dead fibril from which we restored context, if any.
- * Called after a switch is made and fibril_futex is unlocked.
- */
-static void _fibril_cleanup_dead(void)
-{
-	fibril_t *srcf = fibril_self();
-	if (!srcf->clean_after_me)
-		return;
-
-	void *stack = srcf->clean_after_me->stack;
-	assert(stack);
-	as_area_destroy(stack);
-	fibril_teardown(srcf->clean_after_me);
-	srcf->clean_after_me = NULL;
-}
-
-/** Switch to a fibril. */
-static void _fibril_switch_to(_switch_type_t type, fibril_t *dstf, bool locked)
-{
-	assert(fibril_self()->rmutex_locks == 0);
-
-	if (!locked)
-		futex_lock(&fibril_futex);
-	else
-		futex_assert_is_locked(&fibril_futex);
-
-	fibril_t *srcf = fibril_self();
-	assert(srcf);
-	assert(dstf);
-
-	switch (type) {
-	case SWITCH_FROM_YIELD:
-		_ready_list_push(srcf);
-		break;
-	case SWITCH_FROM_DEAD:
-		dstf->clean_after_me = srcf;
-		break;
-	case SWITCH_FROM_HELPER:
-	case SWITCH_FROM_BLOCKED:
-		break;
-	}
-
-	dstf->thread_ctx = srcf->thread_ctx;
-	srcf->thread_ctx = NULL;
-
-	/* Just some bookkeeping to allow better debugging of futex locks. */
-	futex_give_to(&fibril_futex, dstf);
-
-	/* Swap to the next fibril. */
-	context_swap(&srcf->ctx, &dstf->ctx);
-
-	assert(srcf == fibril_self());
-	assert(srcf->thread_ctx);
-
-	if (!locked) {
-		/* Must be after context_swap()! */
-		futex_unlock(&fibril_futex);
-		_fibril_cleanup_dead();
-	}
-}
-
-/**
- * Main function for a helper fibril.
- * The helper fibril executes on threads in the lightweight fibril pool when
- * there is no fibril ready to run. Its only purpose is to block until
- * another fibril is ready, or a timeout expires, or an IPC message arrives.
- *
- * There is at most one helper fibril per thread.
- *
- */
-static errno_t _helper_fibril_fn(void *arg)
-{
-	/* Set itself as the thread's own context. */
-	fibril_self()->thread_ctx = fibril_self();
-
-	(void) arg;
-
-	struct timeval next_timeout;
-	while (true) {
-		struct timeval *to = _handle_expired_timeouts(&next_timeout);
-		fibril_t *f = _ready_list_pop(to, false);
-		if (f) {
-			_fibril_switch_to(SWITCH_FROM_HELPER, f, false);
-		}
-	}
-
-	return EOK;
-}
-
-/** Create a new fibril.
- *
- * @param func Implementing function of the new fibril.
- * @param arg Argument to pass to func.
- * @param stksz Stack size in bytes.
- *
- * @return 0 on failure or TLS of the new fibril.
- *
- */
-fid_t fibril_create_generic(errno_t (*func)(void *), void *arg, size_t stksz)
-{
-	fibril_t *fibril;
-
-	fibril = fibril_alloc();
-	if (fibril == NULL)
-		return 0;
-
-	fibril->stack_size = (stksz == FIBRIL_DFLT_STK_SIZE) ?
-	    stack_size_get() : stksz;
-	fibril->stack = as_area_create(AS_AREA_ANY, fibril->stack_size,
-	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE | AS_AREA_GUARD |
-	    AS_AREA_LATE_RESERVE, AS_AREA_UNPAGED);
-	if (fibril->stack == AS_MAP_FAILED) {
-		fibril_teardown(fibril);
-		return 0;
-	}
-
-	fibril->func = func;
-	fibril->arg = arg;
-
-	context_create_t sctx = {
-		.fn = _fibril_main,
-		.stack_base = fibril->stack,
-		.stack_size = fibril->stack_size,
-		.tls = fibril->tcb,
-	};
-
-	context_create(&fibril->ctx, &sctx);
-	return (fid_t) fibril;
-}
-
-/** Delete a fibril that has never run.
- *
- * Free resources of a fibril that has been created with fibril_create()
- * but never started using fibril_start().
- *
- * @param fid Pointer to the fibril structure of the fibril to be
- *            added.
- */
-void fibril_destroy(fid_t fid)
-{
-	fibril_t *fibril = (fibril_t *) fid;
-
-	assert(!fibril->is_running);
-	assert(fibril->stack);
-	as_area_destroy(fibril->stack);
-	fibril_teardown(fibril);
-}
-
-static void _insert_timeout(_timeout_t *timeout)
-{
-	futex_assert_is_locked(&fibril_futex);
-	assert(timeout);
-
-	link_t *tmp = timeout_list.head.next;
-	while (tmp != &timeout_list.head) {
-		_timeout_t *cur = list_get_instance(tmp, _timeout_t, link);
-
-		if (tv_gteq(&cur->expires, &timeout->expires))
-			break;
-
-		tmp = tmp->next;
-	}
-
-	list_insert_before(&timeout->link, tmp);
-}
-
-/**
- * Same as `fibril_wait_for()`, except with a timeout.
- *
- * It is guaranteed that timing out cannot cause another thread's
- * `fibril_notify()` to be lost. I.e. the function returns success if and
- * only if `fibril_notify()` was called after the last call to
- * wait/wait_timeout returned, and before the call timed out.
- *
- * @return ETIMEOUT if timed out. EOK otherwise.
- */
-errno_t fibril_wait_timeout(fibril_event_t *event, const struct timeval *expires)
-{
-	assert(fibril_self()->rmutex_locks == 0);
-
-	DPRINTF("### Fibril %p sleeping on event %p.\n", fibril_self(), event);
-
-	if (!fibril_self()->thread_ctx) {
-		fibril_self()->thread_ctx =
-		    fibril_create_generic(_helper_fibril_fn, NULL, PAGE_SIZE);
-		if (!fibril_self()->thread_ctx)
-			return ENOMEM;
-	}
-
-	futex_lock(&fibril_futex);
-
-	if (event->fibril == _EVENT_TRIGGERED) {
-		DPRINTF("### Already triggered. Returning. \n");
-		event->fibril = _EVENT_INITIAL;
-		futex_unlock(&fibril_futex);
-		return EOK;
-	}
-
-	assert(event->fibril == _EVENT_INITIAL);
-
-	fibril_t *srcf = fibril_self();
-	fibril_t *dstf = NULL;
-
-	/*
-	 * We cannot block here waiting for another fibril becoming
-	 * ready, since that would require unlocking the fibril_futex,
-	 * and that in turn would allow another thread to restore
-	 * the source fibril before this thread finished switching.
-	 *
-	 * Instead, we switch to an internal "helper" fibril whose only
-	 * job is to wait for an event, freeing the source fibril for
-	 * wakeups. There is always one for each running thread.
-	 */
-
-	dstf = _ready_list_pop_nonblocking(true);
-	if (!dstf) {
-		// XXX: It is possible for the _ready_list_pop_nonblocking() to
-		//      check for IPC, find a pending message, and trigger the
-		//      event on which we are currently trying to sleep.
-		if (event->fibril == _EVENT_TRIGGERED) {
-			event->fibril = _EVENT_INITIAL;
-			futex_unlock(&fibril_futex);
-			return EOK;
-		}
-
-		dstf = srcf->thread_ctx;
-		assert(dstf);
-	}
-
-	_timeout_t timeout = { 0 };
-	if (expires) {
-		timeout.expires = *expires;
-		timeout.event = event;
-		_insert_timeout(&timeout);
-	}
-
-	assert(srcf);
-
-	event->fibril = srcf;
-	srcf->sleep_event = event;
-
-	assert(event->fibril != _EVENT_INITIAL);
-
-	_fibril_switch_to(SWITCH_FROM_BLOCKED, dstf, true);
-
-	assert(event->fibril != srcf);
-	assert(event->fibril != _EVENT_INITIAL);
-	assert(event->fibril == _EVENT_TIMED_OUT || event->fibril == _EVENT_TRIGGERED);
-
-	list_remove(&timeout.link);
-	errno_t rc = (event->fibril == _EVENT_TIMED_OUT) ? ETIMEOUT : EOK;
-	event->fibril = _EVENT_INITIAL;
-
-	futex_unlock(&fibril_futex);
-	_fibril_cleanup_dead();
-	return rc;
-}
-
-void fibril_wait_for(fibril_event_t *event)
-{
-	assert(fibril_self()->rmutex_locks == 0);
-
-	(void) fibril_wait_timeout(event, NULL);
-}
-
-void fibril_notify(fibril_event_t *event)
-{
-	futex_lock(&fibril_futex);
-	_ready_list_push(_fibril_trigger_internal(event, _EVENT_TRIGGERED));
-	futex_unlock(&fibril_futex);
-}
-
-/** Start a fibril that has not been running yet. */
-void fibril_start(fibril_t *fibril)
-{
-	futex_lock(&fibril_futex);
-	assert(!fibril->is_running);
-	fibril->is_running = true;
-
-	if (!link_in_use(&fibril->all_link))
-		list_append(&fibril->all_link, &fibril_list);
-
-	_ready_list_push(fibril);
-
-	futex_unlock(&fibril_futex);
-}
-
-/** Start a fibril that has not been running yet. (obsolete) */
-void fibril_add_ready(fibril_t *fibril)
-{
-	fibril_start(fibril);
-}
-
-/** @return the currently running fibril. */
-fibril_t *fibril_self(void)
-{
-	assert(__tcb_is_set());
-	tcb_t *tcb = __tcb_get();
-	assert(tcb->fibril_data);
-	return tcb->fibril_data;
-}
-
-/**
- * Obsolete, use fibril_self().
- *
- * @return ID of the currently running fibril.
- */
-fid_t fibril_get_id(void)
-{
-	return (fid_t) fibril_self();
-}
-
-/**
- * Switch to another fibril, if one is ready to run.
- * Has no effect on a heavy fibril.
- */
-void fibril_yield(void)
-{
-	if (fibril_self()->rmutex_locks > 0)
-		return;
-
-	fibril_t *f = _ready_list_pop_nonblocking(false);
-	if (f)
-		_fibril_switch_to(SWITCH_FROM_YIELD, f, false);
-}
-
-static void _runner_fn(void *arg)
-{
-	_helper_fibril_fn(arg);
-}
-
-/**
- * Spawn a given number of runners (i.e. OS threads) immediately, and
- * unconditionally. This is meant to be used for tests and debugging.
- * Regular programs should just use `fibril_enable_multithreaded()`.
- *
- * @param n  Number of runners to spawn.
- * @return   Number of runners successfully spawned.
- */
-int fibril_test_spawn_runners(int n)
-{
-	assert(fibril_self()->rmutex_locks == 0);
-
-	if (!multithreaded) {
-		_ready_debug_check();
-		atomic_set(&ready_semaphore.val, ready_st_count);
-		multithreaded = true;
-	}
-
-	errno_t rc;
-
-	for (int i = 0; i < n; i++) {
-		thread_id_t tid;
-		rc = thread_create(_runner_fn, NULL, "fibril runner", &tid);
-		if (rc != EOK)
-			return i;
-		thread_detach(tid);
-	}
-
-	return n;
-}
-
-/**
- * Opt-in to have more than one runner thread.
- *
- * Currently, a task only ever runs in one thread because multithreading
- * might break some existing code.
- *
- * Eventually, the number of runner threads for a given task should become
- * configurable in the environment and this function becomes no-op.
- */
-void fibril_enable_multithreaded(void)
-{
-	// TODO: Implement better.
-	//       For now, 4 total runners is a sensible default.
-	if (!multithreaded) {
-		fibril_test_spawn_runners(3);
-	}
-}
-
-/**
- * Detach a fibril.
- */
-void fibril_detach(fid_t f)
-{
-	// TODO: Currently all fibrils are detached by default, but they
-	//       won't always be. Code that explicitly spawns fibrils with
-	//       limited lifetime should call this function.
-}
-
-/**
- * Exit a fibril. Never returns.
- *
- * @param retval  Value to return from fibril_join() called on this fibril.
- */
-_Noreturn void fibril_exit(long retval)
-{
-	// TODO: implement fibril_join() and remember retval
-	(void) retval;
-
-	fibril_t *f = _ready_list_pop_nonblocking(false);
-	if (!f)
-		f = fibril_self()->thread_ctx;
-
-	_fibril_switch_to(SWITCH_FROM_DEAD, f, false);
-	__builtin_unreachable();
-}
-
-void __fibrils_init(void)
-{
-	/*
-	 * We allow a fixed, small amount of parallelism for IPC reads, but
-	 * since IPC is currently serialized in kernel, there's not much
-	 * we can get from more threads reading messages.
-	 */
-
-#define IPC_BUFFER_COUNT 1024
-	static _ipc_buffer_t buffers[IPC_BUFFER_COUNT];
-
-	for (int i = 0; i < IPC_BUFFER_COUNT; i++) {
-		list_append(&buffers[i].link, &ipc_buffer_free_list);
-		_ready_up();
-	}
-}
-
-void fibril_usleep(suseconds_t timeout)
-{
-	struct timeval expires;
-	getuptime(&expires);
-	tv_add_diff(&expires, timeout);
-
-	fibril_event_t event = FIBRIL_EVENT_INIT;
-	fibril_wait_timeout(&event, &expires);
-}
-
-void fibril_sleep(unsigned int sec)
-{
-	struct timeval expires;
-	getuptime(&expires);
-	expires.tv_sec += sec;
-
-	fibril_event_t event = FIBRIL_EVENT_INIT;
-	fibril_wait_timeout(&event, &expires);
-}
-
-void fibril_ipc_poke(void)
-{
-	DPRINTF("Poking.\n");
-	/* Wakeup one thread sleeping in SYS_IPC_WAIT. */
-	ipc_poke();
-}
-
-errno_t fibril_ipc_wait(ipc_call_t *call, const struct timeval *expires)
-{
-	return _wait_ipc(call, expires);
-}
-
-/** @}
- */
Index: uspace/lib/c/generic/fibril_synch.c
===================================================================
--- uspace/lib/c/generic/fibril_synch.c	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ 	(revision )
@@ -1,718 +1,0 @@
-/*
- * Copyright (c) 2009 Jakub Jermar
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup libc
- * @{
- */
-/** @file
- */
-
-#include <fibril_synch.h>
-#include <fibril.h>
-#include <async.h>
-#include <adt/list.h>
-#include <futex.h>
-#include <sys/time.h>
-#include <errno.h>
-#include <assert.h>
-#include <stacktrace.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <io/kio.h>
-#include <mem.h>
-#include <context.h>
-
-#include "private/async.h"
-#include "private/fibril.h"
-
-void fibril_rmutex_initialize(fibril_rmutex_t *m)
-{
-	futex_initialize(&m->futex, 1);
-}
-
-/**
- * Lock restricted mutex.
- * When a restricted mutex is locked, the fibril may not sleep or create new
- * threads. Any attempt to do so will abort the program.
- */
-void fibril_rmutex_lock(fibril_rmutex_t *m)
-{
-	futex_lock(&m->futex);
-	fibril_self()->rmutex_locks++;
-}
-
-bool fibril_rmutex_trylock(fibril_rmutex_t *m)
-{
-	if (futex_trylock(&m->futex)) {
-		fibril_self()->rmutex_locks++;
-		return true;
-	} else {
-		return false;
-	}
-}
-
-void fibril_rmutex_unlock(fibril_rmutex_t *m)
-{
-	fibril_self()->rmutex_locks--;
-	futex_unlock(&m->futex);
-}
-
-static fibril_local bool deadlocked = false;
-
-static futex_t fibril_synch_futex = FUTEX_INITIALIZER;
-
-typedef struct {
-	link_t link;
-	fibril_event_t event;
-	fibril_mutex_t *mutex;
-	fid_t fid;
-} awaiter_t;
-
-#define AWAITER_INIT { .fid = fibril_get_id() }
-
-static void print_deadlock(fibril_owner_info_t *oi)
-{
-	// FIXME: Print to stderr.
-
-	fibril_t *f = (fibril_t *) fibril_get_id();
-
-	if (deadlocked) {
-		kio_printf("Deadlock detected while printing deadlock. Aborting.\n");
-		abort();
-	}
-	deadlocked = true;
-
-	printf("Deadlock detected.\n");
-	stacktrace_print();
-
-	printf("Fibril %p waits for primitive %p.\n", f, oi);
-
-	while (oi && oi->owned_by) {
-		printf("Primitive %p is owned by fibril %p.\n",
-		    oi, oi->owned_by);
-		if (oi->owned_by == f)
-			break;
-		stacktrace_print_fp_pc(
-		    context_get_fp(&oi->owned_by->ctx),
-		    context_get_pc(&oi->owned_by->ctx));
-		printf("Fibril %p waits for primitive %p.\n",
-		    oi->owned_by, oi->owned_by->waits_for);
-		oi = oi->owned_by->waits_for;
-	}
-}
-
-
-static void check_fibril_for_deadlock(fibril_owner_info_t *oi, fibril_t *fib)
-{
-	futex_assert_is_locked(&fibril_synch_futex);
-
-	while (oi && oi->owned_by) {
-		if (oi->owned_by == fib) {
-			futex_unlock(&fibril_synch_futex);
-			print_deadlock(oi);
-			abort();
-		}
-		oi = oi->owned_by->waits_for;
-	}
-}
-
-static void check_for_deadlock(fibril_owner_info_t *oi)
-{
-	check_fibril_for_deadlock(oi, fibril_self());
-}
-
-void fibril_mutex_initialize(fibril_mutex_t *fm)
-{
-	fm->oi.owned_by = NULL;
-	fm->counter = 1;
-	list_initialize(&fm->waiters);
-}
-
-void fibril_mutex_lock(fibril_mutex_t *fm)
-{
-	fibril_t *f = (fibril_t *) fibril_get_id();
-
-	futex_lock(&fibril_synch_futex);
-
-	if (fm->counter-- > 0) {
-		fm->oi.owned_by = f;
-		futex_unlock(&fibril_synch_futex);
-		return;
-	}
-
-	awaiter_t wdata = AWAITER_INIT;
-	list_append(&wdata.link, &fm->waiters);
-	check_for_deadlock(&fm->oi);
-	f->waits_for = &fm->oi;
-
-	futex_unlock(&fibril_synch_futex);
-
-	fibril_wait_for(&wdata.event);
-}
-
-bool fibril_mutex_trylock(fibril_mutex_t *fm)
-{
-	bool locked = false;
-
-	futex_lock(&fibril_synch_futex);
-	if (fm->counter > 0) {
-		fm->counter--;
-		fm->oi.owned_by = (fibril_t *) fibril_get_id();
-		locked = true;
-	}
-	futex_unlock(&fibril_synch_futex);
-
-	return locked;
-}
-
-static void _fibril_mutex_unlock_unsafe(fibril_mutex_t *fm)
-{
-	assert(fm->oi.owned_by == (fibril_t *) fibril_get_id());
-
-	if (fm->counter++ < 0) {
-		awaiter_t *wdp = list_pop(&fm->waiters, awaiter_t, link);
-		assert(wdp);
-
-		fibril_t *f = (fibril_t *) wdp->fid;
-		fm->oi.owned_by = f;
-		f->waits_for = NULL;
-
-		fibril_notify(&wdp->event);
-	} else {
-		fm->oi.owned_by = NULL;
-	}
-}
-
-void fibril_mutex_unlock(fibril_mutex_t *fm)
-{
-	futex_lock(&fibril_synch_futex);
-	_fibril_mutex_unlock_unsafe(fm);
-	futex_unlock(&fibril_synch_futex);
-}
-
-bool fibril_mutex_is_locked(fibril_mutex_t *fm)
-{
-	futex_lock(&fibril_synch_futex);
-	bool locked = (fm->oi.owned_by == (fibril_t *) fibril_get_id());
-	futex_unlock(&fibril_synch_futex);
-	return locked;
-}
-
-void fibril_rwlock_initialize(fibril_rwlock_t *frw)
-{
-	frw->oi.owned_by = NULL;
-	frw->writers = 0;
-	frw->readers = 0;
-	list_initialize(&frw->waiters);
-}
-
-void fibril_rwlock_read_lock(fibril_rwlock_t *frw)
-{
-	fibril_t *f = (fibril_t *) fibril_get_id();
-
-	futex_lock(&fibril_synch_futex);
-
-	if (!frw->writers) {
-		/* Consider the first reader the owner. */
-		if (frw->readers++ == 0)
-			frw->oi.owned_by = f;
-		futex_unlock(&fibril_synch_futex);
-		return;
-	}
-
-	f->is_writer = false;
-
-	awaiter_t wdata = AWAITER_INIT;
-	list_append(&wdata.link, &frw->waiters);
-	check_for_deadlock(&frw->oi);
-	f->waits_for = &frw->oi;
-
-	futex_unlock(&fibril_synch_futex);
-
-	fibril_wait_for(&wdata.event);
-}
-
-void fibril_rwlock_write_lock(fibril_rwlock_t *frw)
-{
-	fibril_t *f = (fibril_t *) fibril_get_id();
-
-	futex_lock(&fibril_synch_futex);
-
-	if (!frw->writers && !frw->readers) {
-		frw->oi.owned_by = f;
-		frw->writers++;
-		futex_unlock(&fibril_synch_futex);
-		return;
-	}
-
-	f->is_writer = true;
-
-	awaiter_t wdata = AWAITER_INIT;
-	list_append(&wdata.link, &frw->waiters);
-	check_for_deadlock(&frw->oi);
-	f->waits_for = &frw->oi;
-
-	futex_unlock(&fibril_synch_futex);
-
-	fibril_wait_for(&wdata.event);
-}
-
-static void _fibril_rwlock_common_unlock(fibril_rwlock_t *frw)
-{
-	if (frw->readers) {
-		if (--frw->readers) {
-			if (frw->oi.owned_by == (fibril_t *) fibril_get_id()) {
-				/*
-				 * If this reader fibril was considered the
-				 * owner of this rwlock, clear the ownership
-				 * information even if there are still more
-				 * readers.
-				 *
-				 * This is the limitation of the detection
-				 * mechanism rooted in the fact that tracking
-				 * all readers would require dynamically
-				 * allocated memory for keeping linkage info.
-				 */
-				frw->oi.owned_by = NULL;
-			}
-
-			return;
-		}
-	} else {
-		frw->writers--;
-	}
-
-	assert(!frw->readers && !frw->writers);
-
-	frw->oi.owned_by = NULL;
-
-	while (!list_empty(&frw->waiters)) {
-		link_t *tmp = list_first(&frw->waiters);
-		awaiter_t *wdp;
-		fibril_t *f;
-
-		wdp = list_get_instance(tmp, awaiter_t, link);
-		f = (fibril_t *) wdp->fid;
-
-		if (f->is_writer) {
-			if (frw->readers)
-				break;
-			frw->writers++;
-		} else {
-			frw->readers++;
-		}
-
-		f->waits_for = NULL;
-		list_remove(&wdp->link);
-		frw->oi.owned_by = f;
-		fibril_notify(&wdp->event);
-
-		if (frw->writers)
-			break;
-	}
-}
-
-void fibril_rwlock_read_unlock(fibril_rwlock_t *frw)
-{
-	futex_lock(&fibril_synch_futex);
-	assert(frw->readers > 0);
-	_fibril_rwlock_common_unlock(frw);
-	futex_unlock(&fibril_synch_futex);
-}
-
-void fibril_rwlock_write_unlock(fibril_rwlock_t *frw)
-{
-	futex_lock(&fibril_synch_futex);
-	assert(frw->writers == 1);
-	assert(frw->oi.owned_by == fibril_self());
-	_fibril_rwlock_common_unlock(frw);
-	futex_unlock(&fibril_synch_futex);
-}
-
-bool fibril_rwlock_is_read_locked(fibril_rwlock_t *frw)
-{
-	futex_lock(&fibril_synch_futex);
-	bool locked = (frw->readers > 0);
-	futex_unlock(&fibril_synch_futex);
-	return locked;
-}
-
-bool fibril_rwlock_is_write_locked(fibril_rwlock_t *frw)
-{
-	futex_lock(&fibril_synch_futex);
-	assert(frw->writers <= 1);
-	bool locked = (frw->writers > 0) && (frw->oi.owned_by == fibril_self());
-	futex_unlock(&fibril_synch_futex);
-	return locked;
-}
-
-bool fibril_rwlock_is_locked(fibril_rwlock_t *frw)
-{
-	return fibril_rwlock_is_read_locked(frw) ||
-	    fibril_rwlock_is_write_locked(frw);
-}
-
-void fibril_condvar_initialize(fibril_condvar_t *fcv)
-{
-	list_initialize(&fcv->waiters);
-}
-
-/**
- * FIXME: If `timeout` is negative, the function returns ETIMEOUT immediately,
- *        and if `timeout` is 0, the wait never times out.
- *        This is not consistent with other similar APIs.
- */
-errno_t
-fibril_condvar_wait_timeout(fibril_condvar_t *fcv, fibril_mutex_t *fm,
-    suseconds_t timeout)
-{
-	assert(fibril_mutex_is_locked(fm));
-
-	if (timeout < 0)
-		return ETIMEOUT;
-
-	awaiter_t wdata = AWAITER_INIT;
-	wdata.mutex = fm;
-
-	struct timeval tv;
-	struct timeval *expires = NULL;
-	if (timeout) {
-		getuptime(&tv);
-		tv_add_diff(&tv, timeout);
-		expires = &tv;
-	}
-
-	futex_lock(&fibril_synch_futex);
-	_fibril_mutex_unlock_unsafe(fm);
-	list_append(&wdata.link, &fcv->waiters);
-	futex_unlock(&fibril_synch_futex);
-
-	(void) fibril_wait_timeout(&wdata.event, expires);
-
-	futex_lock(&fibril_synch_futex);
-	bool timed_out = link_in_use(&wdata.link);
-	list_remove(&wdata.link);
-	futex_unlock(&fibril_synch_futex);
-
-	fibril_mutex_lock(fm);
-
-	return timed_out ? ETIMEOUT : EOK;
-}
-
-void fibril_condvar_wait(fibril_condvar_t *fcv, fibril_mutex_t *fm)
-{
-	(void) fibril_condvar_wait_timeout(fcv, fm, 0);
-}
-
-void fibril_condvar_signal(fibril_condvar_t *fcv)
-{
-	futex_lock(&fibril_synch_futex);
-
-	awaiter_t *w = list_pop(&fcv->waiters, awaiter_t, link);
-	if (w != NULL)
-		fibril_notify(&w->event);
-
-	futex_unlock(&fibril_synch_futex);
-}
-
-void fibril_condvar_broadcast(fibril_condvar_t *fcv)
-{
-	futex_lock(&fibril_synch_futex);
-
-	awaiter_t *w;
-	while ((w = list_pop(&fcv->waiters, awaiter_t, link)))
-		fibril_notify(&w->event);
-
-	futex_unlock(&fibril_synch_futex);
-}
-
-/** Timer fibril.
- *
- * @param arg	Timer
- */
-static errno_t fibril_timer_func(void *arg)
-{
-	fibril_timer_t *timer = (fibril_timer_t *) arg;
-	errno_t rc;
-
-	fibril_mutex_lock(timer->lockp);
-
-	while (timer->state != fts_cleanup) {
-		switch (timer->state) {
-		case fts_not_set:
-		case fts_fired:
-			fibril_condvar_wait(&timer->cv, timer->lockp);
-			break;
-		case fts_active:
-			rc = fibril_condvar_wait_timeout(&timer->cv,
-			    timer->lockp, timer->delay);
-			if (rc == ETIMEOUT && timer->state == fts_active) {
-				timer->state = fts_fired;
-				timer->handler_fid = fibril_get_id();
-				fibril_mutex_unlock(timer->lockp);
-				timer->fun(timer->arg);
-				fibril_mutex_lock(timer->lockp);
-				timer->handler_fid = 0;
-			}
-			break;
-		case fts_cleanup:
-		case fts_clean:
-			assert(false);
-			break;
-		}
-	}
-
-	/* Acknowledge timer fibril has finished cleanup. */
-	timer->state = fts_clean;
-	fibril_condvar_broadcast(&timer->cv);
-	fibril_mutex_unlock(timer->lockp);
-
-	return 0;
-}
-
-/** Create new timer.
- *
- * @return		New timer on success, @c NULL if out of memory.
- */
-fibril_timer_t *fibril_timer_create(fibril_mutex_t *lock)
-{
-	fid_t fid;
-	fibril_timer_t *timer;
-
-	timer = calloc(1, sizeof(fibril_timer_t));
-	if (timer == NULL)
-		return NULL;
-
-	fid = fibril_create(fibril_timer_func, (void *) timer);
-	if (fid == 0) {
-		free(timer);
-		return NULL;
-	}
-
-	fibril_mutex_initialize(&timer->lock);
-	fibril_condvar_initialize(&timer->cv);
-
-	timer->fibril = fid;
-	timer->state = fts_not_set;
-	timer->lockp = (lock != NULL) ? lock : &timer->lock;
-
-	fibril_add_ready(fid);
-	return timer;
-}
-
-/** Destroy timer.
- *
- * @param timer		Timer, must not be active or accessed by other threads.
- */
-void fibril_timer_destroy(fibril_timer_t *timer)
-{
-	fibril_mutex_lock(timer->lockp);
-	assert(timer->state == fts_not_set || timer->state == fts_fired);
-
-	/* Request timer fibril to terminate. */
-	timer->state = fts_cleanup;
-	fibril_condvar_broadcast(&timer->cv);
-
-	/* Wait for timer fibril to terminate */
-	while (timer->state != fts_clean)
-		fibril_condvar_wait(&timer->cv, timer->lockp);
-	fibril_mutex_unlock(timer->lockp);
-
-	free(timer);
-}
-
-/** Set timer.
- *
- * Set timer to execute a callback function after the specified
- * interval.
- *
- * @param timer		Timer
- * @param delay		Delay in microseconds
- * @param fun		Callback function
- * @param arg		Argument for @a fun
- */
-void fibril_timer_set(fibril_timer_t *timer, suseconds_t delay,
-    fibril_timer_fun_t fun, void *arg)
-{
-	fibril_mutex_lock(timer->lockp);
-	fibril_timer_set_locked(timer, delay, fun, arg);
-	fibril_mutex_unlock(timer->lockp);
-}
-
-/** Set locked timer.
- *
- * Set timer to execute a callback function after the specified
- * interval. Must be called when the timer is locked.
- *
- * @param timer		Timer
- * @param delay		Delay in microseconds
- * @param fun		Callback function
- * @param arg		Argument for @a fun
- */
-void fibril_timer_set_locked(fibril_timer_t *timer, suseconds_t delay,
-    fibril_timer_fun_t fun, void *arg)
-{
-	assert(fibril_mutex_is_locked(timer->lockp));
-	assert(timer->state == fts_not_set || timer->state == fts_fired);
-	timer->state = fts_active;
-	timer->delay = delay;
-	timer->fun = fun;
-	timer->arg = arg;
-	fibril_condvar_broadcast(&timer->cv);
-}
-
-/** Clear timer.
- *
- * Clears (cancels) timer and returns last state of the timer.
- * This can be one of:
- *    - fts_not_set	If the timer has not been set or has been cleared
- *    - fts_active	Timer was set but did not fire
- *    - fts_fired	Timer fired
- *
- * @param timer		Timer
- * @return		Last timer state
- */
-fibril_timer_state_t fibril_timer_clear(fibril_timer_t *timer)
-{
-	fibril_timer_state_t old_state;
-
-	fibril_mutex_lock(timer->lockp);
-	old_state = fibril_timer_clear_locked(timer);
-	fibril_mutex_unlock(timer->lockp);
-
-	return old_state;
-}
-
-/** Clear locked timer.
- *
- * Clears (cancels) timer and returns last state of the timer.
- * This can be one of:
- *    - fts_not_set	If the timer has not been set or has been cleared
- *    - fts_active	Timer was set but did not fire
- *    - fts_fired	Timer fired
- * Must be called when the timer is locked.
- *
- * @param timer		Timer
- * @return		Last timer state
- */
-fibril_timer_state_t fibril_timer_clear_locked(fibril_timer_t *timer)
-{
-	fibril_timer_state_t old_state;
-
-	assert(fibril_mutex_is_locked(timer->lockp));
-
-	while (timer->handler_fid != 0) {
-		if (timer->handler_fid == fibril_get_id()) {
-			printf("Deadlock detected.\n");
-			stacktrace_print();
-			printf("Fibril %p is trying to clear timer %p from "
-			    "inside its handler %p.\n",
-			    fibril_get_id(), timer, timer->fun);
-			abort();
-		}
-
-		fibril_condvar_wait(&timer->cv, timer->lockp);
-	}
-
-	old_state = timer->state;
-	timer->state = fts_not_set;
-
-	timer->delay = 0;
-	timer->fun = NULL;
-	timer->arg = NULL;
-	fibril_condvar_broadcast(&timer->cv);
-
-	return old_state;
-}
-
-/**
- * Initialize a semaphore with initial count set to the provided value.
- *
- * @param sem    Semaphore to initialize.
- * @param count  Initial count. Must not be negative.
- */
-void fibril_semaphore_initialize(fibril_semaphore_t *sem, long count)
-{
-	/*
-	 * Negative count denotes the length of waitlist,
-	 * so it makes no sense as an initial value.
-	 */
-	assert(count >= 0);
-	sem->count = count;
-	list_initialize(&sem->waiters);
-}
-
-/**
- * Produce one token.
- * If there are fibrils waiting for tokens, this operation satisfies
- * exactly one waiting `fibril_semaphore_down()`.
- * This operation never blocks the fibril.
- *
- * @param sem  Semaphore to use.
- */
-void fibril_semaphore_up(fibril_semaphore_t *sem)
-{
-	futex_lock(&fibril_synch_futex);
-	sem->count++;
-
-	if (sem->count <= 0) {
-		awaiter_t *w = list_pop(&sem->waiters, awaiter_t, link);
-		assert(w);
-		fibril_notify(&w->event);
-	}
-
-	futex_unlock(&fibril_synch_futex);
-}
-
-/**
- * Consume one token.
- * If there are no available tokens (count <= 0), this operation blocks until
- * another fibril produces a token using `fibril_semaphore_up()`.
- *
- * @param sem  Semaphore to use.
- */
-void fibril_semaphore_down(fibril_semaphore_t *sem)
-{
-	futex_lock(&fibril_synch_futex);
-	sem->count--;
-
-	if (sem->count >= 0) {
-		futex_unlock(&fibril_synch_futex);
-		return;
-	}
-
-	awaiter_t wdata = AWAITER_INIT;
-	list_append(&wdata.link, &sem->waiters);
-
-	futex_unlock(&fibril_synch_futex);
-
-	fibril_wait_for(&wdata.event);
-}
-
-/** @}
- */
Index: uspace/lib/c/generic/futex.c
===================================================================
--- uspace/lib/c/generic/futex.c	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ 	(revision )
@@ -1,139 +1,0 @@
-/*
- * Copyright (c) 2008 Jakub Jermar
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup libc
- * @{
- */
-/** @file
- */
-
-#include <futex.h>
-
-#include <assert.h>
-#include <atomic.h>
-#include <fibril.h>
-#include <io/kio.h>
-
-#include "private/fibril.h"
-
-//#define DPRINTF(...) kio_printf(__VA_ARGS__)
-#define DPRINTF(...) dummy_printf(__VA_ARGS__)
-
-/** Initialize futex counter.
- *
- * @param futex Futex.
- * @param val   Initialization value.
- *
- */
-void futex_initialize(futex_t *futex, int val)
-{
-	atomic_set(&futex->val, val);
-}
-
-#ifdef CONFIG_DEBUG_FUTEX
-
-void __futex_assert_is_locked(futex_t *futex, const char *name)
-{
-	void *owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
-	fibril_t *self = (fibril_t *) fibril_get_id();
-	if (owner != self) {
-		DPRINTF("Assertion failed: %s (%p) is not locked by fibril %p (instead locked by fibril %p).\n", name, futex, self, owner);
-	}
-	assert(owner == self);
-}
-
-void __futex_assert_is_not_locked(futex_t *futex, const char *name)
-{
-	void *owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
-	fibril_t *self = (fibril_t *) fibril_get_id();
-	if (owner == self) {
-		DPRINTF("Assertion failed: %s (%p) is already locked by fibril %p.\n", name, futex, self);
-	}
-	assert(owner != self);
-}
-
-void __futex_lock(futex_t *futex, const char *name)
-{
-	/*
-	 * We use relaxed atomics to avoid violating C11 memory model.
-	 * They should compile to regular load/stores, but simple assignments
-	 * would be UB by definition.
-	 * The proper ordering is ensured by the surrounding futex operation.
-	 */
-
-	fibril_t *self = (fibril_t *) fibril_get_id();
-	DPRINTF("Locking futex %s (%p) by fibril %p.\n", name, futex, self);
-	__futex_assert_is_not_locked(futex, name);
-	futex_down(futex);
-
-	void *prev_owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
-	assert(prev_owner == NULL);
-	__atomic_store_n(&futex->owner, self, __ATOMIC_RELAXED);
-}
-
-void __futex_unlock(futex_t *futex, const char *name)
-{
-	fibril_t *self = (fibril_t *) fibril_get_id();
-	DPRINTF("Unlocking futex %s (%p) by fibril %p.\n", name, futex, self);
-	__futex_assert_is_locked(futex, name);
-	__atomic_store_n(&futex->owner, NULL, __ATOMIC_RELAXED);
-	futex_up(futex);
-}
-
-bool __futex_trylock(futex_t *futex, const char *name)
-{
-	fibril_t *self = (fibril_t *) fibril_get_id();
-	bool success = futex_trydown(futex);
-	if (success) {
-		void *owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
-		assert(owner == NULL);
-
-		__atomic_store_n(&futex->owner, self, __ATOMIC_RELAXED);
-
-		DPRINTF("Trylock on futex %s (%p) by fibril %p succeeded.\n", name, futex, self);
-	} else {
-		DPRINTF("Trylock on futex %s (%p) by fibril %p failed.\n", name, futex, self);
-	}
-
-	return success;
-}
-
-void __futex_give_to(futex_t *futex, void *new_owner, const char *name)
-{
-	fibril_t *self = fibril_self();
-	fibril_t *no = new_owner;
-	DPRINTF("Passing futex %s (%p) from fibril %p to fibril %p.\n", name, futex, self, no);
-
-	__futex_assert_is_locked(futex, name);
-	__atomic_store_n(&futex->owner, new_owner, __ATOMIC_RELAXED);
-}
-
-#endif
-
-/** @}
- */
Index: uspace/lib/c/generic/io/kio.c
===================================================================
--- uspace/lib/c/generic/io/kio.c	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ uspace/lib/c/generic/io/kio.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -44,5 +44,6 @@
 #include <macros.h>
 #include <libarch/config.h>
-#include <futex.h>
+
+#include "../private/futex.h"
 
 #define KIO_BUFFER_SIZE PAGE_SIZE
Index: uspace/lib/c/generic/ipc.c
===================================================================
--- uspace/lib/c/generic/ipc.c	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ uspace/lib/c/generic/ipc.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -46,5 +46,4 @@
 #include <errno.h>
 #include <adt/list.h>
-#include <futex.h>
 #include <fibril.h>
 #include <macros.h>
Index: uspace/lib/c/generic/malloc.c
===================================================================
--- uspace/lib/c/generic/malloc.c	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ uspace/lib/c/generic/malloc.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -44,8 +44,9 @@
 #include <bitops.h>
 #include <mem.h>
-#include <fibril_synch.h>
 #include <stdlib.h>
 #include <adt/gcdlcm.h>
+
 #include "private/malloc.h"
+#include "private/fibril.h"
 
 /** Magic used in heap headers. */
Index: uspace/lib/c/generic/private/fibril.h
===================================================================
--- uspace/lib/c/generic/private/fibril.h	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ uspace/lib/c/generic/private/fibril.h	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -35,5 +35,13 @@
 #include <abi/proc/uarg.h>
 #include <atomic.h>
-#include <futex.h>
+#include <fibril.h>
+
+#include "./futex.h"
+
+typedef struct {
+	fibril_t *fibril;
+} fibril_event_t;
+
+#define FIBRIL_EVENT_INIT ((fibril_event_t) {0})
 
 struct fibril {
@@ -73,3 +81,55 @@
 extern void __fibrils_init(void);
 
+extern void fibril_wait_for(fibril_event_t *);
+extern errno_t fibril_wait_timeout(fibril_event_t *, const struct timeval *);
+extern void fibril_notify(fibril_event_t *);
+
+extern errno_t fibril_ipc_wait(ipc_call_t *, const struct timeval *);
+extern void fibril_ipc_poke(void);
+
+/**
+ * "Restricted" fibril mutex.
+ *
+ * Similar to `fibril_mutex_t`, but has a set of restrictions placed on its
+ * use. Within a rmutex critical section, you
+ *         - may not use any other synchronization primitive,
+ *           save for another `fibril_rmutex_t`. This includes nonblocking
+ *           operations like cvar signal and mutex unlock, unless otherwise
+ *           specified.
+ *         - may not read IPC messages
+ *         - may not start a new thread/fibril
+ *           (creating fibril without starting is fine)
+ *
+ * Additionally, locking with a timeout is not possible on this mutex,
+ * and there is no associated condition variable type.
+ * This is a design constraint, not a lack of implementation effort.
+ */
+typedef struct {
+	// TODO: At this point, this is just silly handwaving to hide current
+	//       futex use behind a fibril based abstraction. Later, the imple-
+	//       mentation will change, but the restrictions placed on this type
+	//       will allow it to be simpler and faster than a regular mutex.
+	//       There might also be optional debug checking of the assumptions.
+	//
+	//       Note that a consequence of the restrictions is that if we are
+	//       running on a single thread, no other fibril can ever get to run
+	//       while a fibril has a rmutex locked. That means that for
+	//       single-threaded programs, we can reduce all rmutex locks and
+	//       unlocks to simple branches on a global bool variable.
+
+	futex_t futex;
+} fibril_rmutex_t;
+
+#define FIBRIL_RMUTEX_INITIALIZER(name) \
+	{ .futex = FUTEX_INITIALIZE(1) }
+
+#define FIBRIL_RMUTEX_INITIALIZE(name) \
+	fibril_rmutex_t name = FIBRIL_RMUTEX_INITIALIZER(name)
+
+extern void fibril_rmutex_initialize(fibril_rmutex_t *);
+extern void fibril_rmutex_lock(fibril_rmutex_t *);
+extern bool fibril_rmutex_trylock(fibril_rmutex_t *);
+extern void fibril_rmutex_unlock(fibril_rmutex_t *);
+
+
 #endif
Index: uspace/lib/c/generic/private/futex.h
===================================================================
--- uspace/lib/c/generic/private/futex.h	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
+++ uspace/lib/c/generic/private/futex.h	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2006 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup libc
+ * @{
+ */
+/** @file
+ */
+
+#ifndef LIBC_FUTEX_H_
+#define LIBC_FUTEX_H_
+
+#include <assert.h>
+#include <atomic.h>
+#include <errno.h>
+#include <libc.h>
+#include <time.h>
+
+typedef struct futex {
+	atomic_t val;
+#ifdef CONFIG_DEBUG_FUTEX
+	void *owner;
+#endif
+} futex_t;
+
+extern void futex_initialize(futex_t *futex, int value);
+
+#ifdef CONFIG_DEBUG_FUTEX
+
+#define FUTEX_INITIALIZE(val) {{ (val) }, NULL }
+#define FUTEX_INITIALIZER     FUTEX_INITIALIZE(1)
+
+void __futex_assert_is_locked(futex_t *, const char *);
+void __futex_assert_is_not_locked(futex_t *, const char *);
+void __futex_lock(futex_t *, const char *);
+void __futex_unlock(futex_t *, const char *);
+bool __futex_trylock(futex_t *, const char *);
+void __futex_give_to(futex_t *, void *, const char *);
+
+#define futex_lock(futex) __futex_lock((futex), #futex)
+#define futex_unlock(futex) __futex_unlock((futex), #futex)
+#define futex_trylock(futex) __futex_trylock((futex), #futex)
+
+#define futex_give_to(futex, new_owner) __futex_give_to((futex), (new_owner), #futex)
+#define futex_assert_is_locked(futex) __futex_assert_is_locked((futex), #futex)
+#define futex_assert_is_not_locked(futex) __futex_assert_is_not_locked((futex), #futex)
+
+#else
+
+#define FUTEX_INITIALIZE(val) {{ (val) }}
+#define FUTEX_INITIALIZER     FUTEX_INITIALIZE(1)
+
+#define futex_lock(fut)     (void) futex_down((fut))
+#define futex_trylock(fut)  futex_trydown((fut))
+#define futex_unlock(fut)   (void) futex_up((fut))
+
+#define futex_give_to(fut, owner) ((void)0)
+#define futex_assert_is_locked(fut) assert((atomic_signed_t) (fut)->val.count <= 0)
+#define futex_assert_is_not_locked(fut) ((void)0)
+
+#endif
+
+/** Down the futex with timeout, composably.
+ *
+ * This means that when the operation fails due to a timeout or being
+ * interrupted, the next futex_up() is ignored, which allows certain kinds of
+ * composition of synchronization primitives.
+ *
+ * In most other circumstances, regular futex_down_timeout() is a better choice.
+ *
+ * @param futex Futex.
+ *
+ * @return ENOENT if there is no such virtual address.
+ * @return ETIMEOUT if timeout expires.
+ * @return EOK on success.
+ * @return Error code from <errno.h> otherwise.
+ *
+ */
+static inline errno_t futex_down_composable(futex_t *futex, const struct timeval *expires)
+{
+	// TODO: Add tests for this.
+
+	if ((atomic_signed_t) atomic_predec(&futex->val) >= 0)
+		return EOK;
+
+	suseconds_t timeout;
+
+	if (!expires) {
+		/* No timeout. */
+		timeout = 0;
+	} else {
+		if (expires->tv_sec == 0) {
+			/* We can't just return ETIMEOUT. That wouldn't be composable. */
+			timeout = 1;
+		} else {
+			struct timeval tv;
+			getuptime(&tv);
+			timeout = tv_gteq(&tv, expires) ? 1 :
+			    tv_sub_diff(expires, &tv);
+		}
+
+		assert(timeout > 0);
+	}
+
+	return __SYSCALL2(SYS_FUTEX_SLEEP, (sysarg_t) &futex->val.count, (sysarg_t) timeout);
+}
+
+/** Up the futex.
+ *
+ * @param futex Futex.
+ *
+ * @return ENOENT if there is no such virtual address.
+ * @return EOK on success.
+ * @return Error code from <errno.h> otherwise.
+ *
+ */
+static inline errno_t futex_up(futex_t *futex)
+{
+	if ((atomic_signed_t) atomic_postinc(&futex->val) < 0)
+		return __SYSCALL1(SYS_FUTEX_WAKEUP, (sysarg_t) &futex->val.count);
+
+	return EOK;
+}
+
+static inline errno_t futex_down_timeout(futex_t *futex, const struct timeval *expires)
+{
+	if (expires && expires->tv_sec == 0 && expires->tv_usec == 0) {
+		/* Nonblocking down. */
+
+		/*
+		 * Try good old CAS a few times.
+		 * Not too much though, we don't want to bloat the caller.
+		 */
+		for (int i = 0; i < 2; i++) {
+			atomic_signed_t old = atomic_get(&futex->val);
+			if (old <= 0)
+				return ETIMEOUT;
+
+			if (cas(&futex->val, old, old - 1))
+				return EOK;
+		}
+
+		// TODO: builtin atomics with relaxed ordering can make this
+		//       faster.
+
+		/*
+		 * If we don't succeed with CAS, we can't just return failure
+		 * because that would lead to spurious failures where
+		 * futex_down_timeout returns ETIMEOUT despite there being
+		 * available tokens. That could break some algorithms.
+		 * We also don't want to loop on CAS indefinitely, because
+		 * that would make the semaphore not wait-free, even when all
+		 * atomic operations and the underlying base semaphore are
+		 * wait-free.
+		 * Instead, we fall back to regular down_timeout(), with
+		 * an already expired deadline. That way we delegate all these
+		 * concerns to the base semaphore.
+		 */
+	}
+
+	/*
+	 * This combination of a "composable" sleep followed by futex_up() on
+	 * failure is necessary to prevent breakage due to certain race
+	 * conditions.
+	 */
+	errno_t rc = futex_down_composable(futex, expires);
+	if (rc != EOK)
+		futex_up(futex);
+	return rc;
+}
+
+/** Try to down the futex.
+ *
+ * @param futex Futex.
+ *
+ * @return true if the futex was acquired.
+ * @return false if the futex was not acquired.
+ *
+ */
+static inline bool futex_trydown(futex_t *futex)
+{
+	/*
+	 * down_timeout with an already expired deadline should behave like
+	 * trydown.
+	 */
+	struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
+	return futex_down_timeout(futex, &tv) == EOK;
+}
+
+/** Down the futex.
+ *
+ * @param futex Futex.
+ *
+ * @return ENOENT if there is no such virtual address.
+ * @return EOK on success.
+ * @return Error code from <errno.h> otherwise.
+ *
+ */
+static inline errno_t futex_down(futex_t *futex)
+{
+	return futex_down_timeout(futex, NULL);
+}
+
+#endif
+
+/** @}
+ */
Index: uspace/lib/c/generic/rcu.c
===================================================================
--- uspace/lib/c/generic/rcu.c	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ 	(revision )
@@ -1,442 +1,0 @@
-/*
- * Copyright (c) 2012 Adam Hraska
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup liburcu
- * @{
- */
-/**
- * @file
- *
- * User space RCU is based on URCU utilizing signals [1]. This
- * implementation does not however signal each thread of the process
- * to issue a memory barrier. Instead, we introduced a syscall that
- * issues memory barriers (via IPIs) on cpus that are running threads
- * of the current process. First, it does not require us to schedule
- * and run every thread of the process. Second, IPIs are less intrusive
- * than switching contexts and entering user space.
- *
- * This algorithm is further modified to require a single instead of
- * two reader group changes per grace period. Signal-URCU flips
- * the reader group and waits for readers of the previous group
- * twice in succession in order to wait for new readers that were
- * delayed and mistakenly associated with the previous reader group.
- * The modified algorithm ensures that the new reader group is
- * always empty (by explicitly waiting for it to become empty).
- * Only then does it flip the reader group and wait for preexisting
- * readers of the old reader group (invariant of SRCU [2, 3]).
- *
- *
- * [1] User-level implementations of read-copy update,
- *     2012, appendix
- *     http://www.rdrop.com/users/paulmck/RCU/urcu-supp-accepted.2011.08.30a.pdf
- *
- * [2] linux/kernel/srcu.c in Linux 3.5-rc2,
- *     2012
- *     http://tomoyo.sourceforge.jp/cgi-bin/lxr/source/kernel/srcu.c?v=linux-3.5-rc2-ccs-1.8.3
- *
- * [3] [RFC PATCH 5/5 single-thread-version] implement
- *     per-domain single-thread state machine,
- *     2012, Lai
- *     https://lkml.org/lkml/2012/3/6/586
- */
-
-#include "rcu.h"
-#include <fibril_synch.h>
-#include <fibril.h>
-#include <stdio.h>
-#include <stddef.h>
-#include <compiler/barrier.h>
-#include <libarch/barrier.h>
-#include <futex.h>
-#include <macros.h>
-#include <async.h>
-#include <adt/list.h>
-#include <smp_memory_barrier.h>
-#include <assert.h>
-#include <time.h>
-
-#include "private/fibril.h"
-
-
-/** RCU sleeps for RCU_SLEEP_MS before polling an active RCU reader again. */
-#define RCU_SLEEP_MS        10
-
-#define RCU_NESTING_SHIFT   1
-#define RCU_NESTING_INC     (1 << RCU_NESTING_SHIFT)
-#define RCU_GROUP_BIT_MASK  (size_t)(RCU_NESTING_INC - 1)
-#define RCU_GROUP_A         (size_t)(0 | RCU_NESTING_INC)
-#define RCU_GROUP_B         (size_t)(1 | RCU_NESTING_INC)
-
-
-/** Fibril local RCU data. */
-typedef struct fibril_rcu_data {
-	size_t nesting_cnt;
-	link_t link;
-	bool registered;
-} fibril_rcu_data_t;
-
-/** Process global RCU data. */
-typedef struct rcu_data {
-	size_t cur_gp;
-	size_t reader_group;
-	fibril_rmutex_t list_mutex;
-	list_t fibrils_list;
-	struct {
-		fibril_rmutex_t mutex;
-		bool locked;
-		list_t blocked_fibrils;
-	} sync_lock;
-} rcu_data_t;
-
-typedef struct blocked_fibril {
-	fibril_event_t unblock;
-	link_t link;
-	bool is_ready;
-} blocked_fibril_t;
-
-
-/** Fibril local RCU data. */
-static fibril_local fibril_rcu_data_t fibril_rcu = {
-	.nesting_cnt = 0,
-	.link = {
-		.next = NULL,
-		.prev = NULL
-	},
-	.registered = false
-};
-
-/** Process global RCU data. */
-static rcu_data_t rcu = {
-	.cur_gp = 0,
-	.reader_group = RCU_GROUP_A,
-	.list_mutex = FIBRIL_RMUTEX_INITIALIZER(rcu.list_mutex),
-	.fibrils_list = LIST_INITIALIZER(rcu.fibrils_list),
-	.sync_lock = {
-		.mutex = FIBRIL_RMUTEX_INITIALIZER(rcu.sync_lock.mutex),
-		.locked = false,
-		.blocked_fibrils = LIST_INITIALIZER(rcu.sync_lock.blocked_fibrils),
-	},
-};
-
-
-static void wait_for_readers(size_t reader_group);
-static void force_mb_in_all_threads(void);
-static bool is_preexisting_reader(const fibril_rcu_data_t *fib, size_t group);
-
-static void lock_sync(void);
-static void unlock_sync(void);
-static void sync_sleep(void);
-
-static bool is_in_group(size_t nesting_cnt, size_t group);
-static bool is_in_reader_section(size_t nesting_cnt);
-static size_t get_other_group(size_t group);
-
-
-/** Registers a fibril so it may start using RCU read sections.
- *
- * A fibril must be registered with rcu before it can enter RCU critical
- * sections delineated by rcu_read_lock() and rcu_read_unlock().
- */
-void rcu_register_fibril(void)
-{
-	assert(!fibril_rcu.registered);
-
-	fibril_rmutex_lock(&rcu.list_mutex);
-	list_append(&fibril_rcu.link, &rcu.fibrils_list);
-	fibril_rmutex_unlock(&rcu.list_mutex);
-
-	fibril_rcu.registered = true;
-}
-
-/** Deregisters a fibril that had been using RCU read sections.
- *
- * A fibril must be deregistered before it exits if it had
- * been registered with rcu via rcu_register_fibril().
- */
-void rcu_deregister_fibril(void)
-{
-	assert(fibril_rcu.registered);
-
-	/*
-	 * Forcefully unlock any reader sections. The fibril is exiting
-	 * so it is not holding any references to data protected by the
-	 * rcu section. Therefore, it is safe to unlock. Otherwise,
-	 * rcu_synchronize() would wait indefinitely.
-	 */
-	memory_barrier();
-	fibril_rcu.nesting_cnt = 0;
-
-	fibril_rmutex_lock(&rcu.list_mutex);
-	list_remove(&fibril_rcu.link);
-	fibril_rmutex_unlock(&rcu.list_mutex);
-
-	fibril_rcu.registered = false;
-}
-
-/** Delimits the start of an RCU reader critical section.
- *
- * RCU reader sections may be nested.
- */
-void rcu_read_lock(void)
-{
-	assert(fibril_rcu.registered);
-
-	size_t nesting_cnt = ACCESS_ONCE(fibril_rcu.nesting_cnt);
-
-	if (0 == (nesting_cnt >> RCU_NESTING_SHIFT)) {
-		ACCESS_ONCE(fibril_rcu.nesting_cnt) = ACCESS_ONCE(rcu.reader_group);
-		/* Required by MB_FORCE_L */
-		compiler_barrier(); /* CC_BAR_L */
-	} else {
-		ACCESS_ONCE(fibril_rcu.nesting_cnt) = nesting_cnt + RCU_NESTING_INC;
-	}
-}
-
-/** Delimits the end of an RCU reader critical section. */
-void rcu_read_unlock(void)
-{
-	assert(fibril_rcu.registered);
-	assert(rcu_read_locked());
-
-	/* Required by MB_FORCE_U */
-	compiler_barrier(); /* CC_BAR_U */
-	/* todo: ACCESS_ONCE(nesting_cnt) ? */
-	fibril_rcu.nesting_cnt -= RCU_NESTING_INC;
-}
-
-/** Returns true if the current fibril is in an RCU reader section. */
-bool rcu_read_locked(void)
-{
-	return 0 != (fibril_rcu.nesting_cnt >> RCU_NESTING_SHIFT);
-}
-
-/** Blocks until all preexisting readers exit their critical sections. */
-void rcu_synchronize(void)
-{
-	assert(!rcu_read_locked());
-
-	/* Contain load of rcu.cur_gp. */
-	memory_barrier();
-
-	/* Approximately the number of the GP in progress. */
-	size_t gp_in_progress = ACCESS_ONCE(rcu.cur_gp);
-
-	lock_sync();
-
-	/*
-	 * Exit early if we were stuck waiting for the mutex for a full grace
-	 * period. Started waiting during gp_in_progress (or gp_in_progress + 1
-	 * if the value propagated to this cpu too late) so wait for the next
-	 * full GP, gp_in_progress + 1, to finish. Ie don't wait if the GP
-	 * after that, gp_in_progress + 2, already started.
-	 */
-	/* rcu.cur_gp >= gp_in_progress + 2, but tolerates overflows. */
-	if (rcu.cur_gp != gp_in_progress && rcu.cur_gp + 1 != gp_in_progress) {
-		unlock_sync();
-		return;
-	}
-
-	++ACCESS_ONCE(rcu.cur_gp);
-
-	/*
-	 * Pairs up with MB_FORCE_L (ie CC_BAR_L). Makes changes prior
-	 * to rcu_synchronize() visible to new readers.
-	 */
-	memory_barrier(); /* MB_A */
-
-	/*
-	 * Pairs up with MB_A.
-	 *
-	 * If the memory barrier is issued before CC_BAR_L in the target
-	 * thread, it pairs up with MB_A and the thread sees all changes
-	 * prior to rcu_synchronize(). Ie any reader sections are new
-	 * rcu readers.
-	 *
-	 * If the memory barrier is issued after CC_BAR_L, it pairs up
-	 * with MB_B and it will make the most recent nesting_cnt visible
-	 * in this thread. Since the reader may have already accessed
-	 * memory protected by RCU (it ran instructions passed CC_BAR_L),
-	 * it is a preexisting reader. Seeing the most recent nesting_cnt
-	 * ensures the thread will be identified as a preexisting reader
-	 * and we will wait for it in wait_for_readers(old_reader_group).
-	 */
-	force_mb_in_all_threads(); /* MB_FORCE_L */
-
-	/*
-	 * Pairs with MB_FORCE_L (ie CC_BAR_L, CC_BAR_U) and makes the most
-	 * current fibril.nesting_cnt visible to this cpu.
-	 */
-	read_barrier(); /* MB_B */
-
-	size_t new_reader_group = get_other_group(rcu.reader_group);
-	wait_for_readers(new_reader_group);
-
-	/* Separates waiting for readers in new_reader_group from group flip. */
-	memory_barrier();
-
-	/* Flip the group new readers should associate with. */
-	size_t old_reader_group = rcu.reader_group;
-	rcu.reader_group = new_reader_group;
-
-	/* Flip the group before waiting for preexisting readers in the old group.*/
-	memory_barrier();
-
-	wait_for_readers(old_reader_group);
-
-	/* MB_FORCE_U  */
-	force_mb_in_all_threads(); /* MB_FORCE_U */
-
-	unlock_sync();
-}
-
-/** Issues a memory barrier in each thread of this process. */
-static void force_mb_in_all_threads(void)
-{
-	/*
-	 * Only issue barriers in running threads. The scheduler will
-	 * execute additional memory barriers when switching to threads
-	 * of the process that are currently not running.
-	 */
-	smp_memory_barrier();
-}
-
-/** Waits for readers of reader_group to exit their readers sections. */
-static void wait_for_readers(size_t reader_group)
-{
-	fibril_rmutex_lock(&rcu.list_mutex);
-
-	list_t quiescent_fibrils;
-	list_initialize(&quiescent_fibrils);
-
-	while (!list_empty(&rcu.fibrils_list)) {
-		list_foreach_safe(rcu.fibrils_list, fibril_it, next_fibril) {
-			fibril_rcu_data_t *fib = member_to_inst(fibril_it,
-			    fibril_rcu_data_t, link);
-
-			if (is_preexisting_reader(fib, reader_group)) {
-				fibril_rmutex_unlock(&rcu.list_mutex);
-				sync_sleep();
-				fibril_rmutex_lock(&rcu.list_mutex);
-				/* Break to while loop. */
-				break;
-			} else {
-				list_remove(fibril_it);
-				list_append(fibril_it, &quiescent_fibrils);
-			}
-		}
-	}
-
-	list_concat(&rcu.fibrils_list, &quiescent_fibrils);
-	fibril_rmutex_unlock(&rcu.list_mutex);
-}
-
-static void lock_sync(void)
-{
-	fibril_rmutex_lock(&rcu.sync_lock.mutex);
-	if (rcu.sync_lock.locked) {
-		blocked_fibril_t blocked_fib;
-		blocked_fib.unblock = FIBRIL_EVENT_INIT;
-
-		list_append(&blocked_fib.link, &rcu.sync_lock.blocked_fibrils);
-
-		do {
-			blocked_fib.is_ready = false;
-			fibril_rmutex_unlock(&rcu.sync_lock.mutex);
-			fibril_wait_for(&blocked_fib.unblock);
-			fibril_rmutex_lock(&rcu.sync_lock.mutex);
-		} while (rcu.sync_lock.locked);
-
-		list_remove(&blocked_fib.link);
-		rcu.sync_lock.locked = true;
-	} else {
-		rcu.sync_lock.locked = true;
-	}
-}
-
-static void unlock_sync(void)
-{
-	assert(rcu.sync_lock.locked);
-
-	/* Unlock but wake up any fibrils waiting for the lock. */
-
-	if (!list_empty(&rcu.sync_lock.blocked_fibrils)) {
-		blocked_fibril_t *blocked_fib = member_to_inst(
-		    list_first(&rcu.sync_lock.blocked_fibrils), blocked_fibril_t, link);
-
-		if (!blocked_fib->is_ready) {
-			blocked_fib->is_ready = true;
-			fibril_notify(&blocked_fib->unblock);
-		}
-	}
-
-	rcu.sync_lock.locked = false;
-	fibril_rmutex_unlock(&rcu.sync_lock.mutex);
-}
-
-static void sync_sleep(void)
-{
-	assert(rcu.sync_lock.locked);
-	/*
-	 * Release the futex to avoid deadlocks in singlethreaded apps
-	 * but keep sync locked.
-	 */
-	fibril_rmutex_unlock(&rcu.sync_lock.mutex);
-	fibril_usleep(RCU_SLEEP_MS * 1000);
-	fibril_rmutex_lock(&rcu.sync_lock.mutex);
-}
-
-
-static bool is_preexisting_reader(const fibril_rcu_data_t *fib, size_t group)
-{
-	size_t nesting_cnt = ACCESS_ONCE(fib->nesting_cnt);
-
-	return is_in_group(nesting_cnt, group) && is_in_reader_section(nesting_cnt);
-}
-
-static size_t get_other_group(size_t group)
-{
-	if (group == RCU_GROUP_A)
-		return RCU_GROUP_B;
-	else
-		return RCU_GROUP_A;
-}
-
-static bool is_in_reader_section(size_t nesting_cnt)
-{
-	return RCU_NESTING_INC <= nesting_cnt;
-}
-
-static bool is_in_group(size_t nesting_cnt, size_t group)
-{
-	return (nesting_cnt & RCU_GROUP_BIT_MASK) == (group & RCU_GROUP_BIT_MASK);
-}
-
-
-
-/** @}
- */
Index: uspace/lib/c/generic/thread.c
===================================================================
--- uspace/lib/c/generic/thread.c	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ 	(revision )
@@ -1,204 +1,0 @@
-/*
- * Copyright (c) 2006 Jakub Jermar
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup libc
- * @{
- */
-/** @file
- */
-
-#include <libc.h>
-#include <stdbool.h>
-#include <stdlib.h>
-#include <libarch/faddr.h>
-#include <abi/proc/uarg.h>
-#include <fibril.h>
-#include <stack.h>
-#include <str.h>
-#include <async.h>
-#include <errno.h>
-#include <as.h>
-#include "private/thread.h"
-#include "private/fibril.h"
-
-/** Main thread function.
- *
- * This function is called from __thread_entry() and is used
- * to call the thread's implementing function and perform cleanup
- * and exit when thread returns back.
- *
- * @param uarg Pointer to userspace argument structure.
- *
- */
-void __thread_main(uspace_arg_t *uarg)
-{
-	assert(!__tcb_is_set());
-
-	fibril_t *fibril = uarg->uspace_thread_arg;
-	assert(fibril);
-
-	__tcb_set(fibril->tcb);
-
-	uarg->uspace_thread_function(fibril->arg);
-	/*
-	 * XXX: we cannot free the userspace stack while running on it
-	 *
-	 * free(uarg->uspace_stack);
-	 * free(uarg);
-	 */
-
-	fibril_teardown(fibril);
-	thread_exit(0);
-}
-
-/** Create userspace thread.
- *
- * This function creates new userspace thread and allocates userspace
- * stack and userspace argument structure for it.
- *
- * @param function Function implementing the thread.
- * @param arg Argument to be passed to thread.
- * @param name Symbolic name of the thread.
- * @param tid Thread ID of the newly created thread.
- *
- * @return Zero on success or a code from @ref errno.h on failure.
- */
-errno_t thread_create(void (*function)(void *), void *arg, const char *name,
-    thread_id_t *tid)
-{
-	uspace_arg_t *uarg = calloc(1, sizeof(uspace_arg_t));
-	if (!uarg)
-		return ENOMEM;
-
-	fibril_t *fibril = fibril_alloc();
-	if (!fibril) {
-		free(uarg);
-		return ENOMEM;
-	}
-
-	size_t stack_size = stack_size_get();
-	void *stack = as_area_create(AS_AREA_ANY, stack_size,
-	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE | AS_AREA_GUARD |
-	    AS_AREA_LATE_RESERVE, AS_AREA_UNPAGED);
-	if (stack == AS_MAP_FAILED) {
-		fibril_teardown(fibril);
-		free(uarg);
-		return ENOMEM;
-	}
-
-	fibril->arg = arg;
-	uarg->uspace_entry = (void *) FADDR(__thread_entry);
-	uarg->uspace_stack = stack;
-	uarg->uspace_stack_size = stack_size;
-	uarg->uspace_thread_function = function;
-	uarg->uspace_thread_arg = fibril;
-	uarg->uspace_uarg = uarg;
-
-	errno_t rc = (errno_t) __SYSCALL4(SYS_THREAD_CREATE, (sysarg_t) uarg,
-	    (sysarg_t) name, (sysarg_t) str_size(name), (sysarg_t) tid);
-
-	if (rc != EOK) {
-		/*
-		 * Failed to create a new thread.
-		 * Free up the allocated data.
-		 */
-		as_area_destroy(stack);
-		free(uarg);
-	}
-
-	return rc;
-}
-
-/** Terminate current thread.
- *
- * @param status Exit status. Currently not used.
- *
- */
-void thread_exit(int status)
-{
-	__SYSCALL1(SYS_THREAD_EXIT, (sysarg_t) status);
-
-	/* Unreachable */
-	while (true)
-		;
-}
-
-/** Detach thread.
- *
- * Currently not implemented.
- *
- * @param thread TID.
- */
-void thread_detach(thread_id_t thread)
-{
-}
-
-/** Get current thread ID.
- *
- * @return Current thread ID.
- */
-thread_id_t thread_get_id(void)
-{
-	thread_id_t thread_id;
-
-	(void) __SYSCALL1(SYS_THREAD_GET_ID, (sysarg_t) &thread_id);
-
-	return thread_id;
-}
-
-/** Wait unconditionally for specified number of microseconds
- *
- */
-int thread_usleep(useconds_t usec)
-{
-	(void) __SYSCALL1(SYS_THREAD_USLEEP, usec);
-	return 0;
-}
-
-/** Wait unconditionally for specified number of seconds
- *
- */
-unsigned int thread_sleep(unsigned int sec)
-{
-	/*
-	 * Sleep in 1000 second steps to support
-	 * full argument range
-	 */
-
-	while (sec > 0) {
-		unsigned int period = (sec > 1000) ? 1000 : sec;
-
-		thread_usleep(period * 1000000);
-		sec -= period;
-	}
-
-	return 0;
-}
-
-/** @}
- */
Index: uspace/lib/c/generic/thread/atomic.c
===================================================================
--- uspace/lib/c/generic/thread/atomic.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
+++ uspace/lib/c/generic/thread/atomic.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2018 CZ.NIC, z.s.p.o.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <atomic.h>
+
+#ifdef PLATFORM_arm32
+
+/*
+ * Older ARMs don't have atomic instructions, so we need to define a bunch
+ * of symbols for GCC to use.
+ */
+
+void __sync_synchronize(void)
+{
+	// FIXME: Full memory barrier. We need a syscall for this.
+	// Should we implement this, or is an empty definition ok here?
+}
+
+unsigned __sync_add_and_fetch_4(volatile void *vptr, unsigned val)
+{
+	return atomic_add((atomic_t *)vptr, val);
+}
+
+unsigned __sync_sub_and_fetch_4(volatile void *vptr, unsigned val)
+{
+	return atomic_add((atomic_t *)vptr, -(atomic_signed_t)val);
+}
+
+bool __sync_bool_compare_and_swap_4(volatile void *ptr, unsigned old_val, unsigned new_val)
+{
+	return cas((atomic_t *)ptr, old_val, new_val);
+}
+
+unsigned __sync_val_compare_and_swap_4(volatile void *ptr, unsigned old_val, unsigned new_val)
+{
+	while (true) {
+		if (__sync_bool_compare_and_swap_4(ptr, old_val, new_val)) {
+			return old_val;
+		}
+
+		unsigned current = *(volatile unsigned *)ptr;
+		if (current != old_val)
+			return current;
+
+		/* If the current value is the same as old_val, retry. */
+	}
+}
+
+#endif
Index: uspace/lib/c/generic/thread/fibril.c
===================================================================
--- uspace/lib/c/generic/thread/fibril.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
+++ uspace/lib/c/generic/thread/fibril.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -0,0 +1,924 @@
+/*
+ * Copyright (c) 2006 Ondrej Palkovsky
+ * Copyright (c) 2007 Jakub Jermar
+ * Copyright (c) 2018 CZ.NIC, z.s.p.o.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup libc
+ * @{
+ */
+/** @file
+ */
+
+#include <adt/list.h>
+#include <fibril.h>
+#include <stack.h>
+#include <tls.h>
+#include <stdlib.h>
+#include <as.h>
+#include <context.h>
+#include <assert.h>
+
+#include <mem.h>
+#include <str.h>
+#include <ipc/ipc.h>
+#include <libarch/faddr.h>
+
+#include "../private/thread.h"
+#include "../private/futex.h"
+#include "../private/fibril.h"
+#include "../private/libc.h"
+
+#define DPRINTF(...) ((void)0)
+#undef READY_DEBUG
+
+/** Member of timeout_list. */
+typedef struct {
+	link_t link;
+	struct timeval expires;
+	fibril_event_t *event;
+} _timeout_t;
+
+typedef struct {
+	errno_t rc;
+	link_t link;
+	ipc_call_t *call;
+	fibril_event_t event;
+} _ipc_waiter_t;
+
+typedef struct {
+	errno_t rc;
+	link_t link;
+	ipc_call_t call;
+} _ipc_buffer_t;
+
+typedef enum {
+	SWITCH_FROM_DEAD,
+	SWITCH_FROM_HELPER,
+	SWITCH_FROM_YIELD,
+	SWITCH_FROM_BLOCKED,
+} _switch_type_t;
+
+static bool multithreaded = false;
+
+/* This futex serializes access to global data. */
+static futex_t fibril_futex = FUTEX_INITIALIZER;
+static futex_t ready_semaphore = FUTEX_INITIALIZE(0);
+static long ready_st_count;
+
+static LIST_INITIALIZE(ready_list);
+static LIST_INITIALIZE(fibril_list);
+static LIST_INITIALIZE(timeout_list);
+
+static futex_t ipc_lists_futex = FUTEX_INITIALIZER;
+static LIST_INITIALIZE(ipc_waiter_list);
+static LIST_INITIALIZE(ipc_buffer_list);
+static LIST_INITIALIZE(ipc_buffer_free_list);
+
+/* Only used as unique markers for triggered events. */
+static fibril_t _fibril_event_triggered;
+static fibril_t _fibril_event_timed_out;
+#define _EVENT_INITIAL   (NULL)
+#define _EVENT_TRIGGERED (&_fibril_event_triggered)
+#define _EVENT_TIMED_OUT (&_fibril_event_timed_out)
+
+static inline void _ready_debug_check(void)
+{
+#ifdef READY_DEBUG
+	assert(!multithreaded);
+	long count = (long) list_count(&ready_list) +
+	    (long) list_count(&ipc_buffer_free_list);
+	assert(ready_st_count == count);
+#endif
+}
+
+static inline long _ready_count(void)
+{
+	/*
+	 * The number of available tokens is always equal to the number
+	 * of fibrils in the ready list + the number of free IPC buffer
+	 * buckets.
+	 */
+
+	if (multithreaded)
+		return atomic_get(&ready_semaphore.val);
+
+	_ready_debug_check();
+	return ready_st_count;
+}
+
+static inline void _ready_up(void)
+{
+	if (multithreaded) {
+		futex_up(&ready_semaphore);
+	} else {
+		ready_st_count++;
+		_ready_debug_check();
+	}
+}
+
+static inline errno_t _ready_down(const struct timeval *expires)
+{
+	if (multithreaded)
+		return futex_down_timeout(&ready_semaphore, expires);
+
+	_ready_debug_check();
+	ready_st_count--;
+	return EOK;
+}
+
+static atomic_t threads_in_ipc_wait = { 0 };
+
+/** Function that spans the whole life-cycle of a fibril.
+ *
+ * Each fibril begins execution in this function. Then the function implementing
+ * the fibril logic is called.  After its return, the return value is saved.
+ * The fibril then switches to another fibril, which cleans up after it.
+ *
+ */
+static void _fibril_main(void)
+{
+	/* fibril_futex is locked when a fibril is started. */
+	futex_unlock(&fibril_futex);
+
+	fibril_t *fibril = fibril_self();
+
+	/* Call the implementing function. */
+	fibril_exit(fibril->func(fibril->arg));
+
+	/* Not reached */
+}
+
+/** Allocate a fibril structure and TCB, but don't do anything else with it. */
+fibril_t *fibril_alloc(void)
+{
+	tcb_t *tcb = tls_make(__progsymbols.elfstart);
+	if (!tcb)
+		return NULL;
+
+	fibril_t *fibril = calloc(1, sizeof(fibril_t));
+	if (!fibril) {
+		tls_free(tcb);
+		return NULL;
+	}
+
+	tcb->fibril_data = fibril;
+	fibril->tcb = tcb;
+	fibril->is_freeable = true;
+
+	fibril_setup(fibril);
+	return fibril;
+}
+
+/**
+ * Put the fibril into fibril_list.
+ */
+void fibril_setup(fibril_t *f)
+{
+	futex_lock(&fibril_futex);
+	list_append(&f->all_link, &fibril_list);
+	futex_unlock(&fibril_futex);
+}
+
+void fibril_teardown(fibril_t *fibril)
+{
+	futex_lock(&fibril_futex);
+	list_remove(&fibril->all_link);
+	futex_unlock(&fibril_futex);
+
+	if (fibril->is_freeable) {
+		tls_free(fibril->tcb);
+		free(fibril);
+	}
+}
+
+/**
+ * Event notification with a given reason.
+ *
+ * @param reason  Reason of the notification.
+ *                Can be either _EVENT_TRIGGERED or _EVENT_TIMED_OUT.
+ */
+static fibril_t *_fibril_trigger_internal(fibril_event_t *event, fibril_t *reason)
+{
+	assert(reason != _EVENT_INITIAL);
+	assert(reason == _EVENT_TIMED_OUT || reason == _EVENT_TRIGGERED);
+
+	futex_assert_is_locked(&fibril_futex);
+
+	if (event->fibril == _EVENT_INITIAL) {
+		event->fibril = reason;
+		return NULL;
+	}
+
+	if (event->fibril == _EVENT_TIMED_OUT) {
+		assert(reason == _EVENT_TRIGGERED);
+		event->fibril = reason;
+		return NULL;
+	}
+
+	if (event->fibril == _EVENT_TRIGGERED) {
+		/* Already triggered. Nothing to do. */
+		return NULL;
+	}
+
+	fibril_t *f = event->fibril;
+	event->fibril = reason;
+
+	assert(f->sleep_event == event);
+	return f;
+}
+
+static errno_t _ipc_wait(ipc_call_t *call, const struct timeval *expires)
+{
+	if (!expires)
+		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
+
+	if (expires->tv_sec == 0)
+		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);
+
+	struct timeval now;
+	getuptime(&now);
+
+	if (tv_gteq(&now, expires))
+		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);
+
+	return ipc_wait(call, tv_sub_diff(expires, &now), SYNCH_FLAGS_NONE);
+}
+
+/*
+ * Waits until a ready fibril is added to the list, or an IPC message arrives.
+ * Returns NULL on timeout and may also return NULL if returning from IPC
+ * wait after new ready fibrils are added.
+ */
+static fibril_t *_ready_list_pop(const struct timeval *expires, bool locked)
+{
+	if (locked) {
+		futex_assert_is_locked(&fibril_futex);
+		assert(expires);
+		/* Must be nonblocking. */
+		assert(expires->tv_sec == 0);
+	} else {
+		futex_assert_is_not_locked(&fibril_futex);
+	}
+
+	errno_t rc = _ready_down(expires);
+	if (rc != EOK)
+		return NULL;
+
+	/*
+	 * Once we acquire a token from ready_semaphore, there are two options.
+	 * Either there is a ready fibril in the list, or it's our turn to
+	 * call `_ipc_wait()`. There is one extra token on the semaphore
+	 * for each entry of the call buffer.
+	 */
+
+
+	if (!locked)
+		futex_lock(&fibril_futex);
+	fibril_t *f = list_pop(&ready_list, fibril_t, link);
+	if (!f)
+		atomic_inc(&threads_in_ipc_wait);
+	if (!locked)
+		futex_unlock(&fibril_futex);
+
+	if (f)
+		return f;
+
+	if (!multithreaded)
+		assert(list_empty(&ipc_buffer_list));
+
+	/* No fibril is ready, IPC wait it is. */
+	ipc_call_t call = { 0 };
+	rc = _ipc_wait(&call, expires);
+
+	atomic_dec(&threads_in_ipc_wait);
+
+	if (rc != EOK && rc != ENOENT) {
+		/* Return token. */
+		_ready_up();
+		return NULL;
+	}
+
+	/*
+	 * We might get ENOENT due to a poke.
+	 * In that case, we propagate the null call out of fibril_ipc_wait(),
+	 * because poke must result in that call returning.
+	 */
+
+	/*
+	 * If a fibril is already waiting for IPC, we wake up the fibril,
+	 * and return the token to ready_semaphore.
+	 * If there is no fibril waiting, we pop a buffer bucket and
+	 * put our call there. The token then returns when the bucket is
+	 * returned.
+	 */
+
+	if (!locked)
+		futex_lock(&fibril_futex);
+
+	futex_lock(&ipc_lists_futex);
+
+
+	_ipc_waiter_t *w = list_pop(&ipc_waiter_list, _ipc_waiter_t, link);
+	if (w) {
+		*w->call = call;
+		w->rc = rc;
+		/* We switch to the woken up fibril immediately if possible. */
+		f = _fibril_trigger_internal(&w->event, _EVENT_TRIGGERED);
+
+		/* Return token. */
+		_ready_up();
+	} else {
+		_ipc_buffer_t *buf = list_pop(&ipc_buffer_free_list, _ipc_buffer_t, link);
+		assert(buf);
+		*buf = (_ipc_buffer_t) { .call = call, .rc = rc };
+		list_append(&buf->link, &ipc_buffer_list);
+	}
+
+	futex_unlock(&ipc_lists_futex);
+
+	if (!locked)
+		futex_unlock(&fibril_futex);
+
+	return f;
+}
+
+static fibril_t *_ready_list_pop_nonblocking(bool locked)
+{
+	struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
+	return _ready_list_pop(&tv, locked);
+}
+
+static void _ready_list_push(fibril_t *f)
+{
+	if (!f)
+		return;
+
+	futex_assert_is_locked(&fibril_futex);
+
+	/* Enqueue in ready_list. */
+	list_append(&f->link, &ready_list);
+	_ready_up();
+
+	if (atomic_get(&threads_in_ipc_wait)) {
+		DPRINTF("Poking.\n");
+		/* Wakeup one thread sleeping in SYS_IPC_WAIT. */
+		ipc_poke();
+	}
+}
+
+/* Blocks the current fibril until an IPC call arrives. */
+static errno_t _wait_ipc(ipc_call_t *call, const struct timeval *expires)
+{
+	futex_assert_is_not_locked(&fibril_futex);
+
+	futex_lock(&ipc_lists_futex);
+	_ipc_buffer_t *buf = list_pop(&ipc_buffer_list, _ipc_buffer_t, link);
+	if (buf) {
+		*call = buf->call;
+		errno_t rc = buf->rc;
+
+		/* Return to freelist. */
+		list_append(&buf->link, &ipc_buffer_free_list);
+		/* Return IPC wait token. */
+		_ready_up();
+
+		futex_unlock(&ipc_lists_futex);
+		return rc;
+	}
+
+	_ipc_waiter_t w = { .call = call };
+	list_append(&w.link, &ipc_waiter_list);
+	futex_unlock(&ipc_lists_futex);
+
+	errno_t rc = fibril_wait_timeout(&w.event, expires);
+	if (rc == EOK)
+		return w.rc;
+
+	futex_lock(&ipc_lists_futex);
+	if (link_in_use(&w.link))
+		list_remove(&w.link);
+	else
+		rc = w.rc;
+	futex_unlock(&ipc_lists_futex);
+	return rc;
+}
+
+/** Fire all timeouts that expired. */
+static struct timeval *_handle_expired_timeouts(struct timeval *next_timeout)
+{
+	struct timeval tv;
+	getuptime(&tv);
+
+	futex_lock(&fibril_futex);
+
+	while (!list_empty(&timeout_list)) {
+		link_t *cur = list_first(&timeout_list);
+		_timeout_t *to = list_get_instance(cur, _timeout_t, link);
+
+		if (tv_gt(&to->expires, &tv)) {
+			*next_timeout = to->expires;
+			futex_unlock(&fibril_futex);
+			return next_timeout;
+		}
+
+		list_remove(&to->link);
+
+		_ready_list_push(_fibril_trigger_internal(
+		    to->event, _EVENT_TIMED_OUT));
+	}
+
+	futex_unlock(&fibril_futex);
+	return NULL;
+}
+
+/**
+ * Clean up after a dead fibril from which we restored context, if any.
+ * Called after a switch is made and fibril_futex is unlocked.
+ */
+static void _fibril_cleanup_dead(void)
+{
+	fibril_t *srcf = fibril_self();
+	if (!srcf->clean_after_me)
+		return;
+
+	void *stack = srcf->clean_after_me->stack;
+	assert(stack);
+	as_area_destroy(stack);
+	fibril_teardown(srcf->clean_after_me);
+	srcf->clean_after_me = NULL;
+}
+
+/** Switch to a fibril. */
+static void _fibril_switch_to(_switch_type_t type, fibril_t *dstf, bool locked)
+{
+	assert(fibril_self()->rmutex_locks == 0);
+
+	if (!locked)
+		futex_lock(&fibril_futex);
+	else
+		futex_assert_is_locked(&fibril_futex);
+
+	fibril_t *srcf = fibril_self();
+	assert(srcf);
+	assert(dstf);
+
+	switch (type) {
+	case SWITCH_FROM_YIELD:
+		_ready_list_push(srcf);
+		break;
+	case SWITCH_FROM_DEAD:
+		dstf->clean_after_me = srcf;
+		break;
+	case SWITCH_FROM_HELPER:
+	case SWITCH_FROM_BLOCKED:
+		break;
+	}
+
+	dstf->thread_ctx = srcf->thread_ctx;
+	srcf->thread_ctx = NULL;
+
+	/* Just some bookkeeping to allow better debugging of futex locks. */
+	futex_give_to(&fibril_futex, dstf);
+
+	/* Swap to the next fibril. */
+	context_swap(&srcf->ctx, &dstf->ctx);
+
+	assert(srcf == fibril_self());
+	assert(srcf->thread_ctx);
+
+	if (!locked) {
+		/* Must be after context_swap()! */
+		futex_unlock(&fibril_futex);
+		_fibril_cleanup_dead();
+	}
+}
+
+/**
+ * Main function for a helper fibril.
+ * The helper fibril executes on threads in the lightweight fibril pool when
+ * there is no fibril ready to run. Its only purpose is to block until
+ * another fibril is ready, or a timeout expires, or an IPC message arrives.
+ *
+ * There is at most one helper fibril per thread.
+ *
+ */
+static errno_t _helper_fibril_fn(void *arg)
+{
+	/* Set itself as the thread's own context. */
+	fibril_self()->thread_ctx = fibril_self();
+
+	(void) arg;
+
+	struct timeval next_timeout;
+	while (true) {
+		struct timeval *to = _handle_expired_timeouts(&next_timeout);
+		fibril_t *f = _ready_list_pop(to, false);
+		if (f) {
+			_fibril_switch_to(SWITCH_FROM_HELPER, f, false);
+		}
+	}
+
+	return EOK;
+}
+
+/** Create a new fibril.
+ *
+ * @param func Implementing function of the new fibril.
+ * @param arg Argument to pass to func.
+ * @param stksz Stack size in bytes.
+ *
+ * @return 0 on failure, or the fibril ID (fid_t) of the new fibril.
+ *
+ */
+fid_t fibril_create_generic(errno_t (*func)(void *), void *arg, size_t stksz)
+{
+	fibril_t *fibril;
+
+	fibril = fibril_alloc();
+	if (fibril == NULL)
+		return 0;
+
+	fibril->stack_size = (stksz == FIBRIL_DFLT_STK_SIZE) ?
+	    stack_size_get() : stksz;
+	fibril->stack = as_area_create(AS_AREA_ANY, fibril->stack_size,
+	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE | AS_AREA_GUARD |
+	    AS_AREA_LATE_RESERVE, AS_AREA_UNPAGED);
+	if (fibril->stack == AS_MAP_FAILED) {
+		fibril_teardown(fibril);
+		return 0;
+	}
+
+	fibril->func = func;
+	fibril->arg = arg;
+
+	context_create_t sctx = {
+		.fn = _fibril_main,
+		.stack_base = fibril->stack,
+		.stack_size = fibril->stack_size,
+		.tls = fibril->tcb,
+	};
+
+	context_create(&fibril->ctx, &sctx);
+	return (fid_t) fibril;
+}
+
+/** Delete a fibril that has never run.
+ *
+ * Free resources of a fibril that has been created with fibril_create()
+ * but never started using fibril_start().
+ *
+ * @param fid Pointer to the fibril structure of the fibril to be
+ *            destroyed.
+ */
+void fibril_destroy(fid_t fid)
+{
+	fibril_t *fibril = (fibril_t *) fid;
+
+	assert(!fibril->is_running);
+	assert(fibril->stack);
+	as_area_destroy(fibril->stack);
+	fibril_teardown(fibril);
+}
+
+static void _insert_timeout(_timeout_t *timeout)
+{
+	futex_assert_is_locked(&fibril_futex);
+	assert(timeout);
+
+	link_t *tmp = timeout_list.head.next;
+	while (tmp != &timeout_list.head) {
+		_timeout_t *cur = list_get_instance(tmp, _timeout_t, link);
+
+		if (tv_gteq(&cur->expires, &timeout->expires))
+			break;
+
+		tmp = tmp->next;
+	}
+
+	list_insert_before(&timeout->link, tmp);
+}
+
+/**
+ * Same as `fibril_wait_for()`, except with a timeout.
+ *
+ * It is guaranteed that timing out cannot cause another thread's
+ * `fibril_notify()` to be lost. I.e. the function returns success if and
+ * only if `fibril_notify()` was called after the last call to
+ * wait/wait_timeout returned, and before the call timed out.
+ *
+ * @return ETIMEOUT if timed out. EOK otherwise.
+ */
+errno_t fibril_wait_timeout(fibril_event_t *event, const struct timeval *expires)
+{
+	assert(fibril_self()->rmutex_locks == 0);
+
+	DPRINTF("### Fibril %p sleeping on event %p.\n", fibril_self(), event);
+
+	if (!fibril_self()->thread_ctx) {
+		fibril_self()->thread_ctx =
+		    fibril_create_generic(_helper_fibril_fn, NULL, PAGE_SIZE);
+		if (!fibril_self()->thread_ctx)
+			return ENOMEM;
+	}
+
+	futex_lock(&fibril_futex);
+
+	if (event->fibril == _EVENT_TRIGGERED) {
+		DPRINTF("### Already triggered. Returning. \n");
+		event->fibril = _EVENT_INITIAL;
+		futex_unlock(&fibril_futex);
+		return EOK;
+	}
+
+	assert(event->fibril == _EVENT_INITIAL);
+
+	fibril_t *srcf = fibril_self();
+	fibril_t *dstf = NULL;
+
+	/*
+	 * We cannot block here waiting for another fibril becoming
+	 * ready, since that would require unlocking the fibril_futex,
+	 * and that in turn would allow another thread to restore
+	 * the source fibril before this thread finished switching.
+	 *
+	 * Instead, we switch to an internal "helper" fibril whose only
+	 * job is to wait for an event, freeing the source fibril for
+	 * wakeups. There is always one for each running thread.
+	 */
+
+	dstf = _ready_list_pop_nonblocking(true);
+	if (!dstf) {
+		// XXX: It is possible for the _ready_list_pop_nonblocking() to
+		//      check for IPC, find a pending message, and trigger the
+		//      event on which we are currently trying to sleep.
+		if (event->fibril == _EVENT_TRIGGERED) {
+			event->fibril = _EVENT_INITIAL;
+			futex_unlock(&fibril_futex);
+			return EOK;
+		}
+
+		dstf = srcf->thread_ctx;
+		assert(dstf);
+	}
+
+	_timeout_t timeout = { 0 };
+	if (expires) {
+		timeout.expires = *expires;
+		timeout.event = event;
+		_insert_timeout(&timeout);
+	}
+
+	assert(srcf);
+
+	event->fibril = srcf;
+	srcf->sleep_event = event;
+
+	assert(event->fibril != _EVENT_INITIAL);
+
+	_fibril_switch_to(SWITCH_FROM_BLOCKED, dstf, true);
+
+	assert(event->fibril != srcf);
+	assert(event->fibril != _EVENT_INITIAL);
+	assert(event->fibril == _EVENT_TIMED_OUT || event->fibril == _EVENT_TRIGGERED);
+
+	list_remove(&timeout.link);
+	errno_t rc = (event->fibril == _EVENT_TIMED_OUT) ? ETIMEOUT : EOK;
+	event->fibril = _EVENT_INITIAL;
+
+	futex_unlock(&fibril_futex);
+	_fibril_cleanup_dead();
+	return rc;
+}
+
+void fibril_wait_for(fibril_event_t *event)
+{
+	assert(fibril_self()->rmutex_locks == 0);
+
+	(void) fibril_wait_timeout(event, NULL);
+}
+
+/**
+ * Wake up the fibril waiting for the given event.
+ * Up to one wakeup is remembered if the fibril is not currently waiting.
+ *
+ * This function is safe for use under restricted mutex lock.
+ */
+void fibril_notify(fibril_event_t *event)
+{
+	futex_lock(&fibril_futex);
+	_ready_list_push(_fibril_trigger_internal(event, _EVENT_TRIGGERED));
+	futex_unlock(&fibril_futex);
+}
+
+/** Start a fibril that has not been running yet. */
+void fibril_start(fibril_t *fibril)
+{
+	futex_lock(&fibril_futex);
+	assert(!fibril->is_running);
+	fibril->is_running = true;
+
+	if (!link_in_use(&fibril->all_link))
+		list_append(&fibril->all_link, &fibril_list);
+
+	_ready_list_push(fibril);
+
+	futex_unlock(&fibril_futex);
+}
+
+/** Start a fibril that has not been running yet. (obsolete) */
+void fibril_add_ready(fibril_t *fibril)
+{
+	fibril_start(fibril);
+}
+
+/** @return the currently running fibril. */
+fibril_t *fibril_self(void)
+{
+	assert(__tcb_is_set());
+	tcb_t *tcb = __tcb_get();
+	assert(tcb->fibril_data);
+	return tcb->fibril_data;
+}
+
+/**
+ * Obsolete, use fibril_self().
+ *
+ * @return ID of the currently running fibril.
+ */
+fid_t fibril_get_id(void)
+{
+	return (fid_t) fibril_self();
+}
+
+/**
+ * Switch to another fibril, if one is ready to run.
+ * Has no effect on a heavy fibril, or while a restricted mutex is held.
+ */
+void fibril_yield(void)
+{
+	if (fibril_self()->rmutex_locks > 0)
+		return;
+
+	fibril_t *f = _ready_list_pop_nonblocking(false);
+	if (f)
+		_fibril_switch_to(SWITCH_FROM_YIELD, f, false);
+}
+
+static void _runner_fn(void *arg)
+{
+	_helper_fibril_fn(arg);
+}
+
+/**
+ * Spawn a given number of runners (i.e. OS threads) immediately, and
+ * unconditionally. This is meant to be used for tests and debugging.
+ * Regular programs should just use `fibril_enable_multithreaded()`.
+ *
+ * @param n  Number of runners to spawn.
+ * @return   Number of runners successfully spawned.
+ */
+int fibril_test_spawn_runners(int n)
+{
+	assert(fibril_self()->rmutex_locks == 0);
+
+	if (!multithreaded) {
+		_ready_debug_check();
+		atomic_set(&ready_semaphore.val, ready_st_count);
+		multithreaded = true;
+	}
+
+	errno_t rc;
+
+	for (int i = 0; i < n; i++) {
+		thread_id_t tid;
+		rc = thread_create(_runner_fn, NULL, "fibril runner", &tid);
+		if (rc != EOK)
+			return i;
+		thread_detach(tid);
+	}
+
+	return n;
+}
+
+/**
+ * Opt-in to have more than one runner thread.
+ *
+ * Currently, a task only ever runs in one thread because multithreading
+ * might break some existing code.
+ *
+ * Eventually, the number of runner threads for a given task should become
+ * configurable in the environment and this function becomes no-op.
+ */
+void fibril_enable_multithreaded(void)
+{
+	// TODO: Implement better.
+	//       For now, 4 total runners is a sensible default.
+	if (!multithreaded) {
+		fibril_test_spawn_runners(3);
+	}
+}
+
+/**
+ * Detach a fibril.
+ */
+void fibril_detach(fid_t f)
+{
+	// TODO: Currently all fibrils are detached by default, but they
+	//       won't always be. Code that explicitly spawns fibrils with
+	//       limited lifetime should call this function.
+}
+
+/**
+ * Exit a fibril. Never returns.
+ *
+ * @param retval  Value to return from fibril_join() called on this fibril.
+ */
+_Noreturn void fibril_exit(long retval)
+{
+	// TODO: implement fibril_join() and remember retval
+	(void) retval;
+
+	fibril_t *f = _ready_list_pop_nonblocking(false);
+	if (!f)
+		f = fibril_self()->thread_ctx;
+
+	_fibril_switch_to(SWITCH_FROM_DEAD, f, false);
+	__builtin_unreachable();
+}
+
+void __fibrils_init(void)
+{
+	/*
+	 * We allow a fixed, small amount of parallelism for IPC reads, but
+	 * since IPC is currently serialized in kernel, there's not much
+	 * we can get from more threads reading messages.
+	 */
+
+#define IPC_BUFFER_COUNT 1024
+	static _ipc_buffer_t buffers[IPC_BUFFER_COUNT];
+
+	for (int i = 0; i < IPC_BUFFER_COUNT; i++) {
+		list_append(&buffers[i].link, &ipc_buffer_free_list);
+		_ready_up();
+	}
+}
+
+void fibril_usleep(suseconds_t timeout)
+{
+	struct timeval expires;
+	getuptime(&expires);
+	tv_add_diff(&expires, timeout);
+
+	fibril_event_t event = FIBRIL_EVENT_INIT;
+	fibril_wait_timeout(&event, &expires);
+}
+
+void fibril_sleep(unsigned int sec)
+{
+	struct timeval expires;
+	getuptime(&expires);
+	expires.tv_sec += sec;
+
+	fibril_event_t event = FIBRIL_EVENT_INIT;
+	fibril_wait_timeout(&event, &expires);
+}
+
+void fibril_ipc_poke(void)
+{
+	DPRINTF("Poking.\n");
+	/* Wakeup one thread sleeping in SYS_IPC_WAIT. */
+	ipc_poke();
+}
+
+errno_t fibril_ipc_wait(ipc_call_t *call, const struct timeval *expires)
+{
+	return _wait_ipc(call, expires);
+}
+
+/** @}
+ */
Index: uspace/lib/c/generic/thread/fibril_synch.c
===================================================================
--- uspace/lib/c/generic/thread/fibril_synch.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
+++ uspace/lib/c/generic/thread/fibril_synch.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -0,0 +1,795 @@
+/*
+ * Copyright (c) 2009 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup libc
+ * @{
+ */
+/** @file
+ */
+
+#include <fibril_synch.h>
+#include <fibril.h>
+#include <async.h>
+#include <adt/list.h>
+#include <sys/time.h>
+#include <errno.h>
+#include <assert.h>
+#include <stacktrace.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <io/kio.h>
+#include <mem.h>
+#include <context.h>
+
+#include "../private/async.h"
+#include "../private/fibril.h"
+#include "../private/futex.h"
+
+void fibril_rmutex_initialize(fibril_rmutex_t *m)
+{
+	futex_initialize(&m->futex, 1);
+}
+
+/**
+ * Lock restricted mutex.
+ * When a restricted mutex is locked, the fibril may not sleep or create new
+ * threads. Any attempt to do so will abort the program.
+ */
+void fibril_rmutex_lock(fibril_rmutex_t *m)
+{
+	futex_lock(&m->futex);
+	fibril_self()->rmutex_locks++;
+}
+
+bool fibril_rmutex_trylock(fibril_rmutex_t *m)
+{
+	if (futex_trylock(&m->futex)) {
+		fibril_self()->rmutex_locks++;
+		return true;
+	} else {
+		return false;
+	}
+}
+
+void fibril_rmutex_unlock(fibril_rmutex_t *m)
+{
+	fibril_self()->rmutex_locks--;
+	futex_unlock(&m->futex);
+}
+
+static fibril_local bool deadlocked = false;
+
+static futex_t fibril_synch_futex = FUTEX_INITIALIZER;
+
+typedef struct {
+	link_t link;
+	fibril_event_t event;
+	fibril_mutex_t *mutex;
+	fid_t fid;
+} awaiter_t;
+
+#define AWAITER_INIT { .fid = fibril_get_id() }
+
+static void print_deadlock(fibril_owner_info_t *oi)
+{
+	// FIXME: Print to stderr.
+
+	fibril_t *f = (fibril_t *) fibril_get_id();
+
+	if (deadlocked) {
+		kio_printf("Deadlock detected while printing deadlock. Aborting.\n");
+		abort();
+	}
+	deadlocked = true;
+
+	printf("Deadlock detected.\n");
+	stacktrace_print();
+
+	printf("Fibril %p waits for primitive %p.\n", f, oi);
+
+	while (oi && oi->owned_by) {
+		printf("Primitive %p is owned by fibril %p.\n",
+		    oi, oi->owned_by);
+		if (oi->owned_by == f)
+			break;
+		stacktrace_print_fp_pc(
+		    context_get_fp(&oi->owned_by->ctx),
+		    context_get_pc(&oi->owned_by->ctx));
+		printf("Fibril %p waits for primitive %p.\n",
+		    oi->owned_by, oi->owned_by->waits_for);
+		oi = oi->owned_by->waits_for;
+	}
+}
+
+
+static void check_fibril_for_deadlock(fibril_owner_info_t *oi, fibril_t *fib)
+{
+	futex_assert_is_locked(&fibril_synch_futex);
+
+	while (oi && oi->owned_by) {
+		if (oi->owned_by == fib) {
+			futex_unlock(&fibril_synch_futex);
+			print_deadlock(oi);
+			abort();
+		}
+		oi = oi->owned_by->waits_for;
+	}
+}
+
+static void check_for_deadlock(fibril_owner_info_t *oi)
+{
+	check_fibril_for_deadlock(oi, fibril_self());
+}
+
+void fibril_mutex_initialize(fibril_mutex_t *fm)
+{
+	fm->oi.owned_by = NULL;
+	fm->counter = 1;
+	list_initialize(&fm->waiters);
+}
+
+void fibril_mutex_lock(fibril_mutex_t *fm)
+{
+	fibril_t *f = (fibril_t *) fibril_get_id();
+
+	futex_lock(&fibril_synch_futex);
+
+	if (fm->counter-- > 0) {
+		fm->oi.owned_by = f;
+		futex_unlock(&fibril_synch_futex);
+		return;
+	}
+
+	awaiter_t wdata = AWAITER_INIT;
+	list_append(&wdata.link, &fm->waiters);
+	check_for_deadlock(&fm->oi);
+	f->waits_for = &fm->oi;
+
+	futex_unlock(&fibril_synch_futex);
+
+	fibril_wait_for(&wdata.event);
+}
+
+bool fibril_mutex_trylock(fibril_mutex_t *fm)
+{
+	bool locked = false;
+
+	futex_lock(&fibril_synch_futex);
+	if (fm->counter > 0) {
+		fm->counter--;
+		fm->oi.owned_by = (fibril_t *) fibril_get_id();
+		locked = true;
+	}
+	futex_unlock(&fibril_synch_futex);
+
+	return locked;
+}
+
+static void _fibril_mutex_unlock_unsafe(fibril_mutex_t *fm)
+{
+	assert(fm->oi.owned_by == (fibril_t *) fibril_get_id());
+
+	if (fm->counter++ < 0) {
+		awaiter_t *wdp = list_pop(&fm->waiters, awaiter_t, link);
+		assert(wdp);
+
+		fibril_t *f = (fibril_t *) wdp->fid;
+		fm->oi.owned_by = f;
+		f->waits_for = NULL;
+
+		fibril_notify(&wdp->event);
+	} else {
+		fm->oi.owned_by = NULL;
+	}
+}
+
+void fibril_mutex_unlock(fibril_mutex_t *fm)
+{
+	futex_lock(&fibril_synch_futex);
+	_fibril_mutex_unlock_unsafe(fm);
+	futex_unlock(&fibril_synch_futex);
+}
+
+bool fibril_mutex_is_locked(fibril_mutex_t *fm)
+{
+	futex_lock(&fibril_synch_futex);
+	bool locked = (fm->oi.owned_by == (fibril_t *) fibril_get_id());
+	futex_unlock(&fibril_synch_futex);
+	return locked;
+}
+
+void fibril_rwlock_initialize(fibril_rwlock_t *frw)
+{
+	frw->oi.owned_by = NULL;
+	frw->writers = 0;
+	frw->readers = 0;
+	list_initialize(&frw->waiters);
+}
+
+void fibril_rwlock_read_lock(fibril_rwlock_t *frw)
+{
+	fibril_t *f = (fibril_t *) fibril_get_id();
+
+	futex_lock(&fibril_synch_futex);
+
+	if (!frw->writers) {
+		/* Consider the first reader the owner. */
+		if (frw->readers++ == 0)
+			frw->oi.owned_by = f;
+		futex_unlock(&fibril_synch_futex);
+		return;
+	}
+
+	f->is_writer = false;
+
+	awaiter_t wdata = AWAITER_INIT;
+	list_append(&wdata.link, &frw->waiters);
+	check_for_deadlock(&frw->oi);
+	f->waits_for = &frw->oi;
+
+	futex_unlock(&fibril_synch_futex);
+
+	fibril_wait_for(&wdata.event);
+}
+
+void fibril_rwlock_write_lock(fibril_rwlock_t *frw)
+{
+	fibril_t *f = (fibril_t *) fibril_get_id();
+
+	futex_lock(&fibril_synch_futex);
+
+	if (!frw->writers && !frw->readers) {
+		frw->oi.owned_by = f;
+		frw->writers++;
+		futex_unlock(&fibril_synch_futex);
+		return;
+	}
+
+	f->is_writer = true;
+
+	awaiter_t wdata = AWAITER_INIT;
+	list_append(&wdata.link, &frw->waiters);
+	check_for_deadlock(&frw->oi);
+	f->waits_for = &frw->oi;
+
+	futex_unlock(&fibril_synch_futex);
+
+	fibril_wait_for(&wdata.event);
+}
+
+static void _fibril_rwlock_common_unlock(fibril_rwlock_t *frw)
+{
+	if (frw->readers) {
+		if (--frw->readers) {
+			if (frw->oi.owned_by == (fibril_t *) fibril_get_id()) {
+				/*
+				 * If this reader fibril was considered the
+				 * owner of this rwlock, clear the ownership
+				 * information even if there are still more
+				 * readers.
+				 *
+				 * This is the limitation of the detection
+				 * mechanism rooted in the fact that tracking
+				 * all readers would require dynamically
+				 * allocated memory for keeping linkage info.
+				 */
+				frw->oi.owned_by = NULL;
+			}
+
+			return;
+		}
+	} else {
+		frw->writers--;
+	}
+
+	assert(!frw->readers && !frw->writers);
+
+	frw->oi.owned_by = NULL;
+
+	while (!list_empty(&frw->waiters)) {
+		link_t *tmp = list_first(&frw->waiters);
+		awaiter_t *wdp;
+		fibril_t *f;
+
+		wdp = list_get_instance(tmp, awaiter_t, link);
+		f = (fibril_t *) wdp->fid;
+
+		if (f->is_writer) {
+			if (frw->readers)
+				break;
+			frw->writers++;
+		} else {
+			frw->readers++;
+		}
+
+		f->waits_for = NULL;
+		list_remove(&wdp->link);
+		frw->oi.owned_by = f;
+		fibril_notify(&wdp->event);
+
+		if (frw->writers)
+			break;
+	}
+}
+
+void fibril_rwlock_read_unlock(fibril_rwlock_t *frw)
+{
+	futex_lock(&fibril_synch_futex);
+	assert(frw->readers > 0);
+	_fibril_rwlock_common_unlock(frw);
+	futex_unlock(&fibril_synch_futex);
+}
+
+void fibril_rwlock_write_unlock(fibril_rwlock_t *frw)
+{
+	futex_lock(&fibril_synch_futex);
+	assert(frw->writers == 1);
+	assert(frw->oi.owned_by == fibril_self());
+	_fibril_rwlock_common_unlock(frw);
+	futex_unlock(&fibril_synch_futex);
+}
+
+bool fibril_rwlock_is_read_locked(fibril_rwlock_t *frw)
+{
+	futex_lock(&fibril_synch_futex);
+	bool locked = (frw->readers > 0);
+	futex_unlock(&fibril_synch_futex);
+	return locked;
+}
+
+bool fibril_rwlock_is_write_locked(fibril_rwlock_t *frw)
+{
+	futex_lock(&fibril_synch_futex);
+	assert(frw->writers <= 1);
+	bool locked = (frw->writers > 0) && (frw->oi.owned_by == fibril_self());
+	futex_unlock(&fibril_synch_futex);
+	return locked;
+}
+
+bool fibril_rwlock_is_locked(fibril_rwlock_t *frw)
+{
+	return fibril_rwlock_is_read_locked(frw) ||
+	    fibril_rwlock_is_write_locked(frw);
+}
+
+void fibril_condvar_initialize(fibril_condvar_t *fcv)
+{
+	list_initialize(&fcv->waiters);
+}
+
+/**
+ * FIXME: If `timeout` is negative, the function returns ETIMEOUT immediately,
+ *        and if `timeout` is 0, the wait never times out.
+ *        This is not consistent with other similar APIs.
+ */
+errno_t
+fibril_condvar_wait_timeout(fibril_condvar_t *fcv, fibril_mutex_t *fm,
+    suseconds_t timeout)
+{
+	assert(fibril_mutex_is_locked(fm));
+
+	if (timeout < 0)
+		return ETIMEOUT;
+
+	awaiter_t wdata = AWAITER_INIT;
+	wdata.mutex = fm;
+
+	struct timeval tv;
+	struct timeval *expires = NULL;
+	if (timeout) {
+		getuptime(&tv);
+		tv_add_diff(&tv, timeout);
+		expires = &tv;
+	}
+
+	futex_lock(&fibril_synch_futex);
+	_fibril_mutex_unlock_unsafe(fm);
+	list_append(&wdata.link, &fcv->waiters);
+	futex_unlock(&fibril_synch_futex);
+
+	(void) fibril_wait_timeout(&wdata.event, expires);
+
+	futex_lock(&fibril_synch_futex);
+	bool timed_out = link_in_use(&wdata.link);
+	list_remove(&wdata.link);
+	futex_unlock(&fibril_synch_futex);
+
+	fibril_mutex_lock(fm);
+
+	return timed_out ? ETIMEOUT : EOK;
+}
+
+void fibril_condvar_wait(fibril_condvar_t *fcv, fibril_mutex_t *fm)
+{
+	(void) fibril_condvar_wait_timeout(fcv, fm, 0);
+}
+
+void fibril_condvar_signal(fibril_condvar_t *fcv)
+{
+	futex_lock(&fibril_synch_futex);
+
+	awaiter_t *w = list_pop(&fcv->waiters, awaiter_t, link);
+	if (w != NULL)
+		fibril_notify(&w->event);
+
+	futex_unlock(&fibril_synch_futex);
+}
+
+void fibril_condvar_broadcast(fibril_condvar_t *fcv)
+{
+	futex_lock(&fibril_synch_futex);
+
+	awaiter_t *w;
+	while ((w = list_pop(&fcv->waiters, awaiter_t, link)))
+		fibril_notify(&w->event);
+
+	futex_unlock(&fibril_synch_futex);
+}
+
+/** Timer fibril.
+ *
+ * @param arg	Timer
+ */
+static errno_t fibril_timer_func(void *arg)
+{
+	fibril_timer_t *timer = (fibril_timer_t *) arg;
+	errno_t rc;
+
+	fibril_mutex_lock(timer->lockp);
+
+	while (timer->state != fts_cleanup) {
+		switch (timer->state) {
+		case fts_not_set:
+		case fts_fired:
+			fibril_condvar_wait(&timer->cv, timer->lockp);
+			break;
+		case fts_active:
+			rc = fibril_condvar_wait_timeout(&timer->cv,
+			    timer->lockp, timer->delay);
+			if (rc == ETIMEOUT && timer->state == fts_active) {
+				timer->state = fts_fired;
+				timer->handler_fid = fibril_get_id();
+				fibril_mutex_unlock(timer->lockp);
+				timer->fun(timer->arg);
+				fibril_mutex_lock(timer->lockp);
+				timer->handler_fid = 0;
+			}
+			break;
+		case fts_cleanup:
+		case fts_clean:
+			assert(false);
+			break;
+		}
+	}
+
+	/* Acknowledge timer fibril has finished cleanup. */
+	timer->state = fts_clean;
+	fibril_condvar_broadcast(&timer->cv);
+	fibril_mutex_unlock(timer->lockp);
+
+	return 0;
+}
+
+/** Create new timer.
+ *
+ * @return		New timer on success, @c NULL if out of memory.
+ */
+fibril_timer_t *fibril_timer_create(fibril_mutex_t *lock)
+{
+	fid_t fid;
+	fibril_timer_t *timer;
+
+	timer = calloc(1, sizeof(fibril_timer_t));
+	if (timer == NULL)
+		return NULL;
+
+	fid = fibril_create(fibril_timer_func, (void *) timer);
+	if (fid == 0) {
+		free(timer);
+		return NULL;
+	}
+
+	fibril_mutex_initialize(&timer->lock);
+	fibril_condvar_initialize(&timer->cv);
+
+	timer->fibril = fid;
+	timer->state = fts_not_set;
+	timer->lockp = (lock != NULL) ? lock : &timer->lock;
+
+	fibril_add_ready(fid);
+	return timer;
+}
+
+/** Destroy timer.
+ *
+ * @param timer		Timer, must not be active or accessed by other threads.
+ */
+void fibril_timer_destroy(fibril_timer_t *timer)
+{
+	fibril_mutex_lock(timer->lockp);
+	assert(timer->state == fts_not_set || timer->state == fts_fired);
+
+	/* Request timer fibril to terminate. */
+	timer->state = fts_cleanup;
+	fibril_condvar_broadcast(&timer->cv);
+
+	/* Wait for timer fibril to terminate */
+	while (timer->state != fts_clean)
+		fibril_condvar_wait(&timer->cv, timer->lockp);
+	fibril_mutex_unlock(timer->lockp);
+
+	free(timer);
+}
+
+/** Set timer.
+ *
+ * Set timer to execute a callback function after the specified
+ * interval.
+ *
+ * @param timer		Timer
+ * @param delay		Delay in microseconds
+ * @param fun		Callback function
+ * @param arg		Argument for @a fun
+ */
+void fibril_timer_set(fibril_timer_t *timer, suseconds_t delay,
+    fibril_timer_fun_t fun, void *arg)
+{
+	fibril_mutex_lock(timer->lockp);
+	fibril_timer_set_locked(timer, delay, fun, arg);
+	fibril_mutex_unlock(timer->lockp);
+}
+
+/** Set locked timer.
+ *
+ * Set timer to execute a callback function after the specified
+ * interval. Must be called when the timer is locked.
+ *
+ * @param timer		Timer
+ * @param delay		Delay in microseconds
+ * @param fun		Callback function
+ * @param arg		Argument for @a fun
+ */
+void fibril_timer_set_locked(fibril_timer_t *timer, suseconds_t delay,
+    fibril_timer_fun_t fun, void *arg)
+{
+	assert(fibril_mutex_is_locked(timer->lockp));
+	assert(timer->state == fts_not_set || timer->state == fts_fired);
+	timer->state = fts_active;
+	timer->delay = delay;
+	timer->fun = fun;
+	timer->arg = arg;
+	fibril_condvar_broadcast(&timer->cv);
+}
+
+/** Clear timer.
+ *
+ * Clears (cancels) timer and returns last state of the timer.
+ * This can be one of:
+ *    - fts_not_set	If the timer has not been set or has been cleared
+ *    - fts_active	Timer was set but did not fire
+ *    - fts_fired	Timer fired
+ *
+ * @param timer		Timer
+ * @return		Last timer state
+ */
+fibril_timer_state_t fibril_timer_clear(fibril_timer_t *timer)
+{
+	fibril_timer_state_t old_state;
+
+	fibril_mutex_lock(timer->lockp);
+	old_state = fibril_timer_clear_locked(timer);
+	fibril_mutex_unlock(timer->lockp);
+
+	return old_state;
+}
+
+/** Clear locked timer.
+ *
+ * Clears (cancels) timer and returns last state of the timer.
+ * This can be one of:
+ *    - fts_not_set	If the timer has not been set or has been cleared
+ *    - fts_active	Timer was set but did not fire
+ *    - fts_fired	Timer fired
+ * Must be called when the timer is locked.
+ *
+ * @param timer		Timer
+ * @return		Last timer state
+ */
+fibril_timer_state_t fibril_timer_clear_locked(fibril_timer_t *timer)
+{
+	fibril_timer_state_t old_state;
+
+	assert(fibril_mutex_is_locked(timer->lockp));
+
+	while (timer->handler_fid != 0) {
+		if (timer->handler_fid == fibril_get_id()) {
+			printf("Deadlock detected.\n");
+			stacktrace_print();
+			printf("Fibril %p is trying to clear timer %p from "
+			    "inside its handler %p.\n",
+			    fibril_get_id(), timer, timer->fun);
+			abort();
+		}
+
+		fibril_condvar_wait(&timer->cv, timer->lockp);
+	}
+
+	old_state = timer->state;
+	timer->state = fts_not_set;
+
+	timer->delay = 0;
+	timer->fun = NULL;
+	timer->arg = NULL;
+	fibril_condvar_broadcast(&timer->cv);
+
+	return old_state;
+}
+
+/**
+ * Initialize a semaphore with initial count set to the provided value.
+ *
+ * @param sem    Semaphore to initialize.
+ * @param count  Initial count. Must not be negative.
+ */
+void fibril_semaphore_initialize(fibril_semaphore_t *sem, long count)
+{
+	/*
+	 * A negative count denotes the length of the waitlist,
+	 * so it makes no sense as an initial value.
+	 */
+	assert(count >= 0);
+	sem->closed = false;
+	sem->count = count;
+	list_initialize(&sem->waiters);
+}
+
+/**
+ * Produce one token.
+ * If there are fibrils waiting for tokens, this operation satisfies
+ * exactly one waiting `fibril_semaphore_down()`.
+ * This operation never blocks the fibril.
+ *
+ * @param sem  Semaphore to use.
+ */
+void fibril_semaphore_up(fibril_semaphore_t *sem)
+{
+	futex_lock(&fibril_synch_futex);
+
+	if (sem->closed) {
+		futex_unlock(&fibril_synch_futex);
+		return;
+	}
+
+	sem->count++;
+
+	if (sem->count <= 0) {
+		awaiter_t *w = list_pop(&sem->waiters, awaiter_t, link);
+		assert(w);
+		fibril_notify(&w->event);
+	}
+
+	futex_unlock(&fibril_synch_futex);
+}
+
+/**
+ * Consume one token.
+ * If there are no available tokens (count <= 0), this operation blocks until
+ * another fibril produces a token using `fibril_semaphore_up()`.
+ *
+ * @param sem  Semaphore to use.
+ */
+void fibril_semaphore_down(fibril_semaphore_t *sem)
+{
+	futex_lock(&fibril_synch_futex);
+
+	if (sem->closed) {
+		futex_unlock(&fibril_synch_futex);
+		return;
+	}
+
+	sem->count--;
+
+	if (sem->count >= 0) {
+		futex_unlock(&fibril_synch_futex);
+		return;
+	}
+
+	awaiter_t wdata = AWAITER_INIT;
+	list_append(&wdata.link, &sem->waiters);
+
+	futex_unlock(&fibril_synch_futex);
+
+	fibril_wait_for(&wdata.event);
+}
+
+errno_t fibril_semaphore_down_timeout(fibril_semaphore_t *sem, suseconds_t timeout)
+{
+	if (timeout < 0)
+		return ETIMEOUT;
+
+	futex_lock(&fibril_synch_futex);
+	if (sem->closed) {
+		futex_unlock(&fibril_synch_futex);
+		return EOK;
+	}
+
+	sem->count--;
+
+	if (sem->count >= 0) {
+		futex_unlock(&fibril_synch_futex);
+		return EOK;
+	}
+
+	awaiter_t wdata = AWAITER_INIT;
+	list_append(&wdata.link, &sem->waiters);
+
+	futex_unlock(&fibril_synch_futex);
+
+	struct timeval tv;
+	struct timeval *expires = NULL;
+	if (timeout) {
+		getuptime(&tv);
+		tv_add_diff(&tv, timeout);
+		expires = &tv;
+	}
+
+	errno_t rc = fibril_wait_timeout(&wdata.event, expires);
+	if (rc == EOK)
+		return EOK;
+
+	futex_lock(&fibril_synch_futex);
+	if (!link_in_use(&wdata.link)) {
+		futex_unlock(&fibril_synch_futex);
+		return EOK;
+	}
+
+	list_remove(&wdata.link);
+	sem->count++;
+	futex_unlock(&fibril_synch_futex);
+
+	return rc;
+}
+
+/**
+ * Close the semaphore.
+ * All future down() operations return instantly.
+ */
+void fibril_semaphore_close(fibril_semaphore_t *sem)
+{
+	futex_lock(&fibril_synch_futex);
+	sem->closed = true;
+	awaiter_t *w;
+
+	while ((w = list_pop(&sem->waiters, awaiter_t, link)))
+		fibril_notify(&w->event);
+
+	futex_unlock(&fibril_synch_futex);
+}
+
+/** @}
+ */
Index: uspace/lib/c/generic/thread/futex.c
===================================================================
--- uspace/lib/c/generic/thread/futex.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
+++ uspace/lib/c/generic/thread/futex.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2008 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup libc
+ * @{
+ */
+/** @file
+ */
+
+#include <assert.h>
+#include <atomic.h>
+#include <fibril.h>
+#include <io/kio.h>
+
+#include "../private/fibril.h"
+#include "../private/futex.h"
+
+//#define DPRINTF(...) kio_printf(__VA_ARGS__)
+#define DPRINTF(...) dummy_printf(__VA_ARGS__)
+
+/** Initialize futex counter.
+ *
+ * @param futex Futex.
+ * @param val   Initialization value.
+ *
+ */
+void futex_initialize(futex_t *futex, int val)
+{
+	atomic_set(&futex->val, val);
+}
+
+#ifdef CONFIG_DEBUG_FUTEX
+
+void __futex_assert_is_locked(futex_t *futex, const char *name)
+{
+	void *owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
+	fibril_t *self = (fibril_t *) fibril_get_id();
+	if (owner != self) {
+		DPRINTF("Assertion failed: %s (%p) is not locked by fibril %p (instead locked by fibril %p).\n", name, futex, self, owner);
+	}
+	assert(owner == self);
+}
+
+void __futex_assert_is_not_locked(futex_t *futex, const char *name)
+{
+	void *owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
+	fibril_t *self = (fibril_t *) fibril_get_id();
+	if (owner == self) {
+		DPRINTF("Assertion failed: %s (%p) is already locked by fibril %p.\n", name, futex, self);
+	}
+	assert(owner != self);
+}
+
+void __futex_lock(futex_t *futex, const char *name)
+{
+	/*
+	 * We use relaxed atomics to avoid violating C11 memory model.
+	 * They should compile to regular load/stores, but simple assignments
+	 * would be UB by definition.
+	 * The proper ordering is ensured by the surrounding futex operation.
+	 */
+
+	fibril_t *self = (fibril_t *) fibril_get_id();
+	DPRINTF("Locking futex %s (%p) by fibril %p.\n", name, futex, self);
+	__futex_assert_is_not_locked(futex, name);
+	futex_down(futex);
+
+	void *prev_owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
+	assert(prev_owner == NULL);
+	__atomic_store_n(&futex->owner, self, __ATOMIC_RELAXED);
+}
+
+void __futex_unlock(futex_t *futex, const char *name)
+{
+	fibril_t *self = (fibril_t *) fibril_get_id();
+	DPRINTF("Unlocking futex %s (%p) by fibril %p.\n", name, futex, self);
+	__futex_assert_is_locked(futex, name);
+	__atomic_store_n(&futex->owner, NULL, __ATOMIC_RELAXED);
+	futex_up(futex);
+}
+
+bool __futex_trylock(futex_t *futex, const char *name)
+{
+	fibril_t *self = (fibril_t *) fibril_get_id();
+	bool success = futex_trydown(futex);
+	if (success) {
+		void *owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
+		assert(owner == NULL);
+
+		__atomic_store_n(&futex->owner, self, __ATOMIC_RELAXED);
+
+		DPRINTF("Trylock on futex %s (%p) by fibril %p succeeded.\n", name, futex, self);
+	} else {
+		DPRINTF("Trylock on futex %s (%p) by fibril %p failed.\n", name, futex, self);
+	}
+
+	return success;
+}
+
+void __futex_give_to(futex_t *futex, void *new_owner, const char *name)
+{
+	fibril_t *self = fibril_self();
+	fibril_t *no = new_owner;
+	DPRINTF("Passing futex %s (%p) from fibril %p to fibril %p.\n", name, futex, self, no);
+
+	__futex_assert_is_locked(futex, name);
+	__atomic_store_n(&futex->owner, new_owner, __ATOMIC_RELAXED);
+}
+
+#endif
+
+/** @}
+ */
Index: uspace/lib/c/generic/thread/mpsc.c
===================================================================
--- uspace/lib/c/generic/thread/mpsc.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
+++ uspace/lib/c/generic/thread/mpsc.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2018 CZ.NIC, z.s.p.o.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Authors:
+ *	Jiří Zárevúcky (jzr) <zarevucky.jiri@gmail.com>
+ */
+
+#include <fibril.h>
+#include <fibril_synch.h>
+#include <mem.h>
+#include <stdlib.h>
+
+#include "../private/fibril.h"
+
+/*
+ * A multi-producer, single-consumer concurrent FIFO channel with unlimited
+ * buffering.
+ *
+ * The current implementation is based on the super simple two-lock queue
+ * by Michael and Scott
+ * (http://www.cs.rochester.edu/~scott/papers/1996_PODC_queues.pdf)
+ *
+ * The original algorithm uses one lock on each side. Since this queue is
+ * single-consumer, we only use the tail lock.
+ */
+
+typedef struct mpsc_node mpsc_node_t;
+
+struct mpsc {
+	size_t elem_size;
+	fibril_rmutex_t t_lock;
+	mpsc_node_t *head;
+	mpsc_node_t *tail;
+	mpsc_node_t *close_node;
+	fibril_event_t event;
+};
+
+struct mpsc_node {
+	mpsc_node_t *next;
+	unsigned char data[];
+};
+
+mpsc_t *mpsc_create(size_t elem_size)
+{
+	mpsc_t *q = calloc(1, sizeof(mpsc_t));
+	mpsc_node_t *n = calloc(1, sizeof(mpsc_node_t) + elem_size);
+	mpsc_node_t *c = calloc(1, sizeof(mpsc_node_t) + elem_size);
+
+	if (!q || !n || !c) {
+		free(q);
+		free(n);
+		free(c);
+		return NULL;
+	}
+
+	q->elem_size = elem_size;
+	fibril_rmutex_initialize(&q->t_lock);
+	q->head = q->tail = n;
+	q->close_node = c;
+	return q;
+}
+
+void mpsc_destroy(mpsc_t *q)
+{
+	mpsc_node_t *n = q->head;
+	mpsc_node_t *next = NULL;
+	while (n != NULL) {
+		next = n->next;
+		free(n);
+		n = next;
+	}
+
+	// TODO: fibril_rmutex_destroy()
+
+	free(q);
+}
+
+static errno_t _mpsc_push(mpsc_t *q, mpsc_node_t *n)
+{
+	fibril_rmutex_lock(&q->t_lock);
+
+	if (q->tail == q->close_node) {
+		fibril_rmutex_unlock(&q->t_lock);
+		return EINVAL;
+	}
+
+	__atomic_store_n(&q->tail->next, n, __ATOMIC_RELEASE);
+	q->tail = n;
+
+	fibril_rmutex_unlock(&q->t_lock);
+
+	fibril_notify(&q->event);
+	return EOK;
+}
+
+/**
+ * Send data on the channel.
+ * The length of data is equal to the `elem_size` value set in `mpsc_create`.
+ *
+ * This function is safe for use under restricted mutex lock.
+ *
+ * @return ENOMEM if allocation failed, EINVAL if the queue is closed.
+ */
+errno_t mpsc_send(mpsc_t *q, const void *b)
+{
+	mpsc_node_t *n = malloc(sizeof(mpsc_node_t) + q->elem_size);
+	if (!n)
+		return ENOMEM;
+
+	n->next = NULL;
+	memcpy(n->data, b, q->elem_size);
+
+	return _mpsc_push(q, n);
+}
+
+/**
+ * Receive data from the channel.
+ *
+ * @return ETIMEOUT if deadline expires, ENOENT if the queue is closed and
+ * there is no message left in the queue.
+ */
+errno_t mpsc_receive(mpsc_t *q, void *b, const struct timeval *expires)
+{
+	mpsc_node_t *n;
+	mpsc_node_t *new_head;
+
+	while (true) {
+		n = q->head;
+		new_head = __atomic_load_n(&n->next, __ATOMIC_ACQUIRE);
+		if (new_head)
+			break;
+
+		errno_t rc = fibril_wait_timeout(&q->event, expires);
+		if (rc != EOK)
+			return rc;
+	}
+
+	if (new_head == q->close_node)
+		return ENOENT;
+
+	memcpy(b, new_head->data, q->elem_size);
+	q->head = new_head;
+
+	free(n);
+	return EOK;
+}
+
+/**
+ * Close the channel.
+ *
+ * This function is safe for use under restricted mutex lock.
+ */
+void mpsc_close(mpsc_t *q)
+{
+	_mpsc_push(q, q->close_node);
+}
+
Index: uspace/lib/c/generic/thread/rcu.c
===================================================================
--- uspace/lib/c/generic/thread/rcu.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
+++ uspace/lib/c/generic/thread/rcu.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -0,0 +1,441 @@
+/*
+ * Copyright (c) 2012 Adam Hraska
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup liburcu
+ * @{
+ */
+/**
+ * @file
+ *
+ * User space RCU is based on URCU utilizing signals [1]. This
+ * implementation does not however signal each thread of the process
+ * to issue a memory barrier. Instead, we introduced a syscall that
+ * issues memory barriers (via IPIs) on cpus that are running threads
+ * of the current process. First, it does not require us to schedule
+ * and run every thread of the process. Second, IPIs are less intrusive
+ * than switching contexts and entering user space.
+ *
+ * This algorithm is further modified to require a single instead of
+ * two reader group changes per grace period. Signal-URCU flips
+ * the reader group and waits for readers of the previous group
+ * twice in succession in order to wait for new readers that were
+ * delayed and mistakenly associated with the previous reader group.
+ * The modified algorithm ensures that the new reader group is
+ * always empty (by explicitly waiting for it to become empty).
+ * Only then does it flip the reader group and wait for preexisting
+ * readers of the old reader group (invariant of SRCU [2, 3]).
+ *
+ *
+ * [1] User-level implementations of read-copy update,
+ *     2012, appendix
+ *     http://www.rdrop.com/users/paulmck/RCU/urcu-supp-accepted.2011.08.30a.pdf
+ *
+ * [2] linux/kernel/srcu.c in Linux 3.5-rc2,
+ *     2012
+ *     http://tomoyo.sourceforge.jp/cgi-bin/lxr/source/kernel/srcu.c?v=linux-3.5-rc2-ccs-1.8.3
+ *
+ * [3] [RFC PATCH 5/5 single-thread-version] implement
+ *     per-domain single-thread state machine,
+ *     2012, Lai
+ *     https://lkml.org/lkml/2012/3/6/586
+ */
+
+#include "rcu.h"
+#include <fibril_synch.h>
+#include <fibril.h>
+#include <stdio.h>
+#include <stddef.h>
+#include <compiler/barrier.h>
+#include <libarch/barrier.h>
+#include <macros.h>
+#include <async.h>
+#include <adt/list.h>
+#include <smp_memory_barrier.h>
+#include <assert.h>
+#include <time.h>
+
+#include "../private/fibril.h"
+
+
+/** RCU sleeps for RCU_SLEEP_MS before polling an active RCU reader again. */
+#define RCU_SLEEP_MS        10
+
+#define RCU_NESTING_SHIFT   1
+#define RCU_NESTING_INC     (1 << RCU_NESTING_SHIFT)
+#define RCU_GROUP_BIT_MASK  (size_t)(RCU_NESTING_INC - 1)
+#define RCU_GROUP_A         (size_t)(0 | RCU_NESTING_INC)
+#define RCU_GROUP_B         (size_t)(1 | RCU_NESTING_INC)
+
+
+/** Fibril local RCU data. */
+typedef struct fibril_rcu_data {
+	size_t nesting_cnt;
+	link_t link;
+	bool registered;
+} fibril_rcu_data_t;
+
+/** Process global RCU data. */
+typedef struct rcu_data {
+	size_t cur_gp;
+	size_t reader_group;
+	fibril_rmutex_t list_mutex;
+	list_t fibrils_list;
+	struct {
+		fibril_rmutex_t mutex;
+		bool locked;
+		list_t blocked_fibrils;
+	} sync_lock;
+} rcu_data_t;
+
+typedef struct blocked_fibril {
+	fibril_event_t unblock;
+	link_t link;
+	bool is_ready;
+} blocked_fibril_t;
+
+
+/** Fibril local RCU data. */
+static fibril_local fibril_rcu_data_t fibril_rcu = {
+	.nesting_cnt = 0,
+	.link = {
+		.next = NULL,
+		.prev = NULL
+	},
+	.registered = false
+};
+
+/** Process global RCU data. */
+static rcu_data_t rcu = {
+	.cur_gp = 0,
+	.reader_group = RCU_GROUP_A,
+	.list_mutex = FIBRIL_RMUTEX_INITIALIZER(rcu.list_mutex),
+	.fibrils_list = LIST_INITIALIZER(rcu.fibrils_list),
+	.sync_lock = {
+		.mutex = FIBRIL_RMUTEX_INITIALIZER(rcu.sync_lock.mutex),
+		.locked = false,
+		.blocked_fibrils = LIST_INITIALIZER(rcu.sync_lock.blocked_fibrils),
+	},
+};
+
+
+static void wait_for_readers(size_t reader_group);
+static void force_mb_in_all_threads(void);
+static bool is_preexisting_reader(const fibril_rcu_data_t *fib, size_t group);
+
+static void lock_sync(void);
+static void unlock_sync(void);
+static void sync_sleep(void);
+
+static bool is_in_group(size_t nesting_cnt, size_t group);
+static bool is_in_reader_section(size_t nesting_cnt);
+static size_t get_other_group(size_t group);
+
+
+/** Registers a fibril so it may start using RCU read sections.
+ *
+ * A fibril must be registered with rcu before it can enter RCU critical
+ * sections delineated by rcu_read_lock() and rcu_read_unlock().
+ */
+void rcu_register_fibril(void)
+{
+	assert(!fibril_rcu.registered);
+
+	fibril_rmutex_lock(&rcu.list_mutex);
+	list_append(&fibril_rcu.link, &rcu.fibrils_list);
+	fibril_rmutex_unlock(&rcu.list_mutex);
+
+	fibril_rcu.registered = true;
+}
+
+/** Deregisters a fibril that had been using RCU read sections.
+ *
+ * A fibril must be deregistered before it exits if it had
+ * been registered with rcu via rcu_register_fibril().
+ */
+void rcu_deregister_fibril(void)
+{
+	assert(fibril_rcu.registered);
+
+	/*
+	 * Forcefully unlock any reader sections. The fibril is exiting
+	 * so it is not holding any references to data protected by the
+	 * rcu section. Therefore, it is safe to unlock. Otherwise,
+	 * rcu_synchronize() would wait indefinitely.
+	 */
+	memory_barrier();
+	fibril_rcu.nesting_cnt = 0;
+
+	fibril_rmutex_lock(&rcu.list_mutex);
+	list_remove(&fibril_rcu.link);
+	fibril_rmutex_unlock(&rcu.list_mutex);
+
+	fibril_rcu.registered = false;
+}
+
+/** Delimits the start of an RCU reader critical section.
+ *
+ * RCU reader sections may be nested.
+ */
+void rcu_read_lock(void)
+{
+	assert(fibril_rcu.registered);
+
+	size_t nesting_cnt = ACCESS_ONCE(fibril_rcu.nesting_cnt);
+
+	if (0 == (nesting_cnt >> RCU_NESTING_SHIFT)) {
+		ACCESS_ONCE(fibril_rcu.nesting_cnt) = ACCESS_ONCE(rcu.reader_group);
+		/* Required by MB_FORCE_L */
+		compiler_barrier(); /* CC_BAR_L */
+	} else {
+		ACCESS_ONCE(fibril_rcu.nesting_cnt) = nesting_cnt + RCU_NESTING_INC;
+	}
+}
+
+/** Delimits the end of an RCU reader critical section. */
+void rcu_read_unlock(void)
+{
+	assert(fibril_rcu.registered);
+	assert(rcu_read_locked());
+
+	/* Required by MB_FORCE_U */
+	compiler_barrier(); /* CC_BAR_U */
+	/* todo: ACCESS_ONCE(nesting_cnt) ? */
+	fibril_rcu.nesting_cnt -= RCU_NESTING_INC;
+}
+
+/** Returns true if the current fibril is in an RCU reader section. */
+bool rcu_read_locked(void)
+{
+	return 0 != (fibril_rcu.nesting_cnt >> RCU_NESTING_SHIFT);
+}
+
+/** Blocks until all preexisting readers exit their critical sections. */
+void rcu_synchronize(void)
+{
+	assert(!rcu_read_locked());
+
+	/* Contain load of rcu.cur_gp. */
+	memory_barrier();
+
+	/* Approximately the number of the GP in progress. */
+	size_t gp_in_progress = ACCESS_ONCE(rcu.cur_gp);
+
+	lock_sync();
+
+	/*
+	 * Exit early if we were stuck waiting for the mutex for a full grace
+	 * period. Started waiting during gp_in_progress (or gp_in_progress + 1
+	 * if the value propagated to this cpu too late) so wait for the next
+	 * full GP, gp_in_progress + 1, to finish. I.e., don't wait if the GP
+	 * after that, gp_in_progress + 2, already started.
+	 */
+	/* rcu.cur_gp >= gp_in_progress + 2, but tolerates overflows. */
+	if (rcu.cur_gp != gp_in_progress && rcu.cur_gp + 1 != gp_in_progress) {
+		unlock_sync();
+		return;
+	}
+
+	++ACCESS_ONCE(rcu.cur_gp);
+
+	/*
+	 * Pairs up with MB_FORCE_L (ie CC_BAR_L). Makes changes prior
+	 * to rcu_synchronize() visible to new readers.
+	 */
+	memory_barrier(); /* MB_A */
+
+	/*
+	 * Pairs up with MB_A.
+	 *
+	 * If the memory barrier is issued before CC_BAR_L in the target
+	 * thread, it pairs up with MB_A and the thread sees all changes
+	 * prior to rcu_synchronize(). Ie any reader sections are new
+	 * rcu readers.
+	 *
+	 * If the memory barrier is issued after CC_BAR_L, it pairs up
+	 * with MB_B and it will make the most recent nesting_cnt visible
+	 * in this thread. Since the reader may have already accessed
+	 * memory protected by RCU (it ran instructions passed CC_BAR_L),
+	 * it is a preexisting reader. Seeing the most recent nesting_cnt
+	 * ensures the thread will be identified as a preexisting reader
+	 * and we will wait for it in wait_for_readers(old_reader_group).
+	 */
+	force_mb_in_all_threads(); /* MB_FORCE_L */
+
+	/*
+	 * Pairs with MB_FORCE_L (ie CC_BAR_L, CC_BAR_U) and makes the most
+	 * current fibril.nesting_cnt visible to this cpu.
+	 */
+	read_barrier(); /* MB_B */
+
+	size_t new_reader_group = get_other_group(rcu.reader_group);
+	wait_for_readers(new_reader_group);
+
+	/* Separates waiting for readers in new_reader_group from group flip. */
+	memory_barrier();
+
+	/* Flip the group new readers should associate with. */
+	size_t old_reader_group = rcu.reader_group;
+	rcu.reader_group = new_reader_group;
+
+	/* Flip the group before waiting for preexisting readers in the old group. */
+	memory_barrier();
+
+	wait_for_readers(old_reader_group);
+
+	/* MB_FORCE_U  */
+	force_mb_in_all_threads(); /* MB_FORCE_U */
+
+	unlock_sync();
+}
+
+/** Issues a memory barrier in each thread of this process. */
+static void force_mb_in_all_threads(void)
+{
+	/*
+	 * Only issue barriers in running threads. The scheduler will
+	 * execute additional memory barriers when switching to threads
+	 * of the process that are currently not running.
+	 */
+	smp_memory_barrier();
+}
+
+/** Waits for readers of reader_group to exit their reader sections. */
+static void wait_for_readers(size_t reader_group)
+{
+	fibril_rmutex_lock(&rcu.list_mutex);
+
+	list_t quiescent_fibrils;
+	list_initialize(&quiescent_fibrils);
+
+	while (!list_empty(&rcu.fibrils_list)) {
+		list_foreach_safe(rcu.fibrils_list, fibril_it, next_fibril) {
+			fibril_rcu_data_t *fib = member_to_inst(fibril_it,
+			    fibril_rcu_data_t, link);
+
+			if (is_preexisting_reader(fib, reader_group)) {
+				fibril_rmutex_unlock(&rcu.list_mutex);
+				sync_sleep();
+				fibril_rmutex_lock(&rcu.list_mutex);
+				/* Break to while loop. */
+				break;
+			} else {
+				list_remove(fibril_it);
+				list_append(fibril_it, &quiescent_fibrils);
+			}
+		}
+	}
+
+	list_concat(&rcu.fibrils_list, &quiescent_fibrils);
+	fibril_rmutex_unlock(&rcu.list_mutex);
+}
+
+static void lock_sync(void)
+{
+	fibril_rmutex_lock(&rcu.sync_lock.mutex);
+	if (rcu.sync_lock.locked) {
+		blocked_fibril_t blocked_fib;
+		blocked_fib.unblock = FIBRIL_EVENT_INIT;
+
+		list_append(&blocked_fib.link, &rcu.sync_lock.blocked_fibrils);
+
+		do {
+			blocked_fib.is_ready = false;
+			fibril_rmutex_unlock(&rcu.sync_lock.mutex);
+			fibril_wait_for(&blocked_fib.unblock);
+			fibril_rmutex_lock(&rcu.sync_lock.mutex);
+		} while (rcu.sync_lock.locked);
+
+		list_remove(&blocked_fib.link);
+		rcu.sync_lock.locked = true;
+	} else {
+		rcu.sync_lock.locked = true;
+	}
+}
+
+static void unlock_sync(void)
+{
+	assert(rcu.sync_lock.locked);
+
+	/* Unlock but wake up any fibrils waiting for the lock. */
+
+	if (!list_empty(&rcu.sync_lock.blocked_fibrils)) {
+		blocked_fibril_t *blocked_fib = member_to_inst(
+		    list_first(&rcu.sync_lock.blocked_fibrils), blocked_fibril_t, link);
+
+		if (!blocked_fib->is_ready) {
+			blocked_fib->is_ready = true;
+			fibril_notify(&blocked_fib->unblock);
+		}
+	}
+
+	rcu.sync_lock.locked = false;
+	fibril_rmutex_unlock(&rcu.sync_lock.mutex);
+}
+
+static void sync_sleep(void)
+{
+	assert(rcu.sync_lock.locked);
+	/*
+	 * Release the futex to avoid deadlocks in singlethreaded apps
+	 * but keep sync locked.
+	 */
+	fibril_rmutex_unlock(&rcu.sync_lock.mutex);
+	fibril_usleep(RCU_SLEEP_MS * 1000);
+	fibril_rmutex_lock(&rcu.sync_lock.mutex);
+}
+
+
+static bool is_preexisting_reader(const fibril_rcu_data_t *fib, size_t group)
+{
+	size_t nesting_cnt = ACCESS_ONCE(fib->nesting_cnt);
+
+	return is_in_group(nesting_cnt, group) && is_in_reader_section(nesting_cnt);
+}
+
+static size_t get_other_group(size_t group)
+{
+	if (group == RCU_GROUP_A)
+		return RCU_GROUP_B;
+	else
+		return RCU_GROUP_A;
+}
+
+static bool is_in_reader_section(size_t nesting_cnt)
+{
+	return RCU_NESTING_INC <= nesting_cnt;
+}
+
+static bool is_in_group(size_t nesting_cnt, size_t group)
+{
+	return (nesting_cnt & RCU_GROUP_BIT_MASK) == (group & RCU_GROUP_BIT_MASK);
+}
+
+
+
+/** @}
+ */
Index: uspace/lib/c/generic/thread/thread.c
===================================================================
--- uspace/lib/c/generic/thread/thread.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
+++ uspace/lib/c/generic/thread/thread.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2006 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup libc
+ * @{
+ */
+/** @file
+ */
+
+#include <libc.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <libarch/faddr.h>
+#include <abi/proc/uarg.h>
+#include <fibril.h>
+#include <stack.h>
+#include <str.h>
+#include <async.h>
+#include <errno.h>
+#include <as.h>
+
+#include "../private/thread.h"
+#include "../private/fibril.h"
+
+/** Main thread function.
+ *
+ * This function is called from __thread_entry() and is used
+ * to call the thread's implementing function and perform cleanup
+ * and exit when thread returns back.
+ *
+ * @param uarg Pointer to userspace argument structure.
+ *
+ */
+void __thread_main(uspace_arg_t *uarg)
+{
+	assert(!__tcb_is_set());
+
+	fibril_t *fibril = uarg->uspace_thread_arg;
+	assert(fibril);
+
+	__tcb_set(fibril->tcb);
+
+	uarg->uspace_thread_function(fibril->arg);
+	/*
+	 * XXX: we cannot free the userspace stack while running on it
+	 *
+	 * free(uarg->uspace_stack);
+	 * free(uarg);
+	 */
+
+	fibril_teardown(fibril);
+	thread_exit(0);
+}
+
+/** Create userspace thread.
+ *
+ * This function creates new userspace thread and allocates userspace
+ * stack and userspace argument structure for it.
+ *
+ * @param function Function implementing the thread.
+ * @param arg Argument to be passed to thread.
+ * @param name Symbolic name of the thread.
+ * @param tid Thread ID of the newly created thread.
+ *
+ * @return Zero on success or a code from @ref errno.h on failure.
+ */
+errno_t thread_create(void (*function)(void *), void *arg, const char *name,
+    thread_id_t *tid)
+{
+	uspace_arg_t *uarg = calloc(1, sizeof(uspace_arg_t));
+	if (!uarg)
+		return ENOMEM;
+
+	fibril_t *fibril = fibril_alloc();
+	if (!fibril) {
+		free(uarg);
+		return ENOMEM;
+	}
+
+	size_t stack_size = stack_size_get();
+	void *stack = as_area_create(AS_AREA_ANY, stack_size,
+	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE | AS_AREA_GUARD |
+	    AS_AREA_LATE_RESERVE, AS_AREA_UNPAGED);
+	if (stack == AS_MAP_FAILED) {
+		fibril_teardown(fibril);
+		free(uarg);
+		return ENOMEM;
+	}
+
+	fibril->arg = arg;
+	uarg->uspace_entry = (void *) FADDR(__thread_entry);
+	uarg->uspace_stack = stack;
+	uarg->uspace_stack_size = stack_size;
+	uarg->uspace_thread_function = function;
+	uarg->uspace_thread_arg = fibril;
+	uarg->uspace_uarg = uarg;
+
+	errno_t rc = (errno_t) __SYSCALL4(SYS_THREAD_CREATE, (sysarg_t) uarg,
+	    (sysarg_t) name, (sysarg_t) str_size(name), (sysarg_t) tid);
+
+	if (rc != EOK) {
+		/*
+		 * Failed to create a new thread.
+		 * Free up the allocated data.
+		 */
+		as_area_destroy(stack);
+		free(uarg);
+	}
+
+	return rc;
+}
+
+/** Terminate current thread.
+ *
+ * @param status Exit status. Currently not used.
+ *
+ */
+void thread_exit(int status)
+{
+	__SYSCALL1(SYS_THREAD_EXIT, (sysarg_t) status);
+
+	/* Unreachable */
+	while (true)
+		;
+}
+
+/** Detach thread.
+ *
+ * Currently not implemented.
+ *
+ * @param thread TID.
+ */
+void thread_detach(thread_id_t thread)
+{
+}
+
+/** Get current thread ID.
+ *
+ * @return Current thread ID.
+ */
+thread_id_t thread_get_id(void)
+{
+	thread_id_t thread_id;
+
+	(void) __SYSCALL1(SYS_THREAD_GET_ID, (sysarg_t) &thread_id);
+
+	return thread_id;
+}
+
+/** Wait unconditionally for specified number of microseconds
+ *
+ */
+int thread_usleep(useconds_t usec)
+{
+	(void) __SYSCALL1(SYS_THREAD_USLEEP, usec);
+	return 0;
+}
+
+/** Wait unconditionally for specified number of seconds
+ *
+ */
+unsigned int thread_sleep(unsigned int sec)
+{
+	/*
+	 * Sleep in 1000 second steps to support
+	 * full argument range
+	 */
+
+	while (sec > 0) {
+		unsigned int period = (sec > 1000) ? 1000 : sec;
+
+		thread_usleep(period * 1000000);
+		sec -= period;
+	}
+
+	return 0;
+}
+
+/** @}
+ */
Index: uspace/lib/c/generic/thread/tls.c
===================================================================
--- uspace/lib/c/generic/thread/tls.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
+++ uspace/lib/c/generic/thread/tls.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2006 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup libc
+ * @{
+ */
+/** @file
+ *
+ * Support for thread-local storage, as described in:
+ * 	Drepper U.: ELF Handling For Thread-Local Storage, 2005
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <align.h>
+#include <tls.h>
+#include <stdlib.h>
+#include <str.h>
+#include <macros.h>
+#include <elf/elf.h>
+#include <as.h>
+
+#include <libarch/config.h>
+
+#ifdef CONFIG_RTLD
+#include <rtld/rtld.h>
+#endif
+
+#include "../private/libc.h"
+
+#if !defined(CONFIG_TLS_VARIANT_1) && !defined(CONFIG_TLS_VARIANT_2)
+#error Unknown TLS variant.
+#endif
+
+static ptrdiff_t _tcb_data_offset(void)
+{
+	const elf_segment_header_t *tls =
+	    elf_get_phdr(__progsymbols.elfstart, PT_TLS);
+
+	size_t tls_align = tls ? tls->p_align : 1;
+
+#ifdef CONFIG_TLS_VARIANT_1
+	return ALIGN_UP((ptrdiff_t) sizeof(tcb_t), tls_align);
+#else
+	size_t tls_size = tls ? tls->p_memsz : 0;
+	return -ALIGN_UP((ptrdiff_t) tls_size, max(tls_align, _Alignof(tcb_t)));
+#endif
+}
+
+/** Get address of static TLS block */
+void *tls_get(void)
+{
+#ifdef CONFIG_RTLD
+	assert(runtime_env == NULL);
+#endif
+	return (uint8_t *)__tcb_get() + _tcb_data_offset();
+}
+
+static tcb_t *tls_make_generic(const void *elf, void *(*alloc)(size_t, size_t))
+{
+	assert(!elf_get_phdr(elf, PT_DYNAMIC));
+#ifdef CONFIG_RTLD
+	assert(runtime_env == NULL);
+#endif
+
+	const elf_segment_header_t *tls = elf_get_phdr(elf, PT_TLS);
+	size_t tls_size = tls ? tls->p_memsz : 0;
+	size_t tls_align = tls ? tls->p_align : 1;
+
+	/*
+	 * We don't currently support alignment this big,
+	 * and neither should we need to.
+	 */
+	assert(tls_align <= PAGE_SIZE);
+
+#ifdef CONFIG_TLS_VARIANT_1
+	size_t alloc_size =
+	    ALIGN_UP(sizeof(tcb_t), tls_align) + tls_size;
+#else
+	size_t alloc_size =
+	    ALIGN_UP(tls_size, max(tls_align, _Alignof(tcb_t))) + sizeof(tcb_t);
+#endif
+
+	void *area = alloc(max(tls_align, _Alignof(tcb_t)), alloc_size);
+	if (!area)
+		return NULL;
+
+#ifdef CONFIG_TLS_VARIANT_1
+	tcb_t *tcb = area;
+	uint8_t *data = (uint8_t *)tcb + _tcb_data_offset();
+	memset(tcb, 0, sizeof(*tcb));
+#else
+	uint8_t *data = area;
+	tcb_t *tcb = (tcb_t *) (data - _tcb_data_offset());
+	memset(tcb, 0, sizeof(tcb_t));
+	tcb->self = tcb;
+#endif
+
+	if (!tls)
+		return tcb;
+
+	uintptr_t bias = elf_get_bias(elf);
+
+	/* Copy thread local data from the initialization image. */
+	memcpy(data, (void *)(tls->p_vaddr + bias), tls->p_filesz);
+	/* Zero out the thread local uninitialized data. */
+	memset(data + tls->p_filesz, 0, tls->p_memsz - tls->p_filesz);
+
+	return tcb;
+}
+
+static void *early_alloc(size_t align, size_t alloc_size)
+{
+	assert(align <= PAGE_SIZE);
+	alloc_size = ALIGN_UP(alloc_size, PAGE_SIZE);
+
+	void *area = as_area_create(AS_AREA_ANY, alloc_size,
+	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, AS_AREA_UNPAGED);
+	if (area == AS_MAP_FAILED)
+		return NULL;
+	return area;
+}
+
+/** Same as tls_make(), but uses as_area_create() instead of memalign().
+ *  Only used in __libc_main() if the program was created by the kernel.
+ */
+tcb_t *tls_make_initial(const void *elf)
+{
+	return tls_make_generic(elf, early_alloc);
+}
+
+/** Create TLS (Thread Local Storage) data structures.
+ *
+ * @return Pointer to TCB.
+ */
+tcb_t *tls_make(const void *elf)
+{
+	// TODO: Always use rtld.
+
+#ifdef CONFIG_RTLD
+	if (runtime_env != NULL)
+		return rtld_tls_make(runtime_env);
+#endif
+
+	return tls_make_generic(elf, memalign);
+}
+
+void tls_free(tcb_t *tcb)
+{
+#ifdef CONFIG_RTLD
+	free(tcb->dtv);
+
+	if (runtime_env != NULL) {
+		tls_free_arch(tcb, runtime_env->tls_size, runtime_env->tls_align);
+		return;
+	}
+#endif
+	const elf_segment_header_t *tls =
+	    elf_get_phdr(__progsymbols.elfstart, PT_TLS);
+
+	assert(tls != NULL);
+	tls_free_arch(tcb,
+	    ALIGN_UP(tls->p_memsz, tls->p_align) + sizeof(tcb_t),
+	    max(tls->p_align, _Alignof(tcb_t)));
+}
+
+#ifdef CONFIG_TLS_VARIANT_1
+/** Allocate TLS variant 1 data structures.
+ *
+ * @param data 		Start of TLS section. This is an output argument.
+ * @param size		Size of tdata + tbss section.
+ * @return 		Pointer to tcb_t structure.
+ */
+tcb_t *tls_alloc_variant_1(size_t size, size_t align)
+{
+	tcb_t *tcb = memalign(align, size);
+	if (!tcb)
+		return NULL;
+	memset(tcb, 0, sizeof(tcb_t));
+	return tcb;
+}
+
+/** Free TLS variant I data structures.
+ *
+ * @param tcb		Pointer to TCB structure.
+ * @param size		This argument is ignored.
+ */
+void tls_free_variant_1(tcb_t *tcb, size_t size, size_t align)
+{
+	free(tcb);
+}
+#endif
+
+#ifdef CONFIG_TLS_VARIANT_2
+/** Allocate TLS variant II data structures.
+ *
+ * @param data		Pointer to pointer to thread local data. This is
+ * 			actually an output argument.
+ * @param size		Size of thread local data.
+ * @param align		Alignment of thread local data.
+ * @return		Pointer to TCB structure.
+ */
+tcb_t *tls_alloc_variant_2(size_t size, size_t align)
+{
+	void *data = memalign(align, size);
+	if (data == NULL)
+		return NULL;
+
+	tcb_t *tcb = (tcb_t *) (data + size - sizeof(tcb_t));
+	memset(tcb, 0, sizeof(tcb_t));
+	tcb->self = tcb;
+	return tcb;
+}
+
+/** Free TLS variant II data structures.
+ *
+ * @param tcb		Pointer to TCB structure.
+ * @param size		Size of thread local data.
+ * @param align		Alignment of thread local data.
+ */
+void tls_free_variant_2(tcb_t *tcb, size_t size, size_t align)
+{
+	if (tcb != NULL) {
+		void *start = ((void *) tcb) + sizeof(tcb_t) - size;
+		free(start);
+	}
+}
+#endif
+
+/** @}
+ */
Index: uspace/lib/c/generic/tls.c
===================================================================
--- uspace/lib/c/generic/tls.c	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ 	(revision )
@@ -1,256 +1,0 @@
-/*
- * Copyright (c) 2006 Jakub Jermar
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup libc
- * @{
- */
-/** @file
- *
- * Support for thread-local storage, as described in:
- * 	Drepper U.: ELF Handling For Thread-Local Storage, 2005
- */
-
-#include <assert.h>
-#include <stddef.h>
-#include <align.h>
-#include <tls.h>
-#include <stdlib.h>
-#include <str.h>
-#include <macros.h>
-#include <elf/elf.h>
-#include <as.h>
-
-#include <libarch/config.h>
-
-#ifdef CONFIG_RTLD
-#include <rtld/rtld.h>
-#endif
-
-#include "private/libc.h"
-
-#if !defined(CONFIG_TLS_VARIANT_1) && !defined(CONFIG_TLS_VARIANT_2)
-#error Unknown TLS variant.
-#endif
-
-static ptrdiff_t _tcb_data_offset(void)
-{
-	const elf_segment_header_t *tls =
-	    elf_get_phdr(__progsymbols.elfstart, PT_TLS);
-
-	size_t tls_align = tls ? tls->p_align : 1;
-
-#ifdef CONFIG_TLS_VARIANT_1
-	return ALIGN_UP((ptrdiff_t) sizeof(tcb_t), tls_align);
-#else
-	size_t tls_size = tls ? tls->p_memsz : 0;
-	return -ALIGN_UP((ptrdiff_t) tls_size, max(tls_align, _Alignof(tcb_t)));
-#endif
-}
-
-/** Get address of static TLS block */
-void *tls_get(void)
-{
-#ifdef CONFIG_RTLD
-	assert(runtime_env == NULL);
-#endif
-	return (uint8_t *)__tcb_get() + _tcb_data_offset();
-}
-
-static tcb_t *tls_make_generic(const void *elf, void *(*alloc)(size_t, size_t))
-{
-	assert(!elf_get_phdr(elf, PT_DYNAMIC));
-#ifdef CONFIG_RTLD
-	assert(runtime_env == NULL);
-#endif
-
-	const elf_segment_header_t *tls = elf_get_phdr(elf, PT_TLS);
-	size_t tls_size = tls ? tls->p_memsz : 0;
-	size_t tls_align = tls ? tls->p_align : 1;
-
-	/*
-	 * We don't currently support alignment this big,
-	 * and neither should we need to.
-	 */
-	assert(tls_align <= PAGE_SIZE);
-
-#ifdef CONFIG_TLS_VARIANT_1
-	size_t alloc_size =
-	    ALIGN_UP(sizeof(tcb_t), tls_align) + tls_size;
-#else
-	size_t alloc_size =
-	    ALIGN_UP(tls_size, max(tls_align, _Alignof(tcb_t))) + sizeof(tcb_t);
-#endif
-
-	void *area = alloc(max(tls_align, _Alignof(tcb_t)), alloc_size);
-	if (!area)
-		return NULL;
-
-#ifdef CONFIG_TLS_VARIANT_1
-	tcb_t *tcb = area;
-	uint8_t *data = (uint8_t *)tcb + _tcb_data_offset();
-	memset(tcb, 0, sizeof(*tcb));
-#else
-	uint8_t *data = area;
-	tcb_t *tcb = (tcb_t *) (data - _tcb_data_offset());
-	memset(tcb, 0, sizeof(tcb_t));
-	tcb->self = tcb;
-#endif
-
-	if (!tls)
-		return tcb;
-
-	uintptr_t bias = elf_get_bias(elf);
-
-	/* Copy thread local data from the initialization image. */
-	memcpy(data, (void *)(tls->p_vaddr + bias), tls->p_filesz);
-	/* Zero out the thread local uninitialized data. */
-	memset(data + tls->p_filesz, 0, tls->p_memsz - tls->p_filesz);
-
-	return tcb;
-}
-
-static void *early_alloc(size_t align, size_t alloc_size)
-{
-	assert(align <= PAGE_SIZE);
-	alloc_size = ALIGN_UP(alloc_size, PAGE_SIZE);
-
-	void *area = as_area_create(AS_AREA_ANY, alloc_size,
-	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, AS_AREA_UNPAGED);
-	if (area == AS_MAP_FAILED)
-		return NULL;
-	return area;
-}
-
-/** Same as tls_make(), but uses as_area_create() instead of memalign().
- *  Only used in __libc_main() if the program was created by the kernel.
- */
-tcb_t *tls_make_initial(const void *elf)
-{
-	return tls_make_generic(elf, early_alloc);
-}
-
-/** Create TLS (Thread Local Storage) data structures.
- *
- * @return Pointer to TCB.
- */
-tcb_t *tls_make(const void *elf)
-{
-	// TODO: Always use rtld.
-
-#ifdef CONFIG_RTLD
-	if (runtime_env != NULL)
-		return rtld_tls_make(runtime_env);
-#endif
-
-	return tls_make_generic(elf, memalign);
-}
-
-void tls_free(tcb_t *tcb)
-{
-#ifdef CONFIG_RTLD
-	free(tcb->dtv);
-
-	if (runtime_env != NULL) {
-		tls_free_arch(tcb, runtime_env->tls_size, runtime_env->tls_align);
-		return;
-	}
-#endif
-	const elf_segment_header_t *tls =
-	    elf_get_phdr(__progsymbols.elfstart, PT_TLS);
-
-	assert(tls != NULL);
-	tls_free_arch(tcb,
-	    ALIGN_UP(tls->p_memsz, tls->p_align) + sizeof(tcb_t),
-	    max(tls->p_align, _Alignof(tcb_t)));
-}
-
-#ifdef CONFIG_TLS_VARIANT_1
-/** Allocate TLS variant 1 data structures.
- *
- * @param data 		Start of TLS section. This is an output argument.
- * @param size		Size of tdata + tbss section.
- * @return 		Pointer to tcb_t structure.
- */
-tcb_t *tls_alloc_variant_1(size_t size, size_t align)
-{
-	tcb_t *tcb = memalign(align, size);
-	if (!tcb)
-		return NULL;
-	memset(tcb, 0, sizeof(tcb_t));
-	return tcb;
-}
-
-/** Free TLS variant I data structures.
- *
- * @param tcb		Pointer to TCB structure.
- * @param size		This argument is ignored.
- */
-void tls_free_variant_1(tcb_t *tcb, size_t size, size_t align)
-{
-	free(tcb);
-}
-#endif
-
-#ifdef CONFIG_TLS_VARIANT_2
-/** Allocate TLS variant II data structures.
- *
- * @param data		Pointer to pointer to thread local data. This is
- * 			actually an output argument.
- * @param size		Size of thread local data.
- * @param align		Alignment of thread local data.
- * @return		Pointer to TCB structure.
- */
-tcb_t *tls_alloc_variant_2(size_t size, size_t align)
-{
-	void *data = memalign(align, size);
-	if (data == NULL)
-		return NULL;
-
-	tcb_t *tcb = (tcb_t *) (data + size - sizeof(tcb_t));
-	memset(tcb, 0, sizeof(tcb_t));
-	tcb->self = tcb;
-	return tcb;
-}
-
-/** Free TLS variant II data structures.
- *
- * @param tcb		Pointer to TCB structure.
- * @param size		Size of thread local data.
- * @param align		Alignment of thread local data.
- */
-void tls_free_variant_2(tcb_t *tcb, size_t size, size_t align)
-{
-	if (tcb != NULL) {
-		void *start = ((void *) tcb) + sizeof(tcb_t) - size;
-		free(start);
-	}
-}
-#endif
-
-/** @}
- */
Index: uspace/lib/c/include/fibril.h
===================================================================
--- uspace/lib/c/include/fibril.h	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ uspace/lib/c/include/fibril.h	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -49,10 +49,4 @@
 typedef fibril_t *fid_t;
 
-typedef struct {
-	fibril_t *fibril;
-} fibril_event_t;
-
-#define FIBRIL_EVENT_INIT ((fibril_event_t) {0})
-
 /** Fibril-local variable specifier */
 #define fibril_local __thread
@@ -82,11 +76,4 @@
 extern __noreturn void fibril_exit(long);
 
-extern void fibril_wait_for(fibril_event_t *);
-extern errno_t fibril_wait_timeout(fibril_event_t *, const struct timeval *);
-extern void fibril_notify(fibril_event_t *);
-
-extern errno_t fibril_ipc_wait(ipc_call_t *, const struct timeval *);
-extern void fibril_ipc_poke(void);
-
 #endif
 
Index: uspace/lib/c/include/fibril_synch.h
===================================================================
--- uspace/lib/c/include/fibril_synch.h	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ uspace/lib/c/include/fibril_synch.h	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -41,43 +41,4 @@
 #include <sys/time.h>
 #include <stdbool.h>
-#include <futex.h>
-
-/**
- * "Restricted" fibril mutex.
- *
- * Similar to `fibril_mutex_t`, but has a set of restrictions placed on its
- * use. Within a rmutex critical section, you
- *         - may not use any other synchronization primitive,
- *           save for another `fibril_rmutex_t`. This includes nonblocking
- *           operations like cvar signal and mutex unlock.
- *         - may not read IPC messages
- *         - may not start a new thread/fibril
- *           (creating fibril without starting is fine)
- *
- * Additionally, locking with a timeout is not possible on this mutex,
- * and there is no associated condition variable type.
- * This is a design constraint, not a lack of implementation effort.
- */
-typedef struct {
-	// TODO: At this point, this is just silly handwaving to hide current
-	//       futex use behind a fibril based abstraction. Later, the imple-
-	//       mentation will change, but the restrictions placed on this type
-	//       will allow it to be simpler and faster than a regular mutex.
-	//       There might also be optional debug checking of the assumptions.
-	//
-	//       Note that a consequence of the restrictions is that if we are
-	//       running on a single thread, no other fibril can ever get to run
-	//       while a fibril has a rmutex locked. That means that for
-	//       single-threaded programs, we can reduce all rmutex locks and
-	//       unlocks to simple branches on a global bool variable.
-
-	futex_t futex;
-} fibril_rmutex_t;
-
-#define FIBRIL_RMUTEX_INITIALIZER(name) \
-	{ .futex = FUTEX_INITIALIZE(1) }
-
-#define FIBRIL_RMUTEX_INITIALIZE(name) \
-	fibril_rmutex_t name = FIBRIL_RMUTEX_INITIALIZER(name)
 
 typedef struct {
@@ -186,4 +147,5 @@
 	long int count;
 	list_t waiters;
+	bool closed;
 } fibril_semaphore_t;
 
@@ -201,9 +163,4 @@
 #define FIBRIL_SEMAPHORE_INITIALIZE(name, cnt) \
 	fibril_semaphore_t name = FIBRIL_SEMAPHORE_INITIALIZER(name, cnt)
-
-extern void fibril_rmutex_initialize(fibril_rmutex_t *);
-extern void fibril_rmutex_lock(fibril_rmutex_t *);
-extern bool fibril_rmutex_trylock(fibril_rmutex_t *);
-extern void fibril_rmutex_unlock(fibril_rmutex_t *);
 
 extern void fibril_mutex_initialize(fibril_mutex_t *);
@@ -241,4 +198,13 @@
 extern void fibril_semaphore_up(fibril_semaphore_t *);
 extern void fibril_semaphore_down(fibril_semaphore_t *);
+extern errno_t fibril_semaphore_down_timeout(fibril_semaphore_t *, suseconds_t);
+extern void fibril_semaphore_close(fibril_semaphore_t *);
+
+typedef struct mpsc mpsc_t;
+extern mpsc_t *mpsc_create(size_t);
+extern void mpsc_destroy(mpsc_t *);
+extern errno_t mpsc_send(mpsc_t *, const void *);
+extern errno_t mpsc_receive(mpsc_t *, void *, const struct timeval *);
+extern void mpsc_close(mpsc_t *);
 
 #endif
Index: uspace/lib/c/include/futex.h
===================================================================
--- uspace/lib/c/include/futex.h	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ 	(revision )
@@ -1,232 +1,0 @@
-/*
- * Copyright (c) 2006 Jakub Jermar
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup libc
- * @{
- */
-/** @file
- */
-
-#ifndef LIBC_FUTEX_H_
-#define LIBC_FUTEX_H_
-
-#include <assert.h>
-#include <atomic.h>
-#include <errno.h>
-#include <libc.h>
-#include <time.h>
-
-typedef struct futex {
-	atomic_t val;
-#ifdef CONFIG_DEBUG_FUTEX
-	void *owner;
-#endif
-} futex_t;
-
-extern void futex_initialize(futex_t *futex, int value);
-
-#ifdef CONFIG_DEBUG_FUTEX
-
-#define FUTEX_INITIALIZE(val) {{ (val) }, NULL }
-#define FUTEX_INITIALIZER     FUTEX_INITIALIZE(1)
-
-void __futex_assert_is_locked(futex_t *, const char *);
-void __futex_assert_is_not_locked(futex_t *, const char *);
-void __futex_lock(futex_t *, const char *);
-void __futex_unlock(futex_t *, const char *);
-bool __futex_trylock(futex_t *, const char *);
-void __futex_give_to(futex_t *, void *, const char *);
-
-#define futex_lock(futex) __futex_lock((futex), #futex)
-#define futex_unlock(futex) __futex_unlock((futex), #futex)
-#define futex_trylock(futex) __futex_trylock((futex), #futex)
-
-#define futex_give_to(futex, new_owner) __futex_give_to((futex), (new_owner), #futex)
-#define futex_assert_is_locked(futex) __futex_assert_is_locked((futex), #futex)
-#define futex_assert_is_not_locked(futex) __futex_assert_is_not_locked((futex), #futex)
-
-#else
-
-#define FUTEX_INITIALIZE(val) {{ (val) }}
-#define FUTEX_INITIALIZER     FUTEX_INITIALIZE(1)
-
-#define futex_lock(fut)     (void) futex_down((fut))
-#define futex_trylock(fut)  futex_trydown((fut))
-#define futex_unlock(fut)   (void) futex_up((fut))
-
-#define futex_give_to(fut, owner) ((void)0)
-#define futex_assert_is_locked(fut) assert((atomic_signed_t) (fut)->val.count <= 0)
-#define futex_assert_is_not_locked(fut) ((void)0)
-
-#endif
-
-/** Down the futex with timeout, composably.
- *
- * This means that when the operation fails due to a timeout or being
- * interrupted, the next futex_up() is ignored, which allows certain kinds of
- * composition of synchronization primitives.
- *
- * In most other circumstances, regular futex_down_timeout() is a better choice.
- *
- * @param futex Futex.
- *
- * @return ENOENT if there is no such virtual address.
- * @return ETIMEOUT if timeout expires.
- * @return EOK on success.
- * @return Error code from <errno.h> otherwise.
- *
- */
-static inline errno_t futex_down_composable(futex_t *futex, const struct timeval *expires)
-{
-	// TODO: Add tests for this.
-
-	if ((atomic_signed_t) atomic_predec(&futex->val) >= 0)
-		return EOK;
-
-	suseconds_t timeout;
-
-	if (!expires) {
-		/* No timeout. */
-		timeout = 0;
-	} else {
-		if (expires->tv_sec == 0) {
-			/* We can't just return ETIMEOUT. That wouldn't be composable. */
-			timeout = 1;
-		} else {
-			struct timeval tv;
-			getuptime(&tv);
-			timeout = tv_gteq(&tv, expires) ? 1 :
-			    tv_sub_diff(expires, &tv);
-		}
-
-		assert(timeout > 0);
-	}
-
-	return __SYSCALL2(SYS_FUTEX_SLEEP, (sysarg_t) &futex->val.count, (sysarg_t) timeout);
-}
-
-/** Up the futex.
- *
- * @param futex Futex.
- *
- * @return ENOENT if there is no such virtual address.
- * @return EOK on success.
- * @return Error code from <errno.h> otherwise.
- *
- */
-static inline errno_t futex_up(futex_t *futex)
-{
-	if ((atomic_signed_t) atomic_postinc(&futex->val) < 0)
-		return __SYSCALL1(SYS_FUTEX_WAKEUP, (sysarg_t) &futex->val.count);
-
-	return EOK;
-}
-
-static inline errno_t futex_down_timeout(futex_t *futex, const struct timeval *expires)
-{
-	if (expires && expires->tv_sec == 0 && expires->tv_usec == 0) {
-		/* Nonblocking down. */
-
-		/*
-		 * Try good old CAS a few times.
-		 * Not too much though, we don't want to bloat the caller.
-		 */
-		for (int i = 0; i < 2; i++) {
-			atomic_signed_t old = atomic_get(&futex->val);
-			if (old <= 0)
-				return ETIMEOUT;
-
-			if (cas(&futex->val, old, old - 1))
-				return EOK;
-		}
-
-		// TODO: builtin atomics with relaxed ordering can make this
-		//       faster.
-
-		/*
-		 * If we don't succeed with CAS, we can't just return failure
-		 * because that would lead to spurious failures where
-		 * futex_down_timeout returns ETIMEOUT despite there being
-		 * available tokens. That could break some algorithms.
-		 * We also don't want to loop on CAS indefinitely, because
-		 * that would make the semaphore not wait-free, even when all
-		 * atomic operations and the underlying base semaphore are all
-		 * wait-free.
-		 * Instead, we fall back to regular down_timeout(), with
-		 * an already expired deadline. That way we delegate all these
-		 * concerns to the base semaphore.
-		 */
-	}
-
-	/*
-	 * This combination of a "composable" sleep followed by futex_up() on
-	 * failure is necessary to prevent breakage due to certain race
-	 * conditions.
-	 */
-	errno_t rc = futex_down_composable(futex, expires);
-	if (rc != EOK)
-		futex_up(futex);
-	return rc;
-}
-
-/** Try to down the futex.
- *
- * @param futex Futex.
- *
- * @return true if the futex was acquired.
- * @return false if the futex was not acquired.
- *
- */
-static inline bool futex_trydown(futex_t *futex)
-{
-	/*
-	 * down_timeout with an already expired deadline should behave like
-	 * trydown.
-	 */
-	struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
-	return futex_down_timeout(futex, &tv) == EOK;
-}
-
-/** Down the futex.
- *
- * @param futex Futex.
- *
- * @return ENOENT if there is no such virtual address.
- * @return EOK on success.
- * @return Error code from <errno.h> otherwise.
- *
- */
-static inline errno_t futex_down(futex_t *futex)
-{
-	return futex_down_timeout(futex, NULL);
-}
-
-#endif
-
-/** @}
- */
Index: uspace/lib/c/include/ipc/common.h
===================================================================
--- uspace/lib/c/include/ipc/common.h	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ uspace/lib/c/include/ipc/common.h	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -37,8 +37,7 @@
 
 #include <abi/ipc/ipc.h>
-#include <atomic.h>
 #include <abi/proc/task.h>
-#include <futex.h>
 #include <abi/cap.h>
+#include <types/common.h>
 
 #define IPC_FLAG_BLOCKING  0x01
Index: uspace/lib/drv/include/dev_iface.h
===================================================================
--- uspace/lib/drv/include/dev_iface.h	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ uspace/lib/drv/include/dev_iface.h	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -38,4 +38,5 @@
 #include <ipc/common.h>
 #include <ipc/dev_iface.h>
+#include <stdbool.h>
 
 /*
Index: uspace/lib/pcut/Makefile
===================================================================
--- uspace/lib/pcut/Makefile	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ uspace/lib/pcut/Makefile	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -36,4 +36,4 @@
 
 test-libpcut-%: $(LIBRARY).a
-	$(CC) $(CFLAGS) $(LDFLAGS) -T $(LINKER_SCRIPT) -o $@ $(START_FILES) $^ $(LIBRARY).a $(BASE_LIBS)
+	$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(START_FILES) $^ $(LIBRARY).a $(BASE_LIBS)
 
Index: uspace/lib/pcut/update-from-master.sh
===================================================================
--- uspace/lib/pcut/update-from-master.sh	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ uspace/lib/pcut/update-from-master.sh	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -80,5 +80,5 @@
 
 test-libpcut-%: $(LIBRARY).a
-	$(CC) $(CFLAGS) $(LDFLAGS) -T $(LINKER_SCRIPT) -o $@ $(START_FILES) $^ $(LIBRARY).a $(BASE_LIBS)
+	$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(START_FILES) $^ $(LIBRARY).a $(BASE_LIBS)
 
 EOF_MAKEFILE_TAIL
Index: uspace/lib/posix/Makefile
===================================================================
--- uspace/lib/posix/Makefile	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ uspace/lib/posix/Makefile	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -36,6 +36,4 @@
 
 SPECS = gcc.specs
-LIBC_LINKER_SCRIPT = $(LIBC_PREFIX)/arch/$(UARCH)/_link.ld
-EXPORT_LINKER_SCRIPT = link.ld
 
 EXPORT_FILES = \
@@ -48,5 +46,4 @@
 	$(LIBC_PREFIX)/crt1.o \
 	$(LIBRARY).a \
-	$(EXPORT_LINKER_SCRIPT) \
 	$(SPECS)
 
@@ -54,5 +51,5 @@
 EXTRA_CLEAN = $(INCLUDE_LIBC)
 
-EXTRA_OUTPUT = $(SPECS) $(EXPORT_LINKER_SCRIPT) $(EXPORT_STARTUP_FILE)
+EXTRA_OUTPUT = $(SPECS)
 
 SOURCES = \
@@ -92,5 +89,4 @@
 EXPORT_LDFLAGS = \
 	-L$$(HELENOS_EXPORT_ROOT)/lib \
-	-T link.ld \
 	$$(HELENOS_EXPORT_ROOT)/lib/crt0.o \
 	$$(HELENOS_EXPORT_ROOT)/lib/crt1.o
@@ -114,10 +110,4 @@
 	echo '$(EXPORT_LDLIBS)' >> $@.new
 	mv $@.new $@
-
-$(EXPORT_LINKER_SCRIPT): $(LIBC_LINKER_SCRIPT)
-	cp $< $@
-
-$(EXPORT_STARTUP_FILE): $(LIBC_STARTUP_FILE)
-	cp $< $@
 
 $(INCLUDE_LIBC): $(shell find ../c/include -name '*.h')
Index: uspace/srv/hid/input/ctl/stty.c
===================================================================
--- uspace/srv/hid/input/ctl/stty.c	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ uspace/srv/hid/input/ctl/stty.c	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -38,4 +38,5 @@
  */
 
+#include <errno.h>
 #include <io/keycode.h>
 #include "../stroke.h"
Index: uspace/srv/loader/Makefile
===================================================================
--- uspace/srv/loader/Makefile	(revision 42964a73ef087dec0f80db3182b0ff5a3f79b500)
+++ uspace/srv/loader/Makefile	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -29,6 +29,19 @@
 
 USPACE_PREFIX = ../..
+ROOT_PATH = $(USPACE_PREFIX)/..
+CONFIG_MAKEFILE = $(ROOT_PATH)/Makefile.config
 
-EXTRA_LDFLAGS = -Wl,-Ttext-segment=0x70000000
+include $(CONFIG_MAKEFILE)
+
+ifeq ($(UARCH),ia64)
+	# IA64 has a peculiar linker script with a fixed data segment address.
+	# Because the loader is a separate nonrelocatable binary in the same
+	# address space as the application, we provide a modified copy of
+	# the default linker script to work around that.
+	EXTRA_LDFLAGS = -T elf64_ia64_loader.x
+else
+	# On all other architectures, we can simply move the text segment.
+	EXTRA_LDFLAGS = -Wl,-Ttext-segment=0x70000000
+endif
 
 BINARY = loader
Index: uspace/srv/loader/elf64_ia64_loader.x
===================================================================
--- uspace/srv/loader/elf64_ia64_loader.x	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
+++ uspace/srv/loader/elf64_ia64_loader.x	(revision 9b7adc385e10a6ce5a0bedf71fdcd0ae1b9fd585)
@@ -0,0 +1,263 @@
+/* Default linker script, for normal executables */
+/* Copyright (C) 2014-2018 Free Software Foundation, Inc.
+   Copying and distribution of this script, with or without modification,
+   are permitted in any medium without royalty provided the copyright
+   notice and this notice are preserved.  */
+OUTPUT_FORMAT("elf64-ia64-little", "elf64-ia64-little",
+	      "elf64-ia64-little")
+OUTPUT_ARCH(ia64)
+ENTRY(_start)
+SEARCH_DIR("=/lib");
+SECTIONS
+{
+  /* Read-only sections, merged into text segment: */
+  PROVIDE (__executable_start = SEGMENT_START("text-segment", 0x7FFF000000000000)); . = SEGMENT_START("text-segment", 0x7FFF000000000000) + SIZEOF_HEADERS;
+  .interp         : { *(.interp) }
+  .note.gnu.build-id : { *(.note.gnu.build-id) }
+  .hash           : { *(.hash) }
+  .gnu.hash       : { *(.gnu.hash) }
+  .dynsym         : { *(.dynsym) }
+  .dynstr         : { *(.dynstr) }
+  .gnu.version    : { *(.gnu.version) }
+  .gnu.version_d  : { *(.gnu.version_d) }
+  .gnu.version_r  : { *(.gnu.version_r) }
+  .rel.init       : { *(.rel.init) }
+  .rela.init      : { *(.rela.init) }
+  .rel.text       : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+  .rela.text      : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+  .rel.fini       : { *(.rel.fini) }
+  .rela.fini      : { *(.rela.fini) }
+  .rel.rodata     : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+  .rela.rodata    : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+  .rel.data.rel.ro   : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) }
+  .rela.data.rel.ro   : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) }
+  .rel.data       : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+  .rela.data      : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+  .rel.tdata	  : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+  .rela.tdata	  : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+  .rel.tbss	  : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+  .rela.tbss	  : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+  .rel.ctors      : { *(.rel.ctors) }
+  .rela.ctors     : { *(.rela.ctors) }
+  .rel.dtors      : { *(.rel.dtors) }
+  .rela.dtors     : { *(.rela.dtors) }
+  .rel.got        : { *(.rel.got) }
+  .rela.got       : { *(.rela.got) }
+  .rel.sdata      : { *(.rel.sdata .rel.sdata.* .rel.gnu.linkonce.s.*) }
+  .rela.sdata     : { *(.rela.sdata .rela.sdata.* .rela.gnu.linkonce.s.*) }
+  .rel.sbss       : { *(.rel.sbss .rel.sbss.* .rel.gnu.linkonce.sb.*) }
+  .rela.sbss      : { *(.rela.sbss .rela.sbss.* .rela.gnu.linkonce.sb.*) }
+  .rel.sdata2     : { *(.rel.sdata2 .rel.sdata2.* .rel.gnu.linkonce.s2.*) }
+  .rela.sdata2    : { *(.rela.sdata2 .rela.sdata2.* .rela.gnu.linkonce.s2.*) }
+  .rel.sbss2      : { *(.rel.sbss2 .rel.sbss2.* .rel.gnu.linkonce.sb2.*) }
+  .rela.sbss2     : { *(.rela.sbss2 .rela.sbss2.* .rela.gnu.linkonce.sb2.*) }
+  .rel.bss        : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+  .rela.bss       : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+  .rel.iplt       :
+    {
+      PROVIDE_HIDDEN (__rel_iplt_start = .);
+      *(.rel.iplt)
+      PROVIDE_HIDDEN (__rel_iplt_end = .);
+    }
+  .rela.iplt      :
+    {
+      PROVIDE_HIDDEN (__rela_iplt_start = .);
+      *(.rela.iplt)
+      PROVIDE_HIDDEN (__rela_iplt_end = .);
+    }
+  .rel.plt        :
+    {
+      *(.rel.plt)
+    }
+  .rela.plt       :
+    {
+      *(.rela.plt)
+    }
+  .rela.IA_64.pltoff   : { *(.rela.IA_64.pltoff) }
+  .init           :
+  {
+    KEEP (*(SORT_NONE(.init)))
+  } =0x00300000010070000002000001000400
+  .plt            : { *(.plt) }
+  .iplt           : { *(.iplt) }
+  .text           :
+  {
+    *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+    *(.text.exit .text.exit.*)
+    *(.text.startup .text.startup.*)
+    *(.text.hot .text.hot.*)
+    *(.text .stub .text.* .gnu.linkonce.t.*)
+    /* .gnu.warning sections are handled specially by elf32.em.  */
+    *(.gnu.warning)
+  } =0x00300000010070000002000001000400
+  .fini           :
+  {
+    KEEP (*(SORT_NONE(.fini)))
+  } =0x00300000010070000002000001000400
+  PROVIDE (__etext = .);
+  PROVIDE (_etext = .);
+  PROVIDE (etext = .);
+  .rodata         : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+  .rodata1        : { *(.rodata1) }
+  .sdata2         :
+  {
+    *(.sdata2 .sdata2.* .gnu.linkonce.s2.*)
+  }
+  .sbss2          : { *(.sbss2 .sbss2.* .gnu.linkonce.sb2.*) }
+  .opd            : { *(.opd) }
+  .IA_64.unwind_info   : { *(.IA_64.unwind_info* .gnu.linkonce.ia64unwi.*) }
+  .IA_64.unwind   : { *(.IA_64.unwind* .gnu.linkonce.ia64unw.*) }
+  .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
+  .eh_frame       : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+  .gcc_except_table   : ONLY_IF_RO { *(.gcc_except_table
+  .gcc_except_table.*) }
+  .gnu_extab   : ONLY_IF_RO { *(.gnu_extab*) }
+  /* These sections are generated by the Sun/Oracle C++ compiler.  */
+  .exception_ranges   : ONLY_IF_RO { *(.exception_ranges
+  .exception_ranges*) }
+  /* Adjust the address for the data segment.  We want to adjust up to
+     the same address within the page on the next page up.  */
+  . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+  /* Exception handling  */
+  .eh_frame       : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+  .gnu_extab      : ONLY_IF_RW { *(.gnu_extab) }
+  .gcc_except_table   : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+  .exception_ranges   : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+  /* Thread Local Storage sections  */
+  .tdata	  : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+  .tbss		  : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+  .preinit_array     :
+  {
+    PROVIDE_HIDDEN (__preinit_array_start = .);
+    KEEP (*(.preinit_array))
+    PROVIDE_HIDDEN (__preinit_array_end = .);
+  }
+  .init_array     :
+  {
+    PROVIDE_HIDDEN (__init_array_start = .);
+    KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+    KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+    PROVIDE_HIDDEN (__init_array_end = .);
+  }
+  .fini_array     :
+  {
+    PROVIDE_HIDDEN (__fini_array_start = .);
+    KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+    KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+    PROVIDE_HIDDEN (__fini_array_end = .);
+  }
+  .jcr            : { KEEP (*(.jcr)) }
+  .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+  .dynamic        : { *(.dynamic) }
+  .data           :
+  {
+    *(.data .data.* .gnu.linkonce.d.*)
+    SORT(CONSTRUCTORS)
+  }
+  .data1          : { *(.data1) }
+  .ctors          :
+  {
+    /* gcc uses crtbegin.o to find the start of
+       the constructors, so we make sure it is
+       first.  Because this is a wildcard, it
+       doesn't matter if the user does not
+       actually link against crtbegin.o; the
+       linker won't look for a file to match a
+       wildcard.  The wildcard also means that it
+       doesn't matter which directory crtbegin.o
+       is in.  */
+    KEEP (*crtbegin.o(.ctors))
+    KEEP (*crtbegin?.o(.ctors))
+    /* We don't want to include the .ctor section from
+       the crtend.o file until after the sorted ctors.
+       The .ctor section from the crtend file contains the
+       end of ctors marker and it must be last */
+    KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors))
+    KEEP (*(SORT(.ctors.*)))
+    KEEP (*(.ctors))
+  }
+  .dtors          :
+  {
+    KEEP (*crtbegin.o(.dtors))
+    KEEP (*crtbegin?.o(.dtors))
+    KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors))
+    KEEP (*(SORT(.dtors.*)))
+    KEEP (*(.dtors))
+  }
+  .got            : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+  .IA_64.pltoff   : { *(.IA_64.pltoff) }
+  /* We want the small data sections together, so single-instruction offsets
+     can access them all, and initialized data all before uninitialized, so
+     we can shorten the on-disk segment size.  */
+  .sdata          :
+  {
+    *(.sdata .sdata.* .gnu.linkonce.s.*)
+  }
+  _edata = .; PROVIDE (edata = .);
+  . = .;
+  __bss_start = .;
+  .sbss           :
+  {
+    *(.dynsbss)
+    *(.sbss .sbss.* .gnu.linkonce.sb.*)
+    *(.scommon)
+  }
+  .bss            :
+  {
+   *(.dynbss)
+   *(.bss .bss.* .gnu.linkonce.b.*)
+   *(COMMON)
+   /* Align here to ensure that the .bss section occupies space up to
+      _end.  Align after .bss to ensure correct alignment even if the
+      .bss section disappears because there are no input sections.
+      FIXME: Why do we need it? When there is no .bss section, we don't
+      pad the .data section.  */
+   . = ALIGN(. != 0 ? 64 / 8 : 1);
+  }
+  . = ALIGN(64 / 8);
+  . = SEGMENT_START("ldata-segment", .);
+  . = ALIGN(64 / 8);
+  _end = .; PROVIDE (end = .);
+  /* Stabs debugging sections.  */
+  .stab          0 : { *(.stab) }
+  .stabstr       0 : { *(.stabstr) }
+  .stab.excl     0 : { *(.stab.excl) }
+  .stab.exclstr  0 : { *(.stab.exclstr) }
+  .stab.index    0 : { *(.stab.index) }
+  .stab.indexstr 0 : { *(.stab.indexstr) }
+  .comment       0 : { *(.comment) }
+  /* DWARF debug sections.
+     Symbols in the DWARF debugging sections are relative to the beginning
+     of the section so we begin them at 0.  */
+  /* DWARF 1 */
+  .debug          0 : { *(.debug) }
+  .line           0 : { *(.line) }
+  /* GNU DWARF 1 extensions */
+  .debug_srcinfo  0 : { *(.debug_srcinfo) }
+  .debug_sfnames  0 : { *(.debug_sfnames) }
+  /* DWARF 1.1 and DWARF 2 */
+  .debug_aranges  0 : { *(.debug_aranges) }
+  .debug_pubnames 0 : { *(.debug_pubnames) }
+  /* DWARF 2 */
+  .debug_info     0 : { *(.debug_info .gnu.linkonce.wi.*) }
+  .debug_abbrev   0 : { *(.debug_abbrev) }
+  .debug_line     0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+  .debug_frame    0 : { *(.debug_frame) }
+  .debug_str      0 : { *(.debug_str) }
+  .debug_loc      0 : { *(.debug_loc) }
+  .debug_macinfo  0 : { *(.debug_macinfo) }
+  /* SGI/MIPS DWARF 2 extensions */
+  .debug_weaknames 0 : { *(.debug_weaknames) }
+  .debug_funcnames 0 : { *(.debug_funcnames) }
+  .debug_typenames 0 : { *(.debug_typenames) }
+  .debug_varnames  0 : { *(.debug_varnames) }
+  /* DWARF 3 */
+  .debug_pubtypes 0 : { *(.debug_pubtypes) }
+  .debug_ranges   0 : { *(.debug_ranges) }
+  /* DWARF Extension.  */
+  .debug_macro    0 : { *(.debug_macro) }
+  .debug_addr     0 : { *(.debug_addr) }
+  .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+  /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) }
+}
+
