Index: kernel/generic/src/cpu/cpu.c
===================================================================
--- kernel/generic/src/cpu/cpu.c	(revision 1761268c1ab1e57b9fa5694aead0b2e9952761b6)
+++ kernel/generic/src/cpu/cpu.c	(revision 7aaed09d88be49fac8360d3017e3328ed9b0635c)
@@ -74,5 +74,5 @@
 		for (i = 0; i < config.cpu_count; i++) {
 			cpus[i].stack = (uint8_t *) frame_alloc(STACK_FRAMES,
-			    FRAME_KA | FRAME_ATOMIC);
+			    FRAME_LOWMEM | FRAME_KA | FRAME_ATOMIC);
 			cpus[i].id = i;
 			
Index: kernel/generic/src/lib/ra.c
===================================================================
--- kernel/generic/src/lib/ra.c	(revision 7aaed09d88be49fac8360d3017e3328ed9b0635c)
+++ kernel/generic/src/lib/ra.c	(revision 7aaed09d88be49fac8360d3017e3328ed9b0635c)
@@ -0,0 +1,424 @@
+/*
+ * Copyright (c) 2011 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup generic
+ * @{
+ */
+
+/**
+ * @file
+ * @brief Resource allocator.
+ *
+ * This is a generic resource allocator, loosely based on the ideas presented
+ * in chapter 4 of the following paper and further simplified:
+ *
+ *   Bonwick J., Adams J.: Magazines and Vmem: Extending the Slab Allocator to
+ *   Many CPUs and Arbitrary Resources, USENIX 2001
+ *
+ */
+
+#include <lib/ra.h>
+#include <typedefs.h>
+#include <mm/slab.h>
+#include <bitops.h>
+#include <debug.h>
+#include <panic.h>
+#include <adt/list.h>
+#include <adt/hash_table.h>
+#include <align.h>
+#include <macros.h>
+#include <synch/spinlock.h>
+
+#define USED_BUCKETS	1024
+
+static size_t used_hash(sysarg_t *key)
+{
+	return ((*key >> 2) & (USED_BUCKETS - 1));
+}
+
+static bool used_compare(sysarg_t *key, size_t keys, link_t *item)
+{
+	ra_segment_t *seg;
+
+	seg = hash_table_get_instance(item, ra_segment_t, fu_link);
+	return seg->base == *key;
+}
+
+static hash_table_operations_t used_ops = {
+	.hash = used_hash,
+	.compare = used_compare,
+	.remove_callback = NULL,
+};
+
+/** Calculate the segment size. */
+static size_t ra_segment_size_get(ra_segment_t *seg)
+{
+	ra_segment_t *nextseg;
+
+	nextseg = list_get_instance(seg->segment_link.next, ra_segment_t,
+	    segment_link);
+	return nextseg->base - seg->base;
+}
+
+static ra_segment_t *ra_segment_create(uintptr_t base)
+{
+	ra_segment_t *seg;
+
+	seg = (ra_segment_t *) malloc(sizeof(ra_segment_t), FRAME_ATOMIC);
+	if (!seg)
+		return NULL;
+
+	link_initialize(&seg->segment_link);
+	link_initialize(&seg->fu_link);
+
+	seg->base = base;
+	seg->flags = 0;
+
+	return seg;
+}
+
+static void ra_segment_destroy(ra_segment_t *seg)
+{
+	free(seg);
+}
+
+static ra_span_t *ra_span_create(uintptr_t base, size_t size)
+{
+	ra_span_t *span;
+	ra_segment_t *seg, *lastseg;
+	unsigned int i;
+
+	span = (ra_span_t *) malloc(sizeof(ra_span_t), FRAME_ATOMIC);
+	if (!span)
+		return NULL;
+
+	span->max_order = fnzb(size);
+	span->base = base;
+	span->size = size;
+
+	span->free = (list_t *) malloc((span->max_order + 1) * sizeof(list_t),
+	    FRAME_ATOMIC);
+	if (!span->free) {
+		free(span);
+		return NULL;
+	}
+
+	/*
+	 * Create a segment to represent the entire size of the span.
+	 */
+	seg = ra_segment_create(base);
+	if (!seg) {
+		free(span->free);
+		free(span);
+		return NULL;
+	}
+	seg->flags = RA_SEGMENT_FREE;
+
+	/*
+	 * The last segment will be used as a sentinel at the end of the
+	 * segment list so that it is possible to calculate the size for
+	 * all other segments. It will not be placed in any free list or
+	 * in the used segment hash and adjacent segments will not be
+	 * coalesced with it.
+	 */
+	lastseg = ra_segment_create(base + size);
+	if (!lastseg) {
+		ra_segment_destroy(seg);
+		free(span->free);
+		free(span);
+		return NULL;
+	}
+
+	link_initialize(&span->span_link);
+	list_initialize(&span->segments);
+
+	hash_table_create(&span->used, USED_BUCKETS, 1, &used_ops);
+
+	for (i = 0; i <= span->max_order; i++)
+		list_initialize(&span->free[i]);
+
+	/* Insert the first segment into the list of segments. */
+	list_append(&seg->segment_link, &span->segments);
+	/* Insert the last segment into the list of segments. */
+	list_append(&lastseg->segment_link, &span->segments);
+
+	/* Insert the first segment into the respective free list. */
+	list_append(&seg->fu_link, &span->free[span->max_order]);
+
+	return span;
+}
+
+/** Create an empty arena. */
+ra_arena_t *ra_arena_create(void)
+{
+	ra_arena_t *arena;
+
+	arena = (ra_arena_t *) malloc(sizeof(ra_arena_t), FRAME_ATOMIC);
+	if (!arena)
+		return NULL;
+
+	spinlock_initialize(&arena->lock, "arena_lock");
+	list_initialize(&arena->spans);
+
+	return arena;
+}
+
+/** Add a span to arena. */
+bool ra_span_add(ra_arena_t *arena, uintptr_t base, size_t size)
+{
+	ra_span_t *span;
+
+	/*
+	 * At the moment, we can only create resources that don't include 0.
+	 * If 0 needs to be considered as a valid resource, we would need to
+	 * slightly change the API of the resource allocator.
+	 */
+	if (base == 0)
+		return false;
+
+	span = ra_span_create(base, size);
+	if (!span)
+		return false;
+
+	/* TODO: check for overlaps */
+	spinlock_lock(&arena->lock);
+	list_append(&span->span_link, &arena->spans);
+	spinlock_unlock(&arena->lock);
+	return true;
+}
+
+static uintptr_t ra_span_alloc(ra_span_t *span, size_t size, size_t align)
+{
+	/*
+	 * We need to add a maximum of align - 1 to be able to compensate for
+	 * the worst case unaligned segment.
+	 */
+	size_t needed = size + align - 1;
+	size_t order = ispwr2(needed) ? fnzb(needed) : fnzb(needed) + 1;
+	ra_segment_t *pred = NULL;
+	ra_segment_t *succ = NULL;
+
+	/*
+	 * Find the free list of the smallest order which can satisfy this
+	 * request.
+	 */
+	for (; order <= span->max_order; order++) {
+		ra_segment_t *seg;
+		uintptr_t newbase;
+
+		if (list_empty(&span->free[order]))
+			continue;
+
+		/* Take the first segment from the free list. */
+		seg = list_get_instance(list_first(&span->free[order]),
+		    ra_segment_t, fu_link);
+
+		ASSERT(seg->flags & RA_SEGMENT_FREE);
+
+		/*
+		 * See if we need to allocate new segments for the chopped-off
+		 * parts of this segment.
+		 */
+		if (!IS_ALIGNED(seg->base, align)) {
+			pred = ra_segment_create(seg->base);
+			if (!pred) {
+				/*
+				 * Fail as we are unable to split the segment.
+				 */
+				break;
+			}
+			pred->flags |= RA_SEGMENT_FREE;
+		}
+		newbase = ALIGN_UP(seg->base, align);
+		if (newbase + size != seg->base + ra_segment_size_get(seg)) {
+			ASSERT(newbase + (size - 1) < seg->base +
+			    (ra_segment_size_get(seg) - 1));
+			succ = ra_segment_create(newbase + size);
+			if (!succ) {
+				if (pred)
+					ra_segment_destroy(pred);
+				/*
+				 * Fail as we are unable to split the segment.
+				 */
+				break;
+			}
+			succ->flags |= RA_SEGMENT_FREE;
+		}
+		
+	
+		/* Put unneeded parts back. */
+		if (pred) {
+			size_t pred_order;
+
+			list_insert_before(&pred->segment_link,
+			    &seg->segment_link);
+			pred_order = fnzb(ra_segment_size_get(pred));
+			list_append(&pred->fu_link, &span->free[pred_order]);
+		}
+		if (succ) {
+			size_t succ_order;
+
+			list_insert_after(&succ->segment_link,
+			    &seg->segment_link);
+			succ_order = fnzb(ra_segment_size_get(succ));
+			list_append(&succ->fu_link, &span->free[succ_order]);
+		}
+
+		/* Now remove the found segment from the free list. */
+		list_remove(&seg->fu_link);
+		seg->base = newbase;
+		seg->flags &= ~RA_SEGMENT_FREE;
+
+		/* Hash-in the segment into the used hash. */
+		sysarg_t key = seg->base;
+		hash_table_insert(&span->used, &key, &seg->fu_link);
+
+		return newbase;
+	}
+
+	return 0;
+}
+
+static void ra_span_free(ra_span_t *span, size_t base, size_t size)
+{
+	sysarg_t key = base;
+	link_t *link;
+	ra_segment_t *seg;
+	ra_segment_t *pred;
+	ra_segment_t *succ;
+	size_t order;
+
+	/*
+	 * Locate the segment in the used hash table.
+	 */
+	link = hash_table_find(&span->used, &key);
+	if (!link) {
+		panic("Freeing segment which is not known to be used (base=%"
+		    PRIxn ", size=%" PRIdn ").", base, size);
+	}
+	seg = hash_table_get_instance(link, ra_segment_t, fu_link);
+
+	/*
+	 * Hash out the segment.
+	 */
+	hash_table_remove(&span->used, &key, 1);
+
+	ASSERT(!(seg->flags & RA_SEGMENT_FREE));
+	ASSERT(seg->base == base);
+	ASSERT(ra_segment_size_get(seg) == size);
+
+	/*
+	 * Check whether the segment can be coalesced with its left neighbor.
+	 */
+	if (list_first(&span->segments) != &seg->segment_link) {
+		pred = hash_table_get_instance(seg->segment_link.prev,
+		    ra_segment_t, segment_link);
+
+		ASSERT(pred->base < seg->base);
+
+		if (pred->flags & RA_SEGMENT_FREE) {
+			/*
+			 * The segment can be coalesced with its predecessor.
+			 * Remove the predecessor from the free and segment
+			 * lists, rebase the segment and throw the predecessor
+			 * away.
+			 */
+			list_remove(&pred->fu_link);
+			list_remove(&pred->segment_link);
+			seg->base = pred->base;
+			ra_segment_destroy(pred);
+		}
+	}
+
+	/*
+	 * Check whether the segment can be coalesced with its right neighbor.
+	 */
+	succ = hash_table_get_instance(seg->segment_link.next, ra_segment_t,
+	    segment_link);
+	ASSERT(succ->base > seg->base);
+	if (succ->flags & RA_SEGMENT_FREE) {
+		/*
+		 * The segment can be coalesced with its successor.
+		 * Remove the successor from the free and segment lists
+		 * and throw it away.
+		 */
+		list_remove(&succ->fu_link);
+		list_remove(&succ->segment_link);
+		ra_segment_destroy(succ);
+	}
+
+	/* Put the segment on the appropriate free list. */
+	seg->flags |= RA_SEGMENT_FREE;
+	order = fnzb(ra_segment_size_get(seg));
+	list_append(&seg->fu_link, &span->free[order]);
+}
+
+/** Allocate resources from arena. */
+uintptr_t ra_alloc(ra_arena_t *arena, size_t size, size_t alignment)
+{
+	uintptr_t base = 0;
+
+	ASSERT(size >= 1);
+	ASSERT(alignment >= 1);
+	ASSERT(ispwr2(alignment));
+
+	spinlock_lock(&arena->lock);
+	list_foreach(arena->spans, cur) {
+		ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);
+
+		base = ra_span_alloc(span, size, alignment);
+		if (base)
+			break;
+	}
+	spinlock_unlock(&arena->lock);
+
+	return base;
+}
+
+/* Return resources to arena. */
+void ra_free(ra_arena_t *arena, uintptr_t base, size_t size)
+{
+	spinlock_lock(&arena->lock);
+	list_foreach(arena->spans, cur) {
+		ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);
+
+		if (iswithin(span->base, span->size, base, size)) {
+			ra_span_free(span, base, size);
+			spinlock_unlock(&arena->lock);
+			return;
+		}
+	}
+	spinlock_unlock(&arena->lock);
+
+	panic("Freeing to wrong arena (base=%" PRIxn ", size=%" PRIdn ").",
+	    base, size);
+}
+
+
+/** @}
+ */
Index: kernel/generic/src/main/main.c
===================================================================
--- kernel/generic/src/main/main.c	(revision 1761268c1ab1e57b9fa5694aead0b2e9952761b6)
+++ kernel/generic/src/main/main.c	(revision 7aaed09d88be49fac8360d3017e3328ed9b0635c)
@@ -68,4 +68,5 @@
 #include <mm/page.h>
 #include <genarch/mm/page_pt.h>
+#include <mm/km.h>
 #include <mm/tlb.h>
 #include <mm/as.h>
@@ -88,5 +89,9 @@
 
 /** Global configuration structure. */
-config_t config;
+config_t config = {
+	.identity_configured = false,
+	.non_identity_configured = false,
+	.physmem_end = 0
+};
 
 /** Initial user-space tasks */
@@ -205,6 +210,6 @@
 	 */
 	arch_pre_mm_init();
+	km_identity_init();
 	frame_init();
-	
 	/* Initialize at least 1 memory segment big enough for slab to work. */
 	slab_cache_init();
@@ -214,4 +219,5 @@
 	page_init();
 	tlb_init();
+	km_non_identity_init();
 	ddi_init();
 	arch_post_mm_init();
Index: kernel/generic/src/mm/frame.c
===================================================================
--- kernel/generic/src/mm/frame.c	(revision 1761268c1ab1e57b9fa5694aead0b2e9952761b6)
+++ kernel/generic/src/mm/frame.c	(revision 7aaed09d88be49fac8360d3017e3328ed9b0635c)
@@ -240,6 +240,6 @@
 NO_TRACE static bool zone_can_alloc(zone_t *zone, uint8_t order)
 {
-	return (zone_flags_available(zone->flags)
-	    && buddy_system_can_alloc(zone->buddy_system, order));
+	return ((zone->flags & ZONE_AVAILABLE) &&
+	    buddy_system_can_alloc(zone->buddy_system, order));
 }
 
@@ -265,5 +265,5 @@
 		 * Check whether the zone meets the search criteria.
 		 */
-		if ((zones.info[i].flags & flags) == flags) {
+		if (ZONE_FLAGS_MATCH(zones.info[i].flags, flags)) {
 			/*
 			 * Check if the zone has 2^order frames area available.
@@ -460,5 +460,5 @@
 NO_TRACE static pfn_t zone_frame_alloc(zone_t *zone, uint8_t order)
 {
-	ASSERT(zone_flags_available(zone->flags));
+	ASSERT(zone->flags & ZONE_AVAILABLE);
 	
 	/* Allocate frames from zone buddy system */
@@ -490,5 +490,5 @@
 NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t frame_idx)
 {
-	ASSERT(zone_flags_available(zone->flags));
+	ASSERT(zone->flags & ZONE_AVAILABLE);
 	
 	frame_t *frame = &zone->frames[frame_idx];
@@ -518,5 +518,5 @@
 NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t frame_idx)
 {
-	ASSERT(zone_flags_available(zone->flags));
+	ASSERT(zone->flags & ZONE_AVAILABLE);
 	
 	frame_t *frame = zone_get_frame(zone, frame_idx);
@@ -549,6 +549,6 @@
     buddy_system_t *buddy)
 {
-	ASSERT(zone_flags_available(zones.info[z1].flags));
-	ASSERT(zone_flags_available(zones.info[z2].flags));
+	ASSERT(zones.info[z1].flags & ZONE_AVAILABLE);
+	ASSERT(zones.info[z2].flags & ZONE_AVAILABLE);
 	ASSERT(zones.info[z1].flags == zones.info[z2].flags);
 	ASSERT(zones.info[z1].base < zones.info[z2].base);
@@ -645,5 +645,5 @@
 NO_TRACE static void return_config_frames(size_t znum, pfn_t pfn, size_t count)
 {
-	ASSERT(zone_flags_available(zones.info[znum].flags));
+	ASSERT(zones.info[znum].flags & ZONE_AVAILABLE);
 	
 	size_t cframes = SIZE2FRAMES(zone_conf_size(count));
@@ -681,5 +681,5 @@
     size_t count)
 {
-	ASSERT(zone_flags_available(zones.info[znum].flags));
+	ASSERT(zones.info[znum].flags & ZONE_AVAILABLE);
 	ASSERT(frame_idx + count < zones.info[znum].count);
 	
@@ -723,9 +723,6 @@
 	 * set of flags
 	 */
-	if ((z1 >= zones.count) || (z2 >= zones.count)
-	    || (z2 - z1 != 1)
-	    || (!zone_flags_available(zones.info[z1].flags))
-	    || (!zone_flags_available(zones.info[z2].flags))
-	    || (zones.info[z1].flags != zones.info[z2].flags)) {
+	if ((z1 >= zones.count) || (z2 >= zones.count) || (z2 - z1 != 1) ||
+	    (zones.info[z1].flags != zones.info[z2].flags)) {
 		ret = false;
 		goto errout;
@@ -828,5 +825,5 @@
 	zone->buddy_system = buddy;
 	
-	if (zone_flags_available(flags)) {
+	if (flags & ZONE_AVAILABLE) {
 		/*
 		 * Compute order for buddy system and initialize
@@ -865,4 +862,13 @@
 {
 	return (count * sizeof(frame_t) + buddy_conf_size(fnzb(count)));
+}
+
+/** Allocate external configuration frames from low memory. */
+pfn_t zone_external_conf_alloc(size_t count)
+{
+	size_t size = zone_conf_size(count);
+	size_t order = ispwr2(size) ? fnzb(size) : (fnzb(size) + 1);
+
+	return ADDR2PFN((uintptr_t) frame_alloc(order - FRAME_WIDTH, FRAME_LOWMEM));
 }
 
@@ -888,5 +894,5 @@
 	irq_spinlock_lock(&zones.lock, true);
 	
-	if (zone_flags_available(flags)) {  /* Create available zone */
+	if (flags & ZONE_AVAILABLE) {  /* Create available zone */
 		/* Theoretically we could have NULL here, practically make sure
 		 * nobody tries to do that. If some platform requires, remove
@@ -894,4 +900,7 @@
 		 */
 		ASSERT(confframe != ADDR2PFN((uintptr_t ) NULL));
+
+		/* Update the known end of physical memory. */
+		config.physmem_end = max(config.physmem_end, PFN2ADDR(start + count));
 		
 		/* If confframe is supposed to be inside our zone, then make sure
@@ -1232,5 +1241,5 @@
 	
 	/* Tell the architecture to create some memory */
-	frame_arch_init();
+	frame_low_arch_init();
 	if (config.cpu_active == 1) {
 		frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)),
@@ -1255,4 +1264,37 @@
 		frame_mark_unavailable(0, 1);
 	}
+	frame_high_arch_init();
+}
+
+/** Adjust bounds of physical memory region according to low/high memory split.
+ *
+ * @param low[in]	If true, the adjustment is performed to make the region
+ *			fit in the low memory. Otherwise the adjustment is
+ *			performed to make the region fit in the high memory.
+ * @param basep[inout]	Pointer to a variable which contains the region's base
+ *			address and which may receive the adjusted base address.
+ * @param sizep[inout]	Pointer to a variable which contains the region's size
+ *			and which may receive the adjusted size.
+ * @return		True if the region still exists even after the
+ *			adjustment, false otherwise.
+ */
+bool frame_adjust_zone_bounds(bool low, uintptr_t *basep, size_t *sizep)
+{
+	uintptr_t limit = config.identity_size;
+
+	if (low) {
+		if (*basep > limit)
+			return false;
+		if (*basep + *sizep > limit)
+			*sizep = limit - *basep;
+	} else {
+		if (*basep + *sizep <= limit)
+			return false;
+		if (*basep <= limit) {
+			*sizep -= limit - *basep;
+			*basep = limit;
+		}
+	}
+	return true;
 }
 
@@ -1293,5 +1335,5 @@
 		*total += (uint64_t) FRAMES2SIZE(zones.info[i].count);
 		
-		if (zone_flags_available(zones.info[i].flags)) {
+		if (zones.info[i].flags & ZONE_AVAILABLE) {
 			*busy += (uint64_t) FRAMES2SIZE(zones.info[i].busy_count);
 			*free += (uint64_t) FRAMES2SIZE(zones.info[i].free_count);
@@ -1344,5 +1386,5 @@
 		irq_spinlock_unlock(&zones.lock, true);
 		
-		bool available = zone_flags_available(flags);
+		bool available = ((flags & ZONE_AVAILABLE) != 0);
 		
 		printf("%-4zu", i);
@@ -1356,8 +1398,10 @@
 #endif
 		
-		printf(" %12zu %c%c%c      ", count,
-		    available ? 'A' : ' ',
-		    (flags & ZONE_RESERVED) ? 'R' : ' ',
-		    (flags & ZONE_FIRMWARE) ? 'F' : ' ');
+		printf(" %12zu %c%c%c%c%c    ", count,
+		    available ? 'A' : '-',
+		    (flags & ZONE_RESERVED) ? 'R' : '-',
+		    (flags & ZONE_FIRMWARE) ? 'F' : '-',
+		    (flags & ZONE_LOWMEM) ? 'L' : '-',
+		    (flags & ZONE_HIGHMEM) ? 'H' : '-');
 		
 		if (available)
@@ -1401,5 +1445,5 @@
 	irq_spinlock_unlock(&zones.lock, true);
 	
-	bool available = zone_flags_available(flags);
+	bool available = ((flags & ZONE_AVAILABLE) != 0);
 	
 	uint64_t size;
@@ -1411,8 +1455,10 @@
 	printf("Zone size:         %zu frames (%" PRIu64 " %s)\n", count,
 	    size, size_suffix);
-	printf("Zone flags:        %c%c%c\n",
-	    available ? 'A' : ' ',
-	    (flags & ZONE_RESERVED) ? 'R' : ' ',
-	    (flags & ZONE_FIRMWARE) ? 'F' : ' ');
+	printf("Zone flags:        %c%c%c%c%c\n",
+	    available ? 'A' : '-',
+	    (flags & ZONE_RESERVED) ? 'R' : '-',
+	    (flags & ZONE_FIRMWARE) ? 'F' : '-',
+	    (flags & ZONE_LOWMEM) ? 'L' : '-',
+	    (flags & ZONE_HIGHMEM) ? 'H' : '-');
 	
 	if (available) {
Index: kernel/generic/src/mm/km.c
===================================================================
--- kernel/generic/src/mm/km.c	(revision 7aaed09d88be49fac8360d3017e3328ed9b0635c)
+++ kernel/generic/src/mm/km.c	(revision 7aaed09d88be49fac8360d3017e3328ed9b0635c)
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2011 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup genericmm
+ * @{
+ */
+
+/**
+ * @file
+ * @brief Kernel virtual memory setup.
+ */
+
+#include <mm/km.h>
+#include <arch/mm/km.h>
+#include <config.h>
+#include <typedefs.h>
+#include <lib/ra.h>
+#include <debug.h>
+
+static ra_arena_t *km_ni_arena;
+
+/** Architecture dependent setup of identity-mapped kernel memory. */
+void km_identity_init(void)
+{
+	km_identity_arch_init();
+	config.identity_configured = true;
+}
+
+/** Architecture dependent setup of non-identity-mapped kernel memory. */
+void km_non_identity_init(void)
+{
+	km_ni_arena = ra_arena_create();
+	ASSERT(km_ni_arena != NULL);
+	km_non_identity_arch_init();
+	config.non_identity_configured = true;
+}
+
+bool km_is_non_identity(uintptr_t addr)
+{
+	return km_is_non_identity_arch(addr);
+}
+
+void km_non_identity_span_add(uintptr_t base, size_t size)
+{
+	bool span_added;
+
+	span_added = ra_span_add(km_ni_arena, base, size);
+	ASSERT(span_added);
+}
+
+uintptr_t km_page_alloc(size_t size, size_t align)
+{
+	return ra_alloc(km_ni_arena, size, align);
+}
+
+void km_page_free(uintptr_t page, size_t size)
+{
+	ra_free(km_ni_arena, page, size);
+}
+
+
+/** @}
+ */
+
Index: kernel/generic/src/mm/page.c
===================================================================
--- kernel/generic/src/mm/page.c	(revision 1761268c1ab1e57b9fa5694aead0b2e9952761b6)
+++ kernel/generic/src/mm/page.c	(revision 7aaed09d88be49fac8360d3017e3328ed9b0635c)
@@ -65,4 +65,5 @@
 #include <arch/mm/asid.h>
 #include <mm/as.h>
+#include <mm/km.h>
 #include <mm/frame.h>
 #include <arch/barrier.h>
@@ -177,4 +178,24 @@
 }
 
+uintptr_t hw_map(uintptr_t physaddr, size_t size)
+{
+	uintptr_t virtaddr;
+	size_t asize;
+	pfn_t i;
+
+	asize = ALIGN_UP(size, PAGE_SIZE);
+	virtaddr = km_page_alloc(asize, PAGE_SIZE);
+
+	page_table_lock(AS_KERNEL, true);
+	for (i = 0; i < ADDR2PFN(asize); i++) {
+		uintptr_t addr = PFN2ADDR(i);
+		page_mapping_insert(AS_KERNEL, virtaddr + addr, physaddr + addr,
+		    PAGE_NOT_CACHEABLE | PAGE_WRITE);
+	}
+	page_table_unlock(AS_KERNEL, true);
+	
+	return virtaddr;
+}
+
 int page_find_mapping(uintptr_t virt, void **phys)
 {
Index: kernel/generic/src/mm/reserve.c
===================================================================
--- kernel/generic/src/mm/reserve.c	(revision 1761268c1ab1e57b9fa5694aead0b2e9952761b6)
+++ kernel/generic/src/mm/reserve.c	(revision 7aaed09d88be49fac8360d3017e3328ed9b0635c)
@@ -42,4 +42,7 @@
 #include <typedefs.h>
 #include <arch/types.h>
+#include <debug.h>
+
+static bool reserve_initialized = false;
 
 IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(reserve_lock, "reserve_lock");
@@ -54,4 +57,5 @@
 {
 	reserve = frame_total_free_get();
+	reserve_initialized = true;
 }
 
@@ -67,4 +71,6 @@
 {
 	bool reserved = false;
+
+	ASSERT(reserve_initialized);
 
 	irq_spinlock_lock(&reserve_lock, true);
@@ -111,4 +117,7 @@
 void reserve_force_alloc(size_t size)
 {
+	if (!reserve_initialized)
+		return;
+
 	irq_spinlock_lock(&reserve_lock, true);
 	reserve -= size;
@@ -122,4 +131,7 @@
 void reserve_free(size_t size)
 {
+	if (!reserve_initialized)
+		return;
+
 	irq_spinlock_lock(&reserve_lock, true);
 	reserve += size;
Index: kernel/generic/src/proc/thread.c
===================================================================
--- kernel/generic/src/proc/thread.c	(revision 1761268c1ab1e57b9fa5694aead0b2e9952761b6)
+++ kernel/generic/src/proc/thread.c	(revision 7aaed09d88be49fac8360d3017e3328ed9b0635c)
@@ -173,4 +173,23 @@
 #endif /* CONFIG_FPU */
 	
+	/*
+	 * Allocate the kernel stack from the low-memory to prevent an infinite
+	 * nesting of TLB-misses when accessing the stack from the part of the
+	 * TLB-miss handler written in C.
+	 *
+	 * Note that low-memory is safe to be used for the stack as it will be
+	 * covered by the kernel identity mapping, which guarantees not to
+	 * nest TLB-misses infinitely (either via some hardware mechanism or
+	 * by the construction of the assembly-language part of the TLB-miss
+	 * handler).
+	 *
+	 * This restriction can be lifted once each architecture provides
+	 * a similar guarantee, for example by locking the kernel stack
+	 * in the TLB whenever it is allocated from the high-memory and the
+	 * thread is being scheduled to run.
+	 */
+	kmflags |= FRAME_LOWMEM;
+	kmflags &= ~FRAME_HIGHMEM;
+
 	thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
 	if (!thread->kstack) {
