Index: kernel/generic/src/mm/as.c
===================================================================
--- kernel/generic/src/mm/as.c	(revision 55132b820fb53298e1e5905e0b9d45a6fcb6064b)
+++ kernel/generic/src/mm/as.c	(revision 8b65570552691ad6fa2a012cf20ea6571577cd46)
@@ -79,8 +79,4 @@
 #include <syscall/copy.h>
 #include <arch/interrupt.h>
-
-#ifdef CONFIG_VIRT_IDX_DCACHE
-#include <arch/mm/cache.h>
-#endif /* CONFIG_VIRT_IDX_DCACHE */
 
 /**
Index: kernel/generic/src/mm/backend_anon.c
===================================================================
--- kernel/generic/src/mm/backend_anon.c	(revision 55132b820fb53298e1e5905e0b9d45a6fcb6064b)
+++ kernel/generic/src/mm/backend_anon.c	(revision 8b65570552691ad6fa2a012cf20ea6571577cd46)
@@ -39,4 +39,5 @@
 #include <mm/as.h>
 #include <mm/page.h>
+#include <mm/reserve.h>
 #include <genarch/mm/page_pt.h>
 #include <genarch/mm/page_ht.h>
@@ -51,17 +52,101 @@
 #include <arch.h>
 
-#ifdef CONFIG_VIRT_IDX_DCACHE
-#include <arch/mm/cache.h>
-#endif
+static bool anon_create(as_area_t *);
+static bool anon_resize(as_area_t *, size_t);
+static void anon_share(as_area_t *area);
+static void anon_destroy(as_area_t *);
 
 static int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
 static void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
-static void anon_share(as_area_t *area);
 
 mem_backend_t anon_backend = {
+	.create = anon_create,
+	.resize = anon_resize,
+	.share = anon_share,
+	.destroy = anon_destroy,
+
 	.page_fault = anon_page_fault,
 	.frame_free = anon_frame_free,
-	.share = anon_share
 };
+
+bool anon_create(as_area_t *area)
+{
+	return reserve_try_alloc(area->pages);
+}
+
+bool anon_resize(as_area_t *area, size_t new_pages)
+{
+	/**
+	 * @todo
+	 * Reserve also space needed for the supporting structures allocated
+	 * during page fault.
+	 */
+
+	if (new_pages > area->pages)
+		return reserve_try_alloc(new_pages - area->pages);
+	else if (new_pages < area->pages)
+		reserve_free(area->pages - new_pages);
+
+	return true;
+}
+
+/** Share the anonymous address space area.
+ *
+ * Sharing of anonymous area is done by duplicating its entire mapping
+ * to the pagemap. Page faults will primarily search for frames there.
+ *
+ * The address space and address space area must be already locked.
+ *
+ * @param area Address space area to be shared.
+ */
+void anon_share(as_area_t *area)
+{
+	link_t *cur;
+
+	ASSERT(mutex_locked(&area->as->lock));
+	ASSERT(mutex_locked(&area->lock));
+
+	/*
+	 * Copy used portions of the area to sh_info's page map.
+	 */
+	mutex_lock(&area->sh_info->lock);
+	for (cur = area->used_space.leaf_head.next;
+	    cur != &area->used_space.leaf_head; cur = cur->next) {
+		btree_node_t *node;
+		unsigned int i;
+		
+		node = list_get_instance(cur, btree_node_t, leaf_link);
+		for (i = 0; i < node->keys; i++) {
+			uintptr_t base = node->key[i];
+			size_t count = (size_t) node->value[i];
+			unsigned int j;
+			
+			for (j = 0; j < count; j++) {
+				pte_t *pte;
+			
+				page_table_lock(area->as, false);
+				pte = page_mapping_find(area->as,
+				    base + j * PAGE_SIZE);
+				ASSERT(pte && PTE_VALID(pte) &&
+				    PTE_PRESENT(pte));
+				btree_insert(&area->sh_info->pagemap,
+				    (base + j * PAGE_SIZE) - area->base,
+				    (void *) PTE_GET_FRAME(pte), NULL);
+				page_table_unlock(area->as, false);
+
+				pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
+				frame_reference_add(pfn);
+			}
+
+		}
+	}
+	mutex_unlock(&area->sh_info->lock);
+}
+
+void anon_destroy(as_area_t *area)
+{
+	reserve_free(area->pages);
+}
+
 
 /** Service a page fault in the anonymous memory address space area.
@@ -115,5 +200,6 @@
 			}
 			if (allocate) {
-				frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
+				frame = (uintptr_t) frame_alloc_noreserve(
+				    ONE_FRAME, 0);
 				memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
 				
@@ -145,5 +231,5 @@
 		 *   the different causes
 		 */
-		frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
+		frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
 		memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
 	}
@@ -174,58 +260,5 @@
 	ASSERT(mutex_locked(&area->lock));
 
-	frame_free(frame);
-}
-
-/** Share the anonymous address space area.
- *
- * Sharing of anonymous area is done by duplicating its entire mapping
- * to the pagemap. Page faults will primarily search for frames there.
- *
- * The address space and address space area must be already locked.
- *
- * @param area Address space area to be shared.
- */
-void anon_share(as_area_t *area)
-{
-	link_t *cur;
-
-	ASSERT(mutex_locked(&area->as->lock));
-	ASSERT(mutex_locked(&area->lock));
-
-	/*
-	 * Copy used portions of the area to sh_info's page map.
-	 */
-	mutex_lock(&area->sh_info->lock);
-	for (cur = area->used_space.leaf_head.next;
-	    cur != &area->used_space.leaf_head; cur = cur->next) {
-		btree_node_t *node;
-		unsigned int i;
-		
-		node = list_get_instance(cur, btree_node_t, leaf_link);
-		for (i = 0; i < node->keys; i++) {
-			uintptr_t base = node->key[i];
-			size_t count = (size_t) node->value[i];
-			unsigned int j;
-			
-			for (j = 0; j < count; j++) {
-				pte_t *pte;
-			
-				page_table_lock(area->as, false);
-				pte = page_mapping_find(area->as,
-				    base + j * PAGE_SIZE);
-				ASSERT(pte && PTE_VALID(pte) &&
-				    PTE_PRESENT(pte));
-				btree_insert(&area->sh_info->pagemap,
-				    (base + j * PAGE_SIZE) - area->base,
-				    (void *) PTE_GET_FRAME(pte), NULL);
-				page_table_unlock(area->as, false);
-
-				pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
-				frame_reference_add(pfn);
-			}
-
-		}
-	}
-	mutex_unlock(&area->sh_info->lock);
+	frame_free_noreserve(frame);
 }
 
Index: kernel/generic/src/mm/backend_elf.c
===================================================================
--- kernel/generic/src/mm/backend_elf.c	(revision 55132b820fb53298e1e5905e0b9d45a6fcb6064b)
+++ kernel/generic/src/mm/backend_elf.c	(revision 8b65570552691ad6fa2a012cf20ea6571577cd46)
@@ -43,4 +43,5 @@
 #include <mm/slab.h>
 #include <mm/page.h>
+#include <mm/reserve.h>
 #include <genarch/mm/page_pt.h>
 #include <genarch/mm/page_ht.h>
@@ -51,219 +52,40 @@
 #include <arch/barrier.h>
 
-#ifdef CONFIG_VIRT_IDX_DCACHE
-#include <arch/mm/cache.h>
-#endif
+static bool elf_create(as_area_t *);
+static bool elf_resize(as_area_t *, size_t);
+static void elf_share(as_area_t *);
+static void elf_destroy(as_area_t *);
 
 static int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
 static void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
-static void elf_share(as_area_t *area);
 
 mem_backend_t elf_backend = {
+	.create = elf_create,
+	.resize = elf_resize,
+	.share = elf_share,
+	.destroy = elf_destroy,
+
 	.page_fault = elf_page_fault,
 	.frame_free = elf_frame_free,
-	.share = elf_share
 };
 
-/** Service a page fault in the ELF backend address space area.
- *
- * The address space area and page tables must be already locked.
- *
- * @param area		Pointer to the address space area.
- * @param addr		Faulting virtual address.
- * @param access	Access mode that caused the fault (i.e.
- * 			read/write/exec).
- *
- * @return		AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
- * 			on success (i.e. serviced).
- */
-int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
-{
-	elf_header_t *elf = area->backend_data.elf;
-	elf_segment_header_t *entry = area->backend_data.segment;
-	btree_node_t *leaf;
-	uintptr_t base, frame, page, start_anon;
-	size_t i;
-	bool dirty = false;
-
-	ASSERT(page_table_locked(AS));
-	ASSERT(mutex_locked(&area->lock));
-
-	if (!as_area_check_access(area, access))
-		return AS_PF_FAULT;
+bool elf_create(as_area_t *area)
+{
+	/**
+	 * @todo:
+	 * Reserve only how much is necessary for anonymous pages plus the
+	 * supporting structures allocated during the page fault.
+	 */
+	return reserve_try_alloc(area->pages);
+}
+
+bool elf_resize(as_area_t *area, size_t new_pages)
+{
+	if (new_pages > area->pages)
+		return reserve_try_alloc(new_pages - area->pages);
+	else if (new_pages < area->pages)
+		reserve_free(area->pages - new_pages);
 	
-	if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
-		return AS_PF_FAULT;
-	
-	if (addr >= entry->p_vaddr + entry->p_memsz)
-		return AS_PF_FAULT;
-	
-	i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
-	base = (uintptr_t)
-	    (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));
-
-	/* Virtual address of faulting page*/
-	page = ALIGN_DOWN(addr, PAGE_SIZE);
-
-	/* Virtual address of the end of initialized part of segment */
-	start_anon = entry->p_vaddr + entry->p_filesz;
-
-	if (area->sh_info) {
-		bool found = false;
-
-		/*
-		 * The address space area is shared.
-		 */
-		
-		mutex_lock(&area->sh_info->lock);
-		frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
-		    page - area->base, &leaf);
-		if (!frame) {
-			unsigned int i;
-
-			/*
-			 * Workaround for valid NULL address.
-			 */
-
-			for (i = 0; i < leaf->keys; i++) {
-				if (leaf->key[i] == page - area->base) {
-					found = true;
-					break;
-				}
-			}
-		}
-		if (frame || found) {
-			frame_reference_add(ADDR2PFN(frame));
-			page_mapping_insert(AS, addr, frame,
-			    as_area_get_flags(area));
-			if (!used_space_insert(area, page, 1))
-				panic("Cannot insert used space.");
-			mutex_unlock(&area->sh_info->lock);
-			return AS_PF_OK;
-		}
-	}
-
-	/*
-	 * The area is either not shared or the pagemap does not contain the
-	 * mapping.
-	 */
-	if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
-		/*
-		 * Initialized portion of the segment. The memory is backed
-		 * directly by the content of the ELF image. Pages are
-		 * only copied if the segment is writable so that there
-		 * can be more instantions of the same memory ELF image
-		 * used at a time. Note that this could be later done
-		 * as COW.
-		 */
-		if (entry->p_flags & PF_W) {
-			frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
-			memcpy((void *) PA2KA(frame),
-			    (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
-			if (entry->p_flags & PF_X) {
-				smc_coherence_block((void *) PA2KA(frame),
-				    FRAME_SIZE);
-			}
-			dirty = true;
-		} else {
-			frame = KA2PA(base + i * FRAME_SIZE);
-		}	
-	} else if (page >= start_anon) {
-		/*
-		 * This is the uninitialized portion of the segment.
-		 * It is not physically present in the ELF image.
-		 * To resolve the situation, a frame must be allocated
-		 * and cleared.
-		 */
-		frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
-		memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
-		dirty = true;
-	} else {
-		size_t pad_lo, pad_hi;
-		/*
-		 * The mixed case.
-		 *
-		 * The middle part is backed by the ELF image and
-		 * the lower and upper parts are anonymous memory.
-		 * (The segment can be and often is shorter than 1 page).
-		 */
-		if (page < entry->p_vaddr)
-			pad_lo = entry->p_vaddr - page;
-		else
-			pad_lo = 0;
-
-		if (start_anon < page + PAGE_SIZE)
-			pad_hi = page + PAGE_SIZE - start_anon;
-		else
-			pad_hi = 0;
-
-		frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
-		memcpy((void *) (PA2KA(frame) + pad_lo),
-		    (void *) (base + i * FRAME_SIZE + pad_lo),
-		    FRAME_SIZE - pad_lo - pad_hi);
-		if (entry->p_flags & PF_X) {
-			smc_coherence_block((void *) (PA2KA(frame) + pad_lo), 
-			    FRAME_SIZE - pad_lo - pad_hi);
-		}
-		memsetb((void *) PA2KA(frame), pad_lo, 0);
-		memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
-		    0);
-		dirty = true;
-	}
-
-	if (dirty && area->sh_info) {
-		frame_reference_add(ADDR2PFN(frame));
-		btree_insert(&area->sh_info->pagemap, page - area->base,
-		    (void *) frame, leaf);
-	}
-
-	if (area->sh_info)
-		mutex_unlock(&area->sh_info->lock);
-
-	page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
-	if (!used_space_insert(area, page, 1))
-		panic("Cannot insert used space.");
-
-	return AS_PF_OK;
-}
-
-/** Free a frame that is backed by the ELF backend.
- *
- * The address space area and page tables must be already locked.
- *
- * @param area		Pointer to the address space area.
- * @param page		Page that is mapped to frame. Must be aligned to
- * 			PAGE_SIZE.
- * @param frame		Frame to be released.
- *
- */
-void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
-{
-	elf_segment_header_t *entry = area->backend_data.segment;
-	uintptr_t start_anon;
-
-	ASSERT(page_table_locked(area->as));
-	ASSERT(mutex_locked(&area->lock));
-
-	ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
-	ASSERT(page < entry->p_vaddr + entry->p_memsz);
-
-	start_anon = entry->p_vaddr + entry->p_filesz;
-
-	if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
-		if (entry->p_flags & PF_W) {
-			/*
-			 * Free the frame with the copy of writable segment
-			 * data.
-			 */
-			frame_free(frame);
-		}
-	} else {
-		/*
-		 * The frame is either anonymous memory or the mixed case (i.e.
-		 * lower part is backed by the ELF image and the upper is
-		 * anonymous). In any case, a frame needs to be freed.
-		 */
-		frame_free(frame);
-	}
+	return true;
 }
 
@@ -356,4 +178,216 @@
 }
 
+void elf_destroy(as_area_t *area)
+{
+	/**
+	 * @todo:
+	 * Unreserve only how much was really reserved.
+	 */
+	reserve_free(area->pages);
+}
+
+/** Service a page fault in the ELF backend address space area.
+ *
+ * The address space area and page tables must be already locked.
+ *
+ * @param area		Pointer to the address space area.
+ * @param addr		Faulting virtual address.
+ * @param access	Access mode that caused the fault (i.e.
+ * 			read/write/exec).
+ *
+ * @return		AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
+ * 			on success (i.e. serviced).
+ */
+int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
+{
+	elf_header_t *elf = area->backend_data.elf;
+	elf_segment_header_t *entry = area->backend_data.segment;
+	btree_node_t *leaf;
+	uintptr_t base, frame, page, start_anon;
+	size_t i;
+	bool dirty = false;
+
+	ASSERT(page_table_locked(AS));
+	ASSERT(mutex_locked(&area->lock));
+
+	if (!as_area_check_access(area, access))
+		return AS_PF_FAULT;
+	
+	if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
+		return AS_PF_FAULT;
+	
+	if (addr >= entry->p_vaddr + entry->p_memsz)
+		return AS_PF_FAULT;
+	
+	i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
+	base = (uintptr_t)
+	    (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));
+
+	/* Virtual address of faulting page */
+	page = ALIGN_DOWN(addr, PAGE_SIZE);
+
+	/* Virtual address of the end of initialized part of segment */
+	start_anon = entry->p_vaddr + entry->p_filesz;
+
+	if (area->sh_info) {
+		bool found = false;
+
+		/*
+		 * The address space area is shared.
+		 */
+		
+		mutex_lock(&area->sh_info->lock);
+		frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
+		    page - area->base, &leaf);
+		if (!frame) {
+			unsigned int i;
+
+			/*
+			 * Workaround for valid NULL address.
+			 */
+
+			for (i = 0; i < leaf->keys; i++) {
+				if (leaf->key[i] == page - area->base) {
+					found = true;
+					break;
+				}
+			}
+		}
+		if (frame || found) {
+			frame_reference_add(ADDR2PFN(frame));
+			page_mapping_insert(AS, addr, frame,
+			    as_area_get_flags(area));
+			if (!used_space_insert(area, page, 1))
+				panic("Cannot insert used space.");
+			mutex_unlock(&area->sh_info->lock);
+			return AS_PF_OK;
+		}
+	}
+
+	/*
+	 * The area is either not shared or the pagemap does not contain the
+	 * mapping.
+	 */
+	if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
+		/*
+		 * Initialized portion of the segment. The memory is backed
+		 * directly by the content of the ELF image. Pages are
+		 * only copied if the segment is writable so that there
+		 * can be more instantiations of the same memory ELF image
+		 * used at a time. Note that this could be later done
+		 * as COW.
+		 */
+		if (entry->p_flags & PF_W) {
+			frame = (uintptr_t)frame_alloc_noreserve(ONE_FRAME, 0);
+			memcpy((void *) PA2KA(frame),
+			    (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
+			if (entry->p_flags & PF_X) {
+				smc_coherence_block((void *) PA2KA(frame),
+				    FRAME_SIZE);
+			}
+			dirty = true;
+		} else {
+			frame = KA2PA(base + i * FRAME_SIZE);
+		}	
+	} else if (page >= start_anon) {
+		/*
+		 * This is the uninitialized portion of the segment.
+		 * It is not physically present in the ELF image.
+		 * To resolve the situation, a frame must be allocated
+		 * and cleared.
+		 */
+		frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
+		memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
+		dirty = true;
+	} else {
+		size_t pad_lo, pad_hi;
+		/*
+		 * The mixed case.
+		 *
+		 * The middle part is backed by the ELF image and
+		 * the lower and upper parts are anonymous memory.
+		 * (The segment can be and often is shorter than 1 page).
+		 */
+		if (page < entry->p_vaddr)
+			pad_lo = entry->p_vaddr - page;
+		else
+			pad_lo = 0;
+
+		if (start_anon < page + PAGE_SIZE)
+			pad_hi = page + PAGE_SIZE - start_anon;
+		else
+			pad_hi = 0;
+
+		frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
+		memcpy((void *) (PA2KA(frame) + pad_lo),
+		    (void *) (base + i * FRAME_SIZE + pad_lo),
+		    FRAME_SIZE - pad_lo - pad_hi);
+		if (entry->p_flags & PF_X) {
+			smc_coherence_block((void *) (PA2KA(frame) + pad_lo), 
+			    FRAME_SIZE - pad_lo - pad_hi);
+		}
+		memsetb((void *) PA2KA(frame), pad_lo, 0);
+		memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
+		    0);
+		dirty = true;
+	}
+
+	if (dirty && area->sh_info) {
+		frame_reference_add(ADDR2PFN(frame));
+		btree_insert(&area->sh_info->pagemap, page - area->base,
+		    (void *) frame, leaf);
+	}
+
+	if (area->sh_info)
+		mutex_unlock(&area->sh_info->lock);
+
+	page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
+	if (!used_space_insert(area, page, 1))
+		panic("Cannot insert used space.");
+
+	return AS_PF_OK;
+}
+
+/** Free a frame that is backed by the ELF backend.
+ *
+ * The address space area and page tables must be already locked.
+ *
+ * @param area		Pointer to the address space area.
+ * @param page		Page that is mapped to frame. Must be aligned to
+ * 			PAGE_SIZE.
+ * @param frame		Frame to be released.
+ *
+ */
+void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
+{
+	elf_segment_header_t *entry = area->backend_data.segment;
+	uintptr_t start_anon;
+
+	ASSERT(page_table_locked(area->as));
+	ASSERT(mutex_locked(&area->lock));
+
+	ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
+	ASSERT(page < entry->p_vaddr + entry->p_memsz);
+
+	start_anon = entry->p_vaddr + entry->p_filesz;
+
+	if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
+		if (entry->p_flags & PF_W) {
+			/*
+			 * Free the frame with the copy of writable segment
+			 * data.
+			 */
+			frame_free_noreserve(frame);
+		}
+	} else {
+		/*
+		 * The frame is either anonymous memory or the mixed case (i.e.
+		 * lower part is backed by the ELF image and the upper is
+		 * anonymous). In any case, a frame needs to be freed.
+		 */
+		frame_free_noreserve(frame);
+	}
+}
+
 /** @}
  */
Index: kernel/generic/src/mm/backend_phys.c
===================================================================
--- kernel/generic/src/mm/backend_phys.c	(revision 55132b820fb53298e1e5905e0b9d45a6fcb6064b)
+++ kernel/generic/src/mm/backend_phys.c	(revision 8b65570552691ad6fa2a012cf20ea6571577cd46)
@@ -48,12 +48,43 @@
 #include <align.h>
 
+static bool phys_create(as_area_t *);
+static void phys_share(as_area_t *area);
+static void phys_destroy(as_area_t *);
+
 static int phys_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
-static void phys_share(as_area_t *area);
 
 mem_backend_t phys_backend = {
+	.create = phys_create,
+	.resize = NULL,
+	.share = phys_share,
+	.destroy = phys_destroy,
+
 	.page_fault = phys_page_fault,
 	.frame_free = NULL,
-	.share = phys_share
 };
+
+bool phys_create(as_area_t *area)
+{
+	return true;
+}
+
+/** Share address space area backed by physical memory.
+ *
+ * Do actually nothing as sharing of address space areas
+ * that are backed up by physical memory is very easy.
+ * Note that the function must be defined so that
+ * as_area_share() will succeed.
+ */
+void phys_share(as_area_t *area)
+{
+	ASSERT(mutex_locked(&area->as->lock));
+	ASSERT(mutex_locked(&area->lock));
+}
+
+
+void phys_destroy(as_area_t *area)
+{
+	/* Nothing to do. */
+}
 
 /** Service a page fault in the address space area backed by physical memory.
@@ -88,17 +119,4 @@
 }
 
-/** Share address space area backed by physical memory.
- *
- * Do actually nothing as sharing of address space areas
- * that are backed up by physical memory is very easy.
- * Note that the function must be defined so that
- * as_area_share() will succeed.
- */
-void phys_share(as_area_t *area)
-{
-	ASSERT(mutex_locked(&area->as->lock));
-	ASSERT(mutex_locked(&area->lock));
-}
-
 /** @}
  */
Index: kernel/generic/src/mm/frame.c
===================================================================
--- kernel/generic/src/mm/frame.c	(revision 55132b820fb53298e1e5905e0b9d45a6fcb6064b)
+++ kernel/generic/src/mm/frame.c	(revision 8b65570552691ad6fa2a012cf20ea6571577cd46)
@@ -695,6 +695,4 @@
  * not to be 2^order size. Once the allocator is running it is no longer
  * possible, merged configuration data occupies more space :-/
- *
- * The function uses
  *
  */
@@ -1088,4 +1086,14 @@
 }
 
+void *frame_alloc(uint8_t order, frame_flags_t flags)
+{
+	return frame_alloc_generic(order, flags, NULL);
+}
+
+void *frame_alloc_noreserve(uint8_t order, frame_flags_t flags)
+{
+	return frame_alloc_generic(order, flags | FRAME_NO_RESERVE, NULL);
+}
+
 /** Free a frame.
  *
@@ -1095,7 +1103,8 @@
  *
  * @param frame Physical Address of of the frame to be freed.
- *
- */
-void frame_free(uintptr_t frame)
+ * @param flags Flags to control memory reservation.
+ *
+ */
+void frame_free_generic(uintptr_t frame, frame_flags_t flags)
 {
 	irq_spinlock_lock(&zones.lock, true);
@@ -1125,4 +1134,14 @@
 	}
 	mutex_unlock(&mem_avail_mtx);
+}
+
+void frame_free(uintptr_t frame)
+{
+	frame_free_generic(frame, 0);
+}
+
+void frame_free_noreserve(uintptr_t frame)
+{
+	frame_free_generic(frame, FRAME_NO_RESERVE);
 }
 
Index: kernel/generic/src/mm/reserve.c
===================================================================
--- kernel/generic/src/mm/reserve.c	(revision 8b65570552691ad6fa2a012cf20ea6571577cd46)
+++ kernel/generic/src/mm/reserve.c	(revision 8b65570552691ad6fa2a012cf20ea6571577cd46)
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2011 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup genericmm
+ * @{
+ */
+
+/**
+ * @file
+ * @brief Memory reservations.
+ */
+
+#include <typedefs.h>
+#include <mm/reserve.h>
+#include <mm/frame.h>
+
+bool reserve_try_alloc(size_t size)
+{
+	return false;
+}
+
+void reserve_force_alloc(size_t size)
+{
+}
+
+void reserve_free(size_t size)
+{
+}
+
+/** @}
+ */
