Index: kernel/generic/src/mm/backend_elf.c
===================================================================
--- kernel/generic/src/mm/backend_elf.c	(revision 8b65570552691ad6fa2a012cf20ea6571577cd46)
+++ kernel/generic/src/mm/backend_elf.c	(revision 9dd730d10fe8be9055a0ee48e8d01c7e9c58f6dd)
@@ -72,4 +72,7 @@
 bool elf_create(as_area_t *area)
 {
+	elf_segment_header_t *entry = area->backend_data.segment;
+	size_t nonanon_pages = ALIGN_DOWN(entry->p_filesz, PAGE_SIZE);
+	/* NOTE(review): ALIGN_DOWN() leaves this value in bytes, yet it is compared with the page count area->pages below -- confirm intended units. */
 	/**
 	 * @todo:
@@ -77,13 +80,29 @@
 	 * supporting structures allocated during the page fault.
 	 */
-	return reserve_try_alloc(area->pages);
+	/* Reserve memory only for the pages not covered by nonanon_pages. */
+	if (area->pages <= nonanon_pages)
+		return true;
+	
+	return reserve_try_alloc(area->pages - nonanon_pages);
 }
 
 bool elf_resize(as_area_t *area, size_t new_pages)
 {
+	elf_segment_header_t *entry = area->backend_data.segment;
+	size_t nonanon_pages = ALIGN_DOWN(entry->p_filesz, PAGE_SIZE);
+
+	if (new_pages > area->pages) {
+		/* The area is growing. */
+		if (area->pages >= nonanon_pages)
+			return reserve_try_alloc(new_pages - area->pages);
+		else if (new_pages > nonanon_pages)
+			return reserve_try_alloc(new_pages - nonanon_pages);
+	} else if (new_pages < area->pages) {
+		/* The area is shrinking. */
+		if (new_pages >= nonanon_pages)
+			reserve_free(area->pages - new_pages);
+		else if (area->pages > nonanon_pages)
+			reserve_free(area->pages - nonanon_pages);
+	}
 	
 	return true;
@@ -180,9 +199,9 @@
 void elf_destroy(as_area_t *area)
 {
-	/**
-	 * @todo:
-	 * Unreserve only how much was really reserved.
-	 */
-	reserve_free(area->pages);
+	elf_segment_header_t *entry = area->backend_data.segment;
+	size_t nonanon_pages = ALIGN_DOWN(entry->p_filesz, PAGE_SIZE);
+	/* Release only the excess over nonanon_pages, mirroring what elf_create() reserved. */
+	if (area->pages > nonanon_pages)
+		reserve_free(area->pages - nonanon_pages);
 }
 
