Index: kernel/generic/src/lib/elf.c
===================================================================
--- kernel/generic/src/lib/elf.c	(revision 0705fc5d3687b1ccff06bbb14c734f004dabdc9d)
+++ kernel/generic/src/lib/elf.c	(revision 103db9089df98061a5560c2c731e082f1f964f94)
@@ -142,6 +142,4 @@
 {
 	mem_backend_data_t backend_data;
-	backend_data.elf = elf;
-	backend_data.segment = entry;
 
 	if (entry->p_align > 1) {
@@ -172,4 +170,8 @@
 	size_t mem_sz = entry->p_memsz + (entry->p_vaddr - base);
 
+	backend_data.elf_base = base;
+	backend_data.elf = elf;
+	backend_data.segment = entry;
+
 	as_area_t *area = as_area_create(as, flags, mem_sz,
 	    AS_AREA_ATTR_NONE, &elf_backend, &backend_data, &base, 0);
Index: kernel/generic/src/mm/backend_elf.c
===================================================================
--- kernel/generic/src/mm/backend_elf.c	(revision 0705fc5d3687b1ccff06bbb14c734f004dabdc9d)
+++ kernel/generic/src/mm/backend_elf.c	(revision 103db9089df98061a5560c2c731e082f1f964f94)
@@ -96,4 +96,19 @@
 }
 
+/** Get page number in the task where the ELF page originates from.
+ *
+ * The ELF page can be shared to a different address than it originated from,
+ * but we need the originating address since that corresponds to the ELF's
+ * virtual addresses.
+ *
+ * @param area Area in which the page resides
+ * @param page Virtual address of the page in @a area
+ * @return Virtual address of the page in the origin address space
+ */
+static uintptr_t elf_orig_page(as_area_t *area, uintptr_t page)
+{
+	return page - area->base + area->backend_data.elf_base;
+}
+
 bool elf_create(as_area_t *area)
 {
@@ -152,7 +167,6 @@
 		    btree_node_t, leaf_link);
 	} else {
-		(void) btree_search(&area->sh_info->pagemap, start_anon, &leaf);
-		node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap,
-		    leaf);
+		(void) btree_search(&area->used_space, start_anon, &leaf);
+		node = btree_leaf_node_left_neighbour(&area->used_space, leaf);
 		if (!node)
 			node = leaf;
@@ -258,4 +272,5 @@
 	uintptr_t kpage;
 	uintptr_t start_anon;
+	uintptr_t elfpage;
 	size_t i;
 	bool dirty = false;
@@ -265,14 +280,17 @@
 	assert(IS_ALIGNED(upage, PAGE_SIZE));
 
+	elfpage = elf_orig_page(area, upage);
+
 	if (!as_area_check_access(area, access))
 		return AS_PF_FAULT;
 
-	if (upage < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
+	if (elfpage < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
 		return AS_PF_FAULT;
 
-	if (upage >= entry->p_vaddr + entry->p_memsz)
+	if (elfpage >= entry->p_vaddr + entry->p_memsz)
 		return AS_PF_FAULT;
 
-	i = (upage - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
+	i = (elfpage - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >>
+	    PAGE_WIDTH;
 	base = (uintptr_t)
 	    (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));
@@ -320,10 +338,10 @@
 	 * mapping.
 	 */
-	if (upage >= entry->p_vaddr && upage + PAGE_SIZE <= start_anon) {
+	if (elfpage >= entry->p_vaddr && elfpage + PAGE_SIZE <= start_anon) {
 		/*
 		 * Initialized portion of the segment. The memory is backed
 		 * directly by the content of the ELF image. Pages are
 		 * only copied if the segment is writable so that there
-		 * can be more instantions of the same memory ELF image
+		 * can be more instances of the same memory ELF image
 		 * used at a time. Note that this could be later done
 		 * as COW.
@@ -351,5 +369,5 @@
 			frame = PTE_GET_FRAME(&pte);
 		}
-	} else if (upage >= start_anon) {
+	} else if (elfpage >= start_anon) {
 		/*
 		 * This is the uninitialized portion of the segment.
@@ -424,14 +442,17 @@
 	elf_segment_header_t *entry = area->backend_data.segment;
 	uintptr_t start_anon;
+	uintptr_t elfpage;
 
 	assert(page_table_locked(area->as));
 	assert(mutex_locked(&area->lock));
 
-	assert(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
-	assert(page < entry->p_vaddr + entry->p_memsz);
+	elfpage = elf_orig_page(area, page);
+
+	assert(elfpage >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
+	assert(elfpage < entry->p_vaddr + entry->p_memsz);
 
 	start_anon = entry->p_vaddr + entry->p_filesz;
 
-	if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
+	if (elfpage >= entry->p_vaddr && elfpage + PAGE_SIZE <= start_anon) {
 		if (entry->p_flags & PF_W) {
 			/*
