Index: kernel/generic/src/mm/as.c
===================================================================
--- kernel/generic/src/mm/as.c	(revision e32720fffe177c9b6bf6489727240c59ac6a953b)
+++ kernel/generic/src/mm/as.c	(revision 2b3e88401eb51a34945bd40fdfd398cd7fdd4c1d)
@@ -544,5 +544,5 @@
     mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound)
 {
-	if ((*base != (uintptr_t) -1) && ((*base % PAGE_SIZE) != 0))
+	if ((*base != (uintptr_t) -1) && !IS_ALIGNED(*base, PAGE_SIZE))
 		return NULL;
 	
@@ -688,4 +688,7 @@
 int as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags)
 {
+	if (!IS_ALIGNED(address, PAGE_SIZE))
+		return EINVAL;
+
 	mutex_lock(&as->lock);
 	
@@ -1350,8 +1353,8 @@
  * Interrupts are assumed disabled.
  *
- * @param page   Faulting page.
- * @param access Access mode that caused the page fault (i.e.
- *               read/write/exec).
- * @param istate Pointer to the interrupted state.
+ * @param address Faulting virtual address.
+ * @param access  Access mode that caused the page fault (i.e.
+ *                read/write/exec).
+ * @param istate  Pointer to the interrupted state.
  *
  * @return AS_PF_FAULT on page fault.
@@ -1361,6 +1364,7 @@
  *
  */
-int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
-{
+int as_page_fault(uintptr_t address, pf_access_t access, istate_t *istate)
+{
+	uintptr_t page = ALIGN_DOWN(address, PAGE_SIZE);
 	int rc = AS_PF_FAULT;
 
@@ -1452,6 +1456,6 @@
 		task_kill_self(true);
 	} else {
-		fault_if_from_uspace(istate, "Page fault: %p.", (void *) page);
-		panic_memtrap(istate, access, page, NULL);
+		fault_if_from_uspace(istate, "Page fault: %p.", (void *) address);
+		panic_memtrap(istate, access, address, NULL);
 	}
 	
@@ -1679,5 +1683,5 @@
 {
 	ASSERT(mutex_locked(&area->lock));
-	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
+	ASSERT(IS_ALIGNED(page, PAGE_SIZE));
 	ASSERT(count);
 	
@@ -1963,5 +1967,5 @@
 {
 	ASSERT(mutex_locked(&area->lock));
-	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
+	ASSERT(IS_ALIGNED(page, PAGE_SIZE));
 	ASSERT(count);
 	
Index: kernel/generic/src/mm/backend_anon.c
===================================================================
--- kernel/generic/src/mm/backend_anon.c	(revision e32720fffe177c9b6bf6489727240c59ac6a953b)
+++ kernel/generic/src/mm/backend_anon.c	(revision 2b3e88401eb51a34945bd40fdfd398cd7fdd4c1d)
@@ -173,5 +173,5 @@
  *
  * @param area Pointer to the address space area.
- * @param addr Faulting virtual address.
+ * @param upage Faulting virtual page.
  * @param access Access mode that caused the fault (i.e. read/write/exec).
  *
@@ -179,7 +179,6 @@
  *     serviced).
  */
-int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
-{
-	uintptr_t upage = ALIGN_DOWN(addr, PAGE_SIZE);
+int anon_page_fault(as_area_t *area, uintptr_t upage, pf_access_t access)
+{
 	uintptr_t kpage;
 	uintptr_t frame;
@@ -187,4 +186,5 @@
 	ASSERT(page_table_locked(AS));
 	ASSERT(mutex_locked(&area->lock));
+	ASSERT(IS_ALIGNED(upage, PAGE_SIZE));
 
 	if (!as_area_check_access(area, access))
Index: kernel/generic/src/mm/backend_elf.c
===================================================================
--- kernel/generic/src/mm/backend_elf.c	(revision e32720fffe177c9b6bf6489727240c59ac6a953b)
+++ kernel/generic/src/mm/backend_elf.c	(revision 2b3e88401eb51a34945bd40fdfd398cd7fdd4c1d)
@@ -235,5 +235,5 @@
  *
  * @param area		Pointer to the address space area.
- * @param addr		Faulting virtual address.
+ * @param upage		Faulting virtual page.
  * @param access	Access mode that caused the fault (i.e.
  * 			read/write/exec).
@@ -242,5 +242,5 @@
  * 			on success (i.e. serviced).
  */
-int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
+int elf_page_fault(as_area_t *area, uintptr_t upage, pf_access_t access)
 {
 	elf_header_t *elf = area->backend_data.elf;
@@ -250,5 +250,4 @@
 	uintptr_t frame;
 	uintptr_t kpage;
-	uintptr_t upage;
 	uintptr_t start_anon;
 	size_t i;
@@ -257,20 +256,18 @@
 	ASSERT(page_table_locked(AS));
 	ASSERT(mutex_locked(&area->lock));
+	ASSERT(IS_ALIGNED(upage, PAGE_SIZE));
 
 	if (!as_area_check_access(area, access))
 		return AS_PF_FAULT;
 	
-	if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
+	if (upage < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
 		return AS_PF_FAULT;
 	
-	if (addr >= entry->p_vaddr + entry->p_memsz)
+	if (upage >= entry->p_vaddr + entry->p_memsz)
 		return AS_PF_FAULT;
 	
-	i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
+	i = (upage - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
 	base = (uintptr_t)
 	    (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));
-
-	/* Virtual address of faulting page */
-	upage = ALIGN_DOWN(addr, PAGE_SIZE);
 
 	/* Virtual address of the end of initialized part of segment */
Index: kernel/generic/src/mm/backend_phys.c
===================================================================
--- kernel/generic/src/mm/backend_phys.c	(revision e32720fffe177c9b6bf6489727240c59ac6a953b)
+++ kernel/generic/src/mm/backend_phys.c	(revision 2b3e88401eb51a34945bd40fdfd398cd7fdd4c1d)
@@ -111,5 +111,5 @@
  *
  * @param area Pointer to the address space area.
- * @param addr Faulting virtual address.
+ * @param upage Faulting virtual page.
  * @param access Access mode that caused the fault (i.e. read/write/exec).
  *
@@ -117,5 +117,5 @@
  * serviced).
  */
-int phys_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
+int phys_page_fault(as_area_t *area, uintptr_t upage, pf_access_t access)
 {
 	uintptr_t base = area->backend_data.base;
@@ -123,13 +123,14 @@
 	ASSERT(page_table_locked(AS));
 	ASSERT(mutex_locked(&area->lock));
+	ASSERT(IS_ALIGNED(upage, PAGE_SIZE));
 
 	if (!as_area_check_access(area, access))
 		return AS_PF_FAULT;
 
-	ASSERT(addr - area->base < area->backend_data.frames * FRAME_SIZE);
-	page_mapping_insert(AS, addr, base + (addr - area->base),
+	ASSERT(upage - area->base < area->backend_data.frames * FRAME_SIZE);
+	page_mapping_insert(AS, upage, base + (upage - area->base),
 	    as_area_get_flags(area));
 	
-	if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
+	if (!used_space_insert(area, upage, 1))
 		panic("Cannot insert used space.");
 
Index: kernel/generic/src/mm/page.c
===================================================================
--- kernel/generic/src/mm/page.c	(revision e32720fffe177c9b6bf6489727240c59ac6a953b)
+++ kernel/generic/src/mm/page.c	(revision 2b3e88401eb51a34945bd40fdfd398cd7fdd4c1d)
@@ -104,5 +104,6 @@
 	ASSERT(page_mapping_operations->mapping_insert);
 
-	page_mapping_operations->mapping_insert(as, page, frame, flags);
+	page_mapping_operations->mapping_insert(as, ALIGN_DOWN(page, PAGE_SIZE),
+	    ALIGN_DOWN(frame, FRAME_SIZE), flags);
 	
 	/* Repel prefetched accesses to the old mapping. */
@@ -127,5 +128,6 @@
 	ASSERT(page_mapping_operations->mapping_remove);
 	
-	page_mapping_operations->mapping_remove(as, page);
+	page_mapping_operations->mapping_remove(as,
+	    ALIGN_DOWN(page, PAGE_SIZE));
 	
 	/* Repel prefetched accesses to the old mapping. */
@@ -150,5 +152,6 @@
 	ASSERT(page_mapping_operations->mapping_find);
 	
-	return page_mapping_operations->mapping_find(as, page, nolock);
+	return page_mapping_operations->mapping_find(as,
+	    ALIGN_DOWN(page, PAGE_SIZE), nolock);
 }
 
