Index: kernel/generic/include/mm/km.h
===================================================================
--- kernel/generic/include/mm/km.h	(revision bf3dd35e1dbbdb751e833d8750fe09a54f590040)
+++ kernel/generic/include/mm/km.h	(revision 3d4750f9e3b9de0037847d9d9e8b22c859444ea7)
@@ -50,4 +50,5 @@
 
 extern uintptr_t km_map(uintptr_t, size_t, unsigned int);
+extern void km_unmap(uintptr_t, size_t);
 
 extern uintptr_t km_temporary_page_get(uintptr_t *, frame_flags_t);
Index: kernel/generic/src/mm/km.c
===================================================================
--- kernel/generic/src/mm/km.c	(revision bf3dd35e1dbbdb751e833d8750fe09a54f590040)
+++ kernel/generic/src/mm/km.c	(revision 3d4750f9e3b9de0037847d9d9e8b22c859444ea7)
@@ -128,14 +128,15 @@
 {
 	uintptr_t vaddr;
-	size_t asize;
 	size_t align;
 	uintptr_t offs;
 
-	asize = ALIGN_UP(size, PAGE_SIZE);
+	ASSERT(ALIGN_DOWN(paddr, FRAME_SIZE) == paddr);
+	ASSERT(ALIGN_UP(size, FRAME_SIZE) == size);
+
 	align = ispwr2(size) ? size : (1U << (fnzb(size) + 1));
-	vaddr = km_page_alloc(asize, max(PAGE_SIZE, align));
+	vaddr = km_page_alloc(size, max(PAGE_SIZE, align));
 
 	page_table_lock(AS_KERNEL, true);
-	for (offs = 0; offs < asize; offs += PAGE_SIZE) {
+	for (offs = 0; offs < size; offs += PAGE_SIZE) {
 		page_mapping_insert(AS_KERNEL, vaddr + offs, paddr + offs,
 		    flags);
@@ -146,15 +147,64 @@
 }
 
+static void km_unmap_aligned(uintptr_t vaddr, size_t size)
+{
+	uintptr_t offs;
+	ipl_t ipl;
+
+	ASSERT(ALIGN_DOWN(vaddr, PAGE_SIZE) == vaddr);
+	ASSERT(ALIGN_UP(size, PAGE_SIZE) == size);
+
+	page_table_lock(AS_KERNEL, true);
+
+	ipl = tlb_shootdown_start(TLB_INVL_ASID, ASID_KERNEL, 0, 0);
+
+	for (offs = 0; offs < size; offs += PAGE_SIZE)
+		page_mapping_remove(AS_KERNEL, vaddr + offs);
+
+	tlb_invalidate_asid(ASID_KERNEL);
+
+	as_invalidate_translation_cache(AS_KERNEL, 0, -1);
+	tlb_shootdown_finalize(ipl);
+	page_table_unlock(AS_KERNEL, true);
+
+	km_page_free(vaddr, size);
+}
+
+/** Map a piece of physical address space into the virtual address space.
+ *
+ * @param paddr		Physical address to be mapped. May be unaligned.
+ * @param size		Size of area starting at paddr to be mapped.
+ * @param flags		Protection flags to be used for the mapping.
+ *
+ * @return New virtual address mapped to paddr.
+ */
 uintptr_t km_map(uintptr_t paddr, size_t size, unsigned int flags)
 {
-	size_t offs = paddr - ALIGN_DOWN(paddr, FRAME_SIZE); 
 	uintptr_t page;
-
-	page = km_map_aligned(ALIGN_DOWN(paddr, FRAME_SIZE), size + offs,
-	    flags);
+	size_t offs;
+
+	offs = paddr - ALIGN_DOWN(paddr, FRAME_SIZE);
+	page = km_map_aligned(ALIGN_DOWN(paddr, FRAME_SIZE),
+	    ALIGN_UP(size + offs, FRAME_SIZE), flags);
+
 	return page + offs;
 }
 
-/** Unmap kernen non-identity page.
+/** Unmap a piece of virtual address space.
+ *
+ * @param vaddr		Virtual address to be unmapped. May be unaligned, but
+ *			it must be a value previously returned by km_map().
+ * @param size		Size of area starting at vaddr to be unmapped.
+ */
+void km_unmap(uintptr_t vaddr, size_t size)
+{
+	size_t offs;
+
+	offs = vaddr - ALIGN_DOWN(vaddr, PAGE_SIZE);
+	km_unmap_aligned(ALIGN_DOWN(vaddr, PAGE_SIZE),
+	    ALIGN_UP(size + offs, PAGE_SIZE));
+}
+
+/** Unmap kernel non-identity page.
  *
  * @param[in] page	Non-identity page to be unmapped.
