Index: kernel/generic/src/ddi/ddi.c
===================================================================
--- kernel/generic/src/ddi/ddi.c	(revision f542825fed6544dd459953d9391c3a004f2acd22)
+++ kernel/generic/src/ddi/ddi.c	(revision c1f7f6eaa570166428a8687a4e9c7cd839cd666d)
@@ -30,8 +30,8 @@
  * @{
  */
- 
+
 /**
  * @file
- * @brief	Device Driver Interface functions.
+ * @brief Device Driver Interface functions.
  *
  * This file contains functions that comprise the Device Driver Interface.
@@ -48,5 +48,5 @@
 #include <synch/spinlock.h>
 #include <syscall/copy.h>
-#include <adt/list.h>
+#include <adt/btree.h>
 #include <arch.h>
 #include <align.h>
@@ -56,11 +56,11 @@
 SPINLOCK_INITIALIZE(parea_lock);
 
-/** List with enabled physical memory areas. */
-static LIST_INITIALIZE(parea_head);
+/** B+tree with enabled physical memory areas. */
+static btree_t parea_btree;
 
 /** Initialize DDI. */
 void ddi_init(void)
 {
-	hw_area();
+	btree_create(&parea_btree);
 }
 
@@ -69,20 +69,14 @@
  * @param parea Pointer to physical area structure.
  *
- * @todo This function doesn't check for overlaps. It depends on the kernel to
- * create disjunct physical memory areas.
  */
 void ddi_parea_register(parea_t *parea)
 {
-	ipl_t ipl;
-	
-	ipl = interrupts_disable();
+	ipl_t ipl = interrupts_disable();
 	spinlock_lock(&parea_lock);
 	
 	/*
-	 * TODO: we should really check for overlaps here.
-	 * However, we should be safe because the kernel is pretty sane.
-	 */
-	link_initialize(&parea->link);
-	list_append(&parea->link, &parea_head);
+	 * We don't check for overlaps here as the kernel is pretty sane.
+	 */
+	btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL);
 	
 	spinlock_unlock(&parea_lock);
@@ -92,62 +86,81 @@
 /** Map piece of physical memory into virtual address space of current task.
  *
- * @param pf Physical address of the starting frame.
- * @param vp Virtual address of the starting page.
+ * @param pf    Physical address of the starting frame.
+ * @param vp    Virtual address of the starting page.
  * @param pages Number of pages to map.
  * @param flags Address space area flags for the mapping.
  *
  * @return 0 on success, EPERM if the caller lacks capabilities to use this
- *  syscall, ENOENT if there is no task matching the specified ID or the
- *  physical address space is not enabled for mapping and ENOMEM if there
- *  was a problem in creating address space area.
- */
-static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, pfn_t pages, int flags)
-{
-	ipl_t ipl;
-	cap_t caps;
+ *         syscall, EBADMEM if pf or vf is not page aligned, ENOENT if there
+ *         is no task matching the specified ID or the physical address space
+ *         is not enabled for mapping and ENOMEM if there was a problem in
+ *         creating address space area.
+ *
+ */
+static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, count_t pages, int flags)
+{
+	ASSERT(TASK);
+	ASSERT((pf % FRAME_SIZE) == 0);
+	ASSERT((vp % PAGE_SIZE) == 0);
+	
+	/*
+	 * Make sure the caller is authorised to make this syscall.
+	 */
+	cap_t caps = cap_get(TASK);
+	if (!(caps & CAP_MEM_MANAGER))
+		return EPERM;
+	
 	mem_backend_data_t backend_data;
-	
 	backend_data.base = pf;
 	backend_data.frames = pages;
 	
-	/*
-	 * Make sure the caller is authorised to make this syscall.
-	 */
-	caps = cap_get(TASK);
-	if (!(caps & CAP_MEM_MANAGER))
-		return EPERM;
-	
-	ipl = interrupts_disable();
-	
-	/*
-	 * Check if the physical memory area is enabled for mapping.
-	 */
-	spinlock_lock(&parea_lock);
-	
-	bool fnd = false;
-	link_t *cur;
-	
-	for (cur = parea_head.next; cur != &parea_head; cur = cur->next) {
-		parea_t *parea = list_get_instance(cur, parea_t, link);
-		if ((parea->pbase <= pf) && (ADDR2PFN(pf - parea->pbase) + pages <= parea->frames)) {
-			fnd = true;
-			break;
-		}
-	}
-	
-	spinlock_unlock(&parea_lock);
-	
-	if (!fnd) {
-		/*
-		 * Physical memory area cannot be mapped.
-		 */
-		interrupts_restore(ipl);
-		return ENOENT;
-	}
-	
+	ipl_t ipl = interrupts_disable();
+	
+	/* Find the zone of the physical memory */
+	spinlock_lock(&zones.lock);
+	count_t znum = find_zone(ADDR2PFN(pf), pages, 0);
+	
+	if (znum == (count_t) -1) {
+		/* Frames not found in any zones
+		 * -> assume it is hardware device and allow mapping
+		 */
+		spinlock_unlock(&zones.lock);
+		goto map;
+	}
+	
+	if (zones.info[znum].flags & ZONE_FIRMWARE) {
+		/* Frames are part of firmware */
+		spinlock_unlock(&zones.lock);
+		goto map;
+	}
+	
+	if (zone_flags_available(zones.info[znum].flags)) {
+		/* Frames are part of physical memory, check if the memory
+		 * region is enabled for mapping.
+		 */
+		spinlock_unlock(&zones.lock);
+		
+		spinlock_lock(&parea_lock);
+		btree_node_t *nodep;
+		parea_t *parea = (parea_t *) btree_search(&parea_btree,
+		    (btree_key_t) pf, &nodep);
+		if ((parea) && (parea->frames >= pages)) {
+			spinlock_unlock(&parea_lock);
+			goto map;
+		}
+		spinlock_unlock(&parea_lock);
+		interrupts_restore(ipl);
+		return ENOENT;
+	}
+	
+	/* Zone exists but is neither firmware nor available memory. */
+	spinlock_unlock(&zones.lock);
+	interrupts_restore(ipl);
+	return ENOENT;
+	
+map:
 	spinlock_lock(&TASK->lock);
 	
-	if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp, AS_AREA_ATTR_NONE,
-		&phys_backend, &backend_data)) {
+	if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp,
+	    AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
 		/*
 		 * The address space area could not have been created.
@@ -175,26 +188,22 @@
  *
  * @return 0 on success, EPERM if the caller lacks capabilities to use this
- * 	syscall, ENOENT if there is no task matching the specified ID.
+ *           syscall, ENOENT if there is no task matching the specified ID.
+ *
  */
 static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size)
 {
-	ipl_t ipl;
-	cap_t caps;
-	task_t *t;
-	int rc;
-	
 	/*
 	 * Make sure the caller is authorised to make this syscall.
 	 */
-	caps = cap_get(TASK);
+	cap_t caps = cap_get(TASK);
 	if (!(caps & CAP_IO_MANAGER))
 		return EPERM;
 	
-	ipl = interrupts_disable();
+	ipl_t ipl = interrupts_disable();
 	spinlock_lock(&tasks_lock);
 	
-	t = task_find_by_id(id);
-	
-	if ((!t) || (!context_check(CONTEXT, t->context))) {
+	task_t *task = task_find_by_id(id);
+	
+	if ((!task) || (!context_check(CONTEXT, task->context))) {
 		/*
 		 * There is no task with the specified ID
@@ -206,13 +215,14 @@
 		return ENOENT;
 	}
-
+	
 	/* Lock the task and release the lock protecting tasks_btree. */
-	spinlock_lock(&t->lock);
+	spinlock_lock(&task->lock);
 	spinlock_unlock(&tasks_lock);
-
-	rc = ddi_iospace_enable_arch(t, ioaddr, size);
-	
-	spinlock_unlock(&t->lock);
-	interrupts_restore(ipl);
+	
+	int rc = ddi_iospace_enable_arch(task, ioaddr, size);
+	
+	spinlock_unlock(&task->lock);
+	interrupts_restore(ipl);
+	
 	return rc;
 }
@@ -226,5 +236,6 @@
  *
  * @return 0 on success, otherwise it returns error code found in errno.h
- */ 
+ *
+ */
 unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base,
     unative_t pages, unative_t flags)
@@ -232,5 +243,5 @@
 	return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base,
 	    FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE),
-	    (pfn_t) pages, (int) flags);
+	    (count_t) pages, (int) flags);
 }
 
@@ -240,14 +251,13 @@
  *
  * @return 0 on success, otherwise it returns error code found in errno.h
- */ 
+ *
+ */
 unative_t sys_iospace_enable(ddi_ioarg_t *uspace_io_arg)
 {
 	ddi_ioarg_t arg;
-	int rc;
-	
-	rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t));
+	int rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t));
 	if (rc != 0)
 		return (unative_t) rc;
-		
+	
 	return (unative_t) ddi_iospace_enable((task_id_t) arg.task_id,
 	    (uintptr_t) arg.ioaddr, (size_t) arg.size);
@@ -257,17 +267,21 @@
  *
  * @param enable If non-zero, the preemption counter will be decremented,
- * 	leading to potential enabling of preemption. Otherwise the preemption
- * 	counter will be incremented, preventing preemption from occurring.
+ *               leading to potential enabling of preemption. Otherwise
+ *               the preemption counter will be incremented, preventing
+ *               preemption from occurring.
  *
  * @return Zero on success or EPERM if callers capabilities are not sufficient.
- */ 
+ *
+ */
 unative_t sys_preempt_control(int enable)
 {
 	if (!cap_get(TASK) & CAP_PREEMPT_CONTROL)
 		return EPERM;
+	
 	if (enable)
 		preemption_enable();
 	else
 		preemption_disable();
+	
 	return 0;
 }
