Index: kernel/generic/src/mm/as.c
===================================================================
--- kernel/generic/src/mm/as.c	(revision 1e00216b42ec36d97a169d4cf7053880fcb26aac)
+++ kernel/generic/src/mm/as.c	(revision e950803ed2d515a08b7d9fa5bb04ba2b26c41c99)
@@ -86,17 +86,15 @@
  * Each architecture decides what functions will be used to carry out
  * address space operations such as creating or locking page tables.
- *
  */
 as_operations_t *as_operations = NULL;
 
-/**
- * Slab for as_t objects.
+/** Slab for as_t objects.
  *
  */
 static slab_cache_t *as_slab;
 
-/**
- * This lock serializes access to the ASID subsystem.
- * It protects:
+/** ASID subsystem lock.
+ *
+ * This lock protects:
  * - inactive_as_with_asid_head list
  * - as->asid for each as of the as_t type
@@ -107,7 +105,6 @@
 
 /**
- * This list contains address spaces that are not active on any
- * processor and that have valid ASID.
- *
+ * Address spaces that are inactive on every processor
+ * and have a valid ASID.
  */
 LIST_INITIALIZE(inactive_as_with_asid_head);
@@ -123,13 +120,10 @@
 	mutex_initialize(&as->lock, MUTEX_PASSIVE);
 	
-	int rc = as_constructor_arch(as, flags);
-	
-	return rc;
+	return as_constructor_arch(as, flags);
 }
 
 NO_TRACE static size_t as_destructor(void *obj)
 {
-	as_t *as = (as_t *) obj;
-	return as_destructor_arch(as);
+	return as_destructor_arch((as_t *) obj);
 }
 
@@ -146,5 +140,6 @@
 		panic("Cannot create kernel address space.");
 	
-	/* Make sure the kernel address space
+	/*
+	 * Make sure the kernel address space
 	 * reference count never drops to zero.
 	 */
@@ -195,5 +190,5 @@
 {
 	DEADLOCK_PROBE_INIT(p_asidlock);
-
+	
 	ASSERT(as != AS);
 	ASSERT(atomic_get(&as->refcount) == 0);
@@ -203,5 +198,5 @@
 	 * lock its mutex.
 	 */
-
+	
 	/*
 	 * We need to avoid deadlock between TLB shootdown and asidlock.
@@ -210,5 +205,4 @@
 	 * disabled to prevent nested context switches. We also depend on the
 	 * fact that so far no spinlocks are held.
-	 *
 	 */
 	preemption_disable();
@@ -235,5 +229,5 @@
 	spinlock_unlock(&asidlock);
 	interrupts_restore(ipl);
-
+	
 	
 	/*
@@ -241,5 +235,4 @@
 	 * The B+tree must be walked carefully because it is
 	 * also being destroyed.
-	 *
 	 */
 	bool cond = true;
@@ -268,6 +261,6 @@
 /** Hold a reference to an address space.
  *
- * Holding a reference to an address space prevents destruction of that address
- * space.
+ * Holding a reference to an address space prevents destruction
+ * of that address space.
  *
  * @param as Address space to be held.
@@ -281,6 +274,6 @@
 /** Release a reference to an address space.
  *
- * The last one to release a reference to an address space destroys the address
- * space.
+ * The last one to release a reference to an address space
+ * destroys the address space.
  *
  * @param asAddress space to be released.
@@ -310,5 +303,4 @@
 	/*
 	 * We don't want any area to have conflicts with NULL page.
-	 *
 	 */
 	if (overlaps(va, size, (uintptr_t) NULL, PAGE_SIZE))
@@ -321,5 +313,4 @@
 	 * record in the left neighbour, the leftmost record in the right
 	 * neighbour and all records in the leaf node itself.
-	 *
 	 */
 	btree_node_t *leaf;
@@ -382,5 +373,4 @@
 	 * So far, the area does not conflict with other areas.
 	 * Check if it doesn't conflict with kernel address space.
-	 *
 	 */
 	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
@@ -437,4 +427,5 @@
 	area->attributes = attrs;
 	area->pages = SIZE2FRAMES(size);
+	area->resident = 0;
 	area->base = base;
 	area->sh_info = NULL;
@@ -479,5 +470,4 @@
 	 * to find out whether this is a miss or va belongs to an address
 	 * space area found there.
-	 *
 	 */
 	
@@ -499,5 +489,4 @@
 	 * Second, locate the left neighbour and test its last record.
 	 * Because of its position in the B+tree, it must have base < va.
-	 *
 	 */
 	btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
@@ -534,5 +523,4 @@
 	/*
 	 * Locate the area.
-	 *
 	 */
 	as_area_t *area = find_area_and_lock(as, address);
@@ -546,5 +534,4 @@
 		 * Remapping of address space areas associated
 		 * with memory mapped devices is not supported.
-		 *
 		 */
 		mutex_unlock(&area->lock);
@@ -557,5 +544,4 @@
 		 * Remapping of shared address space areas
 		 * is not supported.
-		 *
 		 */
 		mutex_unlock(&area->lock);
@@ -568,5 +554,4 @@
 		/*
 		 * Zero size address space areas are not allowed.
-		 *
 		 */
 		mutex_unlock(&area->lock);
@@ -581,5 +566,4 @@
 		 * Shrinking the area.
 		 * No need to check for overlaps.
-		 *
 		 */
 		
@@ -588,5 +572,4 @@
 		/*
 		 * Start TLB shootdown sequence.
-		 *
 		 */
 		ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid,
@@ -599,5 +582,4 @@
 		 * is also the right way to remove part of the used_space
 		 * B+tree leaf list.
-		 *
 		 */
 		bool cond = true;
@@ -623,5 +605,4 @@
 						 * completely in the resized
 						 * address space area.
-						 *
 						 */
 						break;
@@ -632,5 +613,4 @@
 					 * to b and c overlaps with the resized
 					 * address space area.
-					 *
 					 */
 					
@@ -673,5 +653,4 @@
 		/*
 		 * Finish TLB shootdown sequence.
-		 *
 		 */
 		
@@ -681,5 +660,4 @@
 		/*
 		 * Invalidate software translation caches (e.g. TSB on sparc64).
-		 *
 		 */
 		as_invalidate_translation_cache(as, area->base +
@@ -692,5 +670,4 @@
 		 * Growing the area.
 		 * Check for overlaps with other address space areas.
-		 *
 		 */
 		if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
@@ -813,5 +790,4 @@
 	/*
 	 * Finish TLB shootdown sequence.
-	 *
 	 */
 	
@@ -821,5 +797,4 @@
 	 * Invalidate potential software translation caches (e.g. TSB on
 	 * sparc64).
-	 *
 	 */
 	as_invalidate_translation_cache(as, area->base, area->pages);
@@ -839,5 +814,4 @@
 	/*
 	 * Remove the empty area from address space.
-	 *
 	 */
 	btree_remove(&as->as_area_btree, base, NULL);
@@ -881,5 +855,4 @@
 		/*
 		 * Could not find the source address space area.
-		 *
 		 */
 		mutex_unlock(&src_as->lock);
@@ -891,5 +864,4 @@
 		 * There is no backend or the backend does not
 		 * know how to share the area.
-		 *
 		 */
 		mutex_unlock(&src_area->lock);
@@ -918,5 +890,4 @@
 	 * First, prepare the area for sharing.
 	 * Then it will be safe to unlock it.
-	 *
 	 */
 	share_info_t *sh_info = src_area->sh_info;
@@ -930,5 +901,4 @@
 		/*
 		 * Call the backend to setup sharing.
-		 *
 		 */
 		src_area->backend->share(src_area);
@@ -949,5 +919,4 @@
 	 * The flags of the source area are masked against dst_flags_mask
 	 * to support sharing in less privileged mode.
-	 *
 	 */
 	as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size,
@@ -966,5 +935,4 @@
 	 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
 	 * attribute and set the sh_info.
-	 *
 	 */
 	mutex_lock(&dst_as->lock);
@@ -989,4 +957,6 @@
 NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access)
 {
+	ASSERT(mutex_locked(&area->lock));
+	
 	int flagmap[] = {
 		[PF_ACCESS_READ] = AS_AREA_READ,
@@ -994,6 +964,4 @@
 		[PF_ACCESS_EXEC] = AS_AREA_EXEC
 	};
-
-	ASSERT(mutex_locked(&area->lock));
 	
 	if (!(area->flags & flagmap[access]))
@@ -1066,5 +1034,4 @@
 	/*
 	 * Compute total number of used pages in the used_space B+tree
-	 *
 	 */
 	size_t used_pages = 0;
@@ -1088,5 +1055,4 @@
 	/*
 	 * Start TLB shootdown sequence.
-	 *
 	 */
 	ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
@@ -1096,5 +1062,4 @@
 	 * Remove used pages from page tables and remember their frame
 	 * numbers.
-	 *
 	 */
 	size_t frame_idx = 0;
@@ -1127,5 +1092,4 @@
 	/*
 	 * Finish TLB shootdown sequence.
-	 *
 	 */
 	
@@ -1135,5 +1099,4 @@
 	 * Invalidate potential software translation caches (e.g. TSB on
 	 * sparc64).
-	 *
 	 */
 	as_invalidate_translation_cache(as, area->base, area->pages);
@@ -1217,5 +1180,4 @@
 		 * No area contained mapping for 'page'.
 		 * Signal page fault to low-level handler.
-		 *
 		 */
 		mutex_unlock(&AS->lock);
@@ -1237,5 +1199,4 @@
 		 * The address space area is not backed by any backend
 		 * or the backend cannot handle page faults.
-		 *
 		 */
 		mutex_unlock(&area->lock);
@@ -1249,5 +1210,4 @@
 	 * To avoid race condition between two page faults on the same address,
 	 * we need to make sure the mapping has not been already inserted.
-	 *
 	 */
 	pte_t *pte;
@@ -1267,5 +1227,4 @@
 	/*
 	 * Resort to the backend page fault handler.
-	 *
 	 */
 	if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
@@ -1322,5 +1281,4 @@
 		 * preemption is disabled. We should not be
 		 * holding any other lock.
-		 *
 		 */
 		(void) interrupts_enable();
@@ -1342,5 +1300,4 @@
 			 * list of inactive address spaces with assigned
 			 * ASID.
-			 *
 			 */
 			ASSERT(old_as->asid != ASID_INVALID);
@@ -1353,5 +1310,4 @@
 		 * Perform architecture-specific tasks when the address space
 		 * is being removed from the CPU.
-		 *
 		 */
 		as_deinstall_arch(old_as);
@@ -1360,5 +1316,4 @@
 	/*
 	 * Second, prepare the new address space.
-	 *
 	 */
 	if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
@@ -1376,5 +1331,4 @@
 	 * Perform architecture-specific steps.
 	 * (e.g. write ASID to hardware register etc.)
-	 *
 	 */
 	as_install_arch(new_as);
@@ -1395,5 +1349,5 @@
 {
 	ASSERT(mutex_locked(&area->lock));
-
+	
 	return area_flags_to_page_flags(area->flags);
 }
@@ -1516,8 +1470,8 @@
  * @param count Number of page to be marked.
  *
- * @return Zero on failure and non-zero on success.
- *
- */
-int used_space_insert(as_area_t *area, uintptr_t page, size_t count)
+ * @return False on failure and true on success.
+ *
+ */
+bool used_space_insert(as_area_t *area, uintptr_t page, size_t count)
 {
 	ASSERT(mutex_locked(&area->lock));
@@ -1530,12 +1484,11 @@
 		/*
 		 * We hit the beginning of some used space.
-		 *
-		 */
-		return 0;
+		 */
+		return false;
 	}
 	
 	if (!leaf->keys) {
 		btree_insert(&area->used_space, page, (void *) count, leaf);
-		return 1;
+		goto success;
 	}
 	
@@ -1551,5 +1504,4 @@
 		 * somewhere between the rightmost interval of
 		 * the left neigbour and the first interval of the leaf.
-		 *
 		 */
 		
@@ -1559,9 +1511,9 @@
 		    left_cnt * PAGE_SIZE)) {
 			/* The interval intersects with the left interval. */
-			return 0;
+			return false;
 		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
 		    right_cnt * PAGE_SIZE)) {
 			/* The interval intersects with the right interval. */
-			return 0;
+			return false;
 		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
 		    (page + count * PAGE_SIZE == right_pg)) {
@@ -1569,17 +1521,15 @@
 			 * The interval can be added by merging the two already
 			 * present intervals.
-			 *
 			 */
 			node->value[node->keys - 1] += count + right_cnt;
 			btree_remove(&area->used_space, right_pg, leaf);
-			return 1;
+			goto success;
 		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
 			/*
 			 * The interval can be added by simply growing the left
 			 * interval.
-			 *
 			 */
 			node->value[node->keys - 1] += count;
-			return 1;
+			goto success;
 		} else if (page + count * PAGE_SIZE == right_pg) {
 			/*
@@ -1587,18 +1537,16 @@
 			 * the right interval down and increasing its size
 			 * accordingly.
-			 *
 			 */
 			leaf->value[0] += count;
 			leaf->key[0] = page;
-			return 1;
+			goto success;
 		} else {
 			/*
 			 * The interval is between both neigbouring intervals,
 			 * but cannot be merged with any of them.
-			 *
 			 */
 			btree_insert(&area->used_space, page, (void *) count,
 			    leaf);
-			return 1;
+			goto success;
 		}
 	} else if (page < leaf->key[0]) {
@@ -1609,5 +1557,4 @@
 		 * Investigate the border case in which the left neighbour does
 		 * not exist but the interval fits from the left.
-		 *
 		 */
 		
@@ -1615,5 +1562,5 @@
 		    right_cnt * PAGE_SIZE)) {
 			/* The interval intersects with the right interval. */
-			return 0;
+			return false;
 		} else if (page + count * PAGE_SIZE == right_pg) {
 			/*
@@ -1621,18 +1568,16 @@
 			 * right interval down and increasing its size
 			 * accordingly.
-			 *
 			 */
 			leaf->key[0] = page;
 			leaf->value[0] += count;
-			return 1;
+			goto success;
 		} else {
 			/*
 			 * The interval doesn't adjoin with the right interval.
 			 * It must be added individually.
-			 *
 			 */
 			btree_insert(&area->used_space, page, (void *) count,
 			    leaf);
-			return 1;
+			goto success;
 		}
 	}
@@ -1649,5 +1594,4 @@
 		 * somewhere between the leftmost interval of
 		 * the right neigbour and the last interval of the leaf.
-		 *
 		 */
 		
@@ -1657,9 +1601,9 @@
 		    left_cnt * PAGE_SIZE)) {
 			/* The interval intersects with the left interval. */
-			return 0;
+			return false;
 		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
 		    right_cnt * PAGE_SIZE)) {
 			/* The interval intersects with the right interval. */
-			return 0;
+			return false;
 		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
 		    (page + count * PAGE_SIZE == right_pg)) {
@@ -1667,17 +1611,15 @@
 			 * The interval can be added by merging the two already
 			 * present intervals.
-			 *
 			 */
 			leaf->value[leaf->keys - 1] += count + right_cnt;
 			btree_remove(&area->used_space, right_pg, node);
-			return 1;
+			goto success;
 		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
 			/*
 			 * The interval can be added by simply growing the left
 			 * interval.
-			 *
 			 */
-			leaf->value[leaf->keys - 1] +=  count;
-			return 1;
+			leaf->value[leaf->keys - 1] += count;
+			goto success;
 		} else if (page + count * PAGE_SIZE == right_pg) {
 			/*
@@ -1685,18 +1627,16 @@
 			 * the right interval down and increasing its size
 			 * accordingly.
-			 *
 			 */
 			node->value[0] += count;
 			node->key[0] = page;
-			return 1;
+			goto success;
 		} else {
 			/*
 			 * The interval is between both neigbouring intervals,
 			 * but cannot be merged with any of them.
-			 *
 			 */
 			btree_insert(&area->used_space, page, (void *) count,
 			    leaf);
-			return 1;
+			goto success;
 		}
 	} else if (page >= leaf->key[leaf->keys - 1]) {
@@ -1707,5 +1647,4 @@
 		 * Investigate the border case in which the right neighbour
 		 * does not exist but the interval fits from the right.
-		 *
 		 */
 		
@@ -1713,22 +1652,20 @@
 		    left_cnt * PAGE_SIZE)) {
 			/* The interval intersects with the left interval. */
-			return 0;
+			return false;
 		} else if (left_pg + left_cnt * PAGE_SIZE == page) {
 			/*
 			 * The interval can be added by growing the left
 			 * interval.
-			 *
 			 */
 			leaf->value[leaf->keys - 1] += count;
-			return 1;
+			goto success;
 		} else {
 			/*
 			 * The interval doesn't adjoin with the left interval.
 			 * It must be added individually.
-			 *
 			 */
 			btree_insert(&area->used_space, page, (void *) count,
 			    leaf);
-			return 1;
+			goto success;
 		}
 	}
@@ -1738,5 +1675,4 @@
 	 * only between two other intervals of the leaf. The two border cases
 	 * were already resolved.
-	 *
 	 */
 	btree_key_t i;
@@ -1750,5 +1686,4 @@
 			/*
 			 * The interval fits between left_pg and right_pg.
-			 *
 			 */
 			
@@ -1758,7 +1693,6 @@
 				 * The interval intersects with the left
 				 * interval.
-				 *
 				 */
-				return 0;
+				return false;
 			} else if (overlaps(page, count * PAGE_SIZE, right_pg,
 			    right_cnt * PAGE_SIZE)) {
@@ -1766,7 +1700,6 @@
 				 * The interval intersects with the right
 				 * interval.
-				 *
 				 */
-				return 0;
+				return false;
 			} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
 			    (page + count * PAGE_SIZE == right_pg)) {
@@ -1774,17 +1707,15 @@
 				 * The interval can be added by merging the two
 				 * already present intervals.
-				 *
 				 */
 				leaf->value[i - 1] += count + right_cnt;
 				btree_remove(&area->used_space, right_pg, leaf);
-				return 1;
+				goto success;
 			} else if (page == left_pg + left_cnt * PAGE_SIZE) {
 				/*
 				 * The interval can be added by simply growing
 				 * the left interval.
-				 *
 				 */
 				leaf->value[i - 1] += count;
-				return 1;
+				goto success;
 			} else if (page + count * PAGE_SIZE == right_pg) {
 				/*
@@ -1792,9 +1723,8 @@
 				 * base of the right interval down and
 				 * increasing its size accordingly.
-				 *
 				 */
 				leaf->value[i] += count;
 				leaf->key[i] = page;
-				return 1;
+				goto success;
 			} else {
 				/*
@@ -1802,9 +1732,8 @@
 				 * intervals, but cannot be merged with any of
 				 * them.
-				 *
 				 */
 				btree_insert(&area->used_space, page,
 				    (void *) count, leaf);
-				return 1;
+				goto success;
 			}
 		}
@@ -1813,4 +1742,8 @@
 	panic("Inconsistency detected while adding %zu pages of used "
 	    "space at %p.", count, (void *) page);
+	
+success:
+	area->resident += count;
+	return true;
 }
 
@@ -1823,8 +1756,8 @@
  * @param count Number of page to be marked.
  *
- * @return Zero on failure and non-zero on success.
- *
- */
-int used_space_remove(as_area_t *area, uintptr_t page, size_t count)
+ * @return False on failure and true on success.
+ *
+ */
+bool used_space_remove(as_area_t *area, uintptr_t page, size_t count)
 {
 	ASSERT(mutex_locked(&area->lock));
@@ -1837,16 +1770,14 @@
 		/*
 		 * We are lucky, page is the beginning of some interval.
-		 *
 		 */
 		if (count > pages) {
-			return 0;
+			return false;
 		} else if (count == pages) {
 			btree_remove(&area->used_space, page, leaf);
-			return 1;
+			goto success;
 		} else {
 			/*
 			 * Find the respective interval.
 			 * Decrease its size and relocate its start address.
-			 *
 			 */
 			btree_key_t i;
@@ -1855,7 +1786,8 @@
 					leaf->key[i] += count * PAGE_SIZE;
 					leaf->value[i] -= count;
-					return 1;
+					goto success;
 				}
 			}
+			
 			goto error;
 		}
@@ -1876,8 +1808,7 @@
 				 * removed by updating the size of the bigger
 				 * interval.
-				 *
 				 */
 				node->value[node->keys - 1] -= count;
-				return 1;
+				goto success;
 			} else if (page + count * PAGE_SIZE <
 			    left_pg + left_cnt*PAGE_SIZE) {
@@ -1888,5 +1819,4 @@
 				 * the original interval and also inserting a
 				 * new interval.
-				 *
 				 */
 				size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
@@ -1895,10 +1825,11 @@
 				btree_insert(&area->used_space, page +
 				    count * PAGE_SIZE, (void *) new_cnt, leaf);
-				return 1;
+				goto success;
 			}
 		}
-		return 0;
+		
+		return false;
 	} else if (page < leaf->key[0])
-		return 0;
+		return false;
 	
 	if (page > leaf->key[leaf->keys - 1]) {
@@ -1914,8 +1845,7 @@
 				 * interval of the leaf and can be removed by
 				 * updating the size of the bigger interval.
-				 *
 				 */
 				leaf->value[leaf->keys - 1] -= count;
-				return 1;
+				goto success;
 			} else if (page + count * PAGE_SIZE < left_pg +
 			    left_cnt * PAGE_SIZE) {
@@ -1926,5 +1856,4 @@
 				 * original interval and also inserting a new
 				 * interval.
-				 *
 				 */
 				size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
@@ -1933,13 +1862,14 @@
 				btree_insert(&area->used_space, page +
 				    count * PAGE_SIZE, (void *) new_cnt, leaf);
-				return 1;
+				goto success;
 			}
 		}
-		return 0;
+		
+		return false;
 	}
 	
 	/*
 	 * The border cases have been already resolved.
-	 * Now the interval can be only between intervals of the leaf. 
+	 * Now the interval can be only between intervals of the leaf.
 	 */
 	btree_key_t i;
@@ -1962,8 +1892,7 @@
 					 * be removed by updating the size of
 					 * the bigger interval.
-					 *
 					 */
 					leaf->value[i - 1] -= count;
-					return 1;
+					goto success;
 				} else if (page + count * PAGE_SIZE <
 				    left_pg + left_cnt * PAGE_SIZE) {
@@ -1983,8 +1912,9 @@
 					    count * PAGE_SIZE, (void *) new_cnt,
 					    leaf);
-					return 1;
+					goto success;
 				}
 			}
-			return 0;
+			
+			return false;
 		}
 	}
@@ -1993,4 +1923,8 @@
 	panic("Inconsistency detected while removing %zu pages of used "
 	    "space from %p.", count, (void *) page);
+	
+success:
+	area->resident -= count;
+	return true;
 }
 
Index: kernel/generic/src/sysinfo/stats.c
===================================================================
--- kernel/generic/src/sysinfo/stats.c	(revision 1e00216b42ec36d97a169d4cf7053880fcb26aac)
+++ kernel/generic/src/sysinfo/stats.c	(revision e950803ed2d515a08b7d9fa5bb04ba2b26c41c99)
@@ -160,69 +160,17 @@
 static size_t get_task_virtmem(as_t *as)
 {
-	size_t result = 0;
-
 	/*
-	 * We are holding some spinlocks here and therefore are not allowed to
-	 * block. Only attempt to lock the address space and address space area
-	 * mutexes conditionally. If it is not possible to lock either object,
-	 * allow the statistics to be inexact by skipping the respective object.
-	 *
-	 * Note that it may be infinitely better to let the address space
-	 * management code compute these statistics as it proceeds instead of
-	 * having them calculated over and over again here.
+	 * We are holding spinlocks here and therefore are not allowed to
+	 * block. Only attempt to lock the address space and address space
+	 * area mutexes conditionally. If it is not possible to lock either
+	 * object, return inexact statistics by skipping the respective object.
 	 */
-
+	
 	if (SYNCH_FAILED(mutex_trylock(&as->lock)))
-		return result * PAGE_SIZE;
+		return 0;
+	
+	size_t pages = 0;
 	
 	/* Walk the B+ tree and count pages */
-	link_t *cur;
-	for (cur = as->as_area_btree.leaf_head.next;
-	    cur != &as->as_area_btree.leaf_head; cur = cur->next) {
-		btree_node_t *node =
-		    list_get_instance(cur, btree_node_t, leaf_link);
-		
-		unsigned int i;
-		for (i = 0; i < node->keys; i++) {
-			as_area_t *area = node->value[i];
-			
-			if (SYNCH_FAILED(mutex_trylock(&area->lock)))
-				continue;
-			result += area->pages;
-			mutex_unlock(&area->lock);
-		}
-	}
-	
-	mutex_unlock(&as->lock);
-	
-	return result * PAGE_SIZE;
-}
-
-/** Get the resident (used) size of a virtual address space
- *
- * @param as Address space.
- *
- * @return Size of the resident (used) virtual address space (bytes).
- *
- */
-static size_t get_task_resmem(as_t *as)
-{
-	size_t result = 0;
-	
-	/*
-	 * We are holding some spinlocks here and therefore are not allowed to
-	 * block. Only attempt to lock the address space and address space area
-	 * mutexes conditionally. If it is not possible to lock either object,
-	 * allow the statistics to be inexact by skipping the respective object.
-	 *
-	 * Note that it may be infinitely better to let the address space
-	 * management code compute these statistics as it proceeds instead of
-	 * having them calculated over and over again here.
-	 */
-	
-	if (SYNCH_FAILED(mutex_trylock(&as->lock)))
-		return result * PAGE_SIZE;
-	
-	/* Walk the B+ tree of AS areas */
 	link_t *cur;
 	for (cur = as->as_area_btree.leaf_head.next;
@@ -238,16 +186,5 @@
 				continue;
 			
-			/* Walk the B+ tree of resident pages */
-			link_t *rcur;
-			for (rcur = area->used_space.leaf_head.next;
-			    rcur != &area->used_space.leaf_head; rcur = rcur->next) {
-				btree_node_t *rnode =
-				    list_get_instance(rcur, btree_node_t, leaf_link);
-				
-				unsigned int j;
-				for (j = 0; j < rnode->keys; j++)
-					result += (size_t) rnode->value[i];
-			}
-			
+			pages += area->pages;
 			mutex_unlock(&area->lock);
 		}
@@ -256,5 +193,50 @@
 	mutex_unlock(&as->lock);
 	
-	return result * PAGE_SIZE;
+	return (pages << PAGE_WIDTH);
+}
+
+/** Get the resident (used) size of a virtual address space
+ *
+ * @param as Address space.
+ *
+ * @return Size of the resident (used) virtual address space (bytes).
+ *
+ */
+static size_t get_task_resmem(as_t *as)
+{
+	/*
+	 * We are holding spinlocks here and therefore are not allowed to
+	 * block. Only attempt to lock the address space and address space
+	 * area mutexes conditionally. If it is not possible to lock either
+	 * object, return inexact statistics by skipping the respective object.
+	 */
+	
+	if (SYNCH_FAILED(mutex_trylock(&as->lock)))
+		return 0;
+	
+	size_t pages = 0;
+	
+	/* Walk the B+ tree and count pages */
+	link_t *cur;
+	for (cur = as->as_area_btree.leaf_head.next;
+	    cur != &as->as_area_btree.leaf_head; cur = cur->next) {
+		btree_node_t *node =
+		    list_get_instance(cur, btree_node_t, leaf_link);
+		
+		unsigned int i;
+		for (i = 0; i < node->keys; i++) {
+			as_area_t *area = node->value[i];
+			
+			if (SYNCH_FAILED(mutex_trylock(&area->lock)))
+				continue;
+			
+			pages += area->resident;
+			mutex_unlock(&area->lock);
+		}
+	}
+	
+	mutex_unlock(&as->lock);
+	
+	return (pages << PAGE_WIDTH);
 }
 
