Index: kernel/generic/src/adt/avl.c
===================================================================
--- kernel/generic/src/adt/avl.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/adt/avl.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -66,5 +66,5 @@
 {
 	avltree_node_t *p;
-	
+
 	/*
 	 * Iteratively descend to the leaf that can contain the searched key.
@@ -92,5 +92,5 @@
 {
 	avltree_node_t *p = t->root;
-	
+
 	/*
 	 * Check whether the tree is empty.
@@ -98,5 +98,5 @@
 	if (!p)
 		return NULL;
-	
+
 	/*
 	 * Iteratively descend to the leftmost leaf in the tree.
@@ -104,5 +104,5 @@
 	while (p->lft != NULL)
 		p = p->lft;
-	
+
 	return p;
 }
@@ -151,5 +151,5 @@
 #define REBALANCE_INSERT_LR()		REBALANCE_INSERT_XY(lft, rgt, 1)
 #define REBALANCE_INSERT_RL()		REBALANCE_INSERT_XY(rgt, lft, -1)
-	
+
 /** Insert new node into AVL tree.
  *
@@ -172,5 +172,5 @@
 	 */
 	key = newnode->key + t->base;
-	
+
 	/*
 	 * Iteratively descend to the leaf that can contain the new node.
@@ -244,5 +244,5 @@
 		}
 	}
-	
+
 	/*
 	 * To balance the tree, we must check and balance top node.
@@ -260,5 +260,5 @@
 			 */
 			assert(par->balance == 1);
-			
+
 			REBALANCE_INSERT_LR();
 		}
@@ -275,5 +275,5 @@
 			 */
 			assert(par->balance == -1);
-		
+
 			REBALANCE_INSERT_RL();
 		}
@@ -375,5 +375,5 @@
 	assert(t);
 	assert(node);
-	
+
 	if (node->lft == NULL) {
 		if (node->rgt) {
@@ -451,5 +451,5 @@
 		cur->par = node->par;
 	}
-	
+
 	/*
 	 * Repair the parent node's pointer which pointed previously to the
@@ -457,5 +457,5 @@
 	 */
 	(void) repair(t, node, node, cur, NULL, false);
-	
+
 	/*
 	 * Repair cycle which repairs balances of nodes on the way from the
@@ -484,5 +484,5 @@
 					 * RL rotation.
 					 */
-					
+
 					cur = par->lft;
 					par->lft = cur->rgt;
@@ -490,5 +490,5 @@
 					gpa->rgt = cur->lft;
 					cur->lft = gpa;
-					
+
 					/*
 					 * Repair balances and paternity of
@@ -497,5 +497,5 @@
 					 */
 					REBALANCE_DELETE_RL();
-					
+
 					/*
 					 * Repair paternity.
@@ -513,10 +513,10 @@
 					 * RR rotation.
 					 */
-					
+
 					gpa->rgt = par->lft;
 					if (par->lft)
 						par->lft->par = gpa;
 					par->lft = gpa;
-					
+
 					/*
 					 * Repair paternity.
@@ -524,5 +524,5 @@
 					par->par = gpa->par;
 					gpa->par = par;
-					
+
 					if (par->balance == 0) {
 						/*
@@ -575,10 +575,10 @@
 				 */
 				par = gpa->lft;
-				
+
 				if (par->balance == 1) {
 					/*
 					 * LR rotation.
 					 */
-					
+
 					cur = par->rgt;
 					par->rgt = cur->lft;
@@ -586,5 +586,5 @@
 					gpa->lft = cur->rgt;
 					cur->rgt = gpa;
-					
+
 					/*
 					 * Repair balances and paternity of
@@ -619,5 +619,5 @@
 					par->par = gpa->par;
 					gpa->par = par;
-					
+
 					if (par->balance == 0) {
 						/*
@@ -630,5 +630,5 @@
 						par->balance = 1;
 						gpa->balance = -1;
-						
+
 						(void) repair(t, par, gpa, par,
 						    NULL, false);
@@ -637,5 +637,5 @@
 						par->balance = 0;
 						gpa->balance = 0;
-						
+
 						if (!repair(t, par, gpa, par,
 						    &dir, false))
@@ -667,5 +667,5 @@
 {
 	avltree_node_t *node;
-	
+
 	/*
 	 * Start searching for the smallest key in the tree starting in the root
@@ -673,12 +673,12 @@
 	 * must have the smallest key).
 	 */
-	 
+
 	node = t->root;
 	if (!node)
 		return false;
-	
+
 	while (node->lft != NULL)
 		node = node->lft;
-	
+
 	avltree_delete(t, node);
 
Index: kernel/generic/src/adt/bitmap.c
===================================================================
--- kernel/generic/src/adt/bitmap.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/adt/bitmap.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -62,5 +62,5 @@
 	size_t byte = element / BITMAP_ELEMENT;
 	uint8_t mask = 1 << (element & BITMAP_REMAINER);
-	
+
 	return !!((bitmap->bits)[byte] & mask);
 }
@@ -78,8 +78,8 @@
 {
 	size_t size = elements / BITMAP_ELEMENT;
-	
+
 	if ((elements % BITMAP_ELEMENT) != 0)
 		size++;
-	
+
 	return size;
 }
@@ -113,20 +113,20 @@
 {
 	assert(start + count <= bitmap->elements);
-	
+
 	if (count == 0)
 		return;
-	
+
 	size_t start_byte = start / BITMAP_ELEMENT;
 	size_t aligned_start = ALIGN_UP(start, BITMAP_ELEMENT);
-	
+
 	/* Leading unaligned bits */
 	size_t lub = min(aligned_start - start, count);
-	
+
 	/* Aligned middle bits */
 	size_t amb = (count > lub) ? (count - lub) : 0;
-	
+
 	/* Trailing aligned bits */
 	size_t tab = amb % BITMAP_ELEMENT;
-	
+
 	if (start + count < aligned_start) {
 		/* Set bits in the middle of byte. */
@@ -135,5 +135,5 @@
 		return;
 	}
-	
+
 	if (lub) {
 		/* Make sure to set any leading unaligned bits. */
@@ -141,7 +141,7 @@
 		    ~((1 << (BITMAP_ELEMENT - lub)) - 1);
 	}
-	
+
 	size_t i;
-	
+
 	for (i = 0; i < amb / BITMAP_ELEMENT; i++) {
 		/* The middle bits can be set byte by byte. */
@@ -149,5 +149,5 @@
 		    ALL_ONES;
 	}
-	
+
 	if (tab) {
 		/* Make sure to set any trailing aligned bits. */
@@ -167,20 +167,20 @@
 {
 	assert(start + count <= bitmap->elements);
-	
+
 	if (count == 0)
 		return;
-	
+
 	size_t start_byte = start / BITMAP_ELEMENT;
 	size_t aligned_start = ALIGN_UP(start, BITMAP_ELEMENT);
-	
+
 	/* Leading unaligned bits */
 	size_t lub = min(aligned_start - start, count);
-	
+
 	/* Aligned middle bits */
 	size_t amb = (count > lub) ? (count - lub) : 0;
-	
+
 	/* Trailing aligned bits */
 	size_t tab = amb % BITMAP_ELEMENT;
-	
+
 	if (start + count < aligned_start) {
 		/* Set bits in the middle of byte */
@@ -189,5 +189,5 @@
 		return;
 	}
-	
+
 	if (lub) {
 		/* Make sure to clear any leading unaligned bits. */
@@ -195,7 +195,7 @@
 		    (1 << (BITMAP_ELEMENT - lub)) - 1;
 	}
-	
+
 	size_t i;
-	
+
 	for (i = 0; i < amb / BITMAP_ELEMENT; i++) {
 		/* The middle bits can be cleared byte by byte. */
@@ -203,5 +203,5 @@
 		    ALL_ZEROES;
 	}
-	
+
 	if (tab) {
 		/* Make sure to clear any trailing aligned bits. */
@@ -209,5 +209,5 @@
 		    ~((1 << tab) - 1);
 	}
-	
+
 	bitmap->next_fit = start_byte;
 }
@@ -224,10 +224,10 @@
 	assert(count <= dst->elements);
 	assert(count <= src->elements);
-	
+
 	size_t i;
-	
+
 	for (i = 0; i < count / BITMAP_ELEMENT; i++)
 		dst->bits[i] = src->bits[i];
-	
+
 	if (count % BITMAP_ELEMENT) {
 		bitmap_clear_range(dst, i * BITMAP_ELEMENT,
@@ -274,8 +274,8 @@
 	if (count == 0)
 		return false;
-	
+
 	size_t size = bitmap_size(bitmap->elements);
 	size_t next_fit = bitmap->next_fit;
-	
+
 	/*
 	 * Adjust the next-fit value according to the address
@@ -284,30 +284,30 @@
 	if ((prefered > base) && (prefered < base + bitmap->elements)) {
 		size_t prefered_fit = (prefered - base) / BITMAP_ELEMENT;
-		
+
 		if (prefered_fit > next_fit)
 			next_fit = prefered_fit;
 	}
-	
+
 	for (size_t pos = 0; pos < size; pos++) {
 		size_t byte = (next_fit + pos) % size;
-		
+
 		/* Skip if the current byte has all bits set */
 		if (bitmap->bits[byte] == ALL_ONES)
 			continue;
-		
+
 		size_t byte_bit = byte * BITMAP_ELEMENT;
-		
+
 		for (size_t bit = 0; bit < BITMAP_ELEMENT; bit++) {
 			size_t i = byte_bit + bit;
-			
+
 			if (i >= bitmap->elements)
 				break;
-			
+
 			if (!constraint_satisfy(i, base, constraint))
 				continue;
-			
+
 			if (!bitmap_get_fast(bitmap, i)) {
 				size_t continuous = 1;
-				
+
 				for (size_t j = 1; j < count; j++) {
 					if ((i + j < bitmap->elements) &&
@@ -317,5 +317,5 @@
 						break;
 				}
-				
+
 				if (continuous == count) {
 					if (index != NULL) {
@@ -324,5 +324,5 @@
 						*index = i;
 					}
-					
+
 					return true;
 				} else
@@ -331,5 +331,5 @@
 		}
 	}
-	
+
 	return false;
 }
Index: kernel/generic/src/adt/btree.c
===================================================================
--- kernel/generic/src/adt/btree.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/adt/btree.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -83,7 +83,7 @@
 {
 	unsigned int i;
-	
+
 	node->keys = 0;
-	
+
 	/* Clean also space for the extra key. */
 	for (i = 0; i < BTREE_MAX_KEYS + 1; i++) {
@@ -92,8 +92,8 @@
 		node->subtree[i] = NULL;
 	}
-	
+
 	node->subtree[i] = NULL;
 	node->parent = NULL;
-	
+
 	link_initialize(&node->leaf_link);
 	link_initialize(&node->bfs_link);
@@ -122,5 +122,5 @@
 {
 	size_t i;
-	
+
 	if (root->keys) {
 		for (i = 0; i < root->keys + 1; i++) {
@@ -129,5 +129,5 @@
 		}
 	}
-	
+
 	slab_free(btree_node_cache, root);
 }
@@ -156,9 +156,9 @@
 {
 	size_t i;
-	
+
 	for (i = 0; i < node->keys; i++) {
 		if (key < node->key[i]) {
 			size_t j;
-			
+
 			for (j = node->keys; j > i; j--) {
 				node->key[j] = node->key[j - 1];
@@ -166,9 +166,9 @@
 				node->subtree[j + 1] = node->subtree[j];
 			}
-			
+
 			break;
 		}
 	}
-	
+
 	node->key[i] = key;
 	node->value[i] = value;
@@ -191,10 +191,10 @@
 {
 	size_t i;
-	
+
 	for (i = 0; i < node->keys + 1; i++) {
 		if (subtree == node->subtree[i])
 			return i - (int) (right != false);
 	}
-	
+
 	panic("Node %p does not contain subtree %p.", node, subtree);
 }
@@ -215,5 +215,5 @@
 	size_t i;
 	size_t j;
-	
+
 	for (i = 0; i < node->keys; i++) {
 		if (key == node->key[i]) {
@@ -223,12 +223,12 @@
 				node->subtree[j - 1] = node->subtree[j];
 			}
-			
+
 			node->subtree[j - 1] = node->subtree[j];
 			node->keys--;
-			
+
 			return;
 		}
 	}
-	
+
 	panic("Node %p does not contain key %" PRIu64 ".", node, key);
 }
@@ -248,5 +248,5 @@
 {
 	size_t i, j;
-	
+
 	for (i = 0; i < node->keys; i++) {
 		if (key == node->key[i]) {
@@ -256,10 +256,10 @@
 				node->subtree[j] = node->subtree[j + 1];
 			}
-			
+
 			node->keys--;
 			return;
 		}
 	}
-	
+
 	panic("Node %p does not contain key %" PRIu64 ".", node, key);
 }
@@ -280,9 +280,9 @@
 {
 	size_t i;
-	
+
 	for (i = 0; i < node->keys; i++) {
 		if (key < node->key[i]) {
 			size_t j;
-			
+
 			for (j = node->keys; j > i; j--) {
 				node->key[j] = node->key[j - 1];
@@ -290,14 +290,14 @@
 				node->subtree[j + 1] = node->subtree[j];
 			}
-			
+
 			node->subtree[j + 1] = node->subtree[j];
 			break;
 		}
 	}
-	
+
 	node->key[i] = key;
 	node->value[i] = value;
 	node->subtree[i] = lsubtree;
-	
+
 	node->keys++;
 }
@@ -320,8 +320,8 @@
 {
 	btree_key_t key = lnode->key[lnode->keys - 1];
-	
+
 	if (LEAF_NODE(lnode)) {
 		void *value = lnode->value[lnode->keys - 1];
-		
+
 		node_remove_key_and_rsubtree(lnode, key);
 		node_insert_key_and_lsubtree(rnode, key, value, NULL);
@@ -329,9 +329,9 @@
 	} else {
 		btree_node_t *rsubtree = lnode->subtree[lnode->keys];
-		
+
 		node_remove_key_and_rsubtree(lnode, key);
 		node_insert_key_and_lsubtree(rnode, lnode->parent->key[idx], NULL, rsubtree);
 		lnode->parent->key[idx] = key;
-		
+
 		/* Fix parent link of the reconnected right subtree. */
 		rsubtree->parent = rnode;
@@ -356,8 +356,8 @@
 {
 	btree_key_t key = rnode->key[0];
-	
+
 	if (LEAF_NODE(rnode)) {
 		void *value = rnode->value[0];
-		
+
 		node_remove_key_and_lsubtree(rnode, key);
 		node_insert_key_and_rsubtree(lnode, key, value, NULL);
@@ -365,9 +365,9 @@
 	} else {
 		btree_node_t *lsubtree = rnode->subtree[0];
-		
+
 		node_remove_key_and_lsubtree(rnode, key);
 		node_insert_key_and_rsubtree(lnode, rnode->parent->key[idx], NULL, lsubtree);
 		rnode->parent->key[idx] = key;
-		
+
 		/* Fix parent link of the reconnected left subtree. */
 		lsubtree->parent = lnode;
@@ -395,5 +395,5 @@
 	size_t idx;
 	btree_node_t *lnode;
-	
+
 	/*
 	 * If this is root node, the rotation can not be done.
@@ -401,5 +401,5 @@
 	if (ROOT_NODE(node))
 		return false;
-	
+
 	idx = find_key_by_subtree(node->parent, node, true);
 	if ((int) idx == -1) {
@@ -410,5 +410,5 @@
 		return false;
 	}
-	
+
 	lnode = node->parent->subtree[idx];
 	if (lnode->keys < BTREE_MAX_KEYS) {
@@ -420,5 +420,5 @@
 		return true;
 	}
-	
+
 	return false;
 }
@@ -444,5 +444,5 @@
 	size_t idx;
 	btree_node_t *rnode;
-	
+
 	/*
 	 * If this is root node, the rotation can not be done.
@@ -450,5 +450,5 @@
 	if (ROOT_NODE(node))
 		return false;
-	
+
 	idx = find_key_by_subtree(node->parent, node, false);
 	if (idx == node->parent->keys) {
@@ -459,5 +459,5 @@
 		return false;
 	}
-	
+
 	rnode = node->parent->subtree[idx + 1];
 	if (rnode->keys < BTREE_MAX_KEYS) {
@@ -469,5 +469,5 @@
 		return true;
 	}
-	
+
 	return false;
 }
@@ -499,18 +499,18 @@
 	size_t i;
 	size_t j;
-	
+
 	assert(median);
 	assert(node->keys == BTREE_MAX_KEYS);
-	
+
 	/*
 	 * Use the extra space to store the extra node.
 	 */
 	node_insert_key_and_rsubtree(node, key, value, rsubtree);
-	
+
 	/*
 	 * Compute median of keys.
 	 */
 	*median = MEDIAN_HIGH(node);
-	
+
 	/*
 	 * Allocate and initialize new right sibling.
@@ -520,5 +520,5 @@
 	rnode->parent = node->parent;
 	rnode->depth = node->depth;
-	
+
 	/*
 	 * Copy big keys, values and subtree pointers to the new right sibling.
@@ -530,5 +530,5 @@
 		rnode->value[j] = node->value[i];
 		rnode->subtree[j] = node->subtree[i];
-		
+
 		/*
 		 * Fix parent links in subtrees.
@@ -537,12 +537,12 @@
 			rnode->subtree[j]->parent = rnode;
 	}
-	
+
 	rnode->subtree[j] = node->subtree[i];
 	if (rnode->subtree[j])
 		rnode->subtree[j]->parent = rnode;
-	
+
 	rnode->keys = j;  /* Set number of keys of the new node. */
 	node->keys /= 2;  /* Shrink the old node. */
-	
+
 	return rnode;
 }
@@ -578,5 +578,5 @@
 		btree_node_t *rnode;
 		btree_key_t median;
-		
+
 		/*
 		 * Node is full and both siblings (if both exist) are full too.
@@ -584,11 +584,11 @@
 		 * bigger keys (i.e. the new node) into its parent.
 		 */
-		
+
 		rnode = node_split(node, key, value, rsubtree, &median);
-		
+
 		if (LEAF_NODE(node)) {
 			list_insert_after(&rnode->leaf_link, &node->leaf_link);
 		}
-		
+
 		if (ROOT_NODE(node)) {
 			/*
@@ -599,5 +599,5 @@
 			rnode->parent = t->root;
 			node_initialize(t->root);
-			
+
 			/*
 			 * Left-hand side subtree will be the old root (i.e. node).
@@ -605,5 +605,5 @@
 			 */
 			t->root->subtree[0] = node;
-			
+
 			t->root->depth = node->depth + 1;
 		}
@@ -624,7 +624,7 @@
 {
 	btree_node_t *lnode;
-	
+
 	assert(value);
-	
+
 	lnode = leaf_node;
 	if (!lnode) {
@@ -632,5 +632,5 @@
 			panic("B-tree %p already contains key %" PRIu64 ".", t, key);
 	}
-	
+
 	_btree_insert(t, key, value, NULL, lnode);
 }
@@ -648,5 +648,5 @@
 	size_t idx;
 	btree_node_t *lnode;
-	
+
 	/*
 	 * If this is root node, the rotation can not be done.
@@ -654,5 +654,5 @@
 	if (ROOT_NODE(rnode))
 		return false;
-	
+
 	idx = find_key_by_subtree(rnode->parent, rnode, true);
 	if ((int) idx == -1) {
@@ -663,5 +663,5 @@
 		return false;
 	}
-	
+
 	lnode = rnode->parent->subtree[idx];
 	if (lnode->keys > FILL_FACTOR) {
@@ -669,5 +669,5 @@
 		return true;
 	}
-	
+
 	return false;
 }
@@ -685,5 +685,5 @@
 	size_t idx;
 	btree_node_t *rnode;
-	
+
 	/*
 	 * If this is root node, the rotation can not be done.
@@ -691,5 +691,5 @@
 	if (ROOT_NODE(lnode))
 		return false;
-	
+
 	idx = find_key_by_subtree(lnode->parent, lnode, false);
 	if (idx == lnode->parent->keys) {
@@ -700,5 +700,5 @@
 		return false;
 	}
-	
+
 	rnode = lnode->parent->subtree[idx + 1];
 	if (rnode->keys > FILL_FACTOR) {
@@ -706,5 +706,5 @@
 		return true;
 	}
-	
+
 	return false;
 }
@@ -724,7 +724,7 @@
 	btree_node_t *rnode;
 	size_t i;
-	
+
 	assert(!ROOT_NODE(node));
-	
+
 	idx = find_key_by_subtree(node->parent, node, false);
 	if (idx == node->parent->keys) {
@@ -737,14 +737,14 @@
 	} else
 		rnode = node->parent->subtree[idx + 1];
-	
+
 	/* Index nodes need to insert parent node key in between left and right node. */
 	if (INDEX_NODE(node))
 		node->key[node->keys++] = node->parent->key[idx];
-	
+
 	/* Copy the key-value-subtree triplets from the right node. */
 	for (i = 0; i < rnode->keys; i++) {
 		node->key[node->keys + i] = rnode->key[i];
 		node->value[node->keys + i] = rnode->value[i];
-		
+
 		if (INDEX_NODE(node)) {
 			node->subtree[node->keys + i] = rnode->subtree[i];
@@ -752,10 +752,10 @@
 		}
 	}
-	
+
 	if (INDEX_NODE(node)) {
 		node->subtree[node->keys + i] = rnode->subtree[i];
 		rnode->subtree[i]->parent = node;
 	}
-	
+
 	node->keys += rnode->keys;
 	return rnode;
@@ -789,8 +789,8 @@
 			node_remove_key_and_rsubtree(node, key);
 		}
-		
+
 		return;
 	}
-	
+
 	if (node->keys <= FILL_FACTOR) {
 		/*
@@ -801,8 +801,8 @@
 			try_rotation_from_right(node);
 	}
-	
+
 	if (node->keys > FILL_FACTOR) {
 		size_t i;
-		
+
 		/*
 		 * The key can be immediately removed.
@@ -813,5 +813,5 @@
 		 */
 		node_remove_key_and_rsubtree(node, key);
-		
+
 		for (i = 0; i < node->parent->keys; i++) {
 			if (node->parent->key[i] == key)
@@ -822,5 +822,5 @@
 		btree_node_t *rnode;
 		btree_node_t *parent;
-		
+
 		/*
 		 * The node is below the fill factor as well as its left and right sibling.
@@ -832,8 +832,8 @@
 		node_remove_key_and_rsubtree(node, key);
 		rnode = node_combine(node);
-		
+
 		if (LEAF_NODE(rnode))
 			list_remove(&rnode->leaf_link);
-		
+
 		idx = find_key_by_subtree(parent, rnode, true);
 		assert((int) idx != -1);
@@ -855,5 +855,5 @@
 {
 	btree_node_t *lnode;
-	
+
 	lnode = leaf_node;
 	if (!lnode) {
@@ -861,5 +861,5 @@
 			panic("B-tree %p does not contain key %" PRIu64 ".", t, key);
 	}
-	
+
 	_btree_remove(t, key, lnode);
 }
@@ -877,5 +877,5 @@
 {
 	btree_node_t *cur, *next;
-	
+
 	/*
 	 * Iteratively descend to the leaf that can contain the searched key.
@@ -887,5 +887,5 @@
 		 */
 		*leaf_node = cur;
-		
+
 		if (cur->keys == 0)
 			return NULL;
@@ -901,5 +901,5 @@
 			void *val;
 			size_t i;
-			
+
 			/*
 			 * Now if the key is smaller than cur->key[i]
@@ -911,12 +911,12 @@
 					next = cur->subtree[i];
 					val = cur->value[i - 1];
-					
+
 					if (LEAF_NODE(cur))
 						return key == cur->key[i - 1] ? val : NULL;
-					
+
 					goto descend;
 				}
 			}
-			
+
 			/*
 			 * Last possibility is that the key is
@@ -925,5 +925,5 @@
 			next = cur->subtree[i];
 			val = cur->value[i - 1];
-			
+
 			if (LEAF_NODE(cur))
 				return key == cur->key[i - 1] ? val : NULL;
@@ -932,5 +932,5 @@
 		;
 	}
-	
+
 	/*
 	 * The key was not found in the *leaf_node and
@@ -952,5 +952,5 @@
 {
 	assert(LEAF_NODE(node));
-	
+
 	if (node->leaf_link.prev != &t->leaf_list.head)
 		return list_get_instance(node->leaf_link.prev, btree_node_t, leaf_link);
@@ -971,5 +971,5 @@
 {
 	assert(LEAF_NODE(node));
-	
+
 	if (node->leaf_link.next != &t->leaf_list.head)
 		return list_get_instance(node->leaf_link.next, btree_node_t, leaf_link);
@@ -988,9 +988,9 @@
 	int depth = t->root->depth;
 	list_t list;
-	
+
 	printf("Printing B-tree:\n");
 	list_initialize(&list);
 	list_append(&t->root->bfs_link, &list);
-	
+
 	/*
 	 * Use BFS search to print out the tree.
@@ -1000,19 +1000,19 @@
 		link_t *hlp;
 		btree_node_t *node;
-		
+
 		hlp = list_first(&list);
 		assert(hlp != NULL);
 		node = list_get_instance(hlp, btree_node_t, bfs_link);
 		list_remove(hlp);
-		
+
 		assert(node);
-		
+
 		if (node->depth != depth) {
 			printf("\n");
 			depth = node->depth;
 		}
-		
+
 		printf("(");
-		
+
 		for (i = 0; i < node->keys; i++) {
 			printf("%" PRIu64 "%s", node->key[i], i < node->keys - 1 ? "," : "");
@@ -1021,25 +1021,25 @@
 			}
 		}
-		
+
 		if (node->depth && node->subtree[i])
 			list_append(&node->subtree[i]->bfs_link, &list);
-		
+
 		printf(")");
 	}
-	
+
 	printf("\n");
-	
+
 	printf("Printing list of leaves:\n");
 	list_foreach(t->leaf_list, leaf_link, btree_node_t, node) {
 		assert(node);
-		
+
 		printf("(");
-		
+
 		for (i = 0; i < node->keys; i++)
 			printf("%" PRIu64 "%s", node->key[i], i < node->keys - 1 ? "," : "");
-		
+
 		printf(")");
 	}
-	
+
 	printf("\n");
 }
Index: kernel/generic/src/adt/cht.c
===================================================================
--- kernel/generic/src/adt/cht.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/adt/cht.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -531,13 +531,13 @@
 	if (!op || !op->hash || !op->key_hash || !op->equal || !op->key_equal)
 		return false;
-	
+
 	size_t min_order = size_to_order(min_size, CHT_MIN_ORDER);
 	size_t order = size_to_order(init_size, min_order);
-	
+
 	h->b = alloc_buckets(order, false, can_block);
-	
+
 	if (!h->b)
 		return false;
-	
+
 	h->max_load = (max_load == 0) ? CHT_MAX_LOAD : max_load;
 	h->min_order = min_order;
@@ -546,9 +546,9 @@
 	atomic_set(&h->item_cnt, 0);
 	atomic_set(&h->resize_reqs, 0);
-	
+
 	if (NULL == op->remove_callback) {
 		h->op->remove_callback = dummy_remove_callback;
 	}
-	
+
 	/*
 	 * Cached item hashes are stored in item->rcu_link.func. Once the item
@@ -556,8 +556,8 @@
 	 */
 	h->invalid_hash = (uintptr_t)h->op->remove_callback;
-	
+
 	/* Ensure the initialization takes place before we start using the table. */
 	write_barrier();
-	
+
 	return true;
 }
@@ -581,18 +581,18 @@
 		sizeof(cht_buckets_t) + (bucket_cnt - 1) * sizeof(marked_ptr_t);
 	cht_buckets_t *b = malloc(bytes, can_block ? 0 : FRAME_ATOMIC);
-	
+
 	if (!b)
 		return NULL;
-	
+
 	b->order = order;
-	
+
 	marked_ptr_t head_link = set_invalid
 		? make_link(&sentinel, N_INVALID)
 		: make_link(&sentinel, N_NORMAL);
-	
+
 	for (size_t i = 0; i < bucket_cnt; ++i) {
 		b->head[i] = head_link;
 	}
-	
+
 	return b;
 }
@@ -607,8 +607,8 @@
 		if (bucket_cnt <= ((size_t)1 << order))
 			return order;
-		
+
 		++order;
 	} while (order < CHT_MAX_ORDER);
-	
+
 	return order;
 }
@@ -623,5 +623,5 @@
 {
 	cht_destroy_unsafe(h);
-	
+
 	/* You must clear the table of items. Otherwise cht_destroy will leak. */
 	assert(atomic_get(&h->item_cnt) == 0);
@@ -635,8 +635,8 @@
 		rcu_barrier();
 	}
-	
+
 	/* Wait for all remove_callback()s to complete. */
 	rcu_barrier();
-	
+
 	free(h->b);
 	h->b = NULL;
@@ -688,7 +688,7 @@
 	/* See docs to cht_find() and cht_find_lazy(). */
 	assert(rcu_read_locked());
-	
+
 	size_t hash = calc_key_hash(h, key);
-	
+
 	cht_buckets_t *b = rcu_access(h->b);
 	size_t idx = calc_bucket_idx(hash, b->order);
@@ -698,9 +698,9 @@
 	 */
 	marked_ptr_t head = b->head[idx];
-	
+
 	/* Undergoing a resize - take the slow path. */
 	if (N_INVALID == get_mark(head))
 		return find_resizing(h, key, hash, head, idx);
-	
+
 	return search_bucket(h, head, key, hash);
 }
@@ -734,5 +734,5 @@
 	assert(rcu_read_locked());
 	assert(item);
-	
+
 	return find_duplicate(h, item, calc_node_hash(h, item), get_next(item->link));
 }
@@ -758,5 +758,5 @@
 		prev = cur->link;
 	} while (node_hash(h, cur) < search_hash);
-	
+
 	/*
 	 * Only search for an item with an equal key if cur is not the sentinel
@@ -768,9 +768,9 @@
 				return cur;
 		}
-		
+
 		cur = get_next(cur->link);
 		assert(cur);
 	}
-	
+
 	/*
 	 * In the unlikely case that we have encountered a node whose cached
@@ -782,5 +782,5 @@
 		goto try_again;
 	}
-	
+
 	return NULL;
 }
@@ -792,9 +792,9 @@
 	assert(N_INVALID == get_mark(old_head));
 	assert(h->new_b);
-	
+
 	size_t new_idx = calc_bucket_idx(hash, h->new_b->order);
 	marked_ptr_t new_head = h->new_b->head[new_idx];
 	marked_ptr_t search_head = new_head;
-	
+
 	/* Growing. */
 	if (h->b->order < h->new_b->order) {
@@ -818,7 +818,7 @@
 				new_head = h->new_b->head[grow_idx(old_idx)];
 			}
-			
+
 			/* new_head is now the moved bucket, either valid or invalid. */
-			
+
 			/*
 			 * The old bucket was definitely moved to new_head but the
@@ -845,13 +845,13 @@
 			}
 		}
-		
+
 		return search_bucket(h, search_head, key, hash);
 	} else if (h->b->order > h->new_b->order) {
 		/* Shrinking. */
-		
+
 		/* Index of the bucket in the old table that was moved. */
 		size_t move_src_idx = grow_idx(new_idx);
 		marked_ptr_t moved_old_head = h->b->head[move_src_idx];
-		
+
 		/*
 		 * h->b->head[move_src_idx] had already been moved to new_head
@@ -883,7 +883,7 @@
 			search_head = moved_old_head;
 		}
-		
+
 		cht_link_t *ret = search_bucket(h, search_head, key, hash);
-		
+
 		if (ret)
 			return ret;
@@ -906,5 +906,5 @@
 			return search_bucket(h, old_head, key, hash);
 		}
-		
+
 		return NULL;
 	} else {
@@ -979,16 +979,16 @@
 	bool resizing = false;
 	bool inserted = false;
-	
+
 	do {
 		walk_mode_t walk_mode = WM_NORMAL;
 		bool join_finishing;
-		
+
 		resizing = resizing || (N_NORMAL != get_mark(*phead));
-		
+
 		/* The table is resizing. Get the correct bucket head. */
 		if (resizing) {
 			upd_resizing_head(h, hash, &phead, &join_finishing, &walk_mode);
 		}
-		
+
 		wnd_t wnd = {
 			.ppred = phead,
@@ -996,18 +996,18 @@
 			.last = NULL
 		};
-		
+
 		if (!find_wnd_and_gc(h, hash, walk_mode, &wnd, &resizing)) {
 			/* Could not GC a node; or detected an unexpected resize. */
 			continue;
 		}
-		
+
 		if (dup_item && has_duplicate(h, item, hash, wnd.cur, dup_item)) {
 			rcu_read_unlock();
 			return false;
 		}
-		
+
 		inserted = insert_at(item, &wnd, walk_mode, &resizing);
 	} while (!inserted);
-	
+
 	rcu_read_unlock();
 
@@ -1032,13 +1032,13 @@
 {
 	marked_ptr_t ret;
-	
+
 	if (walk_mode == WM_NORMAL) {
 		item->link = make_link(wnd->cur, N_NORMAL);
 		/* Initialize the item before adding it to a bucket. */
 		memory_barrier();
-		
+
 		/* Link a clean/normal predecessor to the item. */
 		ret = cas_link(wnd->ppred, wnd->cur, N_NORMAL, item, N_NORMAL);
-		
+
 		if (ret == make_link(wnd->cur, N_NORMAL)) {
 			return true;
@@ -1054,8 +1054,8 @@
 		/* Initialize the item before adding it to a bucket. */
 		memory_barrier();
-		
+
 		/* Link the not-deleted predecessor to the item. Move its JF mark. */
 		ret = cas_link(wnd->ppred, wnd->cur, jf_mark, item, N_NORMAL);
-		
+
 		return ret == make_link(wnd->cur, jf_mark);
 	} else {
@@ -1065,5 +1065,5 @@
 		/* Initialize the item before adding it to a bucket. */
 		memory_barrier();
-		
+
 		mark_t pred_mark = get_mark(*wnd->ppred);
 		/* If the predecessor is a join node it may be marked deleted.*/
@@ -1090,5 +1090,5 @@
 	assert(cur == &sentinel || hash <= node_hash(h, cur)
 		|| node_hash(h, cur) == h->invalid_hash);
-	
+
 	/* hash < node_hash(h, cur) */
 	if (hash != node_hash(h, cur) && h->invalid_hash != node_hash(h, cur))
@@ -1101,5 +1101,5 @@
 	 */
 	read_barrier();
-	
+
 	*dup_item = find_duplicate(h, item, hash, cur);
 	return NULL != *dup_item;
@@ -1113,5 +1113,5 @@
 
 	cht_link_t *cur = start;
-	
+
 try_again:
 	assert(cur);
@@ -1119,11 +1119,11 @@
 	while (node_hash(h, cur) == hash) {
 		assert(cur != &sentinel);
-		
+
 		bool deleted = (N_DELETED & get_mark(cur->link));
-		
+
 		/* Skip logically deleted nodes. */
 		if (!deleted && h->op->equal(item, cur))
 			return cur;
-		
+
 		cur = get_next(cur->link);
 		assert(cur);
@@ -1135,5 +1135,5 @@
 		goto try_again;
 	}
-	
+
 	return NULL;
 }
@@ -1143,11 +1143,11 @@
 {
 	assert(h);
-	
+
 	size_t hash = calc_key_hash(h, key);
 	size_t removed = 0;
-	
+
 	while (remove_pred(h, hash, h->op->key_equal, key))
 		++removed;
-	
+
 	return removed;
 }
@@ -1181,24 +1181,24 @@
 {
 	rcu_read_lock();
-	
+
 	bool resizing = false;
 	bool deleted = false;
 	bool deleted_but_gc = false;
-	
+
 	cht_buckets_t *b = rcu_access(h->b);
 	size_t idx = calc_bucket_idx(hash, b->order);
 	marked_ptr_t *phead = &b->head[idx];
-	
+
 	do {
 		walk_mode_t walk_mode = WM_NORMAL;
 		bool join_finishing = false;
-		
+
 		resizing = resizing || (N_NORMAL != get_mark(*phead));
-		
+
 		/* The table is resizing. Get the correct bucket head. */
 		if (resizing) {
 			upd_resizing_head(h, hash, &phead, &join_finishing, &walk_mode);
 		}
-		
+
 		wnd_t wnd = {
 			.ppred = phead,
@@ -1206,5 +1206,5 @@
 			.last = NULL
 		};
-		
+
 		if (!find_wnd_and_gc_pred(
 			h, hash, walk_mode, pred, pred_arg, &wnd, &resizing)) {
@@ -1212,5 +1212,5 @@
 			continue;
 		}
-		
+
 		/*
 		 * The item lookup is affected by a bucket join but effects of
@@ -1225,19 +1225,19 @@
 			continue;
 		}
-		
+
 		/* Already deleted, but delete_at() requested one GC pass. */
 		if (deleted_but_gc)
 			break;
-		
+
 		bool found = (wnd.cur != &sentinel && pred(pred_arg, wnd.cur));
-		
+
 		if (!found) {
 			rcu_read_unlock();
 			return false;
 		}
-		
+
 		deleted = delete_at(h, &wnd, walk_mode, &deleted_but_gc, &resizing);
 	} while (!deleted || deleted_but_gc);
-	
+
 	rcu_read_unlock();
 	return true;
@@ -1263,20 +1263,20 @@
 {
 	assert(wnd->cur && wnd->cur != &sentinel);
-	
+
 	*deleted_but_gc = false;
-	
+
 	if (!mark_deleted(wnd->cur, walk_mode, resizing)) {
 		/* Already deleted, or unexpectedly marked as JOIN/JOIN_FOLLOWS. */
 		return false;
 	}
-	
+
 	/* Marked deleted. Unlink from the bucket. */
-	
+
 	/* Never unlink join nodes. */
 	if (walk_mode == WM_LEAVE_JOIN && (N_JOIN & get_mark(wnd->cur->link)))
 		return true;
-	
+
 	cas_order_barrier();
-	
+
 	if (unlink_from_pred(wnd, walk_mode, resizing)) {
 		free_later(h, wnd->cur);
@@ -1284,5 +1284,5 @@
 		*deleted_but_gc = true;
 	}
-	
+
 	return true;
 }
@@ -1293,16 +1293,16 @@
 {
 	assert(cur && cur != &sentinel);
-	
+
 	/*
 	 * Btw, we could loop here if the cas fails but let's not complicate
 	 * things and let's retry from the head of the bucket.
 	 */
-	
+
 	cht_link_t *next = get_next(cur->link);
-	
+
 	if (walk_mode == WM_NORMAL) {
 		/* Only mark clean/normal nodes - JF/JN is used only during resize. */
 		marked_ptr_t ret = cas_link(&cur->link, next, N_NORMAL, next, N_DELETED);
-		
+
 		if (ret != make_link(next, N_NORMAL)) {
 			*resizing = (N_JOIN | N_JOIN_FOLLOWS) & get_mark(ret);
@@ -1311,15 +1311,15 @@
 	} else {
 		static_assert(N_JOIN == N_JOIN_FOLLOWS, "");
-		
+
 		/* Keep the N_JOIN/N_JOIN_FOLLOWS mark but strip N_DELETED. */
 		mark_t cur_mark = get_mark(cur->link) & N_JOIN_FOLLOWS;
-		
+
 		marked_ptr_t ret =
 			cas_link(&cur->link, next, cur_mark, next, cur_mark | N_DELETED);
-		
+
 		if (ret != make_link(next, cur_mark))
 			return false;
 	}
-	
+
 	return true;
 }
@@ -1331,7 +1331,7 @@
 	assert(wnd->cur != &sentinel);
 	assert(wnd->cur && (N_DELETED & get_mark(wnd->cur->link)));
-	
+
 	cht_link_t *next = get_next(wnd->cur->link);
-		
+
 	if (walk_mode == WM_LEAVE_JOIN) {
 		/* Never try to unlink join nodes. */
@@ -1341,8 +1341,8 @@
 		/* Succeed only if the predecessor is clean/normal or a join node. */
 		mark_t exp_pred_mark = (N_JOIN & pred_mark) ? pred_mark : N_NORMAL;
-		
+
 		marked_ptr_t pred_link = make_link(wnd->cur, exp_pred_mark);
 		marked_ptr_t next_link = make_link(next, exp_pred_mark);
-		
+
 		if (pred_link != _cas_link(wnd->ppred, pred_link, next_link))
 			return false;
@@ -1351,12 +1351,12 @@
 		/* Move the JF mark if set. Clear DEL mark. */
 		mark_t cur_mark = N_JOIN_FOLLOWS & get_mark(wnd->cur->link);
-		
+
 		/* The predecessor must be clean/normal. */
 		marked_ptr_t pred_link = make_link(wnd->cur, N_NORMAL);
 		/* Link to cur's successor keeping/copying cur's JF mark. */
 		marked_ptr_t next_link = make_link(next, cur_mark);
-		
+
 		marked_ptr_t ret = _cas_link(wnd->ppred, pred_link, next_link);
-		
+
 		if (pred_link != ret) {
 			/* If we're not resizing the table there are no JF/JN nodes. */
@@ -1366,5 +1366,5 @@
 		}
 	}
-	
+
 	return true;
 }
@@ -1399,8 +1399,8 @@
 {
 	assert(wnd->cur);
-	
+
 	if (wnd->cur == &sentinel)
 		return true;
-	
+
 	/*
 	 * A read barrier is not needed here to bring up the most recent
@@ -1408,13 +1408,13 @@
 	 * an already deleted node; fail in delete_at(); and retry.
 	 */
-	
+
 	size_t cur_hash;
 
 try_again:
 	cur_hash = node_hash(h, wnd->cur);
-		
+
 	while (cur_hash <= hash) {
 		assert(wnd->cur && wnd->cur != &sentinel);
-		
+
 		/* GC any deleted nodes on the way. */
 		if (N_DELETED & get_mark(wnd->cur->link)) {
@@ -1427,11 +1427,11 @@
 			if (cur_hash == hash && pred(pred_arg, wnd->cur))
 				return true;
-			
+
 			next_wnd(wnd);
 		}
-		
+
 		cur_hash = node_hash(h, wnd->cur);
 	}
-	
+
 	if (cur_hash == h->invalid_hash) {
 		next_wnd(wnd);
@@ -1439,5 +1439,5 @@
 		goto try_again;
 	}
-	
+
 	/* The searched for node is not in the current bucket. */
 	return true;
@@ -1481,8 +1481,8 @@
 			next_wnd(wnd);
 		}
-		
+
 		assert(wnd->cur);
 	}
-	
+
 	if (node_hash(h, wnd->cur) == h->invalid_hash) {
 		next_wnd(wnd);
@@ -1520,5 +1520,5 @@
 		wnd->cur = get_next(wnd->cur->link);
 	}
-	
+
 	return true;
 }
@@ -1539,5 +1539,5 @@
 	 * is visible and if not, make it visible to this cpu.
 	 */
-	
+
 	/*
 	 * Resizer ensures h->b->order stays the same for the duration of this
@@ -1548,21 +1548,21 @@
 	assert(h->b->order > h->new_b->order);
 	assert(wnd->cur);
-	
+
 	/* Either we did not need the joining link or we have already followed it.*/
 	if (wnd->cur != &sentinel)
 		return true;
-	
+
 	/* We have reached the end of a bucket. */
-	
+
 	if (wnd->last != &sentinel) {
 		size_t last_seen_hash = node_hash(h, wnd->last);
-		
+
 		if (last_seen_hash == h->invalid_hash) {
 			last_seen_hash = calc_node_hash(h, wnd->last);
 		}
-		
+
 		size_t last_old_idx = calc_bucket_idx(last_seen_hash, h->b->order);
 		size_t move_src_idx = grow_idx(shrink_idx(last_old_idx));
-		
+
 		/*
 		 * Last node seen was in the joining bucket - if the searched
@@ -1572,5 +1572,5 @@
 			return true;
 	}
-	
+
 	/*
 	 * Reached the end of the bucket but no nodes from the joining bucket
@@ -1603,8 +1603,8 @@
 	size_t old_idx = calc_bucket_idx(hash, b->order);
 	size_t new_idx = calc_bucket_idx(hash, h->new_b->order);
-	
+
 	marked_ptr_t *pold_head = &b->head[old_idx];
 	marked_ptr_t *pnew_head = &h->new_b->head[new_idx];
-	
+
 	/* In any case, use the bucket in the new table. */
 	*phead = pnew_head;
@@ -1614,8 +1614,8 @@
 		size_t move_dest_idx = grow_idx(old_idx);
 		marked_ptr_t *pmoved_head = &h->new_b->head[move_dest_idx];
-		
+
 		/* Complete moving the bucket from the old to the new table. */
 		help_head_move(pold_head, pmoved_head);
-		
+
 		/* The hash belongs to the moved bucket. */
 		if (move_dest_idx == new_idx) {
@@ -1634,5 +1634,5 @@
 			 * half of the split/old/moved bucket.
 			 */
-			
+
 			/* The moved bucket has not yet been split. */
 			if (N_NORMAL != get_mark(*pnew_head)) {
@@ -1645,12 +1645,12 @@
 				assert(N_NORMAL == get_mark(*pnew_head));
 			}
-			
+
 			*walk_mode = WM_LEAVE_JOIN;
 		}
 	} else if (h->new_b->order < b->order ) {
 		/* Shrinking the table. */
-		
+
 		size_t move_src_idx = grow_idx(new_idx);
-		
+
 		/*
 		 * Complete moving the bucket from the old to the new table.
@@ -1658,5 +1658,5 @@
 		 */
 		help_head_move(&b->head[move_src_idx], pnew_head);
-		
+
 		/* Hash belongs to the bucket to be joined with the moved bucket. */
 		if (move_src_idx != old_idx) {
@@ -1666,5 +1666,5 @@
 				join_buckets(h, pold_head, pnew_head, split_hash);
 			}
-			
+
 			/*
 			 * The resizer sets pold_head to &sentinel when all cpus are
@@ -1673,5 +1673,5 @@
 			*join_finishing = (&sentinel != get_next(*pold_head));
 		}
-		
+
 		/* move_head() or join_buckets() makes it so or makes the mark visible.*/
 		assert(N_INVALID == get_mark(*pold_head));
@@ -1713,5 +1713,5 @@
 	/* Head move has to be in progress already when calling this func. */
 	assert(N_CONST & get_mark(*psrc_head));
-	
+
 	/* Head already moved. */
 	if (N_INVALID == get_mark(*psrc_head)) {
@@ -1724,5 +1724,5 @@
 		complete_head_move(psrc_head, pdest_head);
 	}
-	
+
 	assert(!(N_CONST & get_mark(*pdest_head)));
 }
@@ -1742,10 +1742,10 @@
 {
 	marked_ptr_t ret, src_link;
-	
+
 	/* Mark src head immutable. */
 	do {
 		cht_link_t *next = get_next(*psrc_head);
 		src_link = make_link(next, N_NORMAL);
-		
+
 		/* Mark the normal/clean src link immutable/const. */
 		ret = cas_link(psrc_head, next, N_NORMAL, next, N_CONST);
@@ -1758,5 +1758,5 @@
 	assert(N_JOIN_FOLLOWS != get_mark(*psrc_head));
 	assert(N_CONST & get_mark(*psrc_head));
-	
+
 	cht_link_t *next = get_next(*psrc_head);
 
@@ -1765,5 +1765,5 @@
 	assert(ret == make_link(&sentinel, N_INVALID) || (N_NORMAL == get_mark(ret)));
 	cas_order_barrier();
-	
+
 	DBG(ret = )
 		cas_link(psrc_head, next, N_CONST, next, N_INVALID);
@@ -1791,5 +1791,5 @@
 	if (N_NORMAL == get_mark(*pdest_head))
 		return;
-	
+
 	/*
 	 * L == Last node of the first part of the split bucket. That part
@@ -1836,11 +1836,11 @@
 	 */
 	wnd_t wnd;
-	
+
 	rcu_read_lock();
-	
+
 	/* Mark the last node of the first part of the split bucket as JF. */
 	mark_join_follows(h, psrc_head, split_hash, &wnd);
 	cas_order_barrier();
-	
+
 	/* There are nodes in the dest bucket, ie the second part of the split. */
 	if (wnd.cur != &sentinel) {
@@ -1857,5 +1857,5 @@
 		 */
 	}
-	
+
 	/* Link the dest head to the second part of the split. */
 	DBG(marked_ptr_t ret = )
@@ -1863,5 +1863,5 @@
 	assert(ret == make_link(&sentinel, N_INVALID) || (N_NORMAL == get_mark(ret)));
 	cas_order_barrier();
-	
+
 	rcu_read_unlock();
 }
@@ -1888,5 +1888,5 @@
 {
 	/* See comment in split_bucket(). */
-	
+
 	bool done = false;
 
@@ -1895,5 +1895,5 @@
 		wnd->ppred = psrc_head;
 		wnd->cur = get_next(*psrc_head);
-		
+
 		/*
 		 * Find the split window, ie the last node of the first part of
@@ -1903,5 +1903,5 @@
 		if (!find_wnd_and_gc(h, split_hash, WM_MOVE_JOIN_FOLLOWS, wnd, &resizing))
 			continue;
-		
+
 		/* Must not report that the table is resizing if WM_MOVE_JOIN_FOLLOWS.*/
 		assert(!resizing);
@@ -1926,10 +1926,10 @@
 {
 	/* See comment in split_bucket(). */
-	
+
 	bool done;
 	do {
 		cht_link_t *next = get_next(join_node->link);
 		mark_t mark = get_mark(join_node->link);
-		
+
 		/*
 		 * May already be marked as deleted, but it won't be unlinked
@@ -1938,5 +1938,5 @@
 		marked_ptr_t ret
 			= cas_link(&join_node->link, next, mark, next, mark | N_JOIN);
-		
+
 		/* Successfully marked or already marked as a join node. */
 		done = (ret == make_link(next, mark))
@@ -2023,11 +2023,11 @@
 	 *  [src_head | Inv]-----------> [JN] -> ..
 	 */
-	
+
 	rcu_read_lock();
-	
+
 	/* Mark src_head immutable - signals updaters that bucket join started. */
 	mark_const(psrc_head);
 	cas_order_barrier();
-	
+
 	cht_link_t *join_node = get_next(*psrc_head);
 
@@ -2035,14 +2035,14 @@
 		mark_join_node(join_node);
 		cas_order_barrier();
-		
+
 		link_to_join_node(h, pdest_head, join_node, split_hash);
 		cas_order_barrier();
 	}
-	
+
 	DBG(marked_ptr_t ret = )
 		cas_link(psrc_head, join_node, N_CONST, join_node, N_INVALID);
 	assert(ret == make_link(join_node, N_CONST) || (N_INVALID == get_mark(ret)));
 	cas_order_barrier();
-	
+
 	rcu_read_unlock();
 }
@@ -2067,12 +2067,12 @@
 			.cur = get_next(*pdest_head)
 		};
-		
+
 		bool resizing = false;
-		
+
 		if (!find_wnd_and_gc(h, split_hash, WM_LEAVE_JOIN, &wnd, &resizing))
 			continue;
 
 		assert(!resizing);
-		
+
 		if (wnd.cur != &sentinel) {
 			/* Must be from the new appended bucket. */
@@ -2081,9 +2081,9 @@
 			return;
 		}
-		
+
 		/* Reached the tail of pdest_head - link it to the join node. */
 		marked_ptr_t ret =
 			cas_link(wnd.ppred, &sentinel, N_NORMAL, join_node, N_NORMAL);
-		
+
 		done = (ret == make_link(&sentinel, N_NORMAL));
 	} while (!done);
@@ -2097,5 +2097,5 @@
 {
 	assert(item != &sentinel);
-	
+
 	/*
 	 * remove_callback only works as rcu_func_t because rcu_link is the first
@@ -2103,5 +2103,5 @@
 	 */
 	rcu_call(&item->rcu_link, (rcu_func_t)h->op->remove_callback);
-	
+
 	item_removed(h);
 }
@@ -2116,8 +2116,8 @@
 	size_t items = (size_t) atomic_predec(&h->item_cnt);
 	size_t bucket_cnt = (1 << h->b->order);
-	
+
 	bool need_shrink = (items == h->max_load * bucket_cnt / 4);
 	bool missed_shrink = (items == h->max_load * bucket_cnt / 8);
-	
+
 	if ((need_shrink || missed_shrink) && h->b->order > h->min_order) {
 		atomic_count_t resize_reqs = atomic_preinc(&h->resize_reqs);
@@ -2137,8 +2137,8 @@
 	size_t items = (size_t) atomic_preinc(&h->item_cnt);
 	size_t bucket_cnt = (1 << h->b->order);
-	
+
 	bool need_grow = (items == h->max_load * bucket_cnt);
 	bool missed_grow = (items == 2 * h->max_load * bucket_cnt);
-	
+
 	if ((need_grow || missed_grow) && h->b->order < CHT_MAX_ORDER) {
 		atomic_count_t resize_reqs = atomic_preinc(&h->resize_reqs);
@@ -2154,5 +2154,5 @@
 {
 	cht_t *h = member_to_inst(arg, cht_t, resize_work);
-	
+
 #ifdef CONFIG_DEBUG
 	assert(h->b);
@@ -2188,5 +2188,5 @@
 	if (h->b->order >= CHT_MAX_ORDER)
 		return;
-	
+
 	h->new_b = alloc_buckets(h->b->order + 1, true, false);
 
@@ -2198,5 +2198,5 @@
 	rcu_synchronize();
 	size_t old_bucket_cnt = (1 << h->b->order);
-	
+
 	/*
 	 * Give updaters a chance to help out with the resize. Do the minimum
@@ -2206,8 +2206,8 @@
 		start_head_move(&h->b->head[idx]);
 	}
-	
+
 	/* Order start_head_move() wrt complete_head_move(). */
 	cas_order_barrier();
-	
+
 	/* Complete moving heads and split any buckets not yet split by updaters. */
 	for (size_t old_idx = 0; old_idx < old_bucket_cnt; ++old_idx) {
@@ -2226,5 +2226,5 @@
 		split_bucket(h, move_dest_head, split_dest_head, split_hash);
 	}
-	
+
 	/*
 	 * Wait for all updaters to notice the new heads. Once everyone sees
@@ -2233,9 +2233,9 @@
 	 */
 	rcu_synchronize();
-	
+
 	/* Clear the JOIN_FOLLOWS mark and remove the link between the split buckets.*/
 	for (size_t old_idx = 0; old_idx < old_bucket_cnt; ++old_idx) {
 		size_t new_idx = grow_idx(old_idx);
-		
+
 		cleanup_join_follows(h, &h->new_b->head[new_idx]);
 	}
@@ -2246,9 +2246,9 @@
 	 */
 	rcu_synchronize();
-	
+
 	/* Clear the JOIN mark and GC any deleted join nodes. */
 	for (size_t old_idx = 0; old_idx < old_bucket_cnt; ++old_idx) {
 		size_t new_idx = grow_to_split_idx(old_idx);
-		
+
 		cleanup_join_node(h, &h->new_b->head[new_idx]);
 	}
@@ -2256,5 +2256,5 @@
 	/* Wait for everyone to see that the table is clear of any resize marks. */
 	rcu_synchronize();
-	
+
 	cht_buckets_t *old_b = h->b;
 	rcu_assign(h->b, h->new_b);
@@ -2262,7 +2262,7 @@
 	/* Wait for everyone to start using the new table. */
 	rcu_synchronize();
-	
+
 	free(old_b);
-	
+
 	/* Not needed; just for increased readability. */
 	h->new_b = NULL;
@@ -2274,5 +2274,5 @@
 	if (h->b->order <= h->min_order)
 		return;
-	
+
 	h->new_b = alloc_buckets(h->b->order - 1, true, false);
 
@@ -2283,7 +2283,7 @@
 	/* Wait for all readers and updaters to see the initialized new table. */
 	rcu_synchronize();
-	
+
 	size_t old_bucket_cnt = (1 << h->b->order);
-	
+
 	/*
 	 * Give updaters a chance to help out with the resize. Do the minimum
@@ -2292,5 +2292,5 @@
 	for (size_t old_idx = 0; old_idx < old_bucket_cnt; ++old_idx) {
 		size_t new_idx = shrink_idx(old_idx);
-		
+
 		/* This bucket should be moved. */
 		if (grow_idx(new_idx) == old_idx) {
@@ -2300,13 +2300,13 @@
 		}
 	}
-	
+
 	/* Order start_head_move() wrt to complete_head_move(). */
 	cas_order_barrier();
-	
+
 	/* Complete moving heads and join buckets with the moved buckets. */
 	for (size_t old_idx = 0; old_idx < old_bucket_cnt; ++old_idx) {
 		size_t new_idx = shrink_idx(old_idx);
 		size_t move_src_idx = grow_idx(new_idx);
-		
+
 		/* This bucket should be moved. */
 		if (move_src_idx == old_idx) {
@@ -2322,5 +2322,5 @@
 		}
 	}
-	
+
 	/*
 	 * Wait for all updaters to notice the new heads. Once everyone sees
@@ -2329,23 +2329,23 @@
 	 */
 	rcu_synchronize();
-	
+
 	/* Let everyone know joins are complete and fully visible. */
 	for (size_t old_idx = 0; old_idx < old_bucket_cnt; ++old_idx) {
 		size_t move_src_idx = grow_idx(shrink_idx(old_idx));
-	
+
 		/* Set the invalid joinee head to NULL. */
 		if (old_idx != move_src_idx) {
 			assert(N_INVALID == get_mark(h->b->head[old_idx]));
-			
+
 			if (&sentinel != get_next(h->b->head[old_idx]))
 				h->b->head[old_idx] = make_link(&sentinel, N_INVALID);
 		}
 	}
-	
+
 	/* todo comment join node vs reset joinee head*/
 	rcu_synchronize();
 
 	size_t new_bucket_cnt = (1 << h->new_b->order);
-		
+
 	/* Clear the JOIN mark and GC any deleted join nodes. */
 	for (size_t new_idx = 0; new_idx < new_bucket_cnt; ++new_idx) {
@@ -2355,13 +2355,13 @@
 	/* Wait for everyone to see that the table is clear of any resize marks. */
 	rcu_synchronize();
-	
+
 	cht_buckets_t *old_b = h->b;
 	rcu_assign(h->b, h->new_b);
-	
+
 	/* Wait for everyone to start using the new table. */
 	rcu_synchronize();
-	
+
 	free(old_b);
-	
+
 	/* Not needed; just for increased readability. */
 	h->new_b = NULL;
@@ -2374,5 +2374,5 @@
 
 	cht_link_t *cur = get_next(*new_head);
-		
+
 	while (cur != &sentinel) {
 		/* Clear the join node's JN mark - even if it is marked as deleted. */
@@ -2381,8 +2381,8 @@
 			break;
 		}
-		
+
 		cur = get_next(cur->link);
 	}
-	
+
 	rcu_read_unlock();
 }
@@ -2394,7 +2394,7 @@
 	assert(join_node != &sentinel);
 	assert(join_node && (N_JOIN & get_mark(join_node->link)));
-	
+
 	bool done;
-	
+
 	/* Clear the JN mark. */
 	do {
@@ -2411,5 +2411,5 @@
 		assert(ret == jn_link || (get_mark(ret) & N_JOIN));
 	} while (!done);
-	
+
 	if (!(N_DELETED & get_mark(join_node->link)))
 		return;
@@ -2419,17 +2419,17 @@
 	/* Clear the JOIN mark before trying to unlink the deleted join node.*/
 	cas_order_barrier();
-	
+
 	size_t jn_hash = node_hash(h, join_node);
 	do {
 		bool resizing = false;
-		
+
 		wnd_t wnd = {
 			.ppred = new_head,
 			.cur = get_next(*new_head)
 		};
-		
+
 		done = find_wnd_and_gc_pred(h, jn_hash, WM_NORMAL, same_node_pred,
 			join_node, &wnd, &resizing);
-		
+
 		assert(!resizing);
 	} while (!done);
@@ -2440,5 +2440,5 @@
 {
 	assert(new_head);
-	
+
 	rcu_read_lock();
 
@@ -2448,5 +2448,5 @@
 	};
 	marked_ptr_t *cur_link = new_head;
-		
+
 	/*
 	 * Find the non-deleted node with a JF mark and clear the JF mark.
@@ -2461,5 +2461,5 @@
 	while (true) {
 		bool is_jf_node = N_JOIN_FOLLOWS & get_mark(*cur_link);
-		
+
 		/* GC any deleted nodes on the way - even deleted JOIN_FOLLOWS. */
 		if (N_DELETED & get_mark(*cur_link)) {
@@ -2483,5 +2483,5 @@
 				marked_ptr_t ret =
 					cas_link(cur_link, next, N_JOIN_FOLLOWS, &sentinel, N_NORMAL);
-				
+
 				assert(next == &sentinel
 					|| ((N_JOIN | N_JOIN_FOLLOWS) & get_mark(ret)));
@@ -2508,5 +2508,5 @@
 		cur_link = &wnd.cur->link;
 	}
-	
+
 	rcu_read_unlock();
 }
@@ -2561,5 +2561,5 @@
 		|| item->hash == sentinel.hash
 		|| item->hash == calc_node_hash(h, item));
-	
+
 	return item->hash;
 }
@@ -2586,8 +2586,8 @@
 {
 	marked_ptr_t ptr = (marked_ptr_t) next;
-	
+
 	assert(!(ptr & N_MARK_MASK));
 	assert(!((unsigned)mark & ~N_MARK_MASK));
-	
+
 	return ptr | mark;
 }
@@ -2690,5 +2690,5 @@
 	 */
 	void *expected = (void*)cur;
-	
+
 	/*
 	 * Use the acquire-release model, although we could probably
@@ -2698,5 +2698,5 @@
 	__atomic_compare_exchange_n((void**)link, &expected, (void *)new, false,
 		__ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
-	
+
 	return (marked_ptr_t) expected;
 }
Index: kernel/generic/src/adt/hash_table.c
===================================================================
--- kernel/generic/src/adt/hash_table.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/adt/hash_table.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -96,14 +96,14 @@
 	assert(h);
 	assert(op && op->hash && op->key_hash && op->key_equal);
-	
+
 	/* Check for compulsory ops. */
 	if (!op || !op->hash || !op->key_hash || !op->key_equal)
 		return false;
-	
+
 	h->bucket_cnt = round_up_size(init_size);
-	
+
 	if (!alloc_table(h->bucket_cnt, &h->bucket))
 		return false;
-	
+
 	h->max_load = (max_load == 0) ? HT_MAX_LOAD : max_load;
 	h->item_cnt = 0;
@@ -115,5 +115,5 @@
 		h->op->remove_callback = nop_remove_callback;
 	}
-	
+
 	return true;
 }
@@ -128,7 +128,7 @@
 	assert(h && h->bucket);
 	assert(!h->apply_ongoing);
-	
+
 	clear_items(h);
-	
+
 	free(h->bucket);
 
@@ -159,7 +159,7 @@
 	assert(h && h->bucket);
 	assert(!h->apply_ongoing);
-	
+
 	clear_items(h);
-	
+
 	/* Shrink the table to its minimum size if possible. */
 	if (HT_MIN_BUCKETS < h->bucket_cnt) {
@@ -173,15 +173,15 @@
 	if (h->item_cnt == 0)
 		return;
-	
+
 	for (size_t idx = 0; idx < h->bucket_cnt; ++idx) {
 		list_foreach_safe(h->bucket[idx], cur, next) {
 			assert(cur);
 			ht_link_t *cur_link = member_to_inst(cur, ht_link_t, link);
-			
+
 			list_remove(cur);
 			h->op->remove_callback(cur_link);
 		}
 	}
-	
+
 	h->item_cnt = 0;
 }
@@ -197,7 +197,7 @@
 	assert(h && h->bucket);
 	assert(!h->apply_ongoing);
-	
+
 	size_t idx = h->op->hash(item) % h->bucket_cnt;
-	
+
 	list_append(&item->link, &h->bucket[idx]);
 	++h->item_cnt;
@@ -220,7 +220,7 @@
 	assert(h->op && h->op->hash && h->op->equal);
 	assert(!h->apply_ongoing);
-	
+
 	size_t idx = h->op->hash(item) % h->bucket_cnt;
-	
+
 	/* Check for duplicates. */
 	list_foreach(h->bucket[idx], link, ht_link_t, cur_link) {
@@ -232,9 +232,9 @@
 			return false;
 	}
-	
+
 	list_append(&item->link, &h->bucket[idx]);
 	++h->item_cnt;
 	grow_if_needed(h);
-	
+
 	return true;
 }
@@ -251,5 +251,5 @@
 {
 	assert(h && h->bucket);
-	
+
 	size_t idx = h->op->key_hash(key) % h->bucket_cnt;
 
@@ -264,5 +264,5 @@
 		}
 	}
-	
+
 	return NULL;
 }
@@ -305,12 +305,12 @@
 	assert(h && h->bucket);
 	assert(!h->apply_ongoing);
-	
+
 	size_t idx = h->op->key_hash(key) % h->bucket_cnt;
 
 	size_t removed = 0;
-	
+
 	list_foreach_safe(h->bucket[idx], cur, next) {
 		ht_link_t *cur_link = member_to_inst(cur, ht_link_t, link);
-		
+
 		if (h->op->key_equal(key, cur_link)) {
 			++removed;
@@ -322,5 +322,5 @@
 	h->item_cnt -= removed;
 	shrink_if_needed(h);
-	
+
 	return removed;
 }
@@ -352,10 +352,10 @@
 	assert(f);
 	assert(h && h->bucket);
-	
+
 	if (h->item_cnt == 0)
 		return;
-	
+
 	h->apply_ongoing = true;
-	
+
 	for (size_t idx = 0; idx < h->bucket_cnt; ++idx) {
 		list_foreach_safe(h->bucket[idx], cur, next) {
@@ -371,5 +371,5 @@
 out:
 	h->apply_ongoing = false;
-	
+
 	shrink_if_needed(h);
 	grow_if_needed(h);
@@ -380,9 +380,9 @@
 {
 	size_t rounded_size = HT_MIN_BUCKETS;
-	
+
 	while (rounded_size < size) {
 		rounded_size = 2 * rounded_size + 1;
 	}
-	
+
 	return rounded_size;
 }
@@ -392,9 +392,9 @@
 {
 	assert(pbuckets && HT_MIN_BUCKETS <= bucket_cnt);
-		
+
 	list_t *buckets = malloc(bucket_cnt * sizeof(list_t), FRAME_ATOMIC);
 	if (!buckets)
 		return false;
-	
+
 	for (size_t i = 0; i < bucket_cnt; i++)
 		list_initialize(&buckets[i]);
@@ -434,9 +434,9 @@
 	assert(h && h->bucket);
 	assert(HT_MIN_BUCKETS <= new_bucket_cnt);
-	
+
 	/* We are traversing the table and resizing would mess up the buckets. */
 	if (h->apply_ongoing)
 		return;
-	
+
 	list_t *new_buckets;
 
@@ -444,5 +444,5 @@
 	if (!alloc_table(new_bucket_cnt, &new_buckets))
 		return;
-	
+
 	if (0 < h->item_cnt) {
 		/* Rehash all the items to the new table. */
@@ -457,5 +457,5 @@
 		}
 	}
-	
+
 	free(h->bucket);
 	h->bucket = new_buckets;
Index: kernel/generic/src/adt/list.c
===================================================================
--- kernel/generic/src/adt/list.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/adt/list.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -56,5 +56,5 @@
 	bool found = false;
 	link_t *hlp = list->head.next;
-	
+
 	while (hlp != &list->head) {
 		if (hlp == link) {
@@ -64,5 +64,5 @@
 		hlp = hlp->next;
 	}
-	
+
 	return found;
 }
@@ -80,13 +80,13 @@
 	if (list_empty(list))
 		return;
-	
+
 	/* Attach list to destination. */
 	list->head.next->prev = pos;
 	list->head.prev->next = pos->next;
-	
+
 	/* Link destination list to the added list. */
 	pos->next->prev = list->head.prev;
 	pos->next = list->head.next;
-	
+
 	list_initialize(list);
 }
@@ -102,5 +102,5 @@
 {
 	unsigned long count = 0;
-	
+
 	link_t *link = list_first(list);
 	while (link != NULL) {
@@ -108,5 +108,5 @@
 		link = list_next(link, list);
 	}
-	
+
 	return count;
 }
Index: kernel/generic/src/console/chardev.c
===================================================================
--- kernel/generic/src/console/chardev.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/console/chardev.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -68,5 +68,5 @@
 {
 	assert(indev);
-	
+
 	irq_spinlock_lock(&indev->lock, true);
 	if (indev->counter == INDEV_BUFLEN - 1) {
@@ -75,8 +75,8 @@
 		return;
 	}
-	
+
 	indev->counter++;
 	indev->buffer[indev->index++] = ch;
-	
+
 	/* Index modulo size of buffer */
 	indev->index = indev->index % INDEV_BUFLEN;
@@ -102,17 +102,17 @@
 		if (check_poll(indev))
 			return indev->op->poll(indev);
-		
+
 		/* No other way of interacting with user */
 		interrupts_disable();
-		
+
 		if (CPU)
 			printf("cpu%u: ", CPU->id);
 		else
 			printf("cpu: ");
-		
+
 		printf("halted (no polling input)\n");
 		cpu_halt();
 	}
-	
+
 	waitq_sleep(&indev->wq);
 	irq_spinlock_lock(&indev->lock, true);
@@ -121,5 +121,5 @@
 	indev->counter--;
 	irq_spinlock_unlock(&indev->lock, true);
-	
+
 	return ch;
 }
@@ -158,8 +158,8 @@
 	if (indev == NULL)
 		return false;
-	
+
 	if (indev->op == NULL)
 		return false;
-	
+
 	return (indev->op->poll != NULL);
 }
Index: kernel/generic/src/console/cmd.c
===================================================================
--- kernel/generic/src/console/cmd.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/console/cmd.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -686,5 +686,5 @@
 {
 	spinlock_lock(&cmd_lock);
-	
+
 	size_t len = 0;
 	list_foreach(cmd_list, link, cmd_info_t, hlp) {
@@ -694,5 +694,5 @@
 		spinlock_unlock(&hlp->lock);
 	}
-	
+
 	unsigned int _len = (unsigned int) len;
 	if ((_len != len) || (((int) _len) < 0)) {
@@ -700,5 +700,5 @@
 		return 1;
 	}
-	
+
 	list_foreach(cmd_list, link, cmd_info_t, hlp) {
 		spinlock_lock(&hlp->lock);
@@ -706,7 +706,7 @@
 		spinlock_unlock(&hlp->lock);
 	}
-	
+
 	spinlock_unlock(&cmd_lock);
-	
+
 	return 1;
 }
@@ -721,5 +721,5 @@
 {
 	uint8_t *ptr = NULL;
-	
+
 #ifdef IO_SPACE_BOUNDARY
 	if ((void *) argv->intval < IO_SPACE_BOUNDARY)
@@ -729,13 +729,13 @@
 		ptr = (uint8_t *) km_map(argv[0].intval, sizeof(uint8_t),
 		    PAGE_NOT_CACHEABLE);
-	
+
 	const uint8_t val = pio_read_8(ptr);
 	printf("read %" PRIxn ": %" PRIx8 "\n", argv[0].intval, val);
-	
+
 #ifdef IO_SPACE_BOUNDARY
 	if ((void *) argv->intval < IO_SPACE_BOUNDARY)
 		return 1;
 #endif
-	
+
 	km_unmap((uintptr_t) ptr, sizeof(uint8_t));
 	return 1;
@@ -751,5 +751,5 @@
 {
 	uint16_t *ptr = NULL;
-	
+
 #ifdef IO_SPACE_BOUNDARY
 	if ((void *) argv->intval < IO_SPACE_BOUNDARY)
@@ -759,13 +759,13 @@
 		ptr = (uint16_t *) km_map(argv[0].intval, sizeof(uint16_t),
 		    PAGE_NOT_CACHEABLE);
-	
+
 	const uint16_t val = pio_read_16(ptr);
 	printf("read %" PRIxn ": %" PRIx16 "\n", argv[0].intval, val);
-	
+
 #ifdef IO_SPACE_BOUNDARY
 	if ((void *) argv->intval < IO_SPACE_BOUNDARY)
 		return 1;
 #endif
-	
+
 	km_unmap((uintptr_t) ptr, sizeof(uint16_t));
 	return 1;
@@ -781,5 +781,5 @@
 {
 	uint32_t *ptr = NULL;
-	
+
 #ifdef IO_SPACE_BOUNDARY
 	if ((void *) argv->intval < IO_SPACE_BOUNDARY)
@@ -789,13 +789,13 @@
 		ptr = (uint32_t *) km_map(argv[0].intval, sizeof(uint32_t),
 		    PAGE_NOT_CACHEABLE);
-	
+
 	const uint32_t val = pio_read_32(ptr);
 	printf("read %" PRIxn ": %" PRIx32 "\n", argv[0].intval, val);
-	
+
 #ifdef IO_SPACE_BOUNDARY
 	if ((void *) argv->intval < IO_SPACE_BOUNDARY)
 		return 1;
 #endif
-	
+
 	km_unmap((uintptr_t) ptr, sizeof(uint32_t));
 	return 1;
@@ -811,5 +811,5 @@
 {
 	uint8_t *ptr = NULL;
-	
+
 #ifdef IO_SPACE_BOUNDARY
 	if ((void *) argv->intval < IO_SPACE_BOUNDARY)
@@ -819,14 +819,14 @@
 		ptr = (uint8_t *) km_map(argv[0].intval, sizeof(uint8_t),
 		    PAGE_NOT_CACHEABLE);
-	
+
 	printf("write %" PRIxn ": %" PRIx8 "\n", argv[0].intval,
 	    (uint8_t) argv[1].intval);
 	pio_write_8(ptr, (uint8_t) argv[1].intval);
-	
+
 #ifdef IO_SPACE_BOUNDARY
 	if ((void *) argv->intval < IO_SPACE_BOUNDARY)
 		return 1;
 #endif
-	
+
 	km_unmap((uintptr_t) ptr, sizeof(uint8_t));
 	return 1;
@@ -842,5 +842,5 @@
 {
 	uint16_t *ptr = NULL;
-	
+
 #ifdef IO_SPACE_BOUNDARY
 	if ((void *) argv->intval < IO_SPACE_BOUNDARY)
@@ -850,14 +850,14 @@
 		ptr = (uint16_t *) km_map(argv[0].intval, sizeof(uint16_t),
 		    PAGE_NOT_CACHEABLE);
-	
+
 	printf("write %" PRIxn ": %" PRIx16 "\n", argv[0].intval,
 	    (uint16_t) argv[1].intval);
 	pio_write_16(ptr, (uint16_t) argv[1].intval);
-	
+
 #ifdef IO_SPACE_BOUNDARY
 	if ((void *) argv->intval < IO_SPACE_BOUNDARY)
 		return 1;
 #endif
-	
+
 	km_unmap((uintptr_t) ptr, sizeof(uint16_t));
 	return 1;
@@ -873,5 +873,5 @@
 {
 	uint32_t *ptr = NULL;
-	
+
 #ifdef IO_SPACE_BOUNDARY
 	if ((void *) argv->intval < IO_SPACE_BOUNDARY)
@@ -881,14 +881,14 @@
 		ptr = (uint32_t *) km_map(argv[0].intval, sizeof(uint32_t),
 		    PAGE_NOT_CACHEABLE);
-	
+
 	printf("write %" PRIxn ": %" PRIx32 "\n", argv[0].intval,
 	    (uint32_t) argv[1].intval);
 	pio_write_32(ptr, (uint32_t) argv[1].intval);
-	
+
 #ifdef IO_SPACE_BOUNDARY
 	if ((void *) argv->intval < IO_SPACE_BOUNDARY)
 		return 1;
 #endif
-	
+
 	km_unmap((uintptr_t) ptr, sizeof(uint32_t));
 	return 1;
@@ -904,5 +904,5 @@
 {
 	reboot();
-	
+
 	/* Not reached */
 	return 1;
@@ -918,11 +918,11 @@
 {
 	assert(uptime);
-	
+
 	/* This doesn't have to be very accurate */
 	sysarg_t sec = uptime->seconds1;
-	
+
 	printf("Up %" PRIun " days, %" PRIun " hours, %" PRIun " minutes, %" PRIun " seconds\n",
 		sec / 86400, (sec % 86400) / 3600, (sec % 3600) / 60, sec % 60);
-	
+
 	return 1;
 }
@@ -937,8 +937,8 @@
 {
 	spinlock_lock(&cmd_lock);
-	
+
 	list_foreach(cmd_list, link, cmd_info_t, hlp) {
 		spinlock_lock(&hlp->lock);
-		
+
 		if (str_lcmp(hlp->name, (const char *) argv->buffer, str_length(hlp->name)) == 0) {
 			printf("%s - %s\n", hlp->name, hlp->description);
@@ -948,10 +948,10 @@
 			break;
 		}
-		
+
 		spinlock_unlock(&hlp->lock);
 	}
-	
+
 	spinlock_unlock(&cmd_lock);
-	
+
 	return 1;
 }
@@ -961,5 +961,5 @@
 {
 	symtab_print_search((char *) argv->buffer);
-	
+
 	return 1;
 }
@@ -1004,10 +1004,10 @@
 	 * call the function.
 	 */
-	
+
 	unsigned int i;
 	for (i = 0; i < config.cpu_count; i++) {
 		if (!cpus[i].active)
 			continue;
-		
+
 		thread_t *thread;
 		if ((thread = thread_create((void (*)(void *)) cmd_call0,
@@ -1021,5 +1021,5 @@
 			printf("Unable to create thread for cpu%u\n", i);
 	}
-	
+
 	return 1;
 }
@@ -1108,5 +1108,5 @@
 	fncptr_t fptr;
 	errno_t rc;
-	
+
 	symbol = (char *) argv->buffer;
 	rc = symtab_addr_lookup(symbol, &symaddr);
@@ -1183,5 +1183,5 @@
 	bool pointer = false;
 	errno_t rc;
-	
+
 	if (((char *) argv->buffer)[0] == '*') {
 		rc = symtab_addr_lookup((char *) argv->buffer + 1, &addr);
@@ -1195,5 +1195,5 @@
 	} else
 		rc = symtab_addr_lookup((char *) argv->buffer, &addr);
-	
+
 	if (rc == ENOENT)
 		printf("Symbol %s not found.\n", (char *) argv->buffer);
@@ -1210,5 +1210,5 @@
 	} else
 		printf("No symbol information available.\n");
-	
+
 	return 1;
 }
@@ -1252,5 +1252,5 @@
 	else
 		printf("Unknown argument \"%s\".\n", flag_buf);
-	
+
 	return 1;
 }
@@ -1270,5 +1270,5 @@
 	else
 		printf("Unknown argument \"%s\".\n", flag_buf);
-	
+
 	return 1;
 }
@@ -1412,5 +1412,5 @@
 	release_console();
 	indev_pop_character(stdin);
-	
+
 	return 1;
 }
@@ -1420,5 +1420,5 @@
 {
 	printf("%s (%s)\n", test->name, test->desc);
-	
+
 	/* Update and read thread accounting
 	   for benchmarking */
@@ -1427,9 +1427,9 @@
 	task_get_accounting(TASK, &ucycles0, &kcycles0);
 	irq_spinlock_unlock(&TASK->lock, true);
-	
+
 	/* Execute the test */
 	test_quiet = false;
 	const char *ret = test->entry();
-	
+
 	/* Update and read thread accounting */
 	uint64_t ucycles1, kcycles1;
@@ -1437,18 +1437,18 @@
 	task_get_accounting(TASK, &ucycles1, &kcycles1);
 	irq_spinlock_unlock(&TASK->lock, true);
-	
+
 	uint64_t ucycles, kcycles;
 	char usuffix, ksuffix;
 	order_suffix(ucycles1 - ucycles0, &ucycles, &usuffix);
 	order_suffix(kcycles1 - kcycles0, &kcycles, &ksuffix);
-	
+
 	printf("Time: %" PRIu64 "%c user cycles, %" PRIu64 "%c kernel cycles\n",
 	    ucycles, usuffix, kcycles, ksuffix);
-	
+
 	if (ret == NULL) {
 		printf("Test passed\n");
 		return true;
 	}
-	
+
 	printf("%s\n", ret);
 	return false;
@@ -1461,8 +1461,8 @@
 	uint64_t ucycles, kcycles;
 	char usuffix, ksuffix;
-	
+
 	if (cnt < 1)
 		return true;
-	
+
 	uint64_t *data = (uint64_t *) malloc(sizeof(uint64_t) * cnt, 0);
 	if (data == NULL) {
@@ -1470,8 +1470,8 @@
 		return false;
 	}
-	
+
 	for (i = 0; i < cnt; i++) {
 		printf("%s (%u/%u) ... ", test->name, i + 1, cnt);
-		
+
 		/* Update and read thread accounting
 		   for benchmarking */
@@ -1480,9 +1480,9 @@
 		task_get_accounting(TASK, &ucycles0, &kcycles0);
 		irq_spinlock_unlock(&TASK->lock, true);
-		
+
 		/* Execute the test */
 		test_quiet = true;
 		const char *test_ret = test->entry();
-		
+
 		/* Update and read thread accounting */
 		irq_spinlock_lock(&TASK->lock, true);
@@ -1490,5 +1490,5 @@
 		task_get_accounting(TASK, &ucycles1, &kcycles1);
 		irq_spinlock_unlock(&TASK->lock, true);
-		
+
 		if (test_ret != NULL) {
 			printf("%s\n", test_ret);
@@ -1496,5 +1496,5 @@
 			break;
 		}
-		
+
 		data[i] = ucycles1 - ucycles0 + kcycles1 - kcycles0;
 		order_suffix(ucycles1 - ucycles0, &ucycles, &usuffix);
@@ -1503,20 +1503,20 @@
 		    ucycles, usuffix, kcycles, ksuffix);
 	}
-	
+
 	if (ret) {
 		printf("\n");
-		
+
 		uint64_t sum = 0;
-		
+
 		for (i = 0; i < cnt; i++) {
 			sum += data[i];
 		}
-		
+
 		order_suffix(sum / (uint64_t) cnt, &ucycles, &usuffix);
 		printf("Average\t\t%" PRIu64 "%c\n", ucycles, usuffix);
 	}
-	
+
 	free(data);
-	
+
 	return ret;
 }
@@ -1526,10 +1526,10 @@
 	size_t len = 0;
 	test_t *test;
-	
+
 	for (test = tests; test->name != NULL; test++) {
 		if (str_length(test->name) > len)
 			len = str_length(test->name);
 	}
-	
+
 	unsigned int _len = (unsigned int) len;
 	if ((_len != len) || (((int) _len) < 0)) {
@@ -1537,9 +1537,9 @@
 		return;
 	}
-	
+
 	for (test = tests; test->name != NULL; test++)
 		printf("%-*s %s%s\n", _len, test->name, test->desc,
 		    (test->safe ? "" : " (unsafe)"));
-	
+
 	printf("%-*s Run all safe tests\n", _len, "*");
 }
@@ -1555,5 +1555,5 @@
 {
 	test_t *test;
-	
+
 	if (str_cmp((char *) argv->buffer, "*") == 0) {
 		for (test = tests; test->name != NULL; test++) {
@@ -1566,5 +1566,5 @@
 	} else if (str_cmp((char *) argv->buffer, "") != 0) {
 		bool fnd = false;
-		
+
 		for (test = tests; test->name != NULL; test++) {
 			if (str_cmp(test->name, (char *) argv->buffer) == 0) {
@@ -1574,10 +1574,10 @@
 			}
 		}
-		
+
 		if (!fnd)
 			printf("Unknown test\n");
 	} else
 		list_tests();
-	
+
 	return 1;
 }
@@ -1593,5 +1593,5 @@
 	test_t *test;
 	uint32_t cnt = argv[1].intval;
-	
+
 	if (str_cmp((char *) argv->buffer, "*") == 0) {
 		for (test = tests; test->name != NULL; test++) {
@@ -1603,22 +1603,22 @@
 	} else {
 		bool fnd = false;
-		
+
 		for (test = tests; test->name != NULL; test++) {
 			if (str_cmp(test->name, (char *) argv->buffer) == 0) {
 				fnd = true;
-				
+
 				if (test->safe)
 					run_bench(test, cnt);
 				else
 					printf("Unsafe test\n");
-				
+
 				break;
 			}
 		}
-		
+
 		if (!fnd)
 			printf("Unknown test\n");
 	}
-	
+
 	return 1;
 }
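
The cmd_pio_read_*/cmd_pio_write_* hunks above all share one shape: an address below IO_SPACE_BOUNDARY is used directly as a port, anything above it is mapped uncacheable with km_map(), accessed once with pio_read_*/pio_write_*, and unmapped again. A hedged sketch of that control flow; the boundary constant and the map/read stubs below are stand-ins invented so the sketch runs in user space:

#include <stdint.h>
#include <stdio.h>

enum { SKETCH_IO_BOUNDARY = 0x10000 };    /* stand-in for IO_SPACE_BOUNDARY */

static uint8_t backing[256];

static uint8_t *sketch_map(uintptr_t addr)    { return &backing[addr % sizeof(backing)]; }
static void sketch_unmap(uint8_t *ptr)        { (void) ptr; }

static uint8_t sketch_pio_read_8(uintptr_t addr)
{
    /* Addresses above the boundary need a (normally uncacheable) mapping;
     * below it the kernel would pass the address to pio_read_8() directly. */
    int mapped = addr >= SKETCH_IO_BOUNDARY;
    uint8_t *ptr = sketch_map(addr);

    uint8_t val = *ptr;    /* pio_read_8(ptr) in the kernel */

    /* Only a mapped region is unmapped again, mirroring the #ifdef'd early return. */
    if (mapped)
        sketch_unmap(ptr);

    return val;
}

int main(void)
{
    backing[0x34] = 0xab;
    printf("read %x: %x\n", 0x20034u, (unsigned) sketch_pio_read_8(0x20034));
    return 0;
}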
Index: kernel/generic/src/console/console.c
===================================================================
--- kernel/generic/src/console/console.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/console/console.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -119,5 +119,5 @@
 		stdin = &stdin_sink;
 	}
-	
+
 	return stdin;
 }
@@ -143,5 +143,5 @@
 		stdout = &stdout_source;
 	}
-	
+
 	list_append(&outdev->link, &stdout->list);
 }
@@ -189,7 +189,7 @@
 {
 	void *faddr = (void *) KA2PA(kio);
-	
+
 	assert((uintptr_t) faddr % FRAME_SIZE == 0);
-	
+
 	kio_parea.pbase = (uintptr_t) faddr;
 	kio_parea.frames = SIZE2FRAMES(sizeof(kio));
@@ -197,8 +197,8 @@
 	kio_parea.mapped = false;
 	ddi_parea_register(&kio_parea);
-	
+
 	sysinfo_set_item_val("kio.faddr", NULL, (sysarg_t) faddr);
 	sysinfo_set_item_val("kio.pages", NULL, KIO_PAGES);
-	
+
 	event_set_unmask_callback(EVENT_KIO, kio_update);
 	atomic_set(&kio_inited, true);
@@ -209,9 +209,9 @@
 	event_notify_1(EVENT_KCONSOLE, false, true);
 	bool prev = console_override;
-	
+
 	console_override = true;
 	if ((stdout) && (stdout->op->redraw))
 		stdout->op->redraw(stdout);
-	
+
 	if ((stdin) && (!prev)) {
 		/*
@@ -256,5 +256,5 @@
 	size_t count = 0;
 	buf[offset] = 0;
-	
+
 	wchar_t ch;
 	while ((ch = indev_pop_character(indev)) != '\n') {
@@ -265,5 +265,5 @@
 				putchar(' ');
 				putchar('\b');
-				
+
 				count--;
 				offset = str_lsize(buf, count);
@@ -271,5 +271,5 @@
 			}
 		}
-		
+
 		if (chr_encode(ch, buf, &offset, buflen - 1) == EOK) {
 			putchar(ch);
@@ -278,5 +278,5 @@
 		}
 	}
-	
+
 	return count;
 }
@@ -294,7 +294,7 @@
 	if (!atomic_get(&kio_inited))
 		return;
-	
+
 	spinlock_lock(&kio_lock);
-	
+
 	if (kio_uspace > 0) {
 		if (event_notify_3(EVENT_KIO, true, kio_start, kio_len,
@@ -302,5 +302,5 @@
 			kio_uspace = 0;
 	}
-	
+
 	spinlock_unlock(&kio_lock);
 }
@@ -312,5 +312,5 @@
 {
 	bool ordy = ((stdout) && (stdout->op->write));
-	
+
 	if (!ordy)
 		return;
@@ -347,8 +347,8 @@
 	else
 		kio_start = (kio_start + 1) % KIO_LENGTH;
-	
+
 	if (kio_stored < kio_len)
 		kio_stored++;
-	
+
 	/* The character is stored for uspace */
 	if (kio_uspace < kio_len)
@@ -359,12 +359,12 @@
 {
 	bool ordy = ((stdout) && (stdout->op->write));
-	
+
 	spinlock_lock(&kio_lock);
 	kio_push_char(ch);
 	spinlock_unlock(&kio_lock);
-	
+
 	/* Output stored characters */
 	kio_flush();
-	
+
 	if (!ordy) {
 		/*
@@ -380,5 +380,5 @@
 		early_putchar(ch);
 	}
-	
+
 	/* Force notification on newline */
 	if (ch == '\n')
@@ -409,10 +409,10 @@
 	if (size > PAGE_SIZE)
 		return (sys_errno_t) ELIMIT;
-	
+
 	if (size > 0) {
 		data = (char *) malloc(size + 1, 0);
 		if (!data)
 			return (sys_errno_t) ENOMEM;
-		
+
 		rc = copy_from_uspace(data, buf, size);
 		if (rc) {
@@ -421,5 +421,5 @@
 		}
 		data[size] = 0;
-		
+
 		switch (cmd) {
 		case KIO_WRITE:
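
The kio hunks above maintain the kernel's circular log: once the buffer is full, kio_start advances so the oldest character is dropped, while kio_stored tracks how much of the buffer is valid. A small user-space sketch of an overwriting ring of that kind (the 8-slot size and the names are illustrative, and the separate uspace counter is omitted):

#include <stdio.h>
#include <stddef.h>

enum { LOG_LEN = 8 };    /* illustrative; the kernel uses KIO_LENGTH */

static char log_buf[LOG_LEN];
static size_t log_start;     /* index of the oldest stored character */
static size_t log_stored;    /* number of valid characters, at most LOG_LEN */

static void log_push(char ch)
{
    log_buf[(log_start + log_stored) % LOG_LEN] = ch;

    if (log_stored < LOG_LEN)
        log_stored++;                            /* still filling up */
    else
        log_start = (log_start + 1) % LOG_LEN;   /* full: drop the oldest */
}

int main(void)
{
    for (const char *s = "hello, ring buffer"; *s; s++)
        log_push(*s);

    /* Prints the last LOG_LEN characters in order: "g buffer". */
    for (size_t i = 0; i < log_stored; i++)
        putchar(log_buf[(log_start + i) % LOG_LEN]);
    putchar('\n');
    return 0;
}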
Index: kernel/generic/src/console/kconsole.c
===================================================================
--- kernel/generic/src/console/kconsole.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/console/kconsole.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -98,5 +98,5 @@
 {
 	unsigned int i;
-	
+
 	cmd_init();
 	for (i = 0; i < KCONSOLE_HISTORY; i++)
@@ -114,5 +114,5 @@
 {
 	spinlock_lock(&cmd_lock);
-	
+
 	/*
 	 * Make sure the command is not already listed.
@@ -124,5 +124,5 @@
 			return false;
 		}
-		
+
 		/* Avoid deadlock. */
 		if (hlp < cmd) {
@@ -133,5 +133,5 @@
 			spinlock_lock(&hlp->lock);
 		}
-		
+
 		if (str_cmp(hlp->name, cmd->name) == 0) {
 			/* The command is already there. */
@@ -141,14 +141,14 @@
 			return false;
 		}
-		
+
 		spinlock_unlock(&hlp->lock);
 		spinlock_unlock(&cmd->lock);
 	}
-	
+
 	/*
 	 * Now the command can be added.
 	 */
 	list_append(&cmd->link, &cmd_list);
-	
+
 	spinlock_unlock(&cmd_lock);
 	return true;
@@ -168,27 +168,27 @@
 	link_t **startpos = (link_t**) ctx;
 	size_t namelen = str_length(name);
-	
+
 	spinlock_lock(&cmd_lock);
-	
+
 	if (*startpos == NULL)
 		*startpos = cmd_list.head.next;
-	
+
 	for (; *startpos != &cmd_list.head; *startpos = (*startpos)->next) {
 		cmd_info_t *hlp = list_get_instance(*startpos, cmd_info_t, link);
-		
+
 		const char *curname = hlp->name;
 		if (str_length(curname) < namelen)
 			continue;
-		
+
 		if (str_lcmp(curname, name, namelen) == 0) {
 			*startpos = (*startpos)->next;
 			if (h)
 				*h = hlp->description;
-			
+
 			spinlock_unlock(&cmd_lock);
 			return (curname + str_lsize(curname, namelen));
 		}
 	}
-	
+
 	spinlock_unlock(&cmd_lock);
 	return NULL;
@@ -207,7 +207,7 @@
 {
 	const char *name = input;
-	
+
 	size_t found = 0;
-	
+
 	/*
 	 * Maximum Match Length: Length of longest matching common
@@ -223,14 +223,14 @@
 	size_t total_hints_shown = 0;
 	bool continue_showing_hints = true;
-	
+
 	output[0] = 0;
-	
+
 	while ((hint = hints_enum(name, NULL, &pos))) {
 		if ((found == 0) || (str_length(hint) > str_length(output)))
 			str_cpy(output, MAX_CMDLINE, hint);
-		
+
 		found++;
 	}
-	
+
 	/*
 	 * If the number of possible completions is more than MAX_TAB_HINTS,
@@ -242,10 +242,10 @@
 		    console_prompt_display_all_hints(indev, found);
 	}
-	
+
 	if ((found > 1) && (str_length(output) != 0)) {
 		printf("\n");
 		pos = NULL;
 		while ((hint = hints_enum(name, &help, &pos))) {
-			
+
 			if (continue_showing_hints) {
 				if (help)
@@ -253,8 +253,8 @@
 				else
 					printf("%s%s\n", name, hint);
-				
+
 				--hints_to_show;
 				++total_hints_shown;
-				
+
 				if ((hints_to_show == 0) && (total_hints_shown != found)) {
 					/* Ask user to continue */
@@ -263,20 +263,20 @@
 				}
 			}
-			
+
 			for (max_match_len_tmp = 0;
 			    (output[max_match_len_tmp] ==
 			    hint[max_match_len_tmp]) &&
 			    (max_match_len_tmp < max_match_len); ++max_match_len_tmp);
-			
+
 			max_match_len = max_match_len_tmp;
 		}
-		
+
 		/* Keep only the characters common in all completions */
 		output[max_match_len] = 0;
 	}
-	
+
 	if (found > 0)
 		str_cpy(input, size, output);
-	
+
 	free(output);
 	return found;
@@ -288,22 +288,22 @@
 	size_t end;
 	char *tmp;
-	
+
 	while (isspace(cmdline[start]))
 		start++;
-	
+
 	end = start + 1;
-	
+
 	while (!isspace(cmdline[end]))
 		end++;
-	
+
 	tmp = malloc(STR_BOUNDS(end - start + 1), 0);
-	
+
 	wstr_to_str(tmp, end - start + 1, &cmdline[start]);
-	
+
 	spinlock_lock(&cmd_lock);
-	
+
 	list_foreach(cmd_list, link, cmd_info_t, hlp) {
 		spinlock_lock(&hlp->lock);
-		
+
 		if (str_cmp(hlp->name, tmp) == 0) {
 			spinlock_unlock(&hlp->lock);
@@ -312,11 +312,11 @@
 			return hlp;
 		}
-		
+
 		spinlock_unlock(&hlp->lock);
 	}
-	
+
 	free(tmp);
 	spinlock_unlock(&cmd_lock);
-	
+
 	return NULL;
 }
@@ -325,13 +325,13 @@
 {
 	printf("%s> ", prompt);
-	
+
 	size_t position = 0;
 	wchar_t *current = history[history_pos];
 	current[0] = 0;
 	char *tmp = malloc(STR_BOUNDS(MAX_CMDLINE), 0);
-	
+
 	while (true) {
 		wchar_t ch = indev_pop_character(indev);
-		
+
 		if (ch == '\n') {
 			/* Enter */
@@ -339,10 +339,10 @@
 			break;
 		}
-		
+
 		if (ch == '\b') {
 			/* Backspace */
 			if (position == 0)
 				continue;
-			
+
 			if (wstr_remove(current, position - 1)) {
 				position--;
@@ -353,14 +353,14 @@
 			}
 		}
-		
+
 		if (ch == '\t') {
 			/* Tab completion */
-			
+
 			/* Move to the end of the word */
 			for (; (current[position] != 0) && (!isspace(current[position]));
 			    position++)
 				putchar(current[position]);
-			
-			
+
+
 			/*
 			 * Find the beginning of the word
@@ -376,11 +376,11 @@
 				    (beg > 0) && (!isspace(current[beg]));
 				    beg--);
-				
+
 				if (isspace(current[beg]))
 					beg++;
-				
+
 				wstr_to_str(tmp, position - beg + 1, current + beg);
 			}
-			
+
 			/* Count which argument number are we tabbing (narg=0 is cmd) */
 			bool sp = false;
@@ -394,8 +394,8 @@
 					sp = false;
 			}
-			
+
 			if (narg && isspace(current[0]))
 				narg--;
-			
+
 			int found;
 			if (narg == 0) {
@@ -411,5 +411,5 @@
 				    cmd->hints_enum);
 			}
-			
+
 			if (found == 0)
 				continue;
@@ -424,8 +424,8 @@
 				if (!wstr_linsert(current, ch, position + i, MAX_CMDLINE))
 					break;
-				
+
 				i++;
 			}
-			
+
 			if (found > 1) {
 				/* No unique hint, list was printed */
@@ -436,11 +436,11 @@
 				continue;
 			}
-			
+
 			/* We have a hint */
-			
+
 			printf("%ls", current + position);
 			position += str_length(tmp);
 			print_cc('\b', wstr_length(current) - position);
-			
+
 			if (position == wstr_length(current)) {
 				/* Insert a space after the last completed argument */
@@ -452,5 +452,5 @@
 			continue;
 		}
-		
+
 		if (ch == U_LEFT_ARROW) {
 			/* Left */
@@ -461,5 +461,5 @@
 			continue;
 		}
-		
+
 		if (ch == U_RIGHT_ARROW) {
 			/* Right */
@@ -470,5 +470,5 @@
 			continue;
 		}
-		
+
 		if ((ch == U_UP_ARROW) || (ch == U_DOWN_ARROW)) {
 			/* Up, down */
@@ -476,5 +476,5 @@
 			print_cc(' ', wstr_length(current));
 			print_cc('\b', wstr_length(current));
-			
+
 			if (ch == U_UP_ARROW) {
 				/* Up */
@@ -493,5 +493,5 @@
 			continue;
 		}
-		
+
 		if (ch == U_HOME_ARROW) {
 			/* Home */
@@ -500,5 +500,5 @@
 			continue;
 		}
-		
+
 		if (ch == U_END_ARROW) {
 			/* End */
@@ -507,10 +507,10 @@
 			continue;
 		}
-		
+
 		if (ch == U_DELETE) {
 			/* Delete */
 			if (position == wstr_length(current))
 				continue;
-			
+
 			if (wstr_remove(current, position)) {
 				printf("%ls ", current + position);
@@ -519,5 +519,5 @@
 			continue;
 		}
-		
+
 		if (wstr_linsert(current, ch, position, MAX_CMDLINE)) {
 			printf("%ls", current + position);
@@ -526,10 +526,10 @@
 		}
 	}
-	
+
 	if (wstr_length(current) > 0) {
 		history_pos++;
 		history_pos = history_pos % KCONSOLE_HISTORY;
 	}
-	
+
 	free(tmp);
 	return current;
@@ -546,5 +546,5 @@
 	bool isaddr = false;
 	bool isptr = false;
-	
+
 	/* If we get a name, try to find it in symbol table */
 	if (text[0] == '&') {
@@ -557,9 +557,9 @@
 		len--;
 	}
-	
+
 	if ((text[0] < '0') || (text[0] > '9')) {
 		char symname[MAX_SYMBOL_NAME];
 		str_ncpy(symname, MAX_SYMBOL_NAME, text, len + 1);
-		
+
 		uintptr_t symaddr;
 		errno_t rc = symtab_addr_lookup(symname, &symaddr);
@@ -611,5 +611,5 @@
 		}
 	}
-	
+
 	return true;
 }
@@ -635,10 +635,10 @@
 	assert(start != NULL);
 	assert(end != NULL);
-	
+
 	bool found_start = false;
 	size_t offset = *start;
 	size_t prev = *start;
 	wchar_t ch;
-	
+
 	while ((ch = str_decode(cmdline, &offset, size)) != 0) {
 		if (!found_start) {
@@ -651,9 +651,9 @@
 				break;
 		}
-		
+
 		prev = offset;
 	}
 	*end = prev;
-	
+
 	return found_start;
 }
@@ -676,10 +676,10 @@
 	}
 	spinlock_lock(&cmd_lock);
-	
+
 	cmd_info_t *cmd = NULL;
-	
+
 	list_foreach(cmd_list, link, cmd_info_t, hlp) {
 		spinlock_lock(&hlp->lock);
-		
+
 		if (str_lcmp(hlp->name, cmdline + start,
 		    max(str_length(hlp->name),
@@ -688,10 +688,10 @@
 			break;
 		}
-		
+
 		spinlock_unlock(&hlp->lock);
 	}
-	
+
 	spinlock_unlock(&cmd_lock);
-	
+
 	if (!cmd) {
 		/* Unknown command. */
@@ -699,7 +699,7 @@
 		return NULL;
 	}
-	
+
 	/* cmd == hlp is locked */
-	
+
 	/*
 	 * The command line must be further analyzed and
@@ -708,10 +708,10 @@
 	 * structure.
 	 */
-	
+
 	bool error = false;
 	size_t i;
 	for (i = 0; i < cmd->argc; i++) {
 		char *buf;
-		
+
 		start = end;
 		if (!parse_argument(cmdline, size, &start, &end)) {
@@ -721,10 +721,10 @@
 				continue;
 			}
-			
+
 			printf("Too few arguments.\n");
 			spinlock_unlock(&cmd->lock);
 			return NULL;
 		}
-		
+
 		switch (cmd->argv[i].type) {
 		case ARG_TYPE_STRING:
@@ -767,10 +767,10 @@
 		}
 	}
-	
+
 	if (error) {
 		spinlock_unlock(&cmd->lock);
 		return NULL;
 	}
-	
+
 	start = end;
 	if (parse_argument(cmdline, size, &start, &end)) {
@@ -779,5 +779,5 @@
 		return NULL;
 	}
-	
+
 	spinlock_unlock(&cmd->lock);
 	return cmd;
@@ -798,13 +798,13 @@
 		return;
 	}
-	
+
 	if (msg)
 		printf("%s", msg);
-	
+
 	if (kcon)
 		indev_pop_character(stdin);
 	else
 		printf("Type \"exit\" to leave the console.\n");
-	
+
 	char *cmdline = malloc(STR_BOUNDS(MAX_CMDLINE), 0);
 	while (true) {
@@ -813,14 +813,14 @@
 		if (!len)
 			continue;
-		
+
 		wstr_to_str(cmdline, STR_BOUNDS(MAX_CMDLINE), tmp);
-		
+
 		if ((!kcon) && (len == 4) && (str_lcmp(cmdline, "exit", 4) == 0))
 			break;
-		
+
 		cmd_info_t *cmd_info = parse_cmdline(cmdline, STR_BOUNDS(MAX_CMDLINE));
 		if (!cmd_info)
 			continue;
-		
+
 		(void) cmd_info->func(cmd_info->argv);
 	}
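
The completion hunks above trim the suggested text to the longest prefix shared by every hint: each hint is compared character by character against the current candidate, and the match length can only shrink. A standalone sketch of that common-prefix step, with invented hint strings (the kernel seeds the candidate with the longest hint rather than the first, but the folding step is the same idea):

#include <stdio.h>
#include <string.h>

/* Trim 'output' in place to the longest prefix it shares with 'hint'. */
static void common_prefix(char *output, const char *hint)
{
    size_t i = 0;
    while (output[i] != '\0' && output[i] == hint[i])
        i++;
    output[i] = '\0';
}

int main(void)
{
    const char *hints[] = { "thread", "threads", "thr_stack" };
    char output[64];

    strcpy(output, hints[0]);
    for (size_t i = 1; i < sizeof(hints) / sizeof(hints[0]); i++)
        common_prefix(output, hints[i]);

    printf("common prefix: \"%s\"\n", output);    /* prints "thr" */
    return 0;
}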
Index: kernel/generic/src/console/prompt.c
===================================================================
--- kernel/generic/src/console/prompt.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/console/prompt.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -52,15 +52,15 @@
 	assert(indev);
 	assert(hints > 0);
-	
+
 	printf("Display all %zu possibilities? (y or n) ", hints);
-	
+
 	while (true) {
 		wchar_t answer = indev_pop_character(indev);
-		
+
 		if ((answer == 'y') || (answer == 'Y')) {
 			printf("y");
 			return true;
 		}
-		
+
 		if ((answer == 'n') || (answer == 'N')) {
 			printf("n");
@@ -84,5 +84,5 @@
 	assert(indev);
 	assert(display_hints != NULL);
-	
+
 	printf("--More--");
 	while (true) {
@@ -95,5 +95,5 @@
 			break;
 		}
-		
+
 		/* Stop displaying hints? */
 		if ((continue_showing_hints == 'n') ||
@@ -104,5 +104,5 @@
 			break;
 		}
-		
+
 		/* Show one more hint? */
 		if (continue_showing_hints == '\n') {
@@ -111,8 +111,8 @@
 		}
 	}
-	
+
 	/* Delete the --More-- option */
 	printf("\r         \r");
-	
+
 	return *display_hints > 0;
 }
Index: kernel/generic/src/cpu/cpu.c
===================================================================
--- kernel/generic/src/cpu/cpu.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/cpu/cpu.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -63,13 +63,13 @@
 	if (config.cpu_active == 1) {
 #endif /* CONFIG_SMP */
-		
+
 		cpus = (cpu_t *) malloc(sizeof(cpu_t) * config.cpu_count,
 		    FRAME_ATOMIC);
 		if (!cpus)
 			panic("Cannot allocate CPU structures.");
-		
+
 		/* Initialize everything */
 		memsetb(cpus, sizeof(cpu_t) * config.cpu_count, 0);
-		
+
 		size_t i;
 		for (i = 0; i < config.cpu_count; i++) {
@@ -78,10 +78,10 @@
 			if (!stack_phys)
 				panic("Cannot allocate CPU stack.");
-			
+
 			cpus[i].stack = (uint8_t *) PA2KA(stack_phys);
 			cpus[i].id = i;
-			
+
 			irq_spinlock_initialize(&cpus[i].lock, "cpus[].lock");
-			
+
 			for (unsigned int j = 0; j < RQ_COUNT; j++) {
 				irq_spinlock_initialize(&cpus[i].rq[j].lock, "cpus[].rq[].lock");
@@ -89,19 +89,19 @@
 			}
 		}
-		
+
 #ifdef CONFIG_SMP
 	}
 #endif /* CONFIG_SMP */
-	
+
 	CPU = &cpus[config.cpu_active - 1];
-	
+
 	CPU->active = true;
 	CPU->tlb_active = true;
-	
+
 	CPU->idle = false;
 	CPU->last_cycle = get_cycle();
 	CPU->idle_cycles = 0;
 	CPU->busy_cycles = 0;
-	
+
 	cpu_identify();
 	cpu_arch_init();
@@ -113,5 +113,5 @@
 {
 	unsigned int i;
-	
+
 	for (i = 0; i < config.cpu_count; i++) {
 		if (cpus[i].active)
Index: kernel/generic/src/cpu/cpu_mask.c
===================================================================
--- kernel/generic/src/cpu/cpu_mask.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/cpu/cpu_mask.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -55,5 +55,5 @@
 	assert(NULL != cpus);
 	assert(cpu_cnt <= config.cpu_count);
-	
+
 	for (size_t active_word = 0;
 		(active_word + 1) * word_bit_cnt <= cpu_cnt;
@@ -62,5 +62,5 @@
 		cpus->mask[active_word] = -1;
 	}
-	
+
 	size_t remaining_bits = (cpu_cnt % word_bit_cnt);
 	if (0 < remaining_bits) {
@@ -89,7 +89,7 @@
 {
 	assert(cpus);
-	
+
 	size_t word_cnt = cpu_mask_size() / word_size;
-		
+
 	for (size_t word = 0; word < word_cnt; ++word) {
 		cpus->mask[word] = 0;
@@ -102,5 +102,5 @@
 	size_t word = cpu_id / word_bit_cnt;
 	size_t word_pos = cpu_id % word_bit_cnt;
-	
+
 	cpus->mask[word] |= (1U << word_pos);
 }
@@ -111,5 +111,5 @@
 	size_t word = cpu_id / word_bit_cnt;
 	size_t word_pos = cpu_id % word_bit_cnt;
-	
+
 	cpus->mask[word] &= ~(1U << word_pos);
 }
@@ -120,5 +120,5 @@
 	size_t word = cpu_id / word_bit_cnt;
 	size_t word_pos = cpu_id % word_bit_cnt;
-	
+
 	return 0 != (cpus->mask[word] & (1U << word_pos));
 }
@@ -133,5 +133,5 @@
 			return false;
 	}
-	
+
 	return true;
 }
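
cpu_mask.c above is a plain bitset over machine words: a CPU id splits into a word index and a bit position, and set/reset/member are single-bit operations on that word. The same arithmetic in a self-contained sketch, with a fixed 64-id capacity chosen only for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { MAX_IDS = 64 };
enum { WORD_BITS = 8 * sizeof(uint32_t) };

typedef struct {
    uint32_t mask[(MAX_IDS + WORD_BITS - 1) / WORD_BITS];
} idset_t;

static void idset_set(idset_t *s, unsigned id)
{
    s->mask[id / WORD_BITS] |= (1U << (id % WORD_BITS));
}

static void idset_reset(idset_t *s, unsigned id)
{
    s->mask[id / WORD_BITS] &= ~(1U << (id % WORD_BITS));
}

static bool idset_member(const idset_t *s, unsigned id)
{
    return 0 != (s->mask[id / WORD_BITS] & (1U << (id % WORD_BITS)));
}

int main(void)
{
    idset_t cpus = { { 0 } };

    idset_set(&cpus, 3);
    idset_set(&cpus, 33);
    idset_reset(&cpus, 3);

    printf("3: %d, 33: %d\n", idset_member(&cpus, 3), idset_member(&cpus, 33));
    return 0;
}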
Index: kernel/generic/src/ddi/ddi.c
===================================================================
--- kernel/generic/src/ddi/ddi.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/ddi/ddi.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -79,10 +79,10 @@
 {
 	mutex_lock(&parea_lock);
-	
+
 	/*
 	 * We don't check for overlaps here as the kernel is pretty sane.
 	 */
 	btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL);
-	
+
 	mutex_unlock(&parea_lock);
 }
@@ -108,8 +108,8 @@
 {
 	assert(TASK);
-	
+
 	if ((phys % FRAME_SIZE) != 0)
 		return EBADMEM;
-	
+
 	/*
 	 * Unprivileged tasks are only allowed to map pareas
@@ -118,20 +118,20 @@
 	bool priv =
 	    ((perm_get(TASK) & PERM_MEM_MANAGER) == PERM_MEM_MANAGER);
-	
+
 	mem_backend_data_t backend_data;
 	backend_data.base = phys;
 	backend_data.frames = pages;
 	backend_data.anonymous = false;
-	
+
 	/*
 	 * Check if the memory region is explicitly enabled
 	 * for mapping by any parea structure.
 	 */
-	
+
 	mutex_lock(&parea_lock);
 	btree_node_t *nodep;
 	parea_t *parea = (parea_t *) btree_search(&parea_btree,
 	    (btree_key_t) phys, &nodep);
-	
+
 	if ((parea != NULL) && (parea->frames >= pages)) {
 		if ((!priv) && (!parea->unpriv)) {
@@ -139,19 +139,19 @@
 			return EPERM;
 		}
-		
+
 		goto map;
 	}
-	
+
 	parea = NULL;
 	mutex_unlock(&parea_lock);
-	
+
 	/*
 	 * Check if the memory region is part of physical
 	 * memory generally enabled for mapping.
 	 */
-	
+
 	irq_spinlock_lock(&zones.lock, true);
 	size_t znum = find_zone(ADDR2PFN(phys), pages, 0);
-	
+
 	if (znum == (size_t) -1) {
 		/*
@@ -161,11 +161,11 @@
 		 */
 		irq_spinlock_unlock(&zones.lock, true);
-		
+
 		if (!priv)
 			return EPERM;
-		
+
 		goto map;
 	}
-	
+
 	if (zones.info[znum].flags & (ZONE_FIRMWARE | ZONE_RESERVED)) {
 		/*
@@ -174,14 +174,14 @@
 		 */
 		irq_spinlock_unlock(&zones.lock, true);
-		
+
 		if (!priv)
 			return EPERM;
-		
+
 		goto map;
 	}
-	
+
 	irq_spinlock_unlock(&zones.lock, true);
 	return ENOENT;
-	
+
 map:
 	if (!as_area_create(TASK->as, flags, FRAMES2SIZE(pages),
@@ -191,20 +191,20 @@
 		 * We report it using ENOMEM.
 		 */
-		
+
 		if (parea != NULL)
 			mutex_unlock(&parea_lock);
-		
+
 		return ENOMEM;
 	}
-	
+
 	/*
 	 * Mapping is created on-demand during page fault.
 	 */
-	
+
 	if (parea != NULL) {
 		parea->mapped = true;
 		mutex_unlock(&parea_lock);
 	}
-	
+
 	return EOK;
 }
@@ -235,10 +235,10 @@
 	if (rc != EOK)
 		return rc;
-	
+
 	rc = physmem_map(ALIGN_DOWN(phys, FRAME_SIZE), pages, flags, &virt,
 	    bound);
 	if (rc != EOK)
 		return rc;
-	
+
 	rc = copy_to_uspace(virt_ptr, &virt, sizeof(virt));
 	if (rc != EOK) {
@@ -246,5 +246,5 @@
 		return rc;
 	}
-	
+
 	return EOK;
 }
@@ -273,9 +273,9 @@
 	if (!(perms & PERM_IO_MANAGER))
 		return EPERM;
-	
+
 	irq_spinlock_lock(&tasks_lock, true);
-	
+
 	task_t *task = task_find_by_id(id);
-	
+
 	if ((!task) || (!container_check(CONTAINER, task->container))) {
 		/*
@@ -287,5 +287,5 @@
 		return ENOENT;
 	}
-	
+
 	/* Lock the task and release the lock protecting tasks_btree. */
 	irq_spinlock_exchange(&tasks_lock, &task->lock);
@@ -314,9 +314,9 @@
 	if (!(perms & PERM_IO_MANAGER))
 		return EPERM;
-	
+
 	irq_spinlock_lock(&tasks_lock, true);
-	
+
 	task_t *task = task_find_by_id(id);
-	
+
 	if ((!task) || (!container_check(CONTAINER, task->container))) {
 		/*
@@ -328,10 +328,10 @@
 		return ENOENT;
 	}
-	
+
 	/* Lock the task and release the lock protecting tasks_btree. */
 	irq_spinlock_exchange(&tasks_lock, &task->lock);
 	errno_t rc = ddi_iospace_disable_arch(task, ioaddr, size);
 	irq_spinlock_unlock(&task->lock, true);
-	
+
 	return rc;
 }
@@ -350,5 +350,5 @@
 	if (rc != EOK)
 		return (sys_errno_t) rc;
-	
+
 	return (sys_errno_t) iospace_enable((task_id_t) arg.task_id,
 	    (uintptr_t) arg.ioaddr, (size_t) arg.size);
@@ -370,5 +370,5 @@
 {
 	assert(TASK);
-	
+
 	// TODO: implement locking of non-anonymous mapping
 	return page_find_mapping(virt, phys);
@@ -380,5 +380,5 @@
 {
 	assert(TASK);
-	
+
 	size_t frames = SIZE2FRAMES(size);
 	if (frames == 0)
@@ -388,10 +388,10 @@
 	if (*phys == 0)
 		return ENOMEM;
-	
+
 	mem_backend_data_t backend_data;
 	backend_data.base = *phys;
 	backend_data.frames = frames;
 	backend_data.anonymous = true;
-	
+
 	if (!as_area_create(TASK->as, map_flags, size,
 	    AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) {
@@ -399,5 +399,5 @@
 		return ENOMEM;
 	}
-	
+
 	return EOK;
 }
@@ -421,12 +421,12 @@
 		 * Non-anonymous DMA mapping
 		 */
-		
+
 		uintptr_t phys;
 		errno_t rc = dmamem_map((uintptr_t) virt_ptr, size, map_flags,
 		    flags, &phys);
-		
+
 		if (rc != EOK)
 			return rc;
-		
+
 		rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
 		if (rc != EOK) {
@@ -438,5 +438,5 @@
 		 * Anonymous DMA mapping
 		 */
-		
+
 		uintptr_t constraint;
 		errno_t rc = copy_from_uspace(&constraint, phys_ptr,
@@ -444,10 +444,10 @@
 		if (rc != EOK)
 			return rc;
-		
+
 		uintptr_t virt;
 		rc = copy_from_uspace(&virt, virt_ptr, sizeof(virt));
 		if (rc != EOK)
 			return rc;
-		
+
 		uintptr_t phys;
 		rc = dmamem_map_anonymous(size, constraint, map_flags, flags,
@@ -455,5 +455,5 @@
 		if (rc != EOK)
 			return rc;
-		
+
 		rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
 		if (rc != EOK) {
@@ -461,5 +461,5 @@
 			return rc;
 		}
-		
+
 		rc = copy_to_uspace(virt_ptr, &virt, sizeof(virt));
 		if (rc != EOK) {
@@ -468,5 +468,5 @@
 		}
 	}
-	
+
 	return EOK;
 }
Index: kernel/generic/src/ddi/irq.c
===================================================================
--- kernel/generic/src/ddi/irq.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/ddi/irq.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -115,5 +115,5 @@
 	irq_spinlock_initialize(&irq->lock, "irq.lock");
 	irq->inr = -1;
-	
+
 	irq_initialize_arch(irq);
 }
@@ -153,5 +153,5 @@
 	}
 	irq_spinlock_unlock(l, false);
-	
+
 	return NULL;
 }
@@ -179,5 +179,5 @@
 	 * In the usual case the uspace handlers have precedence.
 	 */
-	
+
 	if (console_override) {
 		irq_t *irq = irq_dispatch_and_lock_table(&irq_kernel_hash_table,
@@ -185,14 +185,14 @@
 		if (irq)
 			return irq;
-		
+
 		return irq_dispatch_and_lock_table(&irq_uspace_hash_table,
 		    &irq_uspace_hash_table_lock, inr);
 	}
-	
+
 	irq_t *irq = irq_dispatch_and_lock_table(&irq_uspace_hash_table,
 	    &irq_uspace_hash_table_lock, inr);
 	if (irq)
 		return irq;
-	
+
 	return irq_dispatch_and_lock_table(&irq_kernel_hash_table,
 	    &irq_kernel_hash_table_lock, inr);
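
irq_dispatch_and_lock() above consults two hash tables: normally the user-space table wins and the kernel table is the fallback, and the order flips while console_override is set. A sketch of that precedence with flat arrays standing in for the hash tables (all names and entries below are invented):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
    int inr;
    const char *owner;
} handler_t;

/* Stand-ins for the kernel and uspace IRQ hash tables; inr -1 terminates. */
static handler_t kernel_tbl[] = { { 4, "kernel uart" }, { -1, NULL } };
static handler_t uspace_tbl[] = { { 4, "uspace uart driver" }, { -1, NULL } };

static handler_t *find(handler_t *tbl, int inr)
{
    for (; tbl->inr != -1; tbl++)
        if (tbl->inr == inr)
            return tbl;
    return NULL;
}

static handler_t *dispatch(int inr, bool console_override)
{
    /* With the kernel console active, kernel handlers take precedence. */
    handler_t *hit = console_override ? find(kernel_tbl, inr) : find(uspace_tbl, inr);
    if (hit)
        return hit;
    return console_override ? find(uspace_tbl, inr) : find(kernel_tbl, inr);
}

int main(void)
{
    printf("normal:  %s\n", dispatch(4, false)->owner);
    printf("console: %s\n", dispatch(4, true)->owner);
    return 0;
}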
Index: kernel/generic/src/debug/debug.c
===================================================================
--- kernel/generic/src/debug/debug.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/debug/debug.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -46,8 +46,8 @@
 {
 	const char *fn_sym = symtab_fmt_name_lookup((uintptr_t) fn);
-	
+
 	const char *call_site_sym;
 	uintptr_t call_site_off;
-	
+
 	if (symtab_name_lookup((uintptr_t) call_site, &call_site_sym,
 	    &call_site_off) == EOK)
@@ -61,8 +61,8 @@
 {
 	const char *fn_sym = symtab_fmt_name_lookup((uintptr_t) fn);
-	
+
 	const char *call_site_sym;
 	uintptr_t call_site_off;
-	
+
 	if (symtab_name_lookup((uintptr_t) call_site, &call_site_sym,
 	    &call_site_off) == EOK)
Index: kernel/generic/src/debug/panic.c
===================================================================
--- kernel/generic/src/debug/panic.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/debug/panic.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -49,10 +49,10 @@
 {
 	console_override = true;
-	
+
 	printf("\n%s Kernel panic ", BANNER_LEFT);
 	if (CPU)
 		printf("on cpu%u ", CPU->id);
 	printf("due to ");
-	
+
 	va_list args;
 	va_start(args, fmt);
@@ -91,7 +91,7 @@
 	}
 	va_end(args);
-	
+
 	printf("\n");
-	
+
 	printf("THE=%p: ", THE);
 	if (THE != NULL) {
@@ -99,18 +99,18 @@
 		    " magic=%#" PRIx32 "\n", THE->preemption,
 		    THE->thread, THE->task, THE->cpu, THE->as, THE->magic);
-		
+
 		if (THE->thread != NULL)
 			printf("thread=\"%s\"\n", THE->thread->name);
-		
+
 		if (THE->task != NULL)
 			printf("task=\"%s\"\n", THE->task->name);
 	} else
 		printf("invalid\n");
-	
+
 	if (istate) {
 		istate_decode(istate);
 		printf("\n");
 	}
-	
+
 	stack_trace();
 	halt();
Index: kernel/generic/src/debug/stacktrace.c
===================================================================
--- kernel/generic/src/debug/stacktrace.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/debug/stacktrace.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -48,5 +48,5 @@
 	uintptr_t fp;
 	uintptr_t pc;
-	
+
 	while ((cnt++ < STACK_FRAMES_MAX) &&
 	    (ops->stack_trace_context_validate(ctx))) {
@@ -60,11 +60,11 @@
 		} else
 			printf("%p: %p()\n", (void *) ctx->fp, (void *) ctx->pc);
-		
+
 		if (!ops->return_address_get(ctx, &pc))
 			break;
-		
+
 		if (!ops->frame_pointer_prev(ctx, &fp))
 			break;
-		
+
 		ctx->fp = fp;
 		ctx->pc = pc;
@@ -96,5 +96,5 @@
 		.istate = istate
 	};
-	
+
 	if (istate_from_uspace(istate))
 		stack_trace_ctx(&ust_ops, &ctx);
Index: kernel/generic/src/debug/symtab.c
===================================================================
--- kernel/generic/src/debug/symtab.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/debug/symtab.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -58,10 +58,10 @@
 #ifdef CONFIG_SYMTAB
 	size_t i;
-	
+
 	for (i = 1; symbol_table[i].address_le; i++) {
 		if (addr < uint64_t_le2host(symbol_table[i].address_le))
 			break;
 	}
-	
+
 	if (addr >= uint64_t_le2host(symbol_table[i - 1].address_le)) {
 		*name = symbol_table[i - 1].symbol_name;
@@ -71,8 +71,8 @@
 		return EOK;
 	}
-	
+
 	*name = NULL;
 	return ENOENT;
-	
+
 #else
 	*name = NULL;
@@ -97,5 +97,5 @@
 	const char *name;
 	errno_t rc = symtab_name_lookup(addr, &name, NULL);
-	
+
 	switch (rc) {
 	case EOK:
@@ -121,17 +121,17 @@
 {
 	size_t namelen = str_length(name);
-	
+
 	size_t pos;
 	for (pos = *startpos; symbol_table[pos].address_le; pos++) {
 		const char *curname = symbol_table[pos].symbol_name;
-		
+
 		/* Find a ':' in curname */
 		const char *colon = str_chr(curname, ':');
 		if (colon == NULL)
 			continue;
-		
+
 		if (str_length(curname) < namelen)
 			continue;
-		
+
 		if (str_lcmp(name, curname, namelen) == 0) {
 			*startpos = pos;
@@ -139,5 +139,5 @@
 		}
 	}
-	
+
 	return NULL;
 }
@@ -162,5 +162,5 @@
 	size_t pos = 0;
 	const char *hint;
-	
+
 	while ((hint = symtab_search_one(name, &pos))) {
 		if (str_length(hint) == 0) {
@@ -170,13 +170,13 @@
 		pos++;
 	}
-	
+
 	if (found > 1)
 		return EOVERFLOW;
-	
+
 	if (found < 1)
 		return ENOENT;
-	
+
 	return EOK;
-	
+
 #else
 	return ENOTSUP;
@@ -195,5 +195,5 @@
 		pos++;
 	}
-	
+
 #else
 	printf("No symbol information available.\n");
@@ -208,19 +208,19 @@
 	size_t len = str_length(input);
 	struct symtab_entry **entry = (struct symtab_entry**)ctx;
-	
+
 	if (*entry == NULL)
 		*entry = symbol_table;
-	
+
 	for (; (*entry)->address_le; (*entry)++) {
 		const char *curname = (*entry)->symbol_name;
-		
+
 		/* Find a ':' in curname */
 		const char *colon = str_chr(curname, ':');
 		if (colon == NULL)
 			continue;
-		
+
 		if (str_length(curname) < len)
 			continue;
-		
+
 		if (str_lcmp(input, curname, len) == 0) {
 			(*entry)++;
@@ -230,7 +230,7 @@
 		}
 	}
-	
+
 	return NULL;
-	
+
 #else
 	return NULL;
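
symtab_name_lookup() above scans a table sorted by address until it passes the target, then reports the previous entry and the offset into it. A self-contained sketch of that lookup over a tiny hard-coded table; the addresses and symbol names are made up:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

typedef struct {
    uintptr_t address;
    const char *name;
} sym_t;

/* Sorted by address; a zero address terminates the table. */
static const sym_t table[] = {
    { 0x1000, "kmain" },
    { 0x1400, "scheduler" },
    { 0x2000, "panic" },
    { 0, NULL },
};

/* Name of the symbol containing addr plus the offset into it,
 * or NULL when addr lies before the first symbol. */
static const char *lookup(uintptr_t addr, uintptr_t *offset)
{
    size_t i;
    for (i = 1; table[i].address != 0; i++) {
        if (addr < table[i].address)
            break;
    }

    if (addr >= table[i - 1].address) {
        *offset = addr - table[i - 1].address;
        return table[i - 1].name;
    }

    return NULL;
}

int main(void)
{
    uintptr_t off = 0;
    const char *name = lookup(0x1420, &off);
    printf("%s+0x%lx\n", name ? name : "?", (unsigned long) off);    /* scheduler+0x20 */
    return 0;
}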
Index: kernel/generic/src/interrupt/interrupt.c
===================================================================
--- kernel/generic/src/interrupt/interrupt.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/interrupt/interrupt.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -78,7 +78,7 @@
 	assert(n < IVT_ITEMS);
 #endif
-	
+
 	irq_spinlock_lock(&exctbl_lock, true);
-	
+
 	iroutine_t old = exc_table[n].handler;
 	exc_table[n].handler = handler;
@@ -87,7 +87,7 @@
 	exc_table[n].cycles = 0;
 	exc_table[n].count = 0;
-	
+
 	irq_spinlock_unlock(&exctbl_lock, true);
-	
+
 	return old;
 }
@@ -104,5 +104,5 @@
 	assert(n < IVT_ITEMS);
 #endif
-	
+
 	/* Account user cycles */
 	if (THREAD) {
@@ -111,5 +111,5 @@
 		irq_spinlock_unlock(&THREAD->lock, false);
 	}
-	
+
 	/* Account CPU usage if it woke up from sleep */
 	if (CPU && CPU->idle) {
@@ -121,31 +121,31 @@
 		irq_spinlock_unlock(&CPU->lock, false);
 	}
-	
+
 	uint64_t begin_cycle = get_cycle();
-	
+
 #ifdef CONFIG_UDEBUG
 	if (THREAD)
 		THREAD->udebug.uspace_state = istate;
 #endif
-	
+
 	exc_table[n].handler(n + IVT_FIRST, istate);
-	
+
 #ifdef CONFIG_UDEBUG
 	if (THREAD)
 		THREAD->udebug.uspace_state = NULL;
 #endif
-	
+
 	/* This is a safe place to exit exiting thread */
 	if ((THREAD) && (THREAD->interrupted) && (istate_from_uspace(istate)))
 		thread_exit();
-	
+
 	/* Account exception handling */
 	uint64_t end_cycle = get_cycle();
-	
+
 	irq_spinlock_lock(&exctbl_lock, false);
 	exc_table[n].cycles += end_cycle - begin_cycle;
 	exc_table[n].count++;
 	irq_spinlock_unlock(&exctbl_lock, false);
-	
+
 	/* Do not charge THREAD for exception cycles */
 	if (THREAD) {
@@ -171,12 +171,12 @@
 	    "program counter %p.\n", TASK->name, TASK->taskid,
 	    (void *) istate_get_pc(istate));
-	
+
 	istate_decode(istate);
 	stack_trace_istate(istate);
-	
+
 	printf("Kill message: ");
 	vprintf(fmt, args);
 	printf("\n");
-	
+
 	task_kill_self(true);
 }
@@ -201,5 +201,5 @@
 	if (!istate_from_uspace(istate))
 		return;
-	
+
 	va_list args;
 	va_start(args, fmt);
@@ -236,5 +236,5 @@
 {
 	bool excs_all;
-	
+
 	if (str_cmp(flag_buf, "-a") == 0)
 		excs_all = true;
@@ -245,11 +245,11 @@
 		return 1;
 	}
-	
+
 #if (IVT_ITEMS > 0)
 	unsigned int i;
 	unsigned int rows;
-	
+
 	irq_spinlock_lock(&exctbl_lock, true);
-	
+
 #ifdef __32_BITS__
 	printf("[exc   ] [description       ] [count   ] [cycles  ]"
@@ -257,5 +257,5 @@
 	rows = 1;
 #endif
-	
+
 #ifdef __64_BITS__
 	printf("[exc   ] [description       ] [count   ] [cycles  ]"
@@ -264,31 +264,31 @@
 	rows = 2;
 #endif
-	
+
 	for (i = 0; i < IVT_ITEMS; i++) {
 		if ((!excs_all) && (!exc_table[i].hot))
 			continue;
-		
+
 		uint64_t count;
 		char count_suffix;
-		
+
 		order_suffix(exc_table[i].count, &count, &count_suffix);
-		
+
 		uint64_t cycles;
 		char cycles_suffix;
-		
+
 		order_suffix(exc_table[i].cycles, &cycles, &cycles_suffix);
-		
+
 		const char *symbol =
 		    symtab_fmt_name_lookup((sysarg_t) exc_table[i].handler);
-		
+
 #ifdef __32_BITS__
 		printf("%-8u %-20s %9" PRIu64 "%c %9" PRIu64 "%c %10p %s\n",
 		    i + IVT_FIRST, exc_table[i].name, count, count_suffix,
 		    cycles, cycles_suffix, exc_table[i].handler, symbol);
-		
+
 		PAGING(rows, 1, irq_spinlock_unlock(&exctbl_lock, true),
 		    irq_spinlock_lock(&exctbl_lock, true));
 #endif
-		
+
 #ifdef __64_BITS__
 		printf("%-8u %-20s %9" PRIu64 "%c %9" PRIu64 "%c %18p\n",
@@ -296,17 +296,17 @@
 		    cycles, cycles_suffix, exc_table[i].handler);
 		printf("         %s\n", symbol);
-		
+
 		PAGING(rows, 2, irq_spinlock_unlock(&exctbl_lock, true),
 		    irq_spinlock_lock(&exctbl_lock, true));
 #endif
 	}
-	
+
 	irq_spinlock_unlock(&exctbl_lock, true);
 #else /* (IVT_ITEMS > 0) */
-	
+
 	printf("No exception table%s.\n", excs_all ? " (showing all exceptions)" : "");
-	
+
 #endif /* (IVT_ITEMS > 0) */
-	
+
 	return 1;
 }
@@ -335,12 +335,12 @@
 {
 	(void) exc_undef;
-	
+
 #if (IVT_ITEMS > 0)
 	unsigned int i;
-	
+
 	for (i = 0; i < IVT_ITEMS; i++)
 		exc_register(i, "undef", false, (iroutine_t) exc_undef);
 #endif
-	
+
 #ifdef CONFIG_KCONSOLE
 	cmd_initialize(&exc_info);
Index: kernel/generic/src/ipc/event.c
===================================================================
--- kernel/generic/src/ipc/event.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/ipc/event.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -61,12 +61,12 @@
 {
 	assert(evno < EVENT_TASK_END);
-	
+
 	event_t *event;
-	
+
 	if (evno < EVENT_END)
 		event = &events[(event_type_t) evno];
 	else
 		event = &task->events[(event_task_type_t) evno - EVENT_END];
-	
+
 	return event;
 }
@@ -96,5 +96,5 @@
 	for (unsigned int i = 0; i < EVENT_END; i++) {
 		spinlock_lock(&events[i].lock);
-		
+
 		if (events[i].answerbox == answerbox) {
 			events[i].answerbox = NULL;
@@ -103,5 +103,5 @@
 			events[i].masked = false;
 		}
-		
+
 		spinlock_unlock(&events[i].lock);
 	}
@@ -125,5 +125,5 @@
 {
 	assert(evno < EVENT_END);
-	
+
 	_event_set_unmask_callback(evno2event(evno, NULL), callback);
 }
@@ -134,5 +134,5 @@
 	assert(evno >= (int) EVENT_END);
 	assert(evno < EVENT_TASK_END);
-		
+
 	_event_set_unmask_callback(evno2event(evno, task), callback);
 }
@@ -144,13 +144,13 @@
 
 	spinlock_lock(&event->lock);
-	
+
 	if (event->answerbox != NULL) {
 		if (!event->masked) {
 			call_t *call = ipc_call_alloc(FRAME_ATOMIC);
-			
+
 			if (call) {
 				call->flags |= IPC_CALL_NOTIF;
 				call->priv = ++event->counter;
-				
+
 				IPC_SET_IMETHOD(call->data, event->imethod);
 				IPC_SET_ARG1(call->data, a1);
@@ -159,7 +159,7 @@
 				IPC_SET_ARG4(call->data, a4);
 				IPC_SET_ARG5(call->data, a5);
-				
+
 				call->data.task_id = TASK ? TASK->taskid : 0;
-				
+
 				irq_spinlock_lock(&event->answerbox->irq_lock,
 				    true);
@@ -168,11 +168,11 @@
 				irq_spinlock_unlock(&event->answerbox->irq_lock,
 				    true);
-				
+
 				waitq_wakeup(&event->answerbox->wq,
 				    WAKEUP_FIRST);
-				
+
 				if (mask)
 					event->masked = true;
-				
+
 				res = EOK;
 			} else
@@ -182,5 +182,5 @@
 	} else
 		res = ENOENT;
-	
+
 	spinlock_unlock(&event->lock);
 	return res;
@@ -210,5 +210,5 @@
 {
 	assert(evno < EVENT_END);
-	
+
 	return event_enqueue(evno2event(evno, NULL), mask, a1, a2, a3, a4, a5);
 }
@@ -239,5 +239,5 @@
 	assert(evno >= (int) EVENT_END);
 	assert(evno < EVENT_TASK_END);
-	
+
 	return event_enqueue(evno2event(evno, task), mask, a1, a2, a3, a4, a5);
 }
@@ -259,7 +259,7 @@
 {
 	errno_t res;
-	
-	spinlock_lock(&event->lock);
-	
+
+	spinlock_lock(&event->lock);
+
 	if (event->answerbox == NULL) {
 		event->answerbox = answerbox;
@@ -270,7 +270,7 @@
 	} else
 		res = EEXIST;
-	
-	spinlock_unlock(&event->lock);
-	
+
+	spinlock_unlock(&event->lock);
+
 	return res;
 }
@@ -289,7 +289,7 @@
 {
 	errno_t res;
-	
-	spinlock_lock(&event->lock);
-	
+
+	spinlock_lock(&event->lock);
+
 	if (event->answerbox == answerbox) {
 		event->answerbox = NULL;
@@ -300,7 +300,7 @@
 	} else
 		res = ENOENT;
-	
-	spinlock_unlock(&event->lock);
-	
+
+	spinlock_unlock(&event->lock);
+
 	return res;
 }
@@ -317,5 +317,5 @@
 	event_callback_t callback = event->unmask_callback;
 	spinlock_unlock(&event->lock);
-	
+
 	/*
 	 * Check if there is an unmask callback
@@ -342,5 +342,5 @@
 	if (evno >= EVENT_TASK_END)
 		return ELIMIT;
-	
+
 	return (sys_errno_t) event_subscribe(evno2event(evno, TASK),
 	    (sysarg_t) imethod, &TASK->answerbox);
@@ -361,5 +361,5 @@
 	if (evno >= EVENT_TASK_END)
 		return ELIMIT;
-	
+
 	return (sys_errno_t) event_unsubscribe(evno2event(evno, TASK),
 	    &TASK->answerbox);
@@ -383,5 +383,5 @@
 	if (evno >= EVENT_TASK_END)
 		return ELIMIT;
-	
+
 	event_unmask(evno2event(evno, TASK));
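
evno2event() above maps one flat event number onto two arrays: numbers below EVENT_END pick a global event, numbers up to EVENT_TASK_END pick a per-task slot after subtracting EVENT_END. The same index split in a small sketch; the array sizes and event names are illustrative only:

#include <assert.h>
#include <stdio.h>

enum { GLOBAL_END = 4, TASK_END = 7 };    /* stand-ins for EVENT_END / EVENT_TASK_END */

typedef struct { const char *name; } event_t;
typedef struct { event_t events[TASK_END - GLOBAL_END]; } task_t;

static event_t global_events[GLOBAL_END] = {
    { "kconsole" }, { "kio" }, { "exit" }, { "fault" },
};

static event_t *evno2event(int evno, task_t *task)
{
    assert(evno < TASK_END);

    if (evno < GLOBAL_END)
        return &global_events[evno];

    /* Per-task events live in their own array, re-based at GLOBAL_END. */
    return &task->events[evno - GLOBAL_END];
}

int main(void)
{
    task_t task = { { { "task event 0" }, { "task event 1" }, { "task event 2" } } };

    printf("%s\n", evno2event(1, NULL)->name);     /* global slot: "kio" */
    printf("%s\n", evno2event(5, &task)->name);    /* per-task slot: "task event 1" */
    return 0;
}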
 
Index: kernel/generic/src/ipc/ipc.c
===================================================================
--- kernel/generic/src/ipc/ipc.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/ipc/ipc.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -127,5 +127,5 @@
 	kobject_initialize(kobj, KOBJECT_TYPE_CALL, call, &call_kobject_ops);
 	call->kobject = kobj;
-	
+
 	return call;
 }
@@ -212,8 +212,8 @@
 	answerbox_t *mybox = slab_alloc(answerbox_cache, 0);
 	ipc_answerbox_init(mybox, TASK);
-	
+
 	/* We will receive data in a special box. */
 	request->callerbox = mybox;
-	
+
 	errno_t rc = ipc_call(phone, request);
 	if (rc != EOK) {
@@ -247,5 +247,5 @@
 			ipc_forget_call(request);	/* releases locks */
 			rc = EINTR;
-			
+
 		} else {
 			spinlock_unlock(&TASK->active_calls_lock);
@@ -264,5 +264,5 @@
 	}
 	assert(!answer || request == answer);
-	
+
 	slab_free(answerbox_cache, mybox);
 	return rc;
@@ -305,17 +305,17 @@
 	    &call->sender->answerbox;
 	bool do_lock = ((!selflocked) || (callerbox != &TASK->answerbox));
-	
+
 	call->flags |= IPC_CALL_ANSWERED;
-	
+
 	call->data.task_id = TASK->taskid;
-	
+
 	if (do_lock)
 		irq_spinlock_lock(&callerbox->lock, true);
-	
+
 	list_append(&call->ab_link, &callerbox->answers);
-	
+
 	if (do_lock)
 		irq_spinlock_unlock(&callerbox->lock, true);
-	
+
 	waitq_wakeup(&callerbox->wq, WAKEUP_FIRST);
 }
@@ -333,5 +333,5 @@
 	list_remove(&call->ab_link);
 	irq_spinlock_unlock(&box->lock, true);
-	
+
 	/* Send back answer */
 	_ipc_answer_free_call(call, false);
@@ -395,12 +395,12 @@
 	caller->ipc_info.call_sent++;
 	irq_spinlock_unlock(&caller->lock, true);
-	
+
 	if (!(call->flags & IPC_CALL_FORWARDED))
 		_ipc_call_actions_internal(phone, call, preforget);
-	
+
 	irq_spinlock_lock(&box->lock, true);
 	list_append(&call->ab_link, &box->calls);
 	irq_spinlock_unlock(&box->lock, true);
-	
+
 	waitq_wakeup(&box->wq, WAKEUP_FIRST);
 }
@@ -426,11 +426,11 @@
 				ipc_backsend_err(phone, call, ENOENT);
 		}
-		
+
 		return ENOENT;
 	}
-	
+
 	answerbox_t *box = phone->callee;
 	_ipc_call(phone, box, call, false);
-	
+
 	mutex_unlock(&phone->lock);
 	return 0;
@@ -457,5 +457,5 @@
 		return EINVAL;
 	}
-	
+
 	answerbox_t *box = phone->callee;
 	if (phone->state != IPC_PHONE_SLAMMED) {
@@ -467,5 +467,5 @@
 		/* Drop the answerbox reference */
 		kobject_put(phone->kobject);
-		
+
 		call_t *call = ipc_call_alloc(0);
 		IPC_SET_IMETHOD(call->data, IPC_M_PHONE_HUNGUP);
@@ -474,8 +474,8 @@
 		_ipc_call(phone, box, call, false);
 	}
-	
+
 	phone->state = IPC_PHONE_HUNGUP;
 	mutex_unlock(&phone->lock);
-	
+
 	return EOK;
 }
@@ -504,10 +504,10 @@
 	list_remove(&call->ab_link);
 	irq_spinlock_unlock(&oldbox->lock, true);
-	
+
 	if (mode & IPC_FF_ROUTE_FROM_ME) {
 		call->data.phone = newphone;
 		call->data.task_id = TASK->taskid;
 	}
-	
+
 	return ipc_call(newphone, call);
 }
@@ -536,26 +536,26 @@
 	uint64_t call_cnt = 0;
 	errno_t rc;
-	
+
 restart:
 	rc = waitq_sleep_timeout(&box->wq, usec, flags, NULL);
 	if (rc != EOK)
 		return NULL;
-	
+
 	irq_spinlock_lock(&box->lock, true);
 	if (!list_empty(&box->irq_notifs)) {
 		/* Count received IRQ notification */
 		irq_cnt++;
-		
+
 		irq_spinlock_lock(&box->irq_lock, false);
-		
+
 		request = list_get_instance(list_first(&box->irq_notifs),
 		    call_t, ab_link);
 		list_remove(&request->ab_link);
-		
+
 		irq_spinlock_unlock(&box->irq_lock, false);
 	} else if (!list_empty(&box->answers)) {
 		/* Count received answer */
 		answer_cnt++;
-		
+
 		/* Handle asynchronous answers */
 		request = list_get_instance(list_first(&box->answers),
@@ -566,10 +566,10 @@
 		/* Count received call */
 		call_cnt++;
-		
+
 		/* Handle requests */
 		request = list_get_instance(list_first(&box->calls),
 		    call_t, ab_link);
 		list_remove(&request->ab_link);
-		
+
 		/* Append request to dispatch queue */
 		list_append(&request->ab_link, &box->dispatched_calls);
@@ -579,13 +579,13 @@
 		goto restart;
 	}
-	
+
 	irq_spinlock_pass(&box->lock, &TASK->lock);
-	
+
 	TASK->ipc_info.irq_notif_received += irq_cnt;
 	TASK->ipc_info.answer_received += answer_cnt;
 	TASK->ipc_info.call_received += call_cnt;
-	
+
 	irq_spinlock_unlock(&TASK->lock, true);
-	
+
 	return request;
 }
@@ -602,5 +602,5 @@
 		call_t *call = list_get_instance(list_first(lst), call_t,
 		    ab_link);
-		
+
 		list_remove(&call->ab_link);
 
@@ -631,5 +631,5 @@
 	phone_t *phone;
 	DEADLOCK_PROBE_INIT(p_phonelck);
-	
+
 	/* Disconnect all phones connected to our answerbox */
 restart_phones:
@@ -643,11 +643,11 @@
 			goto restart_phones;
 		}
-		
+
 		/* Disconnect phone */
 		assert(phone->state == IPC_PHONE_CONNECTED);
-		
+
 		list_remove(&phone->link);
 		phone->state = IPC_PHONE_SLAMMED;
-		
+
 		if (notify_box) {
 			task_hold(phone->caller);
@@ -671,13 +671,13 @@
 
 			kobject_put(phone->kobject);
-			
+
 			/* Must start again */
 			goto restart_phones;
 		}
-		
+
 		mutex_unlock(&phone->lock);
 		kobject_put(phone->kobject);
 	}
-	
+
 	irq_spinlock_unlock(&box->lock, true);
 }
@@ -727,5 +727,5 @@
 		return;
 	}
-	
+
 	call = list_get_instance(list_first(&TASK->active_calls), call_t,
 	    ta_link);
@@ -811,5 +811,5 @@
 	if (restart)
 		goto restart;
-	
+
 	call = ipc_wait_for_call(&TASK->answerbox, SYNCH_NO_TIMEOUT,
 	    SYNCH_FLAGS_NONE);
@@ -872,15 +872,15 @@
 	caps_apply_to_kobject_type(TASK, KOBJECT_TYPE_PHONE,
 	    phone_cap_cleanup_cb, NULL);
-	
+
 	/* Unsubscribe from any event notifications. */
 	event_cleanup_answerbox(&TASK->answerbox);
-	
+
 	/* Disconnect all connected IRQs */
 	caps_apply_to_kobject_type(TASK, KOBJECT_TYPE_IRQ, irq_cap_cleanup_cb,
 	    NULL);
-	
+
 	/* Disconnect all phones connected to our regular answerbox */
 	ipc_answerbox_slam_phones(&TASK->answerbox, false);
-	
+
 #ifdef CONFIG_UDEBUG
 	/* Clean up kbox thread and communications */
@@ -891,10 +891,10 @@
 	caps_apply_to_kobject_type(TASK, KOBJECT_TYPE_CALL, call_cap_cleanup_cb,
 	    NULL);
-	
+
 	/* Answer all messages in 'calls' and 'dispatched_calls' queues */
 	ipc_cleanup_call_list(&TASK->answerbox, &TASK->answerbox.calls);
 	ipc_cleanup_call_list(&TASK->answerbox,
 	    &TASK->answerbox.dispatched_calls);
- 	
+
 	ipc_forget_all_active_calls();
 	ipc_wait_for_all_answered_calls();
@@ -921,9 +921,9 @@
 		printf("%10p ", call);
 #endif
-		
+
 #ifdef __64_BITS__
 		printf("%18p ", call);
 #endif
-		
+
 		spinlock_lock(&call->forget_lock);
 
@@ -954,5 +954,5 @@
 		printf("%-11d %7" PRIun " ", cap->handle,
 		    atomic_get(&phone->active_calls));
-		
+
 		switch (phone->state) {
 		case IPC_PHONE_CONNECTING:
@@ -973,5 +973,5 @@
 			break;
 		}
-		
+
 		printf("\n");
 	}
@@ -996,23 +996,23 @@
 	task_hold(task);
 	irq_spinlock_unlock(&tasks_lock, true);
-	
+
 	printf("[phone cap] [calls] [state\n");
-	
+
 	caps_apply_to_kobject_type(task, KOBJECT_TYPE_PHONE,
 	    print_task_phone_cb, NULL);
-	
+
 	irq_spinlock_lock(&task->lock, true);
 	irq_spinlock_lock(&task->answerbox.lock, false);
-	
+
 #ifdef __32_BITS__
 	printf("[call id ] [method] [arg1] [arg2] [arg3] [arg4] [arg5]"
 	    " [flags] [sender\n");
 #endif
-	
+
 #ifdef __64_BITS__
 	printf("[call id         ] [method] [arg1] [arg2] [arg3] [arg4]"
 	    " [arg5] [flags] [sender\n");
 #endif
-	
+
 	printf(" --- incomming calls ---\n");
 	ipc_print_call_list(&task->answerbox.calls);
@@ -1021,5 +1021,5 @@
 	printf(" --- incoming answers ---\n");
 	ipc_print_call_list(&task->answerbox.answers);
-	
+
 	irq_spinlock_unlock(&task->answerbox.lock, false);
 	irq_spinlock_unlock(&task->lock, true);
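
ipc_wait_for_call() above services three queues in a fixed priority order: IRQ notifications first, then answers, then fresh calls, sleeping on the answerbox when all three are empty. A sketch of that priority pick over three throwaway array-backed queues (the queue type below is invented; the kernel uses list_t plus a waitq):

#include <stdio.h>
#include <stddef.h>

enum { QCAP = 8 };

typedef struct {
    const char *items[QCAP];
    size_t head;     /* next item to take */
    size_t count;    /* items currently queued */
} queue_t;

static void push(queue_t *q, const char *s)
{
    q->items[(q->head + q->count++) % QCAP] = s;
}

static const char *pop(queue_t *q)
{
    const char *s = q->items[q->head];
    q->head = (q->head + 1) % QCAP;
    q->count--;
    return s;
}

/* IRQ notifications beat answers, answers beat fresh calls. */
static const char *wait_for_call(queue_t *irq_notifs, queue_t *answers, queue_t *calls)
{
    if (irq_notifs->count)
        return pop(irq_notifs);
    if (answers->count)
        return pop(answers);
    if (calls->count)
        return pop(calls);
    return NULL;    /* the kernel would sleep on the answerbox waitq instead */
}

int main(void)
{
    queue_t irqs = { { NULL } }, answers = { { NULL } }, calls = { { NULL } };

    push(&calls, "call from task 7");
    push(&irqs, "irq 4 notification");

    printf("%s\n", wait_for_call(&irqs, &answers, &calls));    /* the notification wins */
    printf("%s\n", wait_for_call(&irqs, &answers, &calls));    /* then the call */
    return 0;
}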
Index: kernel/generic/src/ipc/ipcrsc.c
===================================================================
--- kernel/generic/src/ipc/ipcrsc.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/ipc/ipcrsc.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -193,5 +193,5 @@
 		    &phone_kobject_ops);
 		phone->kobject = kobject;
-		
+
 		cap_publish(task, handle, kobject);
 
@@ -213,8 +213,8 @@
 	if (!kobj)
 		return;
-	
+
 	assert(kobj->phone);
 	assert(kobj->phone->state == IPC_PHONE_CONNECTING);
-	
+
 	kobject_put(kobj);
 	cap_free(TASK, handle);
@@ -232,7 +232,7 @@
 	if (!phone_obj)
 		return false;
-	
+
 	assert(phone_obj->phone->state == IPC_PHONE_CONNECTING);
-	
+
 	/* Hand over phone_obj reference to the answerbox */
 	return ipc_phone_connect(phone_obj->phone, box);
Index: kernel/generic/src/ipc/irq.c
===================================================================
--- kernel/generic/src/ipc/irq.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/ipc/irq.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -85,5 +85,5 @@
 	for (size_t i = 0; i < rangecount; i++)
 		pbase[i] = ranges[i].base;
-	
+
 	/* Map the PIO ranges into the kernel virtual address space. */
 	for (size_t i = 0; i < rangecount; i++) {
@@ -100,10 +100,10 @@
 		}
 	}
-	
+
 	/* Rewrite the IRQ code addresses from physical to kernel virtual. */
 	for (size_t i = 0; i < cmdcount; i++) {
 		uintptr_t addr;
 		size_t size;
-		
+
 		/* Process only commands that use an address. */
 		switch (cmds[i].cmd) {
@@ -127,7 +127,7 @@
 			continue;
 		}
-		
+
 		addr = (uintptr_t) cmds[i].addr;
-		
+
 		size_t j;
 		for (j = 0; j < rangecount; j++) {
@@ -135,13 +135,13 @@
 			if (!iswithin(pbase[j], ranges[j].size, addr, size))
 				continue;
-			
+
 			/* Switch the command to a kernel virtual address. */
 			addr -= pbase[j];
 			addr += ranges[j].base;
-			
+
 			cmds[i].addr = (void *) addr;
 			break;
 		}
-		
+
 		if (j == rangecount) {
 			/*
@@ -154,5 +154,5 @@
 		}
 	}
-	
+
 	free(pbase);
 	return EOK;
@@ -172,11 +172,11 @@
 		if (cmds[i].cmd >= CMD_LAST)
 			return EINVAL;
-		
+
 		if (cmds[i].srcarg >= IPC_CALL_LEN)
 			return EINVAL;
-		
+
 		if (cmds[i].dstarg >= IPC_CALL_LEN)
 			return EINVAL;
-		
+
 		switch (cmds[i].cmd) {
 		case CMD_PREDICATE:
@@ -188,5 +188,5 @@
 			if (i + cmds[i].value > cmdcount)
 				return EINVAL;
-			
+
 			break;
 		default:
@@ -194,5 +194,5 @@
 		}
 	}
-	
+
 	return EOK;
 }
@@ -224,14 +224,14 @@
 	irq_pio_range_t *ranges = NULL;
 	irq_cmd_t *cmds = NULL;
-	
+
 	irq_code_t *code = malloc(sizeof(*code), 0);
 	errno_t rc = copy_from_uspace(code, ucode, sizeof(*code));
 	if (rc != EOK)
 		goto error;
-	
+
 	if ((code->rangecount > IRQ_MAX_RANGE_COUNT) ||
 	    (code->cmdcount > IRQ_MAX_PROG_SIZE))
 		goto error;
-	
+
 	ranges = malloc(sizeof(code->ranges[0]) * code->rangecount, 0);
 	rc = copy_from_uspace(ranges, code->ranges,
@@ -239,5 +239,5 @@
 	if (rc != EOK)
 		goto error;
-	
+
 	cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
 	rc = copy_from_uspace(cmds, code->cmds,
@@ -245,26 +245,26 @@
 	if (rc != EOK)
 		goto error;
-	
+
 	rc = code_check(cmds, code->cmdcount);
 	if (rc != EOK)
 		goto error;
-	
+
 	rc = ranges_map_and_apply(ranges, code->rangecount, cmds,
 	    code->cmdcount);
 	if (rc != EOK)
 		goto error;
-	
+
 	code->ranges = ranges;
 	code->cmds = cmds;
-	
+
 	return code;
-	
+
 error:
 	if (cmds)
 		free(cmds);
-	
+
 	if (ranges)
 		free(ranges);
-	
+
 	free(code);
 	return NULL;
@@ -275,5 +275,5 @@
 	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
 	irq_spinlock_lock(&irq->lock, false);
-	
+
 	if (irq->notif_cfg.hashed_in) {
 		/* Remove the IRQ from the uspace IRQ hash table. */
@@ -318,5 +318,5 @@
 	if ((inr < 0) || (inr > last_inr))
 		return ELIMIT;
-	
+
 	irq_code_t *code;
 	if (ucode) {
@@ -326,5 +326,5 @@
 	} else
 		code = NULL;
-	
+
 	/*
 	 * Allocate and populate the IRQ kernel object.
@@ -334,5 +334,5 @@
 	if (rc != EOK)
 		return rc;
-	
+
 	rc = copy_to_uspace(uspace_handle, &handle, sizeof(cap_handle_t));
 	if (rc != EOK) {
@@ -353,5 +353,5 @@
 		return ENOMEM;
 	}
-	
+
 	irq_initialize(irq);
 	irq->inr = inr;
@@ -363,5 +363,5 @@
 	irq->notif_cfg.code = code;
 	irq->notif_cfg.counter = 0;
-	
+
 	/*
 	 * Insert the IRQ structure into the uspace IRQ hash table.
@@ -369,8 +369,8 @@
 	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
 	irq_spinlock_lock(&irq->lock, false);
-	
+
 	irq->notif_cfg.hashed_in = true;
 	hash_table_insert(&irq_uspace_hash_table, &irq->link);
-	
+
 	irq_spinlock_unlock(&irq->lock, false);
 	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
@@ -378,5 +378,5 @@
 	kobject_initialize(kobject, KOBJECT_TYPE_IRQ, irq, &irq_kobject_ops);
 	cap_publish(TASK, handle, kobject);
-	
+
 	return EOK;
 }
@@ -395,5 +395,5 @@
 	if (!kobj)
 		return ENOENT;
-	
+
 	assert(kobj->irq->notif_cfg.answerbox == box);
 
@@ -402,5 +402,5 @@
 	kobject_put(kobj);
 	cap_free(TASK, handle);
-	
+
 	return EOK;
 }
@@ -419,5 +419,5 @@
 	list_append(&call->ab_link, &irq->notif_cfg.answerbox->irq_notifs);
 	irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);
-	
+
 	waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
 }
@@ -435,15 +435,15 @@
 	irq_code_t *code = irq->notif_cfg.code;
 	uint32_t *scratch = irq->notif_cfg.scratch;
-	
+
 	if (!irq->notif_cfg.notify)
 		return IRQ_DECLINE;
-	
+
 	if (!code)
 		return IRQ_DECLINE;
-	
+
 	for (size_t i = 0; i < code->cmdcount; i++) {
 		uintptr_t srcarg = code->cmds[i].srcarg;
 		uintptr_t dstarg = code->cmds[i].dstarg;
-		
+
 		switch (code->cmds[i].cmd) {
 		case CMD_PIO_READ_8:
@@ -493,5 +493,5 @@
 			if (scratch[srcarg] == 0)
 				i += code->cmds[i].value;
-			
+
 			break;
 		case CMD_ACCEPT:
@@ -502,5 +502,5 @@
 		}
 	}
-	
+
 	return IRQ_DECLINE;
 }
@@ -516,17 +516,17 @@
 {
 	assert(irq);
-	
+
 	assert(interrupts_disabled());
 	assert(irq_spinlock_locked(&irq->lock));
-	
+
 	if (irq->notif_cfg.answerbox) {
 		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
 		if (!call)
 			return;
-		
+
 		call->flags |= IPC_CALL_NOTIF;
 		/* Put a counter to the message */
 		call->priv = ++irq->notif_cfg.counter;
-		
+
 		/* Set up args */
 		IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
@@ -536,5 +536,5 @@
 		IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]);
 		IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]);
-		
+
 		send_call(irq, call);
 	}
@@ -555,5 +555,5 @@
 {
 	irq_spinlock_lock(&irq->lock, true);
-	
+
 	if (irq->notif_cfg.answerbox) {
 		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
@@ -562,9 +562,9 @@
 			return;
 		}
-		
+
 		call->flags |= IPC_CALL_NOTIF;
 		/* Put a counter to the message */
 		call->priv = ++irq->notif_cfg.counter;
-		
+
 		IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
 		IPC_SET_ARG1(call->data, a1);
@@ -573,8 +573,8 @@
 		IPC_SET_ARG4(call->data, a4);
 		IPC_SET_ARG5(call->data, a5);
-		
+
 		send_call(irq, call);
 	}
-	
+
 	irq_spinlock_unlock(&irq->lock, true);
 }
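
The address rewriting performed by ranges_map_and_apply() above is an interval lookup followed by a base swap: a command address that falls inside one of the registered PIO ranges is rebased from the range's saved physical base onto its freshly mapped kernel-virtual base, and an address outside every range is rejected. A minimal standalone sketch of that step; the pio_range_t layout and the iswithin() helper below are simplified stand-ins, not the kernel's own definitions.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
	uintptr_t pbase;  /* original physical base of the range */
	uintptr_t vbase;  /* kernel virtual base after mapping */
	size_t size;      /* range size in bytes */
} pio_range_t;

/* True iff [addr, addr + size) lies entirely within [base, base + rsize). */
static bool iswithin(uintptr_t base, size_t rsize, uintptr_t addr, size_t size)
{
	return (addr >= base) && (addr + size <= base + rsize);
}

/* Rebase a physical command address onto the mapped virtual range.
 * Returns true on success, false if the address is in no range
 * (mirroring the j == rangecount error path above). */
static bool rebase_addr(const pio_range_t *ranges, size_t cnt,
    uintptr_t addr, size_t access_size, uintptr_t *out)
{
	for (size_t i = 0; i < cnt; i++) {
		if (!iswithin(ranges[i].pbase, ranges[i].size, addr, access_size))
			continue;
		*out = addr - ranges[i].pbase + ranges[i].vbase;
		return true;
	}
	return false;
}
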
Index: kernel/generic/src/ipc/kbox.c
===================================================================
--- kernel/generic/src/ipc/kbox.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/ipc/kbox.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -63,12 +63,12 @@
 	TASK->kb.finished = true;
 	mutex_unlock(&TASK->kb.cleanup_lock);
-	
+
 	bool have_kb_thread = (TASK->kb.thread != NULL);
-	
+
 	/*
 	 * From now on nobody will try to connect phones or attach
 	 * kbox threads
 	 */
-	
+
 	/*
 	 * Disconnect all phones connected to our kbox. Passing true for
@@ -78,5 +78,5 @@
 	 */
 	ipc_answerbox_slam_phones(&TASK->kb.box, have_kb_thread);
-	
+
 	/*
 	 * If the task was being debugged, clean up debugging session.
@@ -87,5 +87,5 @@
 	udebug_task_cleanup(TASK);
 	mutex_unlock(&TASK->udebug.lock);
-	
+
 	if (have_kb_thread) {
 		LOG("Join kb.thread.");
@@ -95,5 +95,5 @@
 		TASK->kb.thread = NULL;
 	}
-	
+
 	/* Answer all messages in 'calls' and 'dispatched_calls' queues. */
 	ipc_cleanup_call_list(&TASK->kb.box, &TASK->kb.box.calls);
@@ -120,11 +120,11 @@
 		LOG("Was not debugger.");
 	}
-	
+
 	LOG("Continue with hangup message.");
 	IPC_SET_RETVAL(call->data, 0);
 	ipc_answer(&TASK->kb.box, call);
-	
+
 	mutex_lock(&TASK->kb.cleanup_lock);
-	
+
 	irq_spinlock_lock(&TASK->lock, true);
 	irq_spinlock_lock(&TASK->kb.box.lock, false);
@@ -134,5 +134,5 @@
 		 * gets freed and signal to the caller.
 		 */
-		
+
 		/* Only detach kbox thread unless already terminating. */
 		if (TASK->kb.finished == false) {
@@ -141,13 +141,13 @@
 			TASK->kb.thread = NULL;
 		}
-		
+
 		LOG("Phone list is empty.");
 		*last = true;
 	} else
 		*last = false;
-	
+
 	irq_spinlock_unlock(&TASK->kb.box.lock, false);
 	irq_spinlock_unlock(&TASK->lock, true);
-	
+
 	mutex_unlock(&TASK->kb.cleanup_lock);
 }
@@ -166,19 +166,19 @@
 	LOG("Starting.");
 	bool done = false;
-	
+
 	while (!done) {
 		call_t *call = ipc_wait_for_call(&TASK->kb.box, SYNCH_NO_TIMEOUT,
 		    SYNCH_FLAGS_NONE);
-		
+
 		if (call == NULL)
 			continue;  /* Try again. */
-		
+
 		switch (IPC_GET_IMETHOD(call->data)) {
-		
+
 		case IPC_M_DEBUG:
 			/* Handle debug call. */
 			udebug_call_receive(call);
 			break;
-		
+
 		case IPC_M_PHONE_HUNGUP:
 			/*
@@ -189,5 +189,5 @@
 			kbox_proc_phone_hungup(call, &done);
 			break;
-		
+
 		default:
 			/* Ignore */
@@ -195,5 +195,5 @@
 		}
 	}
-	
+
 	LOG("Exiting.");
 }
@@ -213,5 +213,5 @@
 {
 	irq_spinlock_lock(&tasks_lock, true);
-	
+
 	task_t *task = task_find_by_id(taskid);
 	if (task == NULL) {
@@ -219,11 +219,11 @@
 		return ENOENT;
 	}
-	
+
 	atomic_inc(&task->refcount);
-	
+
 	irq_spinlock_unlock(&tasks_lock, true);
-	
+
 	mutex_lock(&task->kb.cleanup_lock);
-	
+
 	if (atomic_predec(&task->refcount) == 0) {
 		mutex_unlock(&task->kb.cleanup_lock);
@@ -231,24 +231,24 @@
 		return ENOENT;
 	}
-	
+
 	if (task->kb.finished) {
 		mutex_unlock(&task->kb.cleanup_lock);
 		return EINVAL;
 	}
-	
+
 	/* Create a kbox thread if necessary. */
 	if (task->kb.thread == NULL) {
 		thread_t *kb_thread = thread_create(kbox_thread_proc, NULL, task,
 		    THREAD_FLAG_NONE, "kbox");
-		
+
 		if (!kb_thread) {
 			mutex_unlock(&task->kb.cleanup_lock);
 			return ENOMEM;
 		}
-		
+
 		task->kb.thread = kb_thread;
 		thread_ready(kb_thread);
 	}
-	
+
 	/* Allocate a new phone. */
 	cap_handle_t phone_handle;
@@ -258,5 +258,5 @@
 		return rc;
 	}
-	
+
 	kobject_t *phone_obj = kobject_get(TASK, phone_handle,
 	    KOBJECT_TYPE_PHONE);
@@ -264,5 +264,5 @@
 	/* Hand over phone_obj's reference to ipc_phone_connect() */
 	(void) ipc_phone_connect(phone_obj->phone, &task->kb.box);
-	
+
 	mutex_unlock(&task->kb.cleanup_lock);
 	*out_phone = phone_handle;
Index: kernel/generic/src/ipc/ops/conctmeto.c
===================================================================
--- kernel/generic/src/ipc/ops/conctmeto.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/ipc/ops/conctmeto.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -108,5 +108,5 @@
 		IPC_SET_ARG5(answer->data, phone_handle);
 	}
-	
+
 	return EOK;
 }
Index: kernel/generic/src/ipc/ops/dataread.c
===================================================================
--- kernel/generic/src/ipc/ops/dataread.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/ipc/ops/dataread.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -74,5 +74,5 @@
 			 */
 			IPC_SET_ARG1(answer->data, dst);
-				
+
 			answer->buffer = malloc(size, 0);
 			errno_t rc = copy_from_uspace(answer->buffer,
Index: kernel/generic/src/ipc/ops/datawrite.c
===================================================================
--- kernel/generic/src/ipc/ops/datawrite.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/ipc/ops/datawrite.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -65,5 +65,5 @@
 		return rc;
 	}
-		
+
 	return EOK;
 }
@@ -78,5 +78,5 @@
 		size_t size = (size_t)IPC_GET_ARG2(answer->data);
 		size_t max_size = (size_t)IPC_GET_ARG2(*olddata);
-			
+
 		if (size <= max_size) {
 			errno_t rc = copy_to_uspace((void *) dst,
Index: kernel/generic/src/ipc/ops/pagein.c
===================================================================
--- kernel/generic/src/ipc/ops/pagein.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/ipc/ops/pagein.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -92,5 +92,5 @@
 		page_table_unlock(AS, true);
 	}
-	
+
 	return EOK;
 }
Index: kernel/generic/src/ipc/ops/sharein.c
===================================================================
--- kernel/generic/src/ipc/ops/sharein.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/ipc/ops/sharein.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -47,5 +47,5 @@
 		as_t *as = answer->sender->as;
 		irq_spinlock_unlock(&answer->sender->lock, true);
-			
+
 		uintptr_t dst_base = (uintptr_t) -1;
 		errno_t rc = as_area_share(AS, IPC_GET_ARG1(answer->data),
@@ -55,5 +55,5 @@
 		IPC_SET_RETVAL(answer->data, rc);
 	}
-	
+
 	return EOK;
 }
Index: kernel/generic/src/ipc/ops/shareout.c
===================================================================
--- kernel/generic/src/ipc/ops/shareout.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/ipc/ops/shareout.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -68,10 +68,10 @@
 		    IPC_GET_ARG2(*olddata), AS, IPC_GET_ARG3(*olddata),
 		    &dst_base, IPC_GET_ARG1(answer->data));
-			
+
 		if (rc == EOK) {
 			rc = copy_to_uspace((void *) IPC_GET_ARG2(answer->data),
 			    &dst_base, sizeof(dst_base));
 		}
-			
+
 		IPC_SET_RETVAL(answer->data, rc);
 	}
Index: kernel/generic/src/ipc/sysipc.c
===================================================================
--- kernel/generic/src/ipc/sysipc.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/ipc/sysipc.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -69,5 +69,5 @@
 	if (imethod <= IPC_M_LAST_SYSTEM)
 		return true;
-	
+
 	return false;
 }
@@ -207,5 +207,5 @@
 		mutex_unlock(&phone->lock);
 	}
-	
+
 	if (!olddata)
 		return rc;
@@ -242,5 +242,5 @@
 	    (call->flags & IPC_CALL_FORWARDED))
 		IPC_SET_RETVAL(call->data, EFORWARD);
-	
+
 	SYSIPC_OP(answer_process, call);
 }
@@ -276,9 +276,9 @@
 	if (!kobj->phone)
 		return ENOENT;
-	
+
 	call_t *call = ipc_call_alloc(0);
 	call->priv = priv;
 	memcpy(call->data.args, data->args, sizeof(data->args));
-	
+
 	errno_t rc = request_preprocess(call, kobj->phone);
 	if (!rc) {
@@ -322,9 +322,9 @@
 	} else
 		IPC_SET_RETVAL(call->data, rc);
-	
+
 	memcpy(data->args, call->data.args, sizeof(data->args));
 	kobject_put(call->kobject);
 	kobject_put(kobj);
-	
+
 	return EOK;
 }
@@ -342,5 +342,5 @@
 	if (atomic_get(&phone->active_calls) >= IPC_MAX_ASYNC_CALLS)
 		return -1;
-	
+
 	return 0;
 }
@@ -368,10 +368,10 @@
 	if (!kobj)
 		return ENOENT;
-	
+
 	if (check_call_limit(kobj->phone)) {
 		kobject_put(kobj);
 		return ELIMIT;
 	}
-	
+
 	call_t *call = ipc_call_alloc(0);
 	IPC_SET_IMETHOD(call->data, imethod);
@@ -379,5 +379,5 @@
 	IPC_SET_ARG2(call->data, arg2);
 	IPC_SET_ARG3(call->data, arg3);
-	
+
 	/*
 	 * To achieve deterministic behavior, zero out arguments that are beyond
@@ -388,12 +388,12 @@
 	/* Set the user-defined label */
 	call->data.label = label;
-	
+
 	errno_t res = request_preprocess(call, kobj->phone);
-	
+
 	if (!res)
 		ipc_call(kobj->phone, call);
 	else
 		ipc_backsend_err(kobj->phone, call, res);
-	
+
 	kobject_put(kobj);
 	return EOK;
@@ -432,12 +432,12 @@
 	/* Set the user-defined label */
 	call->data.label = label;
-	
+
 	errno_t res = request_preprocess(call, kobj->phone);
-	
+
 	if (!res)
 		ipc_call(kobj->phone, call);
 	else
 		ipc_backsend_err(kobj->phone, call, res);
-	
+
 	kobject_put(kobj);
 	return EOK;
@@ -473,5 +473,5 @@
 	if (!ckobj)
 		return ENOENT;
-	
+
 	call_t *call = ckobj->call;
 
@@ -480,5 +480,5 @@
 	if (need_old)
 		old = call->data;
-	
+
 	bool after_forward = false;
 	errno_t rc;
@@ -489,12 +489,12 @@
 		goto error;
 	}
-	
+
 	if (!method_is_forwardable(IPC_GET_IMETHOD(call->data))) {
 		rc = EPERM;
 		goto error;
 	}
-	
+
 	call->flags |= IPC_CALL_FORWARDED;
-	
+
 	/*
 	 * User space is not allowed to change interface and method of system
@@ -507,12 +507,12 @@
 			if (IPC_GET_IMETHOD(call->data) == IPC_M_CONNECT_TO_ME)
 				phone_dealloc(IPC_GET_ARG5(call->data));
-			
+
 			IPC_SET_ARG1(call->data, imethod);
 			IPC_SET_ARG2(call->data, arg1);
 			IPC_SET_ARG3(call->data, arg2);
-			
+
 			if (slow)
 				IPC_SET_ARG4(call->data, arg3);
-			
+
 			/*
 			 * For system methods we deliberately don't
@@ -530,5 +530,5 @@
 		}
 	}
-	
+
 	rc = ipc_forward(call, pkobj->phone, &TASK->answerbox, mode);
 	if (rc != EOK) {
@@ -609,5 +609,5 @@
 	if (rc != EOK)
 		return (sys_errno_t) rc;
-	
+
 	return sys_ipc_forward_common(chandle, phandle,
 	    IPC_GET_IMETHOD(newdata), IPC_GET_ARG1(newdata),
@@ -637,10 +637,10 @@
 	if (!kobj)
 		return ENOENT;
-	
+
 	call_t *call = kobj->call;
 
 	ipc_data_t saved_data;
 	bool saved;
-	
+
 	if (answer_need_old(call)) {
 		memcpy(&saved_data, &call->data, sizeof(call->data));
@@ -648,5 +648,5 @@
 	} else
 		saved = false;
-	
+
 	IPC_SET_RETVAL(call->data, retval);
 	IPC_SET_ARG1(call->data, arg1);
@@ -654,5 +654,5 @@
 	IPC_SET_ARG3(call->data, arg3);
 	IPC_SET_ARG4(call->data, arg4);
-	
+
 	/*
 	 * To achieve deterministic behavior, zero out arguments that are beyond
@@ -661,5 +661,5 @@
 	IPC_SET_ARG5(call->data, 0);
 	errno_t rc = answer_preprocess(call, saved ? &saved_data : NULL);
-	
+
 	ipc_answer(&TASK->answerbox, call);
 
@@ -683,10 +683,10 @@
 	if (!kobj)
 		return ENOENT;
-	
+
 	call_t *call = kobj->call;
 
 	ipc_data_t saved_data;
 	bool saved;
-	
+
 	if (answer_need_old(call)) {
 		memcpy(&saved_data, &call->data, sizeof(call->data));
@@ -694,5 +694,5 @@
 	} else
 		saved = false;
-	
+
 	errno_t rc = copy_from_uspace(&call->data.args, &data->args,
 	    sizeof(call->data.args));
@@ -704,7 +704,7 @@
 		return rc;
 	}
-	
+
 	rc = answer_preprocess(call, saved ? &saved_data : NULL);
-	
+
 	ipc_answer(&TASK->answerbox, call);
 
@@ -727,5 +727,5 @@
 	if (!kobj)
 		return ENOENT;
-	
+
 	errno_t rc = ipc_phone_hangup(kobj->phone);
 	kobject_put(kobj);
@@ -746,14 +746,14 @@
 {
 	call_t *call;
-	
+
 restart:
-	
+
 #ifdef CONFIG_UDEBUG
 	udebug_stoppable_begin();
 #endif
-	
+
 	call = ipc_wait_for_call(&TASK->answerbox, usec,
 	    flags | SYNCH_FLAGS_INTERRUPTIBLE);
-	
+
 #ifdef CONFIG_UDEBUG
 	udebug_stoppable_end();
@@ -766,21 +766,21 @@
 		return EOK;
 	}
-	
+
 	call->data.flags = call->flags;
 	if (call->flags & IPC_CALL_NOTIF) {
 		/* Set in_phone_hash to the interrupt counter */
 		call->data.phone = (void *) call->priv;
-		
+
 		call->data.cap_handle = CAP_NIL;
 
 		STRUCT_TO_USPACE(calldata, &call->data);
 		kobject_put(call->kobject);
-		
+
 		return EOK;
 	}
-	
+
 	if (call->flags & IPC_CALL_ANSWERED) {
 		process_answer(call);
-		
+
 		if (call->flags & IPC_CALL_DISCARD_ANSWER) {
 			kobject_put(call->kobject);
@@ -789,14 +789,14 @@
 
 		call->data.cap_handle = CAP_NIL;
-		
+
 		STRUCT_TO_USPACE(calldata, &call->data);
 		kobject_put(call->kobject);
-		
+
 		return EOK;
 	}
-	
+
 	if (process_request(&TASK->answerbox, call))
 		goto restart;
-	
+
 	cap_handle_t handle;
 	errno_t rc = cap_alloc(TASK, &handle);
@@ -804,5 +804,5 @@
 		goto error;
 	}
-	
+
 	call->data.cap_handle = handle;
 
@@ -871,5 +871,5 @@
 	if (!(perm_get(TASK) & PERM_IRQ_REG))
 		return EPERM;
-	
+
 	return ipc_irq_subscribe(&TASK->answerbox, inr, imethod, ucode, uspace_handle);
 }
@@ -887,7 +887,7 @@
 	if (!(perm_get(TASK) & PERM_IRQ_REG))
 		return EPERM;
-	
+
 	ipc_irq_unsubscribe(&TASK->answerbox, cap);
-	
+
 	return 0;
 }
@@ -903,10 +903,10 @@
 	task_id_t taskid;
 	cap_handle_t phone;
-	
+
 	errno_t rc = copy_from_uspace(&taskid, uspace_taskid, sizeof(task_id_t));
 	if (rc == EOK) {
 		rc = ipc_connect_kbox((task_id_t) taskid, &phone);
 	}
-	
+
 	if (rc == EOK) {
 		rc = copy_to_uspace(uspace_phone, &phone, sizeof(cap_handle_t));
@@ -916,5 +916,5 @@
 		}
 	}
-	
+
 	return (sys_errno_t) rc;
 #else
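
The wait loop in sysipc.c above handles each received call in one of three ways: kernel notifications are copied straight to user space, answers to earlier calls are post-processed (and possibly discarded), and anything else is a new request that gets a fresh capability handle. A compressed sketch of that dispatch; the flag names mirror the kernel's IPC_CALL_* flags, but the types and enum below are illustrative placeholders.

enum {
	CALL_NOTIF    = 1 << 0,  /* kernel notification (e.g. IRQ) */
	CALL_ANSWERED = 1 << 1,  /* answer to a call we sent earlier */
	CALL_DISCARD  = 1 << 2,  /* answer should not be delivered */
};

typedef struct {
	unsigned int flags;
} call_t;

typedef enum {
	DELIVER_NOTIFICATION,
	DELIVER_ANSWER,
	DROP_ANSWER,
	DELIVER_REQUEST,
} call_disposition_t;

/* Classify a received call the way the wait loop above does. */
static call_disposition_t classify_call(const call_t *call)
{
	if (call->flags & CALL_NOTIF)
		return DELIVER_NOTIFICATION;

	if (call->flags & CALL_ANSWERED) {
		if (call->flags & CALL_DISCARD)
			return DROP_ANSWER;
		return DELIVER_ANSWER;
	}

	/* Neither a notification nor an answer: a new request. */
	return DELIVER_REQUEST;
}
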
Index: kernel/generic/src/lib/elf.c
===================================================================
--- kernel/generic/src/lib/elf.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/lib/elf.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -80,5 +80,5 @@
 	    (header->e_ident[EI_MAG3] != ELFMAG3))
 		return EE_INVALID;
-	
+
 	/* Identify ELF compatibility */
 	if ((header->e_ident[EI_DATA] != ELF_DATA_ENCODING) ||
@@ -88,19 +88,19 @@
 	    (header->e_ident[EI_CLASS] != ELF_CLASS))
 		return EE_INCOMPATIBLE;
-	
+
 	if (header->e_phentsize != sizeof(elf_segment_header_t))
 		return EE_INCOMPATIBLE;
-	
+
 	if (header->e_shentsize != sizeof(elf_section_header_t))
 		return EE_INCOMPATIBLE;
-	
+
 	/* Check if the object type is supported. */
 	if (header->e_type != ET_EXEC)
 		return EE_UNSUPPORTED;
-	
+
 	/* Check if the ELF image starts on a page boundary */
 	if (ALIGN_UP((uintptr_t) header, PAGE_SIZE) != (uintptr_t) header)
 		return EE_UNSUPPORTED;
-	
+
 	/* Walk through all segment headers and process them. */
 	elf_half i;
@@ -109,10 +109,10 @@
 		    &((elf_segment_header_t *)(((uint8_t *) header) +
 		    header->e_phoff))[i];
-		
+
 		int rc = segment_header(seghdr, header, as, flags);
 		if (rc != EE_OK)
 			return rc;
 	}
-	
+
 	/* Inspect all section headers and process them. */
 	for (i = 0; i < header->e_shnum; i++) {
@@ -120,10 +120,10 @@
 		    &((elf_section_header_t *)(((uint8_t *) header) +
 		    header->e_shoff))[i];
-		
+
 		int rc = section_header(sechdr, header, as);
 		if (rc != EE_OK)
 			return rc;
 	}
-	
+
 	return EE_OK;
 }
@@ -139,5 +139,5 @@
 {
 	assert(rc < sizeof(error_codes) / sizeof(char *));
-	
+
 	return error_codes[rc];
 }
@@ -199,5 +199,5 @@
 	backend_data.elf = elf;
 	backend_data.segment = entry;
-	
+
 	if (entry->p_align > 1) {
 		if ((entry->p_offset % entry->p_align) !=
@@ -205,18 +205,18 @@
 			return EE_INVALID;
 	}
-	
+
 	unsigned int flags = 0;
-	
+
 	if (entry->p_flags & PF_X)
 		flags |= AS_AREA_EXEC;
-	
+
 	if (entry->p_flags & PF_W)
 		flags |= AS_AREA_WRITE;
-	
+
 	if (entry->p_flags & PF_R)
 		flags |= AS_AREA_READ;
-	
+
 	flags |= AS_AREA_CACHEABLE;
-	
+
 	/*
 	 * Align vaddr down, inserting a little "gap" at the beginning.
@@ -226,15 +226,15 @@
 	uintptr_t base = ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE);
 	size_t mem_sz = entry->p_memsz + (entry->p_vaddr - base);
-	
+
 	as_area_t *area = as_area_create(as, flags, mem_sz,
 	    AS_AREA_ATTR_NONE, &elf_backend, &backend_data, &base, 0);
 	if (!area)
 		return EE_MEMORY;
-	
+
 	/*
 	 * The segment will be mapped on demand by elf_page_fault().
 	 *
 	 */
-	
+
 	return EE_OK;
 }
@@ -266,5 +266,5 @@
 		break;
 	}
-	
+
 	return EE_OK;
 }
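
segment_header() above translates the ELF segment protection bits into address-space area flags and always adds the cacheable flag for loadable segments. A self-contained sketch of that translation; PF_X, PF_W and PF_R carry their standard ELF values, while the AREA_* constants below are illustrative stand-ins for the kernel's AS_AREA_* flags.

#include <stdint.h>

/* Standard ELF program header flag bits. */
#define PF_X  0x1
#define PF_W  0x2
#define PF_R  0x4

/* Illustrative stand-ins for the kernel's AS_AREA_* flags. */
enum {
	AREA_EXEC      = 1 << 0,
	AREA_WRITE     = 1 << 1,
	AREA_READ      = 1 << 2,
	AREA_CACHEABLE = 1 << 3,
};

/* Translate segment protection bits into area flags, as segment_header()
 * does above; loadable segments are always mapped cacheable. */
static unsigned int segment_to_area_flags(uint32_t p_flags)
{
	unsigned int flags = AREA_CACHEABLE;

	if (p_flags & PF_X)
		flags |= AREA_EXEC;
	if (p_flags & PF_W)
		flags |= AREA_WRITE;
	if (p_flags & PF_R)
		flags |= AREA_READ;

	return flags;
}
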
Index: kernel/generic/src/lib/gsort.c
===================================================================
--- kernel/generic/src/lib/gsort.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/lib/gsort.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -75,5 +75,5 @@
 {
 	size_t i = 0;
-	
+
 	while (i < cnt) {
 		if ((i != 0) &&
@@ -109,5 +109,5 @@
 	uint8_t ibuf_slot[IBUF_SIZE];
 	void *slot;
-	
+
 	if (elem_size > IBUF_SIZE) {
 		slot = (void *) malloc(elem_size, 0);
@@ -116,10 +116,10 @@
 	} else
 		slot = (void *) ibuf_slot;
-	
+
 	_gsort(data, cnt, elem_size, cmp, arg, slot);
-	
+
 	if (elem_size > IBUF_SIZE)
 		free(slot);
-	
+
 	return true;
 }
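
The wrapper around _gsort() above keeps the element swap slot on the stack whenever the element fits into a small fixed buffer and only falls back to the heap for oversized elements, so the common case never allocates. The same small-buffer optimization in isolation, using the standard malloc() signature rather than the kernel allocator; SLOT_SIZE and the function name are illustrative.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#define SLOT_SIZE 32  /* illustrative on-stack slot size */

/* Swap two elements of arbitrary size, using the heap only when needed. */
static bool swap_elems(void *a, void *b, size_t elem_size)
{
	unsigned char ibuf[SLOT_SIZE];
	void *slot = ibuf;

	if (elem_size > SLOT_SIZE) {
		slot = malloc(elem_size);
		if (slot == NULL)
			return false;
	}

	memcpy(slot, a, elem_size);
	memcpy(a, b, elem_size);
	memcpy(b, slot, elem_size);

	if (slot != ibuf)
		free(slot);

	return true;
}
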
Index: kernel/generic/src/lib/halt.c
===================================================================
--- kernel/generic/src/lib/halt.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/lib/halt.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -55,5 +55,5 @@
 #if (defined(CONFIG_DEBUG)) && (defined(CONFIG_KCONSOLE))
 	bool rundebugger = false;
-	
+
 	if (!atomic_get(&haltstate)) {
 		atomic_set(&haltstate, 1);
@@ -63,17 +63,17 @@
 	atomic_set(&haltstate, 1);
 #endif
-	
+
 	interrupts_disable();
-	
+
 #if (defined(CONFIG_DEBUG)) && (defined(CONFIG_KCONSOLE))
 	if ((rundebugger) && (kconsole_check_poll()))
 		kconsole("panic", "\nLast resort kernel console ready.\n", false);
 #endif
-	
+
 	if (CPU)
 		log(LF_OTHER, LVL_NOTE, "cpu%u: halted", CPU->id);
 	else
 		log(LF_OTHER, LVL_NOTE, "cpu: halted");
-	
+
 	cpu_halt();
 }
Index: kernel/generic/src/lib/mem.c
===================================================================
--- kernel/generic/src/lib/mem.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/lib/mem.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -72,5 +72,5 @@
 	size_t i;
 	uint16_t *ptr = (uint16_t *) dst;
-	
+
 	for (i = 0; i < cnt; i++)
 		ptr[i] = val;
@@ -94,12 +94,12 @@
 	if (src == dst)
 		return dst;
-	
+
 	/* Non-overlapping? */
 	if ((dst >= src + cnt) || (src >= dst + cnt))
 		return memcpy(dst, src, cnt);
-	
+
 	uint8_t *dp;
 	const uint8_t *sp;
-	
+
 	/* Which direction? */
 	if (src > dst) {
@@ -107,5 +107,5 @@
 		dp = dst;
 		sp = src;
-		
+
 		while (cnt-- != 0)
 			*dp++ = *sp++;
@@ -114,9 +114,9 @@
 		dp = dst + (cnt - 1);
 		sp = src + (cnt - 1);
-		
+
 		while (cnt-- != 0)
 			*dp-- = *sp--;
 	}
-	
+
 	return dst;
 }
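
The memmove()-style routine above picks the copy direction from the relative order of source and destination so that, for overlapping regions, no source byte is overwritten before it has been read; disjoint regions are simply handed to memcpy(). A compact standalone version of the same direction choice:

#include <stddef.h>
#include <string.h>

/* Overlap-safe copy: pick the copy direction so that no source byte is
 * overwritten before it has been read. */
static void *move_bytes(void *dst, const void *src, size_t cnt)
{
	unsigned char *dp = dst;
	const unsigned char *sp = src;

	if (dp == sp)
		return dst;

	/* Disjoint regions can be copied in either direction. */
	if ((dp >= sp + cnt) || (sp >= dp + cnt))
		return memcpy(dst, src, cnt);

	if (sp > dp) {
		/* Copy forward: destination lies below the source. */
		while (cnt-- != 0)
			*dp++ = *sp++;
	} else {
		/* Copy backward: destination lies above the source. */
		dp += cnt - 1;
		sp += cnt - 1;
		while (cnt-- != 0)
			*dp-- = *sp--;
	}

	return dst;
}
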
Index: kernel/generic/src/lib/memfnc.c
===================================================================
--- kernel/generic/src/lib/memfnc.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/lib/memfnc.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -57,8 +57,8 @@
 {
 	uint8_t *dp = (uint8_t *) dst;
-	
+
 	while (cnt-- != 0)
 		*dp++ = val;
-	
+
 	return dst;
 }
@@ -80,8 +80,8 @@
 	uint8_t *dp = (uint8_t *) dst;
 	const uint8_t *sp = (uint8_t *) src;
-	
+
 	while (cnt-- != 0)
 		*dp++ = *sp++;
-	
+
 	return dst;
 }
Index: kernel/generic/src/lib/ra.c
===================================================================
--- kernel/generic/src/lib/ra.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/lib/ra.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -304,6 +304,6 @@
 			succ->flags |= RA_SEGMENT_FREE;
 		}
-		
-	
+
+
 		/* Put unneeded parts back. */
 		if (pred) {
Index: kernel/generic/src/lib/rd.c
===================================================================
--- kernel/generic/src/lib/rd.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/lib/rd.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -58,5 +58,5 @@
 	uintptr_t base = (uintptr_t) data;
 	assert((base % FRAME_SIZE) == 0);
-	
+
 	rd_parea.pbase = base;
 	rd_parea.frames = SIZE2FRAMES(size);
@@ -64,9 +64,9 @@
 	rd_parea.mapped = false;
 	ddi_parea_register(&rd_parea);
-	
+
 	sysinfo_set_item_val("rd", NULL, true);
 	sysinfo_set_item_val("rd.size", NULL, size);
 	sysinfo_set_item_val("rd.address.physical", NULL, (sysarg_t) base);
-	
+
 	log(LF_OTHER, LVL_NOTE, "RAM disk at %p (size %zu bytes)", (void *) base,
 	    size);
Index: kernel/generic/src/lib/str.c
===================================================================
--- kernel/generic/src/lib/str.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/lib/str.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -151,13 +151,13 @@
 	if (*offset + 1 > size)
 		return 0;
-	
+
 	/* First byte read from string */
 	uint8_t b0 = (uint8_t) str[(*offset)++];
-	
+
 	/* Determine code length */
-	
+
 	unsigned int b0_bits;  /* Data bits in first byte */
 	unsigned int cbytes;   /* Number of continuation bytes */
-	
+
 	if ((b0 & 0x80) == 0) {
 		/* 0xxxxxxx (Plain ASCII) */
@@ -180,23 +180,23 @@
 		return U_SPECIAL;
 	}
-	
+
 	if (*offset + cbytes > size)
 		return U_SPECIAL;
-	
+
 	wchar_t ch = b0 & LO_MASK_8(b0_bits);
-	
+
 	/* Decode continuation bytes */
 	while (cbytes > 0) {
 		uint8_t b = (uint8_t) str[(*offset)++];
-		
+
 		/* Must be 10xxxxxx */
 		if ((b & 0xc0) != 0x80)
 			return U_SPECIAL;
-		
+
 		/* Shift data bits to ch */
 		ch = (ch << CONT_BITS) | (wchar_t) (b & LO_MASK_8(CONT_BITS));
 		cbytes--;
 	}
-	
+
 	return ch;
 }
@@ -221,17 +221,17 @@
 	if (*offset >= size)
 		return EOVERFLOW;
-	
+
 	if (!chr_check(ch))
 		return EINVAL;
-	
+
 	/* Unsigned version of ch (bit operations should only be done
 	   on unsigned types). */
 	uint32_t cc = (uint32_t) ch;
-	
+
 	/* Determine how many continuation bytes are needed */
-	
+
 	unsigned int b0_bits;  /* Data bits in first byte */
 	unsigned int cbytes;   /* Number of continuation bytes */
-	
+
 	if ((cc & ~LO_MASK_32(7)) == 0) {
 		b0_bits = 7;
@@ -250,9 +250,9 @@
 		return EINVAL;
 	}
-	
+
 	/* Check for available space in buffer */
 	if (*offset + cbytes >= size)
 		return EOVERFLOW;
-	
+
 	/* Encode continuation bytes */
 	unsigned int i;
@@ -261,11 +261,11 @@
 		cc = cc >> CONT_BITS;
 	}
-	
+
 	/* Encode first byte */
 	str[*offset] = (cc & LO_MASK_32(b0_bits)) | HI_MASK_8(8 - b0_bits - 1);
-	
+
 	/* Advance offset */
 	*offset += cbytes + 1;
-	
+
 	return EOK;
 }
@@ -284,8 +284,8 @@
 {
 	size_t size = 0;
-	
+
 	while (*str++ != 0)
 		size++;
-	
+
 	return size;
 }
@@ -323,12 +323,12 @@
 	size_t len = 0;
 	size_t offset = 0;
-	
+
 	while (len < max_len) {
 		if (str_decode(str, &offset, STR_NO_LIMIT) == 0)
 			break;
-		
+
 		len++;
 	}
-	
+
 	return offset;
 }
@@ -363,8 +363,8 @@
 	size_t len = 0;
 	size_t offset = 0;
-	
+
 	while (str_decode(str, &offset, STR_NO_LIMIT) != 0)
 		len++;
-	
+
 	return len;
 }
@@ -380,8 +380,8 @@
 {
 	size_t len = 0;
-	
+
 	while (*wstr++ != 0)
 		len++;
-	
+
 	return len;
 }
@@ -399,8 +399,8 @@
 	size_t len = 0;
 	size_t offset = 0;
-	
+
 	while (str_decode(str, &offset, size) != 0)
 		len++;
-	
+
 	return len;
 }
@@ -419,10 +419,10 @@
 	size_t limit = ALIGN_DOWN(size, sizeof(wchar_t));
 	size_t offset = 0;
-	
+
 	while ((offset < limit) && (*str++ != 0)) {
 		len++;
 		offset += sizeof(wchar_t);
 	}
-	
+
 	return len;
 }
@@ -437,5 +437,5 @@
 	if (WCHAR_SIGNED_CHECK(ch >= 0) && (ch <= 127))
 		return true;
-	
+
 	return false;
 }
@@ -450,5 +450,5 @@
 	if (WCHAR_SIGNED_CHECK(ch >= 0) && (ch <= 1114111))
 		return true;
-	
+
 	return false;
 }
@@ -476,5 +476,5 @@
 	wchar_t c1 = 0;
 	wchar_t c2 = 0;
-	
+
 	size_t off1 = 0;
 	size_t off2 = 0;
@@ -486,5 +486,5 @@
 		if (c1 < c2)
 			return -1;
-		
+
 		if (c1 > c2)
 			return 1;
@@ -523,8 +523,8 @@
 	wchar_t c1 = 0;
 	wchar_t c2 = 0;
-	
+
 	size_t off1 = 0;
 	size_t off2 = 0;
-	
+
 	size_t len = 0;
 
@@ -569,8 +569,8 @@
 	assert(size > 0);
 	assert(src != NULL);
-	
+
 	size_t src_off = 0;
 	size_t dest_off = 0;
-	
+
 	wchar_t ch;
 	while ((ch = str_decode(src, &src_off, STR_NO_LIMIT)) != 0) {
@@ -578,5 +578,5 @@
 			break;
 	}
-	
+
 	dest[dest_off] = '\0';
 }
@@ -602,8 +602,8 @@
 	/* There must be space for a null terminator in the buffer. */
 	assert(size > 0);
-	
+
 	size_t src_off = 0;
 	size_t dest_off = 0;
-	
+
 	wchar_t ch;
 	while ((ch = str_decode(src, &src_off, n)) != 0) {
@@ -611,5 +611,5 @@
 			break;
 	}
-	
+
 	dest[dest_off] = '\0';
 }
@@ -636,5 +636,5 @@
 	char *dest = malloc(size, 0);
 	assert(dest);
-	
+
 	str_cpy(dest, size, src);
 	return dest;
@@ -666,8 +666,8 @@
 	if (size > n)
 		size = n;
-	
+
 	char *dest = malloc(size + 1, 0);
 	assert(dest);
-	
+
 	str_ncpy(dest, size + 1, src, size);
 	return dest;
@@ -695,5 +695,5 @@
 	src_idx = 0;
 	dest_off = 0;
-	
+
 	while ((ch = src[src_idx++]) != 0) {
 		if (chr_encode(ch, dest, &dest_off, size - 1) != EOK)
@@ -717,5 +717,5 @@
 	size_t off = 0;
 	size_t last = 0;
-	
+
 	while ((acc = str_decode(str, &off, STR_NO_LIMIT)) != 0) {
 		if (acc == ch)
@@ -723,5 +723,5 @@
 		last = off;
 	}
-	
+
 	return NULL;
 }
@@ -744,14 +744,14 @@
 {
 	size_t len = wstr_length(str);
-	
+
 	if ((pos > len) || (pos + 1 > max_pos))
 		return false;
-	
+
 	size_t i;
 	for (i = len; i + 1 > pos; i--)
 		str[i + 1] = str[i];
-	
+
 	str[pos] = ch;
-	
+
 	return true;
 }
@@ -772,12 +772,12 @@
 {
 	size_t len = wstr_length(str);
-	
+
 	if (pos >= len)
 		return false;
-	
+
 	size_t i;
 	for (i = pos + 1; i <= len; i++)
 		str[i - 1] = str[i];
-	
+
 	return true;
 }
@@ -800,12 +800,12 @@
 	assert(neg != NULL);
 	assert(result != NULL);
-	
+
 	*neg = false;
 	const char *str = nptr;
-	
+
 	/* Ignore leading whitespace */
 	while (isspace(*str))
 		str++;
-	
+
 	if (*str == '-') {
 		*neg = true;
@@ -813,13 +813,13 @@
 	} else if (*str == '+')
 		str++;
-	
+
 	if (base == 0) {
 		/* Decode base if not specified */
 		base = 10;
-		
+
 		if (*str == '0') {
 			base = 8;
 			str++;
-			
+
 			switch (*str) {
 			case 'b':
@@ -856,11 +856,11 @@
 		}
 	}
-	
+
 	*result = 0;
 	const char *startstr = str;
-	
+
 	while (*str != 0) {
 		unsigned int digit;
-		
+
 		if ((*str >= 'a') && (*str <= 'z'))
 			digit = *str - 'a' + 10;
@@ -871,11 +871,11 @@
 		else
 			break;
-		
+
 		if (digit >= base)
 			break;
-		
+
 		uint64_t prev = *result;
 		*result = (*result) * base + digit;
-		
+
 		if (*result < prev) {
 			/* Overflow */
@@ -883,8 +883,8 @@
 			return EOVERFLOW;
 		}
-		
+
 		str++;
 	}
-	
+
 	if (str == startstr) {
 		/*
@@ -894,10 +894,10 @@
 		str = nptr;
 	}
-	
+
 	*endptr = (char *) str;
-	
+
 	if (str == nptr)
 		return EINVAL;
-	
+
 	return EOK;
 }
@@ -919,24 +919,24 @@
 {
 	assert(result != NULL);
-	
+
 	bool neg;
 	char *lendptr;
 	errno_t ret = str_uint(nptr, &lendptr, base, &neg, result);
-	
+
 	if (endptr != NULL)
 		*endptr = (char *) lendptr;
-	
+
 	if (ret != EOK)
 		return ret;
-	
+
 	/* Do not allow negative values */
 	if (neg)
 		return EINVAL;
-	
+
 	/* Check whether we are at the end of
 	   the string in strict mode */
 	if ((strict) && (*lendptr != 0))
 		return EINVAL;
-	
+
 	return EOK;
 }
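
str_decode() above classifies the first byte of a UTF-8 sequence to learn how many data bits it carries and how many continuation bytes follow, then folds the low six bits of each 10xxxxxx continuation byte into the code point. A self-contained sketch of the same classification and accumulation; the kernel's U_SPECIAL replacement character is reduced here to a plain -1 error value.

#include <stddef.h>
#include <stdint.h>

/* Decode one UTF-8 code point starting at str[*offset], advancing *offset.
 * Returns the code point, or -1 on a malformed sequence. */
static int32_t utf8_decode(const char *str, size_t *offset, size_t size)
{
	if (*offset >= size)
		return -1;

	uint8_t b0 = (uint8_t) str[(*offset)++];
	unsigned int b0_bits;  /* data bits carried by the first byte */
	unsigned int cbytes;   /* continuation bytes that follow */

	if ((b0 & 0x80) == 0) {
		b0_bits = 7;  /* 0xxxxxxx: plain ASCII */
		cbytes = 0;
	} else if ((b0 & 0xe0) == 0xc0) {
		b0_bits = 5;  /* 110xxxxx: two-byte sequence */
		cbytes = 1;
	} else if ((b0 & 0xf0) == 0xe0) {
		b0_bits = 4;  /* 1110xxxx: three-byte sequence */
		cbytes = 2;
	} else if ((b0 & 0xf8) == 0xf0) {
		b0_bits = 3;  /* 11110xxx: four-byte sequence */
		cbytes = 3;
	} else {
		return -1;    /* 10xxxxxx or invalid leading byte */
	}

	if (*offset + cbytes > size)
		return -1;

	int32_t ch = b0 & ((1u << b0_bits) - 1);

	while (cbytes-- > 0) {
		uint8_t b = (uint8_t) str[(*offset)++];
		if ((b & 0xc0) != 0x80)
			return -1;  /* continuation byte must be 10xxxxxx */
		ch = (ch << 6) | (b & 0x3f);
	}

	return ch;
}
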
Index: kernel/generic/src/log/log.c
===================================================================
--- kernel/generic/src/log/log.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/log/log.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -121,10 +121,10 @@
 		len = LOG_LENGTH - log_current_len;
 	}
-	
+
 	if (len == 0)
 		return;
-	
+
 	size_t log_free = LOG_LENGTH - log_used - log_current_len;
-	
+
 	/* Discard older entries to make space, if necessary */
 	while (len > log_free) {
@@ -136,5 +136,5 @@
 		next_for_uspace -= entry_len;
 	}
-	
+
 	size_t pos = (log_current_start + log_current_len) % LOG_LENGTH;
 	log_copy_to(data, pos, len);
@@ -151,8 +151,8 @@
 	spinlock_lock(&log_lock);
 	spinlock_lock(&kio_lock);
-	
+
 	log_current_start = (log_start + log_used) % LOG_LENGTH;
 	log_current_len = 0;
-	
+
 	/* Write header of the log entry, the length will be written in log_end() */
 	log_append((uint8_t *) &log_current_len, sizeof(size_t));
@@ -162,5 +162,5 @@
 	log_append((uint8_t *) &fac32, sizeof(uint32_t));
 	log_append((uint8_t *) &lvl32, sizeof(uint32_t));
-	
+
 	log_counter++;
 }
@@ -174,9 +174,9 @@
 	log_copy_to((uint8_t *) &log_current_len, log_current_start, sizeof(size_t));
 	log_used += log_current_len;
-	
+
 	kio_push_char('\n');
 	spinlock_unlock(&kio_lock);
 	spinlock_unlock(&log_lock);
-	
+
 	/* This has to be called after we released the locks above */
 	kio_flush();
@@ -189,5 +189,5 @@
 	if (!atomic_get(&log_inited))
 		return;
-	
+
 	spinlock_lock(&log_lock);
 	if (next_for_uspace < log_used)
@@ -200,12 +200,12 @@
 	size_t offset = 0;
 	size_t chars = 0;
-	
+
 	while (offset < size) {
 		kio_push_char(str_decode(str, &offset, size));
 		chars++;
 	}
-	
+
 	log_append((const uint8_t *)str, size);
-	
+
 	return chars;
 }
@@ -216,8 +216,8 @@
 	size_t offset = 0;
 	size_t chars = 0;
-	
+
 	for (offset = 0; offset < size; offset += sizeof(wchar_t), chars++) {
 		kio_push_char(wstr[chars]);
-		
+
 		size_t buffer_offset = 0;
 		errno_t rc = chr_encode(wstr[chars], buffer, &buffer_offset, 16);
@@ -225,8 +225,8 @@
 			return EOF;
 		}
-		
+
 		log_append((const uint8_t *)buffer, buffer_offset);
 	}
-	
+
 	return chars;
 }
@@ -239,5 +239,5 @@
 {
 	int ret;
-	
+
 	printf_spec_t ps = {
 		log_printf_str_write,
@@ -245,8 +245,8 @@
 		NULL
 	};
-	
-	
+
+
 	ret = printf_core(fmt, &ps, args);
-	
+
 	return ret;
 }
@@ -260,9 +260,9 @@
 	int ret;
 	va_list args;
-	
+
 	va_start(args, fmt);
 	ret = log_vprintf(fmt, args);
 	va_end(args);
-	
+
 	return ret;
 }
@@ -278,13 +278,13 @@
 	int ret;
 	va_list args;
-	
+
 	log_begin(fac, level);
-	
+
 	va_start(args, fmt);
 	ret = log_vprintf(fmt, args);
 	va_end(args);
-	
+
 	log_end();
-	
+
 	return ret;
 }
@@ -298,8 +298,8 @@
 	char *data;
 	errno_t rc;
-	
+
 	if (size > PAGE_SIZE)
 		return (sys_errno_t) ELIMIT;
-	
+
 	switch (operation) {
 		case KLOG_WRITE:
@@ -307,5 +307,5 @@
 			if (!data)
 				return (sys_errno_t) ENOMEM;
-			
+
 			rc = copy_from_uspace(data, buf, size);
 			if (rc) {
@@ -314,10 +314,10 @@
 			}
 			data[size] = 0;
-			
+
 			if (level >= LVL_LIMIT)
 				level = LVL_NOTE;
-			
+
 			log(LF_USPACE, level, "%s", data);
-			
+
 			free(data);
 			return EOK;
@@ -326,16 +326,16 @@
 			if (!data)
 				return (sys_errno_t) ENOMEM;
-			
+
 			size_t entry_len = 0;
 			size_t copied = 0;
-			
+
 			rc = EOK;
-	
+
 			spinlock_lock(&log_lock);
-			
+
 			while (next_for_uspace < log_used) {
 				size_t pos = (log_start + next_for_uspace) % LOG_LENGTH;
 				log_copy_from((uint8_t *) &entry_len, pos, sizeof(size_t));
-				
+
 				if (entry_len > PAGE_SIZE) {
 					/*
@@ -350,5 +350,5 @@
 					continue;
 				}
-				
+
 				if (size < copied + entry_len) {
 					if (copied == 0)
@@ -356,24 +356,24 @@
 					break;
 				}
-				
+
 				log_copy_from((uint8_t *) (data + copied), pos, entry_len);
 				copied += entry_len;
 				next_for_uspace += entry_len;
 			}
-			
+
 			spinlock_unlock(&log_lock);
-			
+
 			if (rc != EOK) {
 				free(data);
 				return (sys_errno_t) rc;
 			}
-			
+
 			rc = copy_to_uspace(buf, data, size);
-			
+
 			free(data);
-			
+
 			if (rc != EOK)
 				return (sys_errno_t) rc;
-			
+
 			return copy_to_uspace(uspace_nread, &copied, sizeof(copied));
 			return EOK;
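
The kernel log above keeps length-prefixed entries in a fixed circular buffer and, when a new entry does not fit, makes room by reading the length prefix of the oldest entry and advancing the start index past it. A simplified sketch of that eviction policy; the buffer size and names are illustrative, and the locking, the kio mirror and the user-space read cursor are left out.

#include <stddef.h>
#include <stdint.h>

#define LOG_LEN 4096  /* illustrative buffer size */

static uint8_t log_buf[LOG_LEN];
static size_t log_start;  /* offset of the oldest entry */
static size_t log_used;   /* bytes occupied by whole entries */

/* Copy into/out of the buffer with wrap-around at the end. */
static void log_write(const uint8_t *data, size_t pos, size_t len)
{
	for (size_t i = 0; i < len; i++)
		log_buf[(pos + i) % LOG_LEN] = data[i];
}

static void log_read(uint8_t *data, size_t pos, size_t len)
{
	for (size_t i = 0; i < len; i++)
		data[i] = log_buf[(pos + i) % LOG_LEN];
}

/* Append one length-prefixed entry, evicting the oldest entries first
 * if there is not enough free space for it. */
static void log_append_entry(const uint8_t *payload, size_t len)
{
	size_t entry_len = sizeof(size_t) + len;
	if (entry_len > LOG_LEN)
		return;  /* can never fit; drop it */

	while (entry_len > LOG_LEN - log_used) {
		/* Read the oldest entry's length prefix and skip past it. */
		size_t oldest;
		log_read((uint8_t *) &oldest, log_start, sizeof(size_t));
		log_start = (log_start + oldest) % LOG_LEN;
		log_used -= oldest;
	}

	size_t pos = (log_start + log_used) % LOG_LEN;
	log_write((uint8_t *) &entry_len, pos, sizeof(size_t));
	log_write(payload, (pos + sizeof(size_t)) % LOG_LEN, len);
	log_used += entry_len;
}
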
Index: kernel/generic/src/main/kinit.c
===================================================================
--- kernel/generic/src/main/kinit.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/main/kinit.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -102,5 +102,5 @@
 {
 	thread_t *thread;
-	
+
 	/*
 	 * Detach kinit as nobody will call thread_join_timeout() on it.
@@ -109,17 +109,17 @@
 
 	interrupts_disable();
-	
+
 	/* Start processing RCU callbacks. RCU is fully functional afterwards. */
 	rcu_kinit_init();
-	
+
 	/*
 	 * Start processing work queue items. Some may have been queued during boot.
 	 */
 	workq_global_worker_init();
-	
+
 #ifdef CONFIG_SMP
 	if (config.cpu_count > 1) {
 		waitq_initialize(&ap_completion_wq);
-		
+
 		/*
 		 * Create the kmp thread and wait for its completion.
@@ -135,13 +135,13 @@
 		} else
 			panic("Unable to create kmp thread.");
-		
+
 		thread_join(thread);
 		thread_detach(thread);
-		
+
 		/*
 		 * For each CPU, create its load balancing thread.
 		 */
 		unsigned int i;
-		
+
 		for (i = 0; i < config.cpu_count; i++) {
 			thread = thread_create(kcpulb, NULL, TASK,
@@ -156,10 +156,10 @@
 	}
 #endif /* CONFIG_SMP */
-	
+
 	/*
 	 * At this point SMP, if present, is configured.
 	 */
 	ARCH_OP(post_smp_init);
-	
+
 	/* Start thread computing system load */
 	thread = thread_create(kload, NULL, TASK, THREAD_FLAG_NONE,
@@ -169,5 +169,5 @@
 	else
 		log(LF_OTHER, LVL_ERROR, "Unable to create kload thread");
-	
+
 #ifdef CONFIG_KCONSOLE
 	if (stdin) {
@@ -184,5 +184,5 @@
 	}
 #endif /* CONFIG_KCONSOLE */
-	
+
 	/*
 	 * Store the default stack size in sysinfo so that uspace can create
@@ -190,7 +190,7 @@
 	 */
 	sysinfo_set_item_val("default.stack_size", NULL, STACK_SIZE_USER);
-	
+
 	interrupts_enable();
-	
+
 	/*
 	 * Create user tasks, load RAM disk images.
@@ -198,5 +198,5 @@
 	size_t i;
 	program_t programs[CONFIG_INIT_TASKS];
-	
+
 	// FIXME: do not propagate arguments through sysinfo
 	// but pass them directly to the tasks
@@ -228,21 +228,21 @@
 			continue;
 		}
-		
+
 		/*
 		 * Construct task name from the 'init:' prefix and the
 		 * name stored in the init structure (if any).
 		 */
-		
+
 		char namebuf[TASK_NAME_BUFLEN];
-		
+
 		const char *name = init.tasks[i].name;
 		if (name[0] == 0)
 			name = "<unknown>";
-		
+
 		static_assert(TASK_NAME_BUFLEN >= INIT_PREFIX_LEN, "");
 		str_cpy(namebuf, TASK_NAME_BUFLEN, INIT_PREFIX);
 		str_cpy(namebuf + INIT_PREFIX_LEN,
 		    TASK_NAME_BUFLEN - INIT_PREFIX_LEN, name);
-		
+
 		/*
 		 * Create virtual memory mappings for init task images.
@@ -252,8 +252,8 @@
 		    PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE);
 		assert(page);
-		
+
 		errno_t rc = program_create_from_image((void *) page, namebuf,
 		    &programs[i]);
-		
+
 		if (rc == 0) {
 			if (programs[i].task != NULL) {
@@ -264,5 +264,5 @@
 				    PERM_PERM | PERM_MEM_MANAGER |
 				    PERM_IO_MANAGER | PERM_IRQ_REG);
-				
+
 				if (!ipc_phone_0) {
 					ipc_phone_0 = &programs[i].task->answerbox;
@@ -276,5 +276,5 @@
 				}
 			}
-			
+
 			/*
 			 * If programs[i].task == NULL then it is
@@ -293,5 +293,5 @@
 			    str_error_name(rc), programs[i].loader_status);
 	}
-	
+
 	/*
 	 * Run user tasks.
@@ -301,10 +301,10 @@
 			program_ready(&programs[i]);
 	}
-	
+
 #ifdef CONFIG_KCONSOLE
 	if (!stdin) {
 		thread_sleep(10);
 		printf("kinit: No stdin\nKernel alive: .");
-		
+
 		unsigned int i = 0;
 		while (true) {
Index: kernel/generic/src/main/main.c
===================================================================
--- kernel/generic/src/main/main.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/main/main.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -166,13 +166,13 @@
 	config.cpu_count = 1;
 	config.cpu_active = 1;
-	
+
 	config.base = hardcoded_load_address;
 	config.kernel_size = ALIGN_UP(hardcoded_ktext_size +
 	    hardcoded_kdata_size, PAGE_SIZE);
 	config.stack_size = STACK_SIZE;
-	
+
 	/* Initialy the stack is placed just after the kernel */
 	config.stack_base = config.base + config.kernel_size;
-	
+
 	/* Avoid placing stack on top of init */
 	size_t i;
@@ -190,5 +190,5 @@
 		}
 	}
-	
+
 	/* Avoid placing stack on top of boot allocations. */
 	if (ballocs.size) {
@@ -198,8 +198,8 @@
 			    ballocs.size, PAGE_SIZE);
 	}
-	
+
 	if (config.stack_base < stack_safe)
 		config.stack_base = ALIGN_UP(stack_safe, PAGE_SIZE);
-	
+
 	context_save(&ctx);
 	context_set(&ctx, FADDR(main_bsp_separated_stack),
@@ -218,12 +218,12 @@
 	/* Keep this the first thing. */
 	the_initialize(THE);
-	
+
 	version_print();
-	
+
 	LOG("\nconfig.base=%p config.kernel_size=%zu"
 	    "\nconfig.stack_base=%p config.stack_size=%zu",
 	    (void *) config.base, config.kernel_size,
 	    (void *) config.stack_base, config.stack_size);
-	
+
 #ifdef CONFIG_KCONSOLE
 	/*
@@ -234,5 +234,5 @@
 	kconsole_init();
 #endif
-	
+
 	/*
 	 * Exception handler initialization, before architecture
@@ -240,5 +240,5 @@
 	 */
 	exc_init();
-	
+
 	/*
 	 * Memory management subsystems initialization.
@@ -260,8 +260,8 @@
 	ARCH_OP(pre_smp_init);
 	smp_init();
-	
+
 	/* Slab must be initialized after we know the number of processors. */
 	slab_enable_cpucache();
-	
+
 	uint64_t size;
 	const char *size_suffix;
@@ -269,5 +269,5 @@
 	printf("Detected %u CPU(s), %" PRIu64 " %s free memory\n",
 	    config.cpu_count, size, size_suffix);
-	
+
 	cpu_init();
 	calibrate_delay_loop();
@@ -293,5 +293,5 @@
 	} else
 		printf("No init binaries found.\n");
-	
+
 	ipc_init();
 	event_init();
@@ -299,5 +299,5 @@
 	log_init();
 	stats_init();
-	
+
 	/*
 	 * Create kernel task.
@@ -306,5 +306,5 @@
 	if (!kernel)
 		panic("Cannot create kernel task.");
-	
+
 	/*
 	 * Create the first thread.
@@ -315,5 +315,5 @@
 		panic("Cannot create kinit thread.");
 	thread_ready(kinit_thread);
-	
+
 	/*
 	 * This call to scheduler() will return to kinit,
@@ -344,10 +344,10 @@
 	 */
 	config.cpu_active++;
-	
+
 	/*
 	 * The THE structure is well defined because ctx.sp is used as stack.
 	 */
 	the_initialize(THE);
-	
+
 	ARCH_OP(pre_mm_init);
 	frame_init();
@@ -355,11 +355,11 @@
 	tlb_init();
 	ARCH_OP(post_mm_init);
-	
+
 	cpu_init();
 	calibrate_delay_loop();
 	ARCH_OP(post_cpu_init);
-	
+
 	the_copy(THE, (the_t *) CPU->stack);
-	
+
 	/*
 	 * If we woke kmp up before we left the kernel stack, we could
@@ -382,10 +382,10 @@
 {
 	smp_call_init();
-	
+
 	/*
 	 * Configure timeouts for this cpu.
 	 */
 	timeout_init();
-	
+
 	waitq_wakeup(&ap_completion_wq, WAKEUP_FIRST);
 	scheduler();
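
The BSP startup code in main.c above first places the bootstrap stack immediately after the kernel image and then bumps it upward, page-aligned, past any init image or boot allocation it would collide with. A sketch of that placement under the assumption that the reserved regions are visited in increasing address order, as the init images are; ALIGN_UP, overlaps() and the region type below are simplified stand-ins.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE 4096  /* illustrative page size */

#define ALIGN_UP(x, a)  (((x) + ((a) - 1)) & ~((uintptr_t) (a) - 1))

typedef struct {
	uintptr_t addr;
	size_t size;
} region_t;

static bool overlaps(uintptr_t a, size_t asz, uintptr_t b, size_t bsz)
{
	return (a < b + bsz) && (b < a + asz);
}

/* Pick a page-aligned stack base after the kernel that avoids all
 * reserved regions (init images, boot allocations, ...), assuming the
 * regions are sorted by address. */
static uintptr_t place_stack(uintptr_t kernel_end, size_t stack_size,
    const region_t *reserved, size_t cnt)
{
	uintptr_t base = ALIGN_UP(kernel_end, PAGE);

	for (size_t i = 0; i < cnt; i++) {
		if (overlaps(base, stack_size, reserved[i].addr, reserved[i].size))
			base = ALIGN_UP(reserved[i].addr + reserved[i].size, PAGE);
	}

	return base;
}
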
Index: kernel/generic/src/main/shutdown.c
===================================================================
--- kernel/generic/src/main/shutdown.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/main/shutdown.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -45,9 +45,9 @@
 {
 	task_done();
-	
+
 #ifdef CONFIG_DEBUG
 	log(LF_OTHER, LVL_DEBUG, "Rebooting the system");
 #endif
-	
+
 	arch_reboot();
 	halt();
Index: kernel/generic/src/main/uinit.c
===================================================================
--- kernel/generic/src/main/uinit.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/main/uinit.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -65,12 +65,12 @@
 	 */
 	thread_detach(THREAD);
-	
+
 #ifdef CONFIG_UDEBUG
 	udebug_stoppable_end();
 #endif
-	
+
 	uspace_arg_t *uarg = (uspace_arg_t *) arg;
 	uspace_arg_t local_uarg;
-	
+
 	local_uarg.uspace_entry = uarg->uspace_entry;
 	local_uarg.uspace_stack = uarg->uspace_stack;
@@ -79,7 +79,7 @@
 	local_uarg.uspace_thread_function = NULL;
 	local_uarg.uspace_thread_arg = NULL;
-	
+
 	free(uarg);
-	
+
 	userspace(&local_uarg);
 }
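
The thread entry in uinit.c above copies the heap-allocated argument block into a stack-local copy, frees the original, and only then jumps to user space, because that jump never returns and nothing afterwards could release the allocation. The same hand-off pattern in generic form; thread_arg_t and enter_user_mode() are placeholders, the latter standing in for the kernel's userspace() call.

#include <stdlib.h>

typedef struct {
	void *entry;
	void *stack;
	size_t stack_size;
} thread_arg_t;

/* Placeholder for the non-returning transition to user mode. */
extern void enter_user_mode(const thread_arg_t *arg) __attribute__((noreturn));

static void thread_trampoline(void *arg)
{
	/* Copy to the stack first: after enter_user_mode() nothing in this
	 * function runs again, so the heap copy must be freed before it. */
	thread_arg_t local = *(thread_arg_t *) arg;
	free(arg);

	enter_user_mode(&local);
}
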
Index: kernel/generic/src/mm/as.c
===================================================================
--- kernel/generic/src/mm/as.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/mm/as.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -114,8 +114,8 @@
 {
 	as_t *as = (as_t *) obj;
-	
+
 	link_initialize(&as->inactive_as_with_asid_link);
 	mutex_initialize(&as->lock, MUTEX_PASSIVE);
-	
+
 	return as_constructor_arch(as, flags);
 }
@@ -130,12 +130,12 @@
 {
 	as_arch_init();
-	
+
 	as_cache = slab_cache_create("as_t", sizeof(as_t), 0,
 	    as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
-	
+
 	AS_KERNEL = as_create(FLAG_AS_KERNEL);
 	if (!AS_KERNEL)
 		panic("Cannot create kernel address space.");
-	
+
 	/*
 	 * Make sure the kernel address space
@@ -155,15 +155,15 @@
 	as_t *as = (as_t *) slab_alloc(as_cache, 0);
 	(void) as_create_arch(as, 0);
-	
+
 	btree_create(&as->as_area_btree);
-	
+
 	if (flags & FLAG_AS_KERNEL)
 		as->asid = ASID_KERNEL;
 	else
 		as->asid = ASID_INVALID;
-	
+
 	atomic_set(&as->refcount, 0);
 	as->cpu_refcount = 0;
-	
+
 #ifdef AS_PAGE_TABLE
 	as->genarch.page_table = page_table_create(flags);
@@ -171,5 +171,5 @@
 	page_table_create(flags);
 #endif
-	
+
 	return as;
 }
@@ -188,13 +188,13 @@
 {
 	DEADLOCK_PROBE_INIT(p_asidlock);
-	
+
 	assert(as != AS);
 	assert(atomic_get(&as->refcount) == 0);
-	
+
 	/*
 	 * Since there is no reference to this address space, it is safe not to
 	 * lock its mutex.
 	 */
-	
+
 	/*
 	 * We need to avoid deadlock between TLB shootdown and asidlock.
@@ -206,5 +206,5 @@
 	preemption_disable();
 	ipl_t ipl = interrupts_read();
-	
+
 retry:
 	interrupts_disable();
@@ -214,19 +214,19 @@
 		goto retry;
 	}
-	
+
 	/* Interrupts disabled, enable preemption */
 	preemption_enable();
-	
+
 	if ((as->asid != ASID_INVALID) && (as != AS_KERNEL)) {
 		if (as->cpu_refcount == 0)
 			list_remove(&as->inactive_as_with_asid_link);
-		
+
 		asid_put(as->asid);
 	}
-	
+
 	spinlock_unlock(&asidlock);
 	interrupts_restore(ipl);
-	
-	
+
+
 	/*
 	 * Destroy address space areas of the address space.
@@ -237,15 +237,15 @@
 	while (cond) {
 		assert(!list_empty(&as->as_area_btree.leaf_list));
-		
+
 		btree_node_t *node =
 		    list_get_instance(list_first(&as->as_area_btree.leaf_list),
 		    btree_node_t, leaf_link);
-		
+
 		if ((cond = node->keys))
 			as_area_destroy(as, node->key[0]);
 	}
-	
+
 	btree_destroy(&as->as_area_btree);
-	
+
 #ifdef AS_PAGE_TABLE
 	page_table_destroy(as->genarch.page_table);
@@ -253,5 +253,5 @@
 	page_table_destroy(NULL);
 #endif
-	
+
 	slab_free(as_cache, as);
 }
@@ -307,5 +307,5 @@
 	if (overflows_into_positive(addr, P2SZ(count)))
 		return false;
-	
+
 	/*
 	 * We don't want any area to have conflicts with NULL page.
@@ -328,5 +328,5 @@
 			return false;
 	}
-	
+
 	/* First, check the two border cases. */
 	btree_node_t *node =
@@ -334,5 +334,5 @@
 	if (node) {
 		area = (as_area_t *) node->value[node->keys - 1];
-		
+
 		if (area != avoid) {
 			mutex_lock(&area->lock);
@@ -346,5 +346,5 @@
 			int const gp = (guarded ||
 			    (area->flags & AS_AREA_GUARD)) ? 1 : 0;
-			
+
 			/*
 			 * The area comes from the left neighbour node, which
@@ -358,13 +358,13 @@
 				return false;
 			}
-			
+
 			mutex_unlock(&area->lock);
 		}
 	}
-	
+
 	node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
 	if (node) {
 		area = (as_area_t *) node->value[0];
-		
+
 		if (area != avoid) {
 			int gp;
@@ -382,5 +382,5 @@
 				gp--;
 			}
-			
+
 			if (overlaps(addr, P2SZ(count + gp), area->base,
 			    P2SZ(area->pages))) {
@@ -388,9 +388,9 @@
 				return false;
 			}
-			
+
 			mutex_unlock(&area->lock);
 		}
 	}
-	
+
 	/* Second, check the leaf node. */
 	btree_key_t i;
@@ -399,8 +399,8 @@
 		int agp;
 		int gp;
-		
+
 		if (area == avoid)
 			continue;
-		
+
 		mutex_lock(&area->lock);
 
@@ -421,8 +421,8 @@
 			return false;
 		}
-		
+
 		mutex_unlock(&area->lock);
 	}
-	
+
 	/*
 	 * So far, the area does not conflict with other areas.
@@ -434,5 +434,5 @@
 		    addr, P2SZ(count));
 	}
-	
+
 	return true;
 }
@@ -456,8 +456,8 @@
 {
 	assert(mutex_locked(&as->lock));
-	
+
 	if (size == 0)
 		return (uintptr_t) -1;
-	
+
 	/*
 	 * Make sure we allocate from page-aligned
@@ -465,12 +465,12 @@
 	 * each step.
 	 */
-	
+
 	size_t pages = SIZE2FRAMES(size);
-	
+
 	/*
 	 * Find the lowest unmapped address aligned on the size
 	 * boundary, not smaller than bound and of the required size.
 	 */
-	
+
 	/* First check the bound address itself */
 	uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
@@ -486,13 +486,13 @@
 			return addr;
 	}
-	
+
 	/* Eventually check the addresses behind each area */
 	list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, node) {
-		
+
 		for (btree_key_t i = 0; i < node->keys; i++) {
 			as_area_t *area = (as_area_t *) node->value[i];
-			
+
 			mutex_lock(&area->lock);
-			
+
 			addr =
 			    ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE);
@@ -508,12 +508,12 @@
 			    ((addr >= bound) && (addr >= area->base) &&
 			    (check_area_conflicts(as, addr, pages, guarded, area)));
-			
+
 			mutex_unlock(&area->lock);
-			
+
 			if (avail)
 				return addr;
 		}
 	}
-	
+
 	/* No suitable address space area found */
 	return (uintptr_t) -1;
@@ -530,11 +530,11 @@
 {
 	bool dealloc = false;
-	
+
 	mutex_lock(&sh_info->lock);
 	assert(sh_info->refcount);
-	
+
 	if (--sh_info->refcount == 0) {
 		dealloc = true;
-		
+
 		/*
 		 * Now walk carefully the pagemap B+tree and free/remove
@@ -544,12 +544,12 @@
 		    btree_node_t, node) {
 			btree_key_t i;
-			
+
 			for (i = 0; i < node->keys; i++)
 				frame_free((uintptr_t) node->value[i], 1);
 		}
-		
+
 	}
 	mutex_unlock(&sh_info->lock);
-	
+
 	if (dealloc) {
 		if (sh_info->backend && sh_info->backend->destroy_shared_data) {
@@ -588,10 +588,10 @@
 	if ((*base != (uintptr_t) AS_AREA_ANY) && !IS_ALIGNED(*base, PAGE_SIZE))
 		return NULL;
-	
+
 	if (size == 0)
 		return NULL;
 
 	size_t pages = SIZE2FRAMES(size);
-	
+
 	/* Writeable executable areas are not supported. */
 	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
@@ -599,7 +599,7 @@
 
 	bool const guarded = flags & AS_AREA_GUARD;
-	
+
 	mutex_lock(&as->lock);
-	
+
 	if (*base == (uintptr_t) AS_AREA_ANY) {
 		*base = as_get_unmapped_area(as, bound, size, guarded);
@@ -619,9 +619,9 @@
 		return NULL;
 	}
-	
+
 	as_area_t *area = (as_area_t *) malloc(sizeof(as_area_t), 0);
-	
+
 	mutex_initialize(&area->lock, MUTEX_PASSIVE);
-	
+
 	area->as = as;
 	area->flags = flags;
@@ -632,5 +632,5 @@
 	area->backend = backend;
 	area->sh_info = NULL;
-	
+
 	if (backend_data)
 		area->backend_data = *backend_data;
@@ -655,5 +655,5 @@
 
 		area->sh_info = si;
-	
+
 		if (area->backend && area->backend->create_shared_data) {
 			if (!area->backend->create_shared_data(area)) {
@@ -679,7 +679,7 @@
 	btree_insert(&as->as_area_btree, *base, (void *) area,
 	    NULL);
-	
+
 	mutex_unlock(&as->lock);
-	
+
 	return area;
 }
@@ -697,5 +697,5 @@
 {
 	assert(mutex_locked(&as->lock));
-	
+
 	btree_node_t *leaf;
 	as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va,
@@ -706,5 +706,5 @@
 		return area;
 	}
-	
+
 	/*
 	 * Search the leaf node and the rightmost record of its left neighbour
@@ -712,11 +712,11 @@
 	 * space area found there.
 	 */
-	
+
 	/* First, search the leaf node itself. */
 	btree_key_t i;
-	
+
 	for (i = 0; i < leaf->keys; i++) {
 		area = (as_area_t *) leaf->value[i];
-		
+
 		mutex_lock(&area->lock);
 
@@ -724,8 +724,8 @@
 		    (va <= area->base + (P2SZ(area->pages) - 1)))
 			return area;
-		
+
 		mutex_unlock(&area->lock);
 	}
-	
+
 	/*
 	 * Second, locate the left neighbour and test its last record.
@@ -736,13 +736,13 @@
 	if (lnode) {
 		area = (as_area_t *) lnode->value[lnode->keys - 1];
-		
+
 		mutex_lock(&area->lock);
-		
+
 		if (va <= area->base + (P2SZ(area->pages) - 1))
 			return area;
-		
+
 		mutex_unlock(&area->lock);
 	}
-	
+
 	return NULL;
 }
@@ -766,5 +766,5 @@
 
 	mutex_lock(&as->lock);
-	
+
 	/*
 	 * Locate the area.
@@ -784,5 +784,5 @@
 		return ENOTSUP;
 	}
-	
+
 	mutex_lock(&area->sh_info->lock);
 	if (area->sh_info->shared) {
@@ -797,5 +797,5 @@
 	}
 	mutex_unlock(&area->sh_info->lock);
-	
+
 	size_t pages = SIZE2FRAMES((address - area->base) + size);
 	if (!pages) {
@@ -807,15 +807,15 @@
 		return EPERM;
 	}
-	
+
 	if (pages < area->pages) {
 		uintptr_t start_free = area->base + P2SZ(pages);
-		
+
 		/*
 		 * Shrinking the area.
 		 * No need to check for overlaps.
 		 */
-		
+
 		page_table_lock(as, false);
-		
+
 		/*
 		 * Remove frames belonging to used space starting from
@@ -828,9 +828,9 @@
 		while (cond) {
 			assert(!list_empty(&area->used_space.leaf_list));
-			
+
 			btree_node_t *node =
 			    list_get_instance(list_last(&area->used_space.leaf_list),
 			    btree_node_t, leaf_link);
-			
+
 			if ((cond = (node->keys != 0))) {
 				uintptr_t ptr = node->key[node->keys - 1];
@@ -838,8 +838,8 @@
 				    (size_t) node->value[node->keys - 1];
 				size_t i = 0;
-				
+
 				if (overlaps(ptr, P2SZ(node_size), area->base,
 				    P2SZ(pages))) {
-					
+
 					if (ptr + P2SZ(node_size) <= start_free) {
 						/*
@@ -850,5 +850,5 @@
 						break;
 					}
-					
+
 					/*
 					 * Part of the interval corresponding
@@ -856,5 +856,5 @@
 					 * address space area.
 					 */
-					
+
 					/* We are almost done */
 					cond = false;
@@ -871,5 +871,5 @@
 						panic("Cannot remove used space.");
 				}
-				
+
 				/*
 				 * Start TLB shootdown sequence.
@@ -887,14 +887,14 @@
 				    as->asid, area->base + P2SZ(pages),
 				    area->pages - pages);
-		
+
 				for (; i < node_size; i++) {
 					pte_t pte;
 					bool found = page_mapping_find(as,
 					    ptr + P2SZ(i), false, &pte);
-					
+
 					assert(found);
 					assert(PTE_VALID(&pte));
 					assert(PTE_PRESENT(&pte));
-					
+
 					if ((area->backend) &&
 					    (area->backend->frame_free)) {
@@ -903,16 +903,16 @@
 						    PTE_GET_FRAME(&pte));
 					}
-					
+
 					page_mapping_remove(as, ptr + P2SZ(i));
 				}
-		
+
 				/*
 				 * Finish TLB shootdown sequence.
 				 */
-		
+
 				tlb_invalidate_pages(as->asid,
 				    area->base + P2SZ(pages),
 				    area->pages - pages);
-		
+
 				/*
 				 * Invalidate software translation caches
@@ -944,5 +944,5 @@
 		}
 	}
-	
+
 	if (area->backend && area->backend->resize) {
 		if (!area->backend->resize(area, pages)) {
@@ -952,10 +952,10 @@
 		}
 	}
-	
+
 	area->pages = pages;
-	
+
 	mutex_unlock(&area->lock);
 	mutex_unlock(&as->lock);
-	
+
 	return 0;
 }
@@ -972,5 +972,5 @@
 {
 	mutex_lock(&as->lock);
-	
+
 	as_area_t *area = find_area_and_lock(as, address);
 	if (!area) {
@@ -981,9 +981,9 @@
 	if (area->backend && area->backend->destroy)
 		area->backend->destroy(area);
-	
+
 	uintptr_t base = area->base;
-	
+
 	page_table_lock(as, false);
-	
+
 	/*
 	 * Start TLB shootdown sequence.
@@ -991,5 +991,5 @@
 	ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
 	    area->pages);
-	
+
 	/*
 	 * Visit only the pages mapped by used_space B+tree.
@@ -998,18 +998,18 @@
 	    node) {
 		btree_key_t i;
-		
+
 		for (i = 0; i < node->keys; i++) {
 			uintptr_t ptr = node->key[i];
 			size_t size;
-			
+
 			for (size = 0; size < (size_t) node->value[i]; size++) {
 				pte_t pte;
 				bool found = page_mapping_find(as,
 				     ptr + P2SZ(size), false, &pte);
-				
+
 				assert(found);
 				assert(PTE_VALID(&pte));
 				assert(PTE_PRESENT(&pte));
-				
+
 				if ((area->backend) &&
 				    (area->backend->frame_free)) {
@@ -1018,16 +1018,16 @@
 					    PTE_GET_FRAME(&pte));
 				}
-				
+
 				page_mapping_remove(as, ptr + P2SZ(size));
 			}
 		}
 	}
-	
+
 	/*
 	 * Finish TLB shootdown sequence.
 	 */
-	
+
 	tlb_invalidate_pages(as->asid, area->base, area->pages);
-	
+
 	/*
 	 * Invalidate potential software translation caches
@@ -1036,22 +1036,22 @@
 	as_invalidate_translation_cache(as, area->base, area->pages);
 	tlb_shootdown_finalize(ipl);
-	
+
 	page_table_unlock(as, false);
-	
+
 	btree_destroy(&area->used_space);
-	
+
 	area->attributes |= AS_AREA_ATTR_PARTIAL;
-	
+
 	sh_info_remove_reference(area->sh_info);
-	
+
 	mutex_unlock(&area->lock);
-	
+
 	/*
 	 * Remove the empty area from address space.
 	 */
 	btree_remove(&as->as_area_btree, base, NULL);
-	
+
 	free(area);
-	
+
 	mutex_unlock(&as->lock);
 	return 0;
@@ -1098,5 +1098,5 @@
 		return ENOENT;
 	}
-	
+
 	if (!src_area->backend->is_shareable(src_area)) {
 		/*
@@ -1107,14 +1107,14 @@
 		return ENOTSUP;
 	}
-	
+
 	size_t src_size = P2SZ(src_area->pages);
 	unsigned int src_flags = src_area->flags;
 	mem_backend_t *src_backend = src_area->backend;
 	mem_backend_data_t src_backend_data = src_area->backend_data;
-	
+
 	/* Share the cacheable flag from the original mapping */
 	if (src_flags & AS_AREA_CACHEABLE)
 		dst_flags_mask |= AS_AREA_CACHEABLE;
-	
+
 	if ((src_size != acc_size) ||
 	    ((src_flags & dst_flags_mask) != dst_flags_mask)) {
@@ -1123,5 +1123,5 @@
 		return EPERM;
 	}
-	
+
 	/*
 	 * Now we are committed to sharing the area.
@@ -1130,5 +1130,5 @@
 	 */
 	share_info_t *sh_info = src_area->sh_info;
-	
+
 	mutex_lock(&sh_info->lock);
 	sh_info->refcount++;
@@ -1144,8 +1144,8 @@
 		src_area->backend->share(src_area);
 	}
-	
+
 	mutex_unlock(&src_area->lock);
 	mutex_unlock(&src_as->lock);
-	
+
 	/*
 	 * Create copy of the source address space area.
@@ -1164,8 +1164,8 @@
 		 */
 		sh_info_remove_reference(sh_info);
-		
+
 		return ENOMEM;
 	}
-	
+
 	/*
 	 * Now the destination address space area has been
@@ -1179,5 +1179,5 @@
 	mutex_unlock(&dst_area->lock);
 	mutex_unlock(&dst_as->lock);
-	
+
 	return 0;
 }
@@ -1195,5 +1195,5 @@
 {
 	assert(mutex_locked(&area->lock));
-	
+
 	int flagmap[] = {
 		[PF_ACCESS_READ] = AS_AREA_READ,
@@ -1201,8 +1201,8 @@
 		[PF_ACCESS_EXEC] = AS_AREA_EXEC
 	};
-	
+
 	if (!(area->flags & flagmap[access]))
 		return false;
-	
+
 	return true;
 }
@@ -1218,17 +1218,17 @@
 {
 	unsigned int flags = PAGE_USER | PAGE_PRESENT;
-	
+
 	if (aflags & AS_AREA_READ)
 		flags |= PAGE_READ;
-		
+
 	if (aflags & AS_AREA_WRITE)
 		flags |= PAGE_WRITE;
-	
+
 	if (aflags & AS_AREA_EXEC)
 		flags |= PAGE_EXEC;
-	
+
 	if (aflags & AS_AREA_CACHEABLE)
 		flags |= PAGE_CACHEABLE;
-	
+
 	return flags;
 }
@@ -1252,7 +1252,7 @@
 	/* Flags for the new memory mapping */
 	unsigned int page_flags = area_flags_to_page_flags(flags);
-	
+
 	mutex_lock(&as->lock);
-	
+
 	as_area_t *area = find_area_and_lock(as, address);
 	if (!area) {
@@ -1260,5 +1260,5 @@
 		return ENOENT;
 	}
-	
+
 	if (area->backend != &anon_backend) {
 		/* Copying non-anonymous memory not supported yet */
@@ -1277,23 +1277,23 @@
 	}
 	mutex_unlock(&area->sh_info->lock);
-	
+
 	/*
 	 * Compute total number of used pages in the used_space B+tree
 	 */
 	size_t used_pages = 0;
-	
+
 	list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
 	    node) {
 		btree_key_t i;
-		
+
 		for (i = 0; i < node->keys; i++)
 			used_pages += (size_t) node->value[i];
 	}
-	
+
 	/* An array for storing frame numbers */
 	uintptr_t *old_frame = malloc(used_pages * sizeof(uintptr_t), 0);
-	
+
 	page_table_lock(as, false);
-	
+
 	/*
 	 * Start TLB shootdown sequence.
@@ -1301,5 +1301,5 @@
 	ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
 	    area->pages);
-	
+
 	/*
 	 * Remove used pages from page tables and remember their frame
@@ -1307,24 +1307,24 @@
 	 */
 	size_t frame_idx = 0;
-	
+
 	list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
 	    node) {
 		btree_key_t i;
-		
+
 		for (i = 0; i < node->keys; i++) {
 			uintptr_t ptr = node->key[i];
 			size_t size;
-			
+
 			for (size = 0; size < (size_t) node->value[i]; size++) {
 				pte_t pte;
 				bool found = page_mapping_find(as,
 				    ptr + P2SZ(size), false, &pte);
-				
+
 				assert(found);
 				assert(PTE_VALID(&pte));
 				assert(PTE_PRESENT(&pte));
-				
+
 				old_frame[frame_idx++] = PTE_GET_FRAME(&pte);
-				
+
 				/* Remove old mapping */
 				page_mapping_remove(as, ptr + P2SZ(size));
@@ -1332,11 +1332,11 @@
 		}
 	}
-	
+
 	/*
 	 * Finish TLB shootdown sequence.
 	 */
-	
+
 	tlb_invalidate_pages(as->asid, area->base, area->pages);
-	
+
 	/*
 	 * Invalidate potential software translation caches
@@ -1345,12 +1345,12 @@
 	as_invalidate_translation_cache(as, area->base, area->pages);
 	tlb_shootdown_finalize(ipl);
-	
+
 	page_table_unlock(as, false);
-	
+
 	/*
 	 * Set the new flags.
 	 */
 	area->flags = flags;
-	
+
 	/*
 	 * Map pages back in with new flags. This step is kept separate
@@ -1359,30 +1359,30 @@
 	 */
 	frame_idx = 0;
-	
+
 	list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
 	    node) {
 		btree_key_t i;
-		
+
 		for (i = 0; i < node->keys; i++) {
 			uintptr_t ptr = node->key[i];
 			size_t size;
-			
+
 			for (size = 0; size < (size_t) node->value[i]; size++) {
 				page_table_lock(as, false);
-				
+
 				/* Insert the new mapping */
 				page_mapping_insert(as, ptr + P2SZ(size),
 				    old_frame[frame_idx++], page_flags);
-				
+
 				page_table_unlock(as, false);
 			}
 		}
 	}
-	
+
 	free(old_frame);
-	
+
 	mutex_unlock(&area->lock);
 	mutex_unlock(&as->lock);
-	
+
 	return 0;
 }
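
The flag-change path above remaps the whole area in two passes: the first walk over used_space records every frame number in old_frame[] while the old mappings are removed, the second walk reinstalls the same frames under the new page flags. A self-contained toy version of that save-then-reinsert pattern; the "page table" below is just a pair of arrays and all names, sizes and flag values are illustrative only:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NPAGES 4

/* Toy "page table": frame number and flags per page, 0 meaning unmapped. */
static uintptr_t frame_of[NPAGES];
static unsigned  flags_of[NPAGES];

int main(void)
{
	for (size_t i = 0; i < NPAGES; i++) {
		frame_of[i] = 0x1000 * (i + 1);
		flags_of[i] = 0x1;              /* old flags, e.g. read-only */
	}

	/* Pass 1: remember the frames and remove the old mappings. */
	uintptr_t *old_frame = malloc(NPAGES * sizeof(uintptr_t));
	if (!old_frame)
		return 1;
	size_t frame_idx = 0;
	for (size_t i = 0; i < NPAGES; i++) {
		old_frame[frame_idx++] = frame_of[i];
		frame_of[i] = 0;
		flags_of[i] = 0;
	}

	/* Pass 2: reinsert the same frames under the new flags. */
	unsigned new_flags = 0x3;               /* e.g. read-write */
	frame_idx = 0;
	for (size_t i = 0; i < NPAGES; i++) {
		frame_of[i] = old_frame[frame_idx++];
		flags_of[i] = new_flags;
	}

	free(old_frame);
	for (size_t i = 0; i < NPAGES; i++)
		printf("page %zu -> frame %#lx flags %#x\n",
		    i, (unsigned long) frame_of[i], flags_of[i]);
	return 0;
}
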
@@ -1414,8 +1414,8 @@
 	if (!THREAD)
 		goto page_fault;
-	
+
 	if (!AS)
 		goto page_fault;
-	
+
 	mutex_lock(&AS->lock);
 	as_area_t *area = find_area_and_lock(AS, page);
@@ -1428,5 +1428,5 @@
 		goto page_fault;
 	}
-	
+
 	if (area->attributes & AS_AREA_ATTR_PARTIAL) {
 		/*
@@ -1438,5 +1438,5 @@
 		goto page_fault;
 	}
-	
+
 	if ((!area->backend) || (!area->backend->page_fault)) {
 		/*
@@ -1448,7 +1448,7 @@
 		goto page_fault;
 	}
-	
+
 	page_table_lock(AS, false);
-	
+
 	/*
 	 * To avoid a race condition between two page faults on the same address,
@@ -1467,5 +1467,5 @@
 		}
 	}
-	
+
 	/*
 	 * Resort to the backend page fault handler.
@@ -1478,10 +1478,10 @@
 		goto page_fault;
 	}
-	
+
 	page_table_unlock(AS, false);
 	mutex_unlock(&area->lock);
 	mutex_unlock(&AS->lock);
 	return AS_PF_OK;
-	
+
 page_fault:
 	if (THREAD->in_copy_from_uspace) {
@@ -1501,5 +1501,5 @@
 		panic_memtrap(istate, access, address, NULL);
 	}
-	
+
 	return AS_PF_DEFER;
 }
@@ -1521,5 +1521,5 @@
 	DEADLOCK_PROBE_INIT(p_asidlock);
 	preemption_disable();
-	
+
 retry:
 	(void) interrupts_disable();
@@ -1536,5 +1536,5 @@
 	}
 	preemption_enable();
-	
+
 	/*
 	 * First, take care of the old address space.
@@ -1542,5 +1542,5 @@
 	if (old_as) {
 		assert(old_as->cpu_refcount);
-		
+
 		if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
 			/*
@@ -1551,9 +1551,9 @@
 			 */
 			assert(old_as->asid != ASID_INVALID);
-			
+
 			list_append(&old_as->inactive_as_with_asid_link,
 			    &inactive_as_with_asid_list);
 		}
-		
+
 		/*
 		 * Perform architecture-specific tasks when the address space
@@ -1562,5 +1562,5 @@
 		as_deinstall_arch(old_as);
 	}
-	
+
 	/*
 	 * Second, prepare the new address space.
@@ -1572,9 +1572,9 @@
 			new_as->asid = asid_get();
 	}
-	
+
 #ifdef AS_PAGE_TABLE
 	SET_PTL0_ADDRESS(new_as->genarch.page_table);
 #endif
-	
+
 	/*
 	 * Perform architecture-specific steps.
@@ -1582,7 +1582,7 @@
 	 */
 	as_install_arch(new_as);
-	
+
 	spinlock_unlock(&asidlock);
-	
+
 	AS = new_as;
 }
@@ -1598,5 +1598,5 @@
 {
 	assert(mutex_locked(&area->lock));
-	
+
 	return area_flags_to_page_flags(area->flags);
 }
@@ -1617,5 +1617,5 @@
 	assert(as_operations);
 	assert(as_operations->page_table_create);
-	
+
 	return as_operations->page_table_create(flags);
 }
@@ -1632,5 +1632,5 @@
 	assert(as_operations);
 	assert(as_operations->page_table_destroy);
-	
+
 	as_operations->page_table_destroy(page_table);
 }
@@ -1653,5 +1653,5 @@
 	assert(as_operations);
 	assert(as_operations->page_table_lock);
-	
+
 	as_operations->page_table_lock(as, lock);
 }
@@ -1667,5 +1667,5 @@
 	assert(as_operations);
 	assert(as_operations->page_table_unlock);
-	
+
 	as_operations->page_table_unlock(as, unlock);
 }
@@ -1697,8 +1697,8 @@
 {
 	size_t size;
-	
+
 	page_table_lock(AS, true);
 	as_area_t *src_area = find_area_and_lock(AS, base);
-	
+
 	if (src_area) {
 		size = P2SZ(src_area->pages);
@@ -1706,5 +1706,5 @@
 	} else
 		size = 0;
-	
+
 	page_table_unlock(AS, true);
 	return size;
@@ -1727,5 +1727,5 @@
 	assert(IS_ALIGNED(page, PAGE_SIZE));
 	assert(count);
-	
+
 	btree_node_t *leaf = NULL;
 	size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
@@ -1738,10 +1738,10 @@
 
 	assert(leaf != NULL);
-	
+
 	if (!leaf->keys) {
 		btree_insert(&area->used_space, page, (void *) count, leaf);
 		goto success;
 	}
-	
+
 	btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf);
 	if (node) {
@@ -1750,5 +1750,5 @@
 		size_t left_cnt = (size_t) node->value[node->keys - 1];
 		size_t right_cnt = (size_t) leaf->value[0];
-		
+
 		/*
 		 * Examine the possibility that the interval fits
@@ -1756,5 +1756,5 @@
 		 * the left neighbour and the first interval of the leaf.
 		 */
-		
+
 		if (page >= right_pg) {
 			/* Do nothing. */
@@ -1804,10 +1804,10 @@
 		uintptr_t right_pg = leaf->key[0];
 		size_t right_cnt = (size_t) leaf->value[0];
-		
+
 		/*
 		 * Investigate the border case in which the left neighbour does
 		 * not exist but the interval fits from the left.
 		 */
-		
+
 		if (overlaps(page, P2SZ(count), right_pg, P2SZ(right_cnt))) {
 			/* The interval intersects with the right interval. */
@@ -1832,5 +1832,5 @@
 		}
 	}
-	
+
 	node = btree_leaf_node_right_neighbour(&area->used_space, leaf);
 	if (node) {
@@ -1839,5 +1839,5 @@
 		size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
 		size_t right_cnt = (size_t) node->value[0];
-		
+
 		/*
 		 * Examine the possibility that the interval fits
@@ -1845,5 +1845,5 @@
 		 * the right neighbour and the last interval of the leaf.
 		 */
-		
+
 		if (page < left_pg) {
 			/* Do nothing. */
@@ -1893,10 +1893,10 @@
 		uintptr_t left_pg = leaf->key[leaf->keys - 1];
 		size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
-		
+
 		/*
 		 * Investigate the border case in which the right neighbour
 		 * does not exist but the interval fits from the right.
 		 */
-		
+
 		if (overlaps(page, P2SZ(count), left_pg, P2SZ(left_cnt))) {
 			/* The interval intersects with the left interval. */
@@ -1919,5 +1919,5 @@
 		}
 	}
-	
+
 	/*
 	 * Note that if the algorithm made it thus far, the interval can fit
@@ -1932,9 +1932,9 @@
 			size_t left_cnt = (size_t) leaf->value[i - 1];
 			size_t right_cnt = (size_t) leaf->value[i];
-			
+
 			/*
 			 * The interval fits between left_pg and right_pg.
 			 */
-			
+
 			if (overlaps(page, P2SZ(count), left_pg,
 			    P2SZ(left_cnt))) {
@@ -1988,8 +1988,8 @@
 		}
 	}
-	
+
 	panic("Inconsistency detected while adding %zu pages of used "
 	    "space at %p.", count, (void *) page);
-	
+
 success:
 	area->resident += count;
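
used_space_insert() keeps the used intervals as (base page, page count) pairs and merges a new interval with its left and right neighbours whenever they are adjacent, rejecting it when it intersects either one. A small, self-contained sketch of the overlap test and the merge decision; the overlaps() helper assumes half-open intervals, which is consistent with how the calls above are used, and all addresses are made up:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define P2SZ(frames) ((uintptr_t) (frames) * PAGE_SIZE)

/* Half-open interval overlap test: [b1, b1+sz1) vs [b2, b2+sz2). */
static bool overlaps(uintptr_t b1, size_t sz1, uintptr_t b2, size_t sz2)
{
	return (b1 < b2 + sz2) && (b2 < b1 + sz1);
}

int main(void)
{
	/* Existing intervals to the left and to the right of the new one. */
	uintptr_t left_pg = 0x10000;  size_t left_cnt = 4;
	uintptr_t right_pg = 0x18000; size_t right_cnt = 2;

	uintptr_t page = left_pg + P2SZ(left_cnt);  /* new interval */
	size_t count = 4;

	if (overlaps(page, P2SZ(count), left_pg, P2SZ(left_cnt)) ||
	    overlaps(page, P2SZ(count), right_pg, P2SZ(right_cnt))) {
		printf("error: interval already partly used\n");
	} else if ((page == left_pg + P2SZ(left_cnt)) &&
	    (page + P2SZ(count) == right_pg)) {
		printf("merge with both neighbours: base %#lx, %zu pages\n",
		    (unsigned long) left_pg, left_cnt + count + right_cnt);
	} else if (page == left_pg + P2SZ(left_cnt)) {
		printf("grow left neighbour to %zu pages\n", left_cnt + count);
	} else if (page + P2SZ(count) == right_pg) {
		printf("grow right neighbour, new base %#lx\n", (unsigned long) page);
	} else {
		printf("insert as a new interval\n");
	}
	return 0;
}
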
@@ -2013,5 +2013,5 @@
 	assert(IS_ALIGNED(page, PAGE_SIZE));
 	assert(count);
-	
+
 	btree_node_t *leaf;
 	size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
@@ -2038,9 +2038,9 @@
 				}
 			}
-			
+
 			goto error;
 		}
 	}
-	
+
 	btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space,
 	    leaf);
@@ -2048,5 +2048,5 @@
 		uintptr_t left_pg = node->key[node->keys - 1];
 		size_t left_cnt = (size_t) node->value[node->keys - 1];
-		
+
 		if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) {
 			if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) {
@@ -2078,13 +2078,13 @@
 			}
 		}
-		
+
 		return false;
 	} else if (page < leaf->key[0])
 		return false;
-	
+
 	if (page > leaf->key[leaf->keys - 1]) {
 		uintptr_t left_pg = leaf->key[leaf->keys - 1];
 		size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
-		
+
 		if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) {
 			if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) {
@@ -2115,8 +2115,8 @@
 			}
 		}
-		
+
 		return false;
 	}
-	
+
 	/*
 	 * The border cases have already been resolved.
@@ -2128,5 +2128,5 @@
 			uintptr_t left_pg = leaf->key[i - 1];
 			size_t left_cnt = (size_t) leaf->value[i - 1];
-			
+
 			/*
 			 * Now the interval is between intervals corresponding
@@ -2166,13 +2166,13 @@
 				}
 			}
-			
+
 			return false;
 		}
 	}
-	
+
 error:
 	panic("Inconsistency detected while removing %zu pages of used "
 	    "space from %p.", count, (void *) page);
-	
+
 success:
 	area->resident -= count;
@@ -2204,5 +2204,5 @@
 	if (area == NULL)
 		return (sysarg_t) AS_MAP_FAILED;
-	
+
 	return (sysarg_t) virt;
 }
@@ -2233,42 +2233,42 @@
 {
 	mutex_lock(&as->lock);
-	
+
 	/* First pass, count number of areas. */
-	
+
 	size_t area_cnt = 0;
-	
+
 	list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
 	    node) {
 		area_cnt += node->keys;
 	}
-	
+
 	size_t isize = area_cnt * sizeof(as_area_info_t);
 	as_area_info_t *info = malloc(isize, 0);
-	
+
 	/* Second pass, record data. */
-	
+
 	size_t area_idx = 0;
-	
+
 	list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
 	    node) {
 		btree_key_t i;
-		
+
 		for (i = 0; i < node->keys; i++) {
 			as_area_t *area = node->value[i];
-			
+
 			assert(area_idx < area_cnt);
 			mutex_lock(&area->lock);
-			
+
 			info[area_idx].start_addr = area->base;
 			info[area_idx].size = P2SZ(area->pages);
 			info[area_idx].flags = area->flags;
 			++area_idx;
-			
+
 			mutex_unlock(&area->lock);
 		}
 	}
-	
+
 	mutex_unlock(&as->lock);
-	
+
 	*obuf = info;
 	*osize = isize;
@@ -2283,13 +2283,13 @@
 {
 	mutex_lock(&as->lock);
-	
+
 	/* Print out info about address space areas */
 	list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
 	    node) {
 		btree_key_t i;
-		
+
 		for (i = 0; i < node->keys; i++) {
 			as_area_t *area = node->value[i];
-			
+
 			mutex_lock(&area->lock);
 			printf("as_area: %p, base=%p, pages=%zu"
@@ -2300,5 +2300,5 @@
 		}
 	}
-	
+
 	mutex_unlock(&as->lock);
 }
Index: kernel/generic/src/mm/backend_anon.c
===================================================================
--- kernel/generic/src/mm/backend_anon.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/mm/backend_anon.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -125,14 +125,14 @@
 	    node) {
 		unsigned int i;
-		
+
 		for (i = 0; i < node->keys; i++) {
 			uintptr_t base = node->key[i];
 			size_t count = (size_t) node->value[i];
 			unsigned int j;
-			
+
 			for (j = 0; j < count; j++) {
 				pte_t pte;
 				bool found;
-			
+
 				page_table_lock(area->as, false);
 				found = page_mapping_find(area->as,
@@ -201,5 +201,5 @@
 	if (area->sh_info->shared) {
 		btree_node_t *leaf;
-		
+
 		/*
 		 * The area is shared, chances are that the mapping can be found
@@ -214,5 +214,5 @@
 			bool allocate = true;
 			unsigned int i;
-			
+
 			/*
 			 * Zero can be returned as a valid frame address.
@@ -230,5 +230,5 @@
 				memsetb((void *) kpage, PAGE_SIZE, 0);
 				km_temporary_page_put(kpage);
-				
+
 				/*
 				 * Insert the address of the newly allocated
@@ -272,5 +272,5 @@
 	}
 	mutex_unlock(&area->sh_info->lock);
-	
+
 	/*
 	 * Map 'upage' to 'frame'.
@@ -281,5 +281,5 @@
 	if (!used_space_insert(area, upage, 1))
 		panic("Cannot insert used space.");
-		
+
 	return AS_PF_OK;
 }
Index: kernel/generic/src/mm/backend_elf.c
===================================================================
--- kernel/generic/src/mm/backend_elf.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/mm/backend_elf.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -102,5 +102,5 @@
 	if (area->pages <= nonanon_pages)
 		return true;
-	
+
 	return reserve_try_alloc(area->pages - nonanon_pages);
 }
@@ -123,5 +123,5 @@
 			reserve_free(nonanon_pages - new_pages);
 	}
-	
+
 	return true;
 }
@@ -166,12 +166,12 @@
 	    cur = cur->next) {
 		unsigned int i;
-		
+
 		node = list_get_instance(cur, btree_node_t, leaf_link);
-		
+
 		for (i = 0; i < node->keys; i++) {
 			uintptr_t base = node->key[i];
 			size_t count = (size_t) node->value[i];
 			unsigned int j;
-			
+
 			/*
 			 * Skip read-only areas of used space that are backed
@@ -182,9 +182,9 @@
 				    base + P2SZ(count) <= start_anon)
 					continue;
-			
+
 			for (j = 0; j < count; j++) {
 				pte_t pte;
 				bool found;
-			
+
 				/*
 				 * Skip read-only pages that are backed by the
@@ -195,5 +195,5 @@
 					    base + P2SZ(j + 1) <= start_anon)
 						continue;
-				
+
 				page_table_lock(area->as, false);
 				found = page_mapping_find(area->as,
@@ -212,5 +212,5 @@
 				frame_reference_add(pfn);
 			}
-				
+
 		}
 	}
@@ -267,11 +267,11 @@
 	if (!as_area_check_access(area, access))
 		return AS_PF_FAULT;
-	
+
 	if (upage < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
 		return AS_PF_FAULT;
-	
+
 	if (upage >= entry->p_vaddr + entry->p_memsz)
 		return AS_PF_FAULT;
-	
+
 	i = (upage - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
 	base = (uintptr_t)
@@ -288,5 +288,5 @@
 		 * The address space area is shared.
 		 */
-		
+
 		frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
 		    upage - area->base, &leaf);
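
The fault path in this backend locates the faulting page inside the ELF segment by aligning p_vaddr down to a page boundary and shifting by PAGE_WIDTH, as seen in the hunk above. A self-contained sketch of that index computation, assuming 4 KiB pages and made-up program header values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_WIDTH 12                       /* assuming 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_WIDTH)
#define ALIGN_DOWN(a, s) ((a) & ~((uintptr_t) (s) - 1))  /* power-of-two s */

int main(void)
{
	/* Illustrative ELF program header values (invented). */
	uintptr_t p_vaddr = 0x8048123;          /* segment start, not page aligned */
	size_t p_memsz = 0x6000;

	uintptr_t upage = 0x804a000;            /* page-aligned faulting address */

	if (upage < ALIGN_DOWN(p_vaddr, PAGE_SIZE) || upage >= p_vaddr + p_memsz) {
		printf("fault outside the segment\n");
		return 1;
	}

	/* Index of the faulting page, counted from the segment's first page. */
	size_t i = (upage - ALIGN_DOWN(p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
	printf("page index within segment: %zu\n", i);
	return 0;
}
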
Index: kernel/generic/src/mm/backend_phys.c
===================================================================
--- kernel/generic/src/mm/backend_phys.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/mm/backend_phys.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -75,5 +75,5 @@
 	.page_fault = phys_page_fault,
 	.frame_free = NULL,
-	
+
 	.create_shared_data = phys_create_shared_data,
 	.destroy_shared_data = phys_destroy_shared_data
@@ -145,5 +145,5 @@
 	page_mapping_insert(AS, upage, base + (upage - area->base),
 	    as_area_get_flags(area));
-	
+
 	if (!used_space_insert(area, upage, 1))
 		panic("Cannot insert used space.");
Index: kernel/generic/src/mm/backend_user.c
===================================================================
--- kernel/generic/src/mm/backend_user.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/mm/backend_user.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -172,5 +172,5 @@
 		/* Nothing to do */
 	}
-		
+
 }
 
Index: kernel/generic/src/mm/frame.c
===================================================================
--- kernel/generic/src/mm/frame.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/mm/frame.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -108,5 +108,5 @@
 		return (size_t) -1;
 	}
-	
+
 	size_t i;
 	for (i = 0; i < zones.count; i++) {
@@ -114,5 +114,5 @@
 		if (overlaps(zones.info[i].base, zones.info[i].count,
 		    base, count)) {
-			
+
 			/*
 			 * If the overlapping zones are of the same type
@@ -121,5 +121,5 @@
 			 *
 			 */
-			
+
 			if ((zones.info[i].flags != flags) ||
 			    (!iswithin(zones.info[i].base, zones.info[i].count,
@@ -132,5 +132,5 @@
 				    (void *) PFN2ADDR(zones.info[i].count));
 			}
-			
+
 			return (size_t) -1;
 		}
@@ -138,11 +138,11 @@
 			break;
 	}
-	
+
 	/* Move other zones up */
 	for (size_t j = zones.count; j > i; j--)
 		zones.info[j] = zones.info[j - 1];
-	
+
 	zones.count++;
-	
+
 	return i;
 }
@@ -163,5 +163,5 @@
 	for (i = 0; i < zones.count; i++)
 		total += zones.info[i].free_count;
-	
+
 	return total;
 }
@@ -195,5 +195,5 @@
 	if (hint >= zones.count)
 		hint = 0;
-	
+
 	size_t i = hint;
 	do {
@@ -201,11 +201,11 @@
 		    && (zones.info[i].base + zones.info[i].count >= frame + count))
 			return i;
-		
+
 		i++;
 		if (i >= zones.count)
 			i = 0;
-		
+
 	} while (i != hint);
-	
+
 	return (size_t) -1;
 }
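
find_zone() starts scanning the zone table at a caller-supplied hint and wraps around, so repeated lookups that land in the same zone stay cheap; the hint is the *pzone value remembered from a previous call. A self-contained sketch of the same hinted, wrapping scan with invented zone data:

#include <stddef.h>
#include <stdio.h>

typedef struct { unsigned base; unsigned count; } zone_t;

/* Find the zone containing frame 'pfn', starting at 'hint' and wrapping
 * around, the way find_zone() does.  Returns (size_t) -1 on failure. */
static size_t find_zone_demo(const zone_t *zones, size_t nzones, unsigned pfn,
    size_t hint)
{
	if (hint >= nzones)
		hint = 0;

	size_t i = hint;
	do {
		if ((zones[i].base <= pfn) && (pfn < zones[i].base + zones[i].count))
			return i;

		i++;
		if (i >= nzones)
			i = 0;
	} while (i != hint);

	return (size_t) -1;
}

int main(void)
{
	zone_t zones[] = { { 0, 256 }, { 4096, 1024 }, { 8192, 512 } };

	/* A good hint hits immediately; a stale hint still finds the zone. */
	printf("hint 1 -> zone %zu\n", find_zone_demo(zones, 3, 4200, 1));
	printf("hint 2 -> zone %zu\n", find_zone_demo(zones, 3, 4200, 2));

	if (find_zone_demo(zones, 3, 99999, 0) == (size_t) -1)
		printf("no zone for pfn 99999\n");
	return 0;
}
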
@@ -219,5 +219,5 @@
 	 * the bitmap if the last argument is NULL.
 	 */
-	
+
 	return ((zone->flags & ZONE_AVAILABLE) &&
 	    bitmap_allocate_range(&zone->bitmap, count, zone->base,
@@ -245,14 +245,14 @@
 	for (size_t pos = 0; pos < zones.count; pos++) {
 		size_t i = (pos + hint) % zones.count;
-		
+
 		/* Check whether the zone meets the search criteria. */
 		if (!ZONE_FLAGS_MATCH(zones.info[i].flags, flags))
 			continue;
-		
+
 		/* Check if the zone can satisfy the allocation request. */
 		if (zone_can_alloc(&zones.info[i], count, constraint))
 			return i;
 	}
-	
+
 	return (size_t) -1;
 }
@@ -291,18 +291,18 @@
 	for (size_t pos = 0; pos < zones.count; pos++) {
 		size_t i = (pos + hint) % zones.count;
-		
+
 		/* Skip zones containing only high-priority memory. */
 		if (is_high_priority(zones.info[i].base, zones.info[i].count))
 			continue;
-		
+
 		/* Check whether the zone meets the search criteria. */
 		if (!ZONE_FLAGS_MATCH(zones.info[i].flags, flags))
 			continue;
-		
+
 		/* Check if the zone can satisfy the allocation request. */
 		if (zone_can_alloc(&zones.info[i], count, constraint))
 			return i;
 	}
-	
+
 	return (size_t) -1;
 }
@@ -328,14 +328,14 @@
 	if (hint >= zones.count)
 		hint = 0;
-	
+
 	/*
 	 * Prefer zones with low-priority memory over
 	 * zones with high-priority memory.
 	 */
-	
+
 	size_t znum = find_free_zone_lowprio(count, flags, constraint, hint);
 	if (znum != (size_t) -1)
 		return znum;
-	
+
 	/* Take all zones into account */
 	return find_free_zone_all(count, flags, constraint, hint);
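
find_free_zone() first restricts the search to zones that still contain some low-priority memory and only falls back to the full zone set when that fails, so zones made up entirely of high-priority memory are used as a last resort. A compact, self-contained sketch of that prefer-then-fall-back structure; the boolean field stands in for is_high_priority() and the numbers are invented:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct { bool high_priority_only; size_t free; } zone_t;

static size_t pick(const zone_t *z, size_t n, size_t want, bool allow_highprio)
{
	for (size_t i = 0; i < n; i++) {
		if (!allow_highprio && z[i].high_priority_only)
			continue;               /* keep high-priority memory in reserve */
		if (z[i].free >= want)
			return i;
	}
	return (size_t) -1;
}

int main(void)
{
	zone_t zones[] = {
		{ .high_priority_only = true,  .free = 100 },
		{ .high_priority_only = false, .free = 10  },
	};

	/* Small request: served from the low-priority zone. */
	size_t z = pick(zones, 2, 8, false);
	if (z == (size_t) -1)
		z = pick(zones, 2, 8, true);
	printf("small request -> zone %zu\n", z);

	/* Large request: only the high-priority zone can satisfy it. */
	z = pick(zones, 2, 64, false);
	if (z == (size_t) -1)
		z = pick(zones, 2, 64, true);
	printf("large request -> zone %zu\n", z);
	return 0;
}
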
@@ -350,5 +350,5 @@
 {
 	assert(index < zone->count);
-	
+
 	return &zone->frames[index];
 }
@@ -371,25 +371,25 @@
 {
 	assert(zone->flags & ZONE_AVAILABLE);
-	
+
 	/* Allocate frames from zone */
 	size_t index = (size_t) -1;
 	int avail = bitmap_allocate_range(&zone->bitmap, count, zone->base,
 	    FRAME_LOWPRIO, constraint, &index);
-	
+
 	assert(avail);
 	assert(index != (size_t) -1);
-	
+
 	/* Update frame reference count */
 	for (size_t i = 0; i < count; i++) {
 		frame_t *frame = zone_get_frame(zone, index + i);
-		
+
 		assert(frame->refcount == 0);
 		frame->refcount = 1;
 	}
-	
+
 	/* Update zone information. */
 	zone->free_count -= count;
 	zone->busy_count += count;
-	
+
 	return index;
 }
@@ -408,19 +408,19 @@
 {
 	assert(zone->flags & ZONE_AVAILABLE);
-	
+
 	frame_t *frame = zone_get_frame(zone, index);
-	
+
 	assert(frame->refcount > 0);
-	
+
 	if (!--frame->refcount) {
 		bitmap_set(&zone->bitmap, index, 0);
-		
+
 		/* Update zone information. */
 		zone->free_count++;
 		zone->busy_count--;
-		
+
 		return 1;
 	}
-	
+
 	return 0;
 }
@@ -430,12 +430,12 @@
 {
 	assert(zone->flags & ZONE_AVAILABLE);
-	
+
 	frame_t *frame = zone_get_frame(zone, index);
 	if (frame->refcount > 0)
 		return;
-	
+
 	frame->refcount = 1;
 	bitmap_set_range(&zone->bitmap, index, 1);
-	
+
 	zone->free_count--;
 	reserve_force_alloc(1);
@@ -462,22 +462,22 @@
 	assert(!overlaps(zones.info[z1].base, zones.info[z1].count,
 	    zones.info[z2].base, zones.info[z2].count));
-	
+
 	/* Difference between zone bases */
 	pfn_t base_diff = zones.info[z2].base - zones.info[z1].base;
-	
+
 	zones.info[z1].count = base_diff + zones.info[z2].count;
 	zones.info[z1].free_count += zones.info[z2].free_count;
 	zones.info[z1].busy_count += zones.info[z2].busy_count;
-	
+
 	bitmap_initialize(&zones.info[z1].bitmap, zones.info[z1].count,
 	    confdata + (sizeof(frame_t) * zones.info[z1].count));
 	bitmap_clear_range(&zones.info[z1].bitmap, 0, zones.info[z1].count);
-	
+
 	zones.info[z1].frames = (frame_t *) confdata;
-	
+
 	/*
 	 * Copy frames and bits from both zones to preserve parents, etc.
 	 */
-	
+
 	for (size_t i = 0; i < old_z1->count; i++) {
 		bitmap_set(&zones.info[z1].bitmap, i,
@@ -485,5 +485,5 @@
 		zones.info[z1].frames[i] = old_z1->frames[i];
 	}
-	
+
 	for (size_t i = 0; i < zones.info[z2].count; i++) {
 		bitmap_set(&zones.info[z1].bitmap, base_diff + i,
@@ -510,11 +510,11 @@
 {
 	assert(zones.info[znum].flags & ZONE_AVAILABLE);
-	
+
 	size_t cframes = SIZE2FRAMES(zone_conf_size(count));
-	
+
 	if ((pfn < zones.info[znum].base) ||
 	    (pfn >= zones.info[znum].base + zones.info[znum].count))
 		return;
-	
+
 	for (size_t i = 0; i < cframes; i++)
 		(void) zone_frame_free(&zones.info[znum],
@@ -536,7 +536,7 @@
 {
 	irq_spinlock_lock(&zones.lock, true);
-	
+
 	bool ret = true;
-	
+
 	/*
 	 * We can join only 2 zones with none existing in between,
@@ -549,9 +549,9 @@
 		goto errout;
 	}
-	
+
 	pfn_t cframes = SIZE2FRAMES(zone_conf_size(
 	    zones.info[z2].base - zones.info[z1].base
 	    + zones.info[z2].count));
-	
+
 	/* Allocate merged zone data inside one of the zones */
 	pfn_t pfn;
@@ -566,14 +566,14 @@
 		goto errout;
 	}
-	
+
 	/* Preserve original data from z1 */
 	zone_t old_z1 = zones.info[z1];
-	
+
 	/* Do zone merging */
 	zone_merge_internal(z1, z2, &old_z1, (void *) PA2KA(PFN2ADDR(pfn)));
-	
+
 	/* Subtract zone information from busy frames */
 	zones.info[z1].busy_count -= cframes;
-	
+
 	/* Free old zone information */
 	return_config_frames(z1,
@@ -582,14 +582,14 @@
 	    ADDR2PFN(KA2PA((uintptr_t) zones.info[z2].frames)),
 	    zones.info[z2].count);
-	
+
 	/* Move zones down */
 	for (size_t i = z2 + 1; i < zones.count; i++)
 		zones.info[i - 1] = zones.info[i];
-	
+
 	zones.count--;
-	
+
 errout:
 	irq_spinlock_unlock(&zones.lock, true);
-	
+
 	return ret;
 }
@@ -605,5 +605,5 @@
 {
 	size_t i = 1;
-	
+
 	while (i < zones.count) {
 		if (!zone_merge(i - 1, i))
@@ -631,5 +631,5 @@
 	zone->free_count = count;
 	zone->busy_count = 0;
-	
+
 	if (flags & ZONE_AVAILABLE) {
 		/*
@@ -637,15 +637,15 @@
 		 * frame_t structures in the configuration space).
 		 */
-		
+
 		bitmap_initialize(&zone->bitmap, count, confdata +
 		    (sizeof(frame_t) * count));
 		bitmap_clear_range(&zone->bitmap, 0, count);
-		
+
 		/*
 		 * Initialize the array of frame_t structures.
 		 */
-		
+
 		zone->frames = (frame_t *) confdata;
-		
+
 		for (size_t i = 0; i < count; i++)
 			frame_initialize(&zone->frames[i]);
@@ -672,5 +672,5 @@
 {
 	size_t frames = SIZE2FRAMES(zone_conf_size(count));
-	
+
 	return ADDR2PFN((uintptr_t)
 	    frame_alloc(frames, FRAME_LOWMEM | FRAME_ATOMIC, 0));
@@ -697,5 +697,5 @@
 {
 	irq_spinlock_lock(&zones.lock, true);
-	
+
 	if (flags & ZONE_AVAILABLE) {  /* Create available zone */
 		/*
@@ -705,8 +705,8 @@
 		 */
 		assert(confframe != ADDR2PFN((uintptr_t ) NULL));
-		
+
 		/* Update the known end of physical memory. */
 		config.physmem_end = max(config.physmem_end, PFN2ADDR(start + count));
-		
+
 		/*
 		 * If confframe is supposed to be inside our zone, then make sure
@@ -714,5 +714,5 @@
 		 */
 		size_t confcount = SIZE2FRAMES(zone_conf_size(count));
-		
+
 		if ((confframe >= start) && (confframe < start + count)) {
 			for (; confframe < start + count; confframe++) {
@@ -721,9 +721,9 @@
 				    KA2PA(config.base), config.kernel_size))
 					continue;
-				
+
 				if (overlaps(addr, PFN2ADDR(confcount),
 				    KA2PA(config.stack_base), config.stack_size))
 					continue;
-				
+
 				bool overlap = false;
 				for (size_t i = 0; i < init.cnt; i++) {
@@ -735,15 +735,15 @@
 					}
 				}
-				
+
 				if (overlap)
 					continue;
-				
+
 				break;
 			}
-			
+
 			if (confframe >= start + count)
 				panic("Cannot find configuration data for zone.");
 		}
-		
+
 		size_t znum = zones_insert_zone(start, count, flags);
 		if (znum == (size_t) -1) {
@@ -751,8 +751,8 @@
 			return (size_t) -1;
 		}
-		
+
 		void *confdata = (void *) PA2KA(PFN2ADDR(confframe));
 		zone_construct(&zones.info[znum], start, count, flags, confdata);
-		
+
 		/* If confdata in zone, mark as unavailable */
 		if ((confframe >= start) && (confframe < start + count)) {
@@ -761,10 +761,10 @@
 				    i - zones.info[znum].base);
 		}
-		
+
 		irq_spinlock_unlock(&zones.lock, true);
-		
+
 		return znum;
 	}
-	
+
 	/* Non-available zone */
 	size_t znum = zones_insert_zone(start, count, flags);
@@ -773,9 +773,9 @@
 		return (size_t) -1;
 	}
-	
+
 	zone_construct(&zones.info[znum], start, count, flags, NULL);
-	
+
 	irq_spinlock_unlock(&zones.lock, true);
-	
+
 	return znum;
 }
@@ -789,12 +789,12 @@
 {
 	irq_spinlock_lock(&zones.lock, true);
-	
+
 	size_t znum = find_zone(pfn, 1, hint);
-	
+
 	assert(znum != (size_t) -1);
-	
+
 	zone_get_frame(&zones.info[znum],
 	    pfn - zones.info[znum].base)->parent = data;
-	
+
 	irq_spinlock_unlock(&zones.lock, true);
 }
@@ -803,14 +803,14 @@
 {
 	irq_spinlock_lock(&zones.lock, true);
-	
+
 	size_t znum = find_zone(pfn, 1, hint);
-	
+
 	assert(znum != (size_t) -1);
-	
+
 	void *res = zone_get_frame(&zones.info[znum],
 	    pfn - zones.info[znum].base)->parent;
-	
+
 	irq_spinlock_unlock(&zones.lock, true);
-	
+
 	return res;
 }
@@ -831,8 +831,8 @@
 {
 	assert(count > 0);
-	
+
 	size_t hint = pzone ? (*pzone) : 0;
 	pfn_t frame_constraint = ADDR2PFN(constraint);
-	
+
 	/*
 	 * If not told otherwise, we must first reserve the memory.
@@ -840,8 +840,8 @@
 	if (!(flags & FRAME_NO_RESERVE))
 		reserve_force_alloc(count);
-	
+
 loop:
 	irq_spinlock_lock(&zones.lock, true);
-	
+
 	/*
 	 * First, find suitable frame zone.
@@ -849,5 +849,5 @@
 	size_t znum = find_free_zone(count, FRAME_TO_ZONE_FLAGS(flags),
 	    frame_constraint, hint);
-	
+
 	/*
 	 * If no memory, reclaim some slab memory,
@@ -858,14 +858,14 @@
 		size_t freed = slab_reclaim(0);
 		irq_spinlock_lock(&zones.lock, true);
-		
+
 		if (freed > 0)
 			znum = find_free_zone(count, FRAME_TO_ZONE_FLAGS(flags),
 			    frame_constraint, hint);
-		
+
 		if (znum == (size_t) -1) {
 			irq_spinlock_unlock(&zones.lock, true);
 			freed = slab_reclaim(SLAB_RECLAIM_ALL);
 			irq_spinlock_lock(&zones.lock, true);
-			
+
 			if (freed > 0)
 				znum = find_free_zone(count, FRAME_TO_ZONE_FLAGS(flags),
@@ -873,27 +873,27 @@
 		}
 	}
-	
+
 	if (znum == (size_t) -1) {
 		if (flags & FRAME_ATOMIC) {
 			irq_spinlock_unlock(&zones.lock, true);
-			
+
 			if (!(flags & FRAME_NO_RESERVE))
 				reserve_free(count);
-			
+
 			return 0;
 		}
-		
+
 		size_t avail = frame_total_free_get_internal();
-		
+
 		irq_spinlock_unlock(&zones.lock, true);
-		
+
 		if (!THREAD)
 			panic("Cannot wait for %zu frames to become available "
 			    "(%zu available).", count, avail);
-		
+
 		/*
 		 * Sleep until some frames are available again.
 		 */
-		
+
 #ifdef CONFIG_DEBUG
 		log(LF_OTHER, LVL_DEBUG,
@@ -901,5 +901,5 @@
 		    "%zu available.", THREAD->tid, count, avail);
 #endif
-		
+
 		/*
 		 * Since the mem_avail_mtx is an active mutex, we need to
@@ -908,34 +908,34 @@
 		ipl_t ipl = interrupts_disable();
 		mutex_lock(&mem_avail_mtx);
-		
+
 		if (mem_avail_req > 0)
 			mem_avail_req = min(mem_avail_req, count);
 		else
 			mem_avail_req = count;
-		
+
 		size_t gen = mem_avail_gen;
-		
+
 		while (gen == mem_avail_gen)
 			condvar_wait(&mem_avail_cv, &mem_avail_mtx);
-		
+
 		mutex_unlock(&mem_avail_mtx);
 		interrupts_restore(ipl);
-		
+
 #ifdef CONFIG_DEBUG
 		log(LF_OTHER, LVL_DEBUG, "Thread %" PRIu64 " woken up.",
 		    THREAD->tid);
 #endif
-		
+
 		goto loop;
 	}
-	
+
 	pfn_t pfn = zone_frame_alloc(&zones.info[znum], count,
 	    frame_constraint) + zones.info[znum].base;
-	
+
 	irq_spinlock_unlock(&zones.lock, true);
-	
+
 	if (pzone)
 		*pzone = znum;
-	
+
 	return PFN2ADDR(pfn);
 }
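
When no zone can satisfy the request, the allocator above records how many frames it is still missing in mem_avail_req, remembers mem_avail_gen, and sleeps until the generation changes; the free path in the next hunk subtracts freed frames from the request counter and bumps the generation (with a broadcast) once it reaches zero. The kernel does this with its own mutex/condvar primitives and explicit interrupt handling; the pthread program below is only a sketch of the generation-counter handshake itself:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

static pthread_mutex_t avail_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t avail_cv = PTHREAD_COND_INITIALIZER;
static size_t avail_req;   /* frames still missing before waiters may retry */
static size_t avail_gen;   /* bumped once avail_req drops to zero */

/* Free path: subtract what was freed, wake everybody once the whole
 * outstanding request has been covered. */
static void frames_freed(size_t freed)
{
	pthread_mutex_lock(&avail_mtx);
	if (avail_req > 0)
		avail_req -= (freed < avail_req) ? freed : avail_req;
	if (avail_req == 0) {
		avail_gen++;
		pthread_cond_broadcast(&avail_cv);
	}
	pthread_mutex_unlock(&avail_mtx);
}

static void *freer(void *arg)
{
	(void) arg;
	frames_freed(1);   /* not enough yet */
	frames_freed(3);   /* now the request is fully covered */
	return NULL;
}

int main(void)
{
	/* Allocation path: register the demand and remember the generation
	 * before anything is freed. */
	pthread_mutex_lock(&avail_mtx);
	avail_req = 4;
	size_t gen = avail_gen;
	pthread_mutex_unlock(&avail_mtx);

	pthread_t t;
	if (pthread_create(&t, NULL, freer, NULL) != 0)
		return 1;

	/* Missed wakeups are impossible: the predicate is rechecked under
	 * the lock before every wait. */
	pthread_mutex_lock(&avail_mtx);
	while (gen == avail_gen)
		pthread_cond_wait(&avail_cv, &avail_mtx);
	pthread_mutex_unlock(&avail_mtx);

	printf("enough frames freed, retrying the allocation\n");
	pthread_join(t, NULL);
	return 0;
}
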
@@ -960,7 +960,7 @@
 {
 	size_t freed = 0;
-	
+
 	irq_spinlock_lock(&zones.lock, true);
-	
+
 	for (size_t i = 0; i < count; i++) {
 		/*
@@ -969,13 +969,13 @@
 		pfn_t pfn = ADDR2PFN(start) + i;
 		size_t znum = find_zone(pfn, 1, 0);
-		
+
 		assert(znum != (size_t) -1);
-		
+
 		freed += zone_frame_free(&zones.info[znum],
 		    pfn - zones.info[znum].base);
 	}
-	
+
 	irq_spinlock_unlock(&zones.lock, true);
-	
+
 	/*
 	 * Signal that some memory has been freed.
@@ -984,19 +984,19 @@
 	 * with TLB shootdown.
 	 */
-	
+
 	ipl_t ipl = interrupts_disable();
 	mutex_lock(&mem_avail_mtx);
-	
+
 	if (mem_avail_req > 0)
 		mem_avail_req -= min(mem_avail_req, freed);
-	
+
 	if (mem_avail_req == 0) {
 		mem_avail_gen++;
 		condvar_broadcast(&mem_avail_cv);
 	}
-	
+
 	mutex_unlock(&mem_avail_mtx);
 	interrupts_restore(ipl);
-	
+
 	if (!(flags & FRAME_NO_RESERVE))
 		reserve_free(freed);
@@ -1024,14 +1024,14 @@
 {
 	irq_spinlock_lock(&zones.lock, true);
-	
+
 	/*
 	 * First, find host frame zone for addr.
 	 */
 	size_t znum = find_zone(pfn, 1, 0);
-	
+
 	assert(znum != (size_t) -1);
-	
+
 	zones.info[znum].frames[pfn - zones.info[znum].base].refcount++;
-	
+
 	irq_spinlock_unlock(&zones.lock, true);
 }
@@ -1043,15 +1043,15 @@
 {
 	irq_spinlock_lock(&zones.lock, true);
-	
+
 	for (size_t i = 0; i < count; i++) {
 		size_t znum = find_zone(start + i, 1, 0);
-		
+
 		if (znum == (size_t) -1)  /* PFN not found */
 			continue;
-		
+
 		zone_mark_unavailable(&zones.info[znum],
 		    start + i - zones.info[znum].base);
 	}
-	
+
 	irq_spinlock_unlock(&zones.lock, true);
 }
@@ -1068,8 +1068,8 @@
 		condvar_initialize(&mem_avail_cv);
 	}
-	
+
 	/* Tell the architecture to create some memory */
 	frame_low_arch_init();
-	
+
 	if (config.cpu_active == 1) {
 		frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)),
@@ -1077,13 +1077,13 @@
 		frame_mark_unavailable(ADDR2PFN(KA2PA(config.stack_base)),
 		    SIZE2FRAMES(config.stack_size));
-		
+
 		for (size_t i = 0; i < init.cnt; i++)
 			frame_mark_unavailable(ADDR2PFN(init.tasks[i].paddr),
 			    SIZE2FRAMES(init.tasks[i].size));
-		
+
 		if (ballocs.size)
 			frame_mark_unavailable(ADDR2PFN(KA2PA(ballocs.base)),
 			    SIZE2FRAMES(ballocs.size));
-		
+
 		/*
 		 * Blacklist first frame, as allocating NULL would
@@ -1092,5 +1092,5 @@
 		frame_mark_unavailable(0, 1);
 	}
-	
+
 	frame_high_arch_init();
 }
@@ -1113,9 +1113,9 @@
 {
 	uintptr_t limit = KA2PA(config.identity_base) + config.identity_size;
-	
+
 	if (low) {
 		if (*basep > limit)
 			return false;
-		
+
 		if (*basep + *sizep > limit)
 			*sizep = limit - *basep;
@@ -1123,5 +1123,5 @@
 		if (*basep + *sizep <= limit)
 			return false;
-		
+
 		if (*basep <= limit) {
 			*sizep -= limit - *basep;
@@ -1129,5 +1129,5 @@
 		}
 	}
-	
+
 	return true;
 }
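
frame_adjust_zone_bounds() clips a physical range to either the low side (below the end of the identity-mapped region) or the high side of that limit, reporting false when nothing of the range lies on the requested side. A self-contained version of the same clamping with an invented limit and range; the *basep adjustment on the high side is spelled out here even though the hunk above is cut off before it:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Clamp [*base, *base + *size) to one side of 'limit': the low side when
 * 'low' is true, the high side otherwise. */
static bool clamp_range(bool low, uintptr_t *base, size_t *size, uintptr_t limit)
{
	if (low) {
		if (*base > limit)
			return false;
		if (*base + *size > limit)
			*size = limit - *base;
	} else {
		if (*base + *size <= limit)
			return false;
		if (*base <= limit) {
			*size -= limit - *base;
			*base = limit;
		}
	}
	return true;
}

int main(void)
{
	uintptr_t limit = 0x40000000;          /* assumed end of identity mapping */

	uintptr_t base = 0x3ff00000;
	size_t size = 0x00200000;              /* 2 MiB straddling the limit */

	uintptr_t lo_base = base;  size_t lo_size = size;
	uintptr_t hi_base = base;  size_t hi_size = size;

	if (clamp_range(true, &lo_base, &lo_size, limit))
		printf("low part:  base=%#lx size=%#zx\n", (unsigned long) lo_base, lo_size);
	if (clamp_range(false, &hi_base, &hi_size, limit))
		printf("high part: base=%#lx size=%#zx\n", (unsigned long) hi_base, hi_size);
	return 0;
}
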
@@ -1139,12 +1139,12 @@
 {
 	irq_spinlock_lock(&zones.lock, true);
-	
+
 	uint64_t total = 0;
-	
+
 	for (size_t i = 0; i < zones.count; i++)
 		total += (uint64_t) FRAMES2SIZE(zones.info[i].count);
-	
+
 	irq_spinlock_unlock(&zones.lock, true);
-	
+
 	return total;
 }
@@ -1157,15 +1157,15 @@
 	assert(busy != NULL);
 	assert(free != NULL);
-	
+
 	irq_spinlock_lock(&zones.lock, true);
-	
+
 	*total = 0;
 	*unavail = 0;
 	*busy = 0;
 	*free = 0;
-	
+
 	for (size_t i = 0; i < zones.count; i++) {
 		*total += (uint64_t) FRAMES2SIZE(zones.info[i].count);
-		
+
 		if (zones.info[i].flags & ZONE_AVAILABLE) {
 			*busy += (uint64_t) FRAMES2SIZE(zones.info[i].busy_count);
@@ -1174,5 +1174,5 @@
 			*unavail += (uint64_t) FRAMES2SIZE(zones.info[i].count);
 	}
-	
+
 	irq_spinlock_unlock(&zones.lock, true);
 }
@@ -1190,5 +1190,5 @@
 	printf("[nr] [base address    ] [frames    ] [flags ] [free frames ] [busy frames ]\n");
 #endif
-	
+
 	/*
 	 * Because printing may require allocation of memory, we may not hold
@@ -1201,17 +1201,17 @@
 	 * the listing).
 	 */
-	
+
 	size_t free_lowmem = 0;
 	size_t free_highmem = 0;
 	size_t free_highprio = 0;
-	
+
 	for (size_t i = 0;; i++) {
 		irq_spinlock_lock(&zones.lock, true);
-		
+
 		if (i >= zones.count) {
 			irq_spinlock_unlock(&zones.lock, true);
 			break;
 		}
-		
+
 		pfn_t fbase = zones.info[i].base;
 		uintptr_t base = PFN2ADDR(fbase);
@@ -1220,17 +1220,17 @@
 		size_t free_count = zones.info[i].free_count;
 		size_t busy_count = zones.info[i].busy_count;
-		
+
 		bool available = ((flags & ZONE_AVAILABLE) != 0);
 		bool lowmem = ((flags & ZONE_LOWMEM) != 0);
 		bool highmem = ((flags & ZONE_HIGHMEM) != 0);
 		bool highprio = is_high_priority(fbase, count);
-		
+
 		if (available) {
 			if (lowmem)
 				free_lowmem += free_count;
-			
+
 			if (highmem)
 				free_highmem += free_count;
-			
+
 			if (highprio) {
 				free_highprio += free_count;
@@ -1241,5 +1241,5 @@
 				 * statistics.
 				 */
-				
+
 				for (size_t index = 0; index < count; index++) {
 					if (is_high_priority(fbase + index, 0)) {
@@ -1251,17 +1251,17 @@
 			}
 		}
-		
+
 		irq_spinlock_unlock(&zones.lock, true);
-		
+
 		printf("%-4zu", i);
-		
+
 #ifdef __32_BITS__
 		printf("  %p", (void *) base);
 #endif
-		
+
 #ifdef __64_BITS__
 		printf(" %p", (void *) base);
 #endif
-		
+
 		printf(" %12zu %c%c%c%c%c    ", count,
 		    available ? 'A' : '-',
@@ -1270,27 +1270,27 @@
 		    (flags & ZONE_LOWMEM) ? 'L' : '-',
 		    (flags & ZONE_HIGHMEM) ? 'H' : '-');
-		
+
 		if (available)
 			printf("%14zu %14zu",
 			    free_count, busy_count);
-		
+
 		printf("\n");
 	}
-	
+
 	printf("\n");
-	
+
 	uint64_t size;
 	const char *size_suffix;
-	
+
 	bin_order_suffix(FRAMES2SIZE(free_lowmem), &size, &size_suffix,
 	    false);
 	printf("Available low memory:    %zu frames (%" PRIu64 " %s)\n",
 	    free_lowmem, size, size_suffix);
-	
+
 	bin_order_suffix(FRAMES2SIZE(free_highmem), &size, &size_suffix,
 	    false);
 	printf("Available high memory:   %zu frames (%" PRIu64 " %s)\n",
 	    free_highmem, size, size_suffix);
-	
+
 	bin_order_suffix(FRAMES2SIZE(free_highprio), &size, &size_suffix,
 	    false);
@@ -1308,5 +1308,5 @@
 	irq_spinlock_lock(&zones.lock, true);
 	size_t znum = (size_t) -1;
-	
+
 	for (size_t i = 0; i < zones.count; i++) {
 		if ((i == num) || (PFN2ADDR(zones.info[i].base) == num)) {
@@ -1315,5 +1315,5 @@
 		}
 	}
-	
+
 	if (znum == (size_t) -1) {
 		irq_spinlock_unlock(&zones.lock, true);
@@ -1321,9 +1321,9 @@
 		return;
 	}
-	
+
 	size_t free_lowmem = 0;
 	size_t free_highmem = 0;
 	size_t free_highprio = 0;
-	
+
 	pfn_t fbase = zones.info[znum].base;
 	uintptr_t base = PFN2ADDR(fbase);
@@ -1332,17 +1332,17 @@
 	size_t free_count = zones.info[znum].free_count;
 	size_t busy_count = zones.info[znum].busy_count;
-	
+
 	bool available = ((flags & ZONE_AVAILABLE) != 0);
 	bool lowmem = ((flags & ZONE_LOWMEM) != 0);
 	bool highmem = ((flags & ZONE_HIGHMEM) != 0);
 	bool highprio = is_high_priority(fbase, count);
-	
+
 	if (available) {
 		if (lowmem)
 			free_lowmem = free_count;
-		
+
 		if (highmem)
 			free_highmem = free_count;
-		
+
 		if (highprio) {
 			free_highprio = free_count;
@@ -1353,5 +1353,5 @@
 			 * statistics.
 			 */
-			
+
 			for (size_t index = 0; index < count; index++) {
 				if (is_high_priority(fbase + index, 0)) {
@@ -1363,12 +1363,12 @@
 		}
 	}
-	
+
 	irq_spinlock_unlock(&zones.lock, true);
-	
+
 	uint64_t size;
 	const char *size_suffix;
-	
+
 	bin_order_suffix(FRAMES2SIZE(count), &size, &size_suffix, false);
-	
+
 	printf("Zone number:             %zu\n", znum);
 	printf("Zone base address:       %p\n", (void *) base);
@@ -1381,5 +1381,5 @@
 	    (flags & ZONE_LOWMEM) ? 'L' : '-',
 	    (flags & ZONE_HIGHMEM) ? 'H' : '-');
-	
+
 	if (available) {
 		bin_order_suffix(FRAMES2SIZE(busy_count), &size, &size_suffix,
@@ -1387,20 +1387,20 @@
 		printf("Allocated space:         %zu frames (%" PRIu64 " %s)\n",
 		    busy_count, size, size_suffix);
-		
+
 		bin_order_suffix(FRAMES2SIZE(free_count), &size, &size_suffix,
 		    false);
 		printf("Available space:         %zu frames (%" PRIu64 " %s)\n",
 		    free_count, size, size_suffix);
-		
+
 		bin_order_suffix(FRAMES2SIZE(free_lowmem), &size, &size_suffix,
 		    false);
 		printf("Available low memory:    %zu frames (%" PRIu64 " %s)\n",
 		    free_lowmem, size, size_suffix);
-		
+
 		bin_order_suffix(FRAMES2SIZE(free_highmem), &size, &size_suffix,
 		    false);
 		printf("Available high memory:   %zu frames (%" PRIu64 " %s)\n",
 		    free_highmem, size, size_suffix);
-		
+
 		bin_order_suffix(FRAMES2SIZE(free_highprio), &size, &size_suffix,
 		    false);
Index: kernel/generic/src/mm/km.c
===================================================================
--- kernel/generic/src/mm/km.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/mm/km.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -149,5 +149,5 @@
 	}
 	page_table_unlock(AS_KERNEL, true);
-	
+
 	return vaddr;
 }
@@ -247,5 +247,5 @@
 	assert(framep);
 	assert(!(flags & ~(FRAME_NO_RESERVE | FRAME_ATOMIC)));
-	
+
 	/*
 	 * Allocate a frame, preferably from high memory.
@@ -267,8 +267,8 @@
 		if (!frame)
 			return (uintptr_t) NULL;
-		
+
 		page = PA2KA(frame);
 	}
-	
+
 	*framep = frame;
 	return page;
Index: kernel/generic/src/mm/page.c
===================================================================
--- kernel/generic/src/mm/page.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/mm/page.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -99,5 +99,5 @@
 {
 	assert(page_table_locked(as));
-	
+
 	assert(page_mapping_operations);
 	assert(page_mapping_operations->mapping_insert);
@@ -105,5 +105,5 @@
 	page_mapping_operations->mapping_insert(as, ALIGN_DOWN(page, PAGE_SIZE),
 	    ALIGN_DOWN(frame, FRAME_SIZE), flags);
-	
+
 	/* Repel prefetched accesses to the old mapping. */
 	memory_barrier();
@@ -123,11 +123,11 @@
 {
 	assert(page_table_locked(as));
-	
+
 	assert(page_mapping_operations);
 	assert(page_mapping_operations->mapping_remove);
-	
+
 	page_mapping_operations->mapping_remove(as,
 	    ALIGN_DOWN(page, PAGE_SIZE));
-	
+
 	/* Repel prefetched accesses to the old mapping. */
 	memory_barrier();
@@ -148,8 +148,8 @@
 {
 	assert(nolock || page_table_locked(as));
-	
+
 	assert(page_mapping_operations);
 	assert(page_mapping_operations->mapping_find);
-	
+
 	return page_mapping_operations->mapping_find(as,
 	    ALIGN_DOWN(page, PAGE_SIZE), nolock, pte);
@@ -169,8 +169,8 @@
 {
 	assert(nolock || page_table_locked(as));
-	
+
 	assert(page_mapping_operations);
 	assert(page_mapping_operations->mapping_find);
-	
+
 	page_mapping_operations->mapping_update(as,
 	    ALIGN_DOWN(page, PAGE_SIZE), nolock, pte);
@@ -186,5 +186,5 @@
 	assert(page_mapping_operations);
 	assert(page_mapping_operations->mapping_make_global);
-	
+
 	return page_mapping_operations->mapping_make_global(base, size);
 }
@@ -193,5 +193,5 @@
 {
 	page_table_lock(AS, true);
-	
+
 	pte_t pte;
 	bool found = page_mapping_find(AS, virt, false, &pte);
@@ -200,10 +200,10 @@
 		return ENOENT;
 	}
-	
+
 	*phys = PTE_GET_FRAME(&pte) +
 	    (virt - ALIGN_DOWN(virt, PAGE_SIZE));
-	
+
 	page_table_unlock(AS, true);
-	
+
 	return EOK;
 }
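
The lookup above composes the physical address from the frame reported by the PTE plus the offset of the virtual address within its page. A tiny, self-contained sketch of that composition, assuming 4 KiB pages and made-up addresses:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL                    /* assumption for the sketch */
#define ALIGN_DOWN(a, s) ((a) & ~((uintptr_t) (s) - 1))

int main(void)
{
	uintptr_t virt = 0x12345678;            /* arbitrary virtual address */
	uintptr_t frame = 0x05aa0000;           /* frame base the PTE would report */

	/* Frame base plus the offset of the address within its page. */
	uintptr_t phys = frame + (virt - ALIGN_DOWN(virt, PAGE_SIZE));

	printf("virt=0x%" PRIxPTR " -> phys=0x%" PRIxPTR "\n", virt, phys);
	return 0;
}
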
@@ -221,5 +221,5 @@
 	if (rc != EOK)
 		return rc;
-	
+
 	rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
 	return (sys_errno_t) rc;
Index: kernel/generic/src/mm/slab.c
===================================================================
--- kernel/generic/src/mm/slab.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/mm/slab.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -186,15 +186,15 @@
 {
 	size_t zone = 0;
-	
+
 	uintptr_t data_phys =
 	    frame_alloc_generic(cache->frames, flags, 0, &zone);
 	if (!data_phys)
 		return NULL;
-	
+
 	void *data = (void *) PA2KA(data_phys);
-	
+
 	slab_t *slab;
 	size_t fsize;
-	
+
 	if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
 		slab = slab_alloc(slab_extern_cache, flags);
@@ -207,18 +207,18 @@
 		slab = data + fsize - sizeof(*slab);
 	}
-	
+
 	/* Fill in slab structures */
 	size_t i;
 	for (i = 0; i < cache->frames; i++)
 		frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone);
-	
+
 	slab->start = data;
 	slab->available = cache->objects;
 	slab->nextavail = 0;
 	slab->cache = cache;
-	
+
 	for (i = 0; i < cache->objects; i++)
 		*((size_t *) (slab->start + i * cache->size)) = i + 1;
-	
+
 	atomic_inc(&cache->allocated_slabs);
 	return slab;
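
slab_space_alloc() threads the free list through the objects themselves: slot i initially stores the index i + 1, slab->nextavail points at the first free slot, and allocation and free (in the later hunks) just follow or rewrite those indices. A self-contained demonstration of that index-threaded free list; object size and count are arbitrary:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define OBJECTS  4
#define OBJ_SIZE 64                /* must be >= sizeof(size_t) */

static void *start;                /* plays the role of slab->start */
static size_t nextavail;           /* index of the first free object */
static size_t available;           /* number of free objects */

static void *obj_alloc(void)
{
	if (!available)
		return NULL;
	void *obj = (char *) start + nextavail * OBJ_SIZE;
	nextavail = *((size_t *) obj);     /* follow the free-list link */
	available--;
	return obj;
}

static void obj_free(void *obj)
{
	*((size_t *) obj) = nextavail;     /* link the object back in */
	nextavail = ((char *) obj - (char *) start) / OBJ_SIZE;
	available++;
}

int main(void)
{
	start = malloc(OBJECTS * OBJ_SIZE);
	if (!start)
		return 1;

	/* Thread the free list through the objects: slot i stores the index
	 * of the next free slot, like the initialization loop above. */
	for (size_t i = 0; i < OBJECTS; i++)
		*((size_t *) ((char *) start + i * OBJ_SIZE)) = i + 1;
	nextavail = 0;
	available = OBJECTS;

	void *a = obj_alloc();
	void *b = obj_alloc();
	printf("allocated slots %zu and %zu\n",
	    ((char *) a - (char *) start) / OBJ_SIZE,
	    ((char *) b - (char *) start) / OBJ_SIZE);

	obj_free(a);
	void *c = obj_alloc();             /* reuses the slot just freed */
	printf("reused slot %zu\n", ((char *) c - (char *) start) / OBJ_SIZE);

	free(start);
	return 0;
}

Since a free slot always has room for one size_t, the free list needs no memory beyond the objects themselves.
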
@@ -235,7 +235,7 @@
 	if (!(cache->flags & SLAB_CACHE_SLINSIDE))
 		slab_free(slab_extern_cache, slab);
-	
+
 	atomic_dec(&cache->allocated_slabs);
-	
+
 	return cache->frames;
 }
@@ -263,19 +263,19 @@
 	if (!slab)
 		slab = obj2slab(obj);
-	
+
 	assert(slab->cache == cache);
-	
+
 	size_t freed = 0;
-	
+
 	if (cache->destructor)
 		freed = cache->destructor(obj);
-	
+
 	irq_spinlock_lock(&cache->slablock, true);
 	assert(slab->available < cache->objects);
-	
+
 	*((size_t *) obj) = slab->nextavail;
 	slab->nextavail = (obj - slab->start) / cache->size;
 	slab->available++;
-	
+
 	/* Move it to correct list */
 	if (slab->available == cache->objects) {
@@ -283,5 +283,5 @@
 		list_remove(&slab->link);
 		irq_spinlock_unlock(&cache->slablock, true);
-		
+
 		return freed + slab_space_free(cache, slab);
 	} else if (slab->available == 1) {
@@ -290,5 +290,5 @@
 		list_prepend(&slab->link, &cache->partial_slabs);
 	}
-	
+
 	irq_spinlock_unlock(&cache->slablock, true);
 	return freed;
@@ -303,7 +303,7 @@
 {
 	irq_spinlock_lock(&cache->slablock, true);
-	
+
 	slab_t *slab;
-	
+
 	if (list_empty(&cache->partial_slabs)) {
 		/*
@@ -319,5 +319,5 @@
 		if (!slab)
 			return NULL;
-		
+
 		irq_spinlock_lock(&cache->slablock, true);
 	} else {
@@ -326,16 +326,16 @@
 		list_remove(&slab->link);
 	}
-	
+
 	void *obj = slab->start + slab->nextavail * cache->size;
 	slab->nextavail = *((size_t *) obj);
 	slab->available--;
-	
+
 	if (!slab->available)
 		list_prepend(&slab->link, &cache->full_slabs);
 	else
 		list_prepend(&slab->link, &cache->partial_slabs);
-	
+
 	irq_spinlock_unlock(&cache->slablock, true);
-	
+
 	if ((cache->constructor) && (cache->constructor(obj, flags) != EOK)) {
 		/* Bad, bad, construction failed */
@@ -343,5 +343,5 @@
 		return NULL;
 	}
-	
+
 	return obj;
 }
@@ -361,5 +361,5 @@
 	slab_magazine_t *mag = NULL;
 	link_t *cur;
-	
+
 	irq_spinlock_lock(&cache->maglock, true);
 	if (!list_empty(&cache->magazines)) {
@@ -368,5 +368,5 @@
 		else
 			cur = list_last(&cache->magazines);
-		
+
 		mag = list_get_instance(cur, slab_magazine_t, link);
 		list_remove(&mag->link);
@@ -385,8 +385,8 @@
 {
 	irq_spinlock_lock(&cache->maglock, true);
-	
+
 	list_prepend(&mag->link, &cache->magazines);
 	atomic_inc(&cache->magazine_counter);
-	
+
 	irq_spinlock_unlock(&cache->maglock, true);
 }
@@ -402,12 +402,12 @@
 	size_t i;
 	size_t frames = 0;
-	
+
 	for (i = 0; i < mag->busy; i++) {
 		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
 		atomic_dec(&cache->cached_objs);
 	}
-	
+
 	slab_free(&mag_cache, mag);
-	
+
 	return frames;
 }
@@ -420,11 +420,11 @@
 	slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
 	slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
-	
+
 	assert(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock));
-	
+
 	if (cmag) { /* First try local CPU magazines */
 		if (cmag->busy)
 			return cmag;
-		
+
 		if ((lastmag) && (lastmag->busy)) {
 			cache->mag_cache[CPU->id].current = lastmag;
@@ -433,16 +433,16 @@
 		}
 	}
-	
+
 	/* Local magazines are empty, import one from magazine list */
 	slab_magazine_t *newmag = get_mag_from_cache(cache, 1);
 	if (!newmag)
 		return NULL;
-	
+
 	if (lastmag)
 		magazine_destroy(cache, lastmag);
-	
+
 	cache->mag_cache[CPU->id].last = cmag;
 	cache->mag_cache[CPU->id].current = newmag;
-	
+
 	return newmag;
 }
@@ -457,7 +457,7 @@
 	if (!CPU)
 		return NULL;
-	
+
 	irq_spinlock_lock(&cache->mag_cache[CPU->id].lock, true);
-	
+
 	slab_magazine_t *mag = get_full_current_mag(cache);
 	if (!mag) {
@@ -465,10 +465,10 @@
 		return NULL;
 	}
-	
+
 	void *obj = mag->objs[--mag->busy];
 	irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
-	
+
 	atomic_dec(&cache->cached_objs);
-	
+
 	return obj;
 }
@@ -487,11 +487,11 @@
 	slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
 	slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
-	
+
 	assert(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock));
-	
+
 	if (cmag) {
 		if (cmag->busy < cmag->size)
 			return cmag;
-		
+
 		if ((lastmag) && (lastmag->busy < lastmag->size)) {
 			cache->mag_cache[CPU->id].last = cmag;
@@ -500,7 +500,7 @@
 		}
 	}
-	
+
 	/* current | last are full | nonexistent, allocate new */
-	
+
 	/*
 	 * We do not want to sleep just because of caching,
@@ -513,16 +513,16 @@
 	if (!newmag)
 		return NULL;
-	
+
 	newmag->size = SLAB_MAG_SIZE;
 	newmag->busy = 0;
-	
+
 	/* Flush last to magazine list */
 	if (lastmag)
 		put_mag_to_cache(cache, lastmag);
-	
+
 	/* Move current as last, save new as current */
 	cache->mag_cache[CPU->id].last = cmag;
 	cache->mag_cache[CPU->id].current = newmag;
-	
+
 	return newmag;
 }
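
The per-CPU layer keeps two magazines, current and last, and swaps them when one of them still has room; only when both are exhausted does it touch the shared magazine list. A single-threaded sketch of the free-side rotation performed by make_empty_current_mag(); the shared depot is left out, so a magazine that would be flushed there is simply freed here:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAG_SIZE 4

typedef struct {
	size_t busy;               /* number of cached objects */
	void *objs[MAG_SIZE];
} magazine_t;

/* One CPU's two-slot cache. */
static magazine_t *current_mag;
static magazine_t *last_mag;

/* Return a magazine with room for one more object. */
static magazine_t *make_empty_current(void)
{
	if (current_mag) {
		if (current_mag->busy < MAG_SIZE)
			return current_mag;

		if (last_mag && last_mag->busy < MAG_SIZE) {
			/* Swap current and last. */
			magazine_t *tmp = current_mag;
			current_mag = last_mag;
			last_mag = tmp;
			return current_mag;
		}
	}

	/* Both full (or missing): start a new empty magazine. */
	magazine_t *newmag = calloc(1, sizeof(magazine_t));
	if (!newmag)
		return NULL;

	/* In the kernel the old 'last' would be flushed to the depot. */
	free(last_mag);
	last_mag = current_mag;
	current_mag = newmag;
	return newmag;
}

int main(void)
{
	for (int i = 0; i < 10; i++) {
		magazine_t *mag = make_empty_current();
		if (!mag)
			return 1;
		mag->objs[mag->busy++] = (void *) (uintptr_t) i;  /* cache "object" i */
	}
	printf("current: %zu cached, last: %zu cached\n",
	    current_mag->busy, last_mag ? last_mag->busy : 0);
	return 0;
}
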
@@ -537,7 +537,7 @@
 	if (!CPU)
 		return -1;
-	
+
 	irq_spinlock_lock(&cache->mag_cache[CPU->id].lock, true);
-	
+
 	slab_magazine_t *mag = make_empty_current_mag(cache);
 	if (!mag) {
@@ -545,11 +545,11 @@
 		return -1;
 	}
-	
+
 	mag->objs[mag->busy++] = obj;
-	
+
 	irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
-	
+
 	atomic_inc(&cache->cached_objs);
-	
+
 	return 0;
 }
@@ -578,8 +578,8 @@
 	size_t objects = comp_objects(cache);
 	size_t ssize = FRAMES2SIZE(cache->frames);
-	
+
 	if (cache->flags & SLAB_CACHE_SLINSIDE)
 		ssize -= sizeof(slab_t);
-	
+
 	return ssize - objects * cache->size;
 }
@@ -591,9 +591,9 @@
 {
 	assert(_slab_initialized >= 2);
-	
+
 	cache->mag_cache = slab_alloc(&slab_mag_cache, FRAME_ATOMIC);
 	if (!cache->mag_cache)
 		return false;
-	
+
 	size_t i;
 	for (i = 0; i < config.cpu_count; i++) {
@@ -602,5 +602,5 @@
 		    "slab.cache.mag_cache[].lock");
 	}
-	
+
 	return true;
 }
@@ -614,44 +614,44 @@
 {
 	assert(size > 0);
-	
+
 	memsetb(cache, sizeof(*cache), 0);
 	cache->name = name;
-	
+
 	if (align < sizeof(sysarg_t))
 		align = sizeof(sysarg_t);
-	
+
 	size = ALIGN_UP(size, align);
-	
+
 	cache->size = size;
 	cache->constructor = constructor;
 	cache->destructor = destructor;
 	cache->flags = flags;
-	
+
 	list_initialize(&cache->full_slabs);
 	list_initialize(&cache->partial_slabs);
 	list_initialize(&cache->magazines);
-	
+
 	irq_spinlock_initialize(&cache->slablock, "slab.cache.slablock");
 	irq_spinlock_initialize(&cache->maglock, "slab.cache.maglock");
-	
+
 	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
 		(void) make_magcache(cache);
-	
+
 	/* Compute slab sizes, object counts in slabs etc. */
 	if (cache->size < SLAB_INSIDE_SIZE)
 		cache->flags |= SLAB_CACHE_SLINSIDE;
-	
+
 	/* Minimum slab frames */
 	cache->frames = SIZE2FRAMES(cache->size);
-	
+
 	while (badness(cache) > SLAB_MAX_BADNESS(cache))
 		cache->frames <<= 1;
-	
+
 	cache->objects = comp_objects(cache);
-	
+
 	/* If info fits in, put it inside */
 	if (badness(cache) > sizeof(slab_t))
 		cache->flags |= SLAB_CACHE_SLINSIDE;
-	
+
 	/* Add cache to cache list */
 	irq_spinlock_lock(&slab_cache_lock, true);
@@ -670,5 +670,5 @@
 	_slab_cache_create(cache, name, size, align, constructor, destructor,
 	    flags);
-	
+
 	return cache;
 }
@@ -685,5 +685,5 @@
 	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
 		return 0; /* Nothing to do */
-	
+
 	/*
 	 * We count up to the original magazine count to avoid
@@ -691,8 +691,8 @@
 	 */
 	atomic_count_t magcount = atomic_get(&cache->magazine_counter);
-	
+
 	slab_magazine_t *mag;
 	size_t frames = 0;
-	
+
 	while ((magcount--) && (mag = get_mag_from_cache(cache, 0))) {
 		frames += magazine_destroy(cache, mag);
@@ -700,5 +700,5 @@
 			break;
 	}
-	
+
 	if (flags & SLAB_RECLAIM_ALL) {
 		/* Free cpu-bound magazines */
@@ -707,19 +707,19 @@
 		for (i = 0; i < config.cpu_count; i++) {
 			irq_spinlock_lock(&cache->mag_cache[i].lock, true);
-			
+
 			mag = cache->mag_cache[i].current;
 			if (mag)
 				frames += magazine_destroy(cache, mag);
 			cache->mag_cache[i].current = NULL;
-			
+
 			mag = cache->mag_cache[i].last;
 			if (mag)
 				frames += magazine_destroy(cache, mag);
 			cache->mag_cache[i].last = NULL;
-			
+
 			irq_spinlock_unlock(&cache->mag_cache[i].lock, true);
 		}
 	}
-	
+
 	return frames;
 }
@@ -731,9 +731,9 @@
 {
 	ipl_t ipl = interrupts_disable();
-	
+
 	if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
 	    (magazine_obj_put(cache, obj)))
 		slab_obj_destroy(cache, obj, slab);
-	
+
 	interrupts_restore(ipl);
 	atomic_dec(&cache->allocated_objs);
@@ -753,5 +753,5 @@
 	list_remove(&cache->link);
 	irq_spinlock_unlock(&slab_cache_lock, true);
-	
+
 	/*
 	 * Do not lock anything, we assume the software is correct and
@@ -759,18 +759,18 @@
 	 *
 	 */
-	
+
 	/* Destroy all magazines */
 	_slab_reclaim(cache, SLAB_RECLAIM_ALL);
-	
+
 	/* All slabs must be empty */
 	if ((!list_empty(&cache->full_slabs)) ||
 	    (!list_empty(&cache->partial_slabs)))
 		panic("Destroying cache that is not empty.");
-	
+
 	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
 		slab_t *mag_slab = obj2slab(cache->mag_cache);
 		_slab_free(mag_slab->cache, cache->mag_cache, mag_slab);
 	}
-	
+
 	slab_free(&slab_cache_cache, cache);
 }
@@ -783,18 +783,18 @@
 	/* Disable interrupts to avoid deadlocks with interrupt handlers */
 	ipl_t ipl = interrupts_disable();
-	
+
 	void *result = NULL;
-	
+
 	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
 		result = magazine_obj_get(cache);
-	
+
 	if (!result)
 		result = slab_obj_create(cache, flags);
-	
+
 	interrupts_restore(ipl);
-	
+
 	if (result)
 		atomic_inc(&cache->allocated_objs);
-	
+
 	return result;
 }
@@ -812,12 +812,12 @@
 {
 	irq_spinlock_lock(&slab_cache_lock, true);
-	
+
 	size_t frames = 0;
 	list_foreach(slab_cache_list, link, slab_cache_t, cache) {
 		frames += _slab_reclaim(cache, flags);
 	}
-	
+
 	irq_spinlock_unlock(&slab_cache_lock, true);
-	
+
 	return frames;
 }
@@ -828,5 +828,5 @@
 	printf("[cache name      ] [size  ] [pages ] [obj/pg] [slabs ]"
 	    " [cached] [alloc ] [ctl]\n");
-	
+
 	size_t skip = 0;
 	while (true) {
@@ -853,7 +853,7 @@
 		 * statistics.
 		 */
-		
+
 		irq_spinlock_lock(&slab_cache_lock, true);
-		
+
 		link_t *cur;
 		size_t i;
@@ -861,14 +861,14 @@
 		    (i < skip) && (cur != &slab_cache_list.head);
 		    i++, cur = cur->next);
-		
+
 		if (cur == &slab_cache_list.head) {
 			irq_spinlock_unlock(&slab_cache_lock, true);
 			break;
 		}
-		
+
 		skip++;
-		
+
 		slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
-		
+
 		const char *name = cache->name;
 		size_t frames = cache->frames;
@@ -879,7 +879,7 @@
 		long allocated_objs = atomic_get(&cache->allocated_objs);
 		unsigned int flags = cache->flags;
-		
+
 		irq_spinlock_unlock(&slab_cache_lock, true);
-		
+
 		printf("%-18s %8zu %8zu %8zu %8ld %8ld %8ld %-5s\n",
 		    name, size, frames, objects, allocated_slabs,
@@ -896,18 +896,18 @@
 	    sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE |
 	    SLAB_CACHE_SLINSIDE);
-	
+
 	/* Initialize slab_cache cache */
 	_slab_cache_create(&slab_cache_cache, "slab_cache_cache",
 	    sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL,
 	    SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
-	
+
 	/* Initialize external slab cache */
 	slab_extern_cache = slab_cache_create("slab_t", sizeof(slab_t), 0,
 	    NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
-	
+
 	/* Initialize structures for malloc */
 	size_t i;
 	size_t size;
-	
+
 	for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
 	    i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
@@ -916,5 +916,5 @@
 		    NULL, NULL, SLAB_CACHE_MAGDEFERRED);
 	}
-	
+
 #ifdef CONFIG_DEBUG
 	_slab_initialized = 1;
@@ -934,20 +934,20 @@
 	_slab_initialized = 2;
 #endif
-	
+
 	_slab_cache_create(&slab_mag_cache, "slab_mag_cache",
 	    sizeof(slab_mag_cache_t) * config.cpu_count, sizeof(uintptr_t),
 	    NULL, NULL, SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
-	
+
 	irq_spinlock_lock(&slab_cache_lock, false);
-	
+
 	list_foreach(slab_cache_list, link, slab_cache_t, slab) {
 		if ((slab->flags & SLAB_CACHE_MAGDEFERRED) !=
 		    SLAB_CACHE_MAGDEFERRED)
 			continue;
-		
+
 		(void) make_magcache(slab);
 		slab->flags &= ~SLAB_CACHE_MAGDEFERRED;
 	}
-	
+
 	irq_spinlock_unlock(&slab_cache_lock, false);
 }
@@ -957,10 +957,10 @@
 	assert(_slab_initialized);
 	assert(size <= (1 << SLAB_MAX_MALLOC_W));
-	
+
 	if (size < (1 << SLAB_MIN_MALLOC_W))
 		size = (1 << SLAB_MIN_MALLOC_W);
-	
+
 	uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
-	
+
 	return slab_alloc(malloc_caches[idx], flags);
 }
@@ -970,24 +970,24 @@
 	assert(_slab_initialized);
 	assert(size <= (1 << SLAB_MAX_MALLOC_W));
-	
+
 	void *new_ptr;
-	
+
 	if (size > 0) {
 		if (size < (1 << SLAB_MIN_MALLOC_W))
 			size = (1 << SLAB_MIN_MALLOC_W);
 		uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
-		
+
 		new_ptr = slab_alloc(malloc_caches[idx], flags);
 	} else
 		new_ptr = NULL;
-	
+
 	if ((new_ptr != NULL) && (ptr != NULL)) {
 		slab_t *slab = obj2slab(ptr);
 		memcpy(new_ptr, ptr, min(size, slab->cache->size));
 	}
-	
+
 	if (ptr != NULL)
 		free(ptr);
-	
+
 	return new_ptr;
 }
@@ -997,5 +997,5 @@
 	if (!ptr)
 		return;
-	
+
 	slab_t *slab = obj2slab(ptr);
 	_slab_free(slab->cache, ptr, slab);
Index: kernel/generic/src/mm/tlb.c
===================================================================
--- kernel/generic/src/mm/tlb.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/mm/tlb.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -87,12 +87,12 @@
 	CPU->tlb_active = false;
 	irq_spinlock_lock(&tlblock, false);
-	
+
 	size_t i;
 	for (i = 0; i < config.cpu_count; i++) {
 		if (i == CPU->id)
 			continue;
-		
+
 		cpu_t *cpu = &cpus[i];
-		
+
 		irq_spinlock_lock(&cpu->lock, false);
 		if (cpu->tlb_messages_count == TLB_MESSAGE_QUEUE_LEN) {
@@ -118,7 +118,7 @@
 		irq_spinlock_unlock(&cpu->lock, false);
 	}
-	
+
 	tlb_shootdown_ipi_send();
-	
+
 busy_wait:
 	for (i = 0; i < config.cpu_count; i++) {
@@ -126,5 +126,5 @@
 			goto busy_wait;
 	}
-	
+
 	return ipl;
 }
@@ -153,12 +153,12 @@
 {
 	assert(CPU);
-	
+
 	CPU->tlb_active = false;
 	irq_spinlock_lock(&tlblock, false);
 	irq_spinlock_unlock(&tlblock, false);
-	
+
 	irq_spinlock_lock(&CPU->lock, false);
 	assert(CPU->tlb_messages_count <= TLB_MESSAGE_QUEUE_LEN);
-	
+
 	size_t i;
 	for (i = 0; i < CPU->tlb_messages_count; i++) {
@@ -167,5 +167,5 @@
 		uintptr_t page = CPU->tlb_messages[i].page;
 		size_t count = CPU->tlb_messages[i].count;
-		
+
 		switch (type) {
 		case TLB_INVL_ALL:
@@ -183,9 +183,9 @@
 			break;
 		}
-		
+
 		if (type == TLB_INVL_ALL)
 			break;
 	}
-	
+
 	CPU->tlb_messages_count = 0;
 	irq_spinlock_unlock(&CPU->lock, false);
Index: kernel/generic/src/printf/printf.c
===================================================================
--- kernel/generic/src/printf/printf.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/printf/printf.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -40,11 +40,11 @@
 	int ret;
 	va_list args;
-	
+
 	va_start(args, fmt);
-	
+
 	ret = vprintf(fmt, args);
-	
+
 	va_end(args);
-	
+
 	return ret;
 }
Index: kernel/generic/src/printf/printf_core.c
===================================================================
--- kernel/generic/src/printf/printf_core.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/printf/printf_core.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -157,5 +157,5 @@
 	if (str == NULL)
 		return printf_putnchars(nullstr, str_size(nullstr), ps);
-	
+
 	return ps->str_write((void *) str, str_size(str), ps->data);
 }
@@ -173,5 +173,5 @@
 	if (!ascii_check(ch))
 		return ps->str_write((void *) &invalch, 1, ps->data);
-	
+
 	return ps->str_write(&ch, 1, ps->data);
 }
@@ -189,5 +189,5 @@
 	if (!chr_check(ch))
 		return ps->str_write((void *) &invalch, 1, ps->data);
-	
+
 	return ps->wstr_write(&ch, sizeof(wchar_t), ps->data);
 }
@@ -215,8 +215,8 @@
 		}
 	}
-	
+
 	if (printf_putchar(ch, ps) > 0)
 		counter++;
-	
+
 	while (--width > 0) {
 		/*
@@ -227,5 +227,5 @@
 			counter++;
 	}
-	
+
 	return (int) (counter);
 }
@@ -253,8 +253,8 @@
 		}
 	}
-	
+
 	if (printf_putwchar(ch, ps) > 0)
 		counter++;
-	
+
 	while (--width > 0) {
 		/*
@@ -265,5 +265,5 @@
 			counter++;
 	}
-	
+
 	return (int) (counter);
 }
@@ -283,10 +283,10 @@
 	if (str == NULL)
 		return printf_putstr(nullstr, ps);
-	
+
 	/* Print leading spaces. */
 	size_t strw = str_length(str);
 	if ((precision == 0) || (precision > strw))
 		precision = strw;
-	
+
 	/* Left padding */
 	size_t counter = 0;
@@ -298,5 +298,5 @@
 		}
 	}
-	
+
 	/* Part of @a str fitting into the alloted space. */
 	int retval;
@@ -331,10 +331,10 @@
 	if (str == NULL)
 		return printf_putstr(nullstr, ps);
-	
+
 	/* Print leading spaces. */
 	size_t strw = wstr_length(str);
 	if ((precision == 0) || (precision > strw))
 		precision = strw;
-	
+
 	/* Left padding */
 	size_t counter = 0;
@@ -346,5 +346,5 @@
 		}
 	}
-	
+
 	/* Part of @a wstr fitting into the alloted space. */
 	int retval;
@@ -352,7 +352,7 @@
 	if ((retval = printf_wputnchars(str, size, ps)) < 0)
 		return -counter;
-	
+
 	counter += retval;
-	
+
 	/* Right padding */
 	while (width-- > 0) {
@@ -385,14 +385,14 @@
 	else
 		digits = digits_small;
-	
+
 	char data[PRINT_NUMBER_BUFFER_SIZE];
 	char *ptr = &data[PRINT_NUMBER_BUFFER_SIZE - 1];
-	
+
 	/* Size of number with all prefixes and signs */
 	int size = 0;
-	
+
 	/* Put zero at end of string */
 	*ptr-- = 0;
-	
+
 	if (num == 0) {
 		*ptr-- = '0';
@@ -404,8 +404,8 @@
 		} while (num /= base);
 	}
-	
+
 	/* Size of plain number */
 	int number_size = size;
-	
+
 	/*
 	 * Collect the sum of all prefixes/signs/etc. to calculate padding and
@@ -426,5 +426,5 @@
 		}
 	}
-	
+
 	char sgn = 0;
 	if (flags & __PRINTF_FLAG_SIGNED) {
@@ -440,8 +440,8 @@
 		}
 	}
-	
+
 	if (flags & __PRINTF_FLAG_LEFTALIGNED)
 		flags &= ~__PRINTF_FLAG_ZEROPADDED;
-	
+
 	/*
 	 * If the number is left-aligned or precision is specified then
@@ -452,5 +452,5 @@
 			precision = width - size + number_size;
 	}
-	
+
 	/* Print leading spaces */
 	if (number_size > precision) {
@@ -458,8 +458,8 @@
 		precision = number_size;
 	}
-	
+
 	width -= precision + size - number_size;
 	size_t counter = 0;
-	
+
 	if (!(flags & __PRINTF_FLAG_LEFTALIGNED)) {
 		while (width-- > 0) {
@@ -468,5 +468,5 @@
 		}
 	}
-	
+
 	/* Print sign */
 	if (sgn) {
@@ -474,5 +474,5 @@
 			counter++;
 	}
-	
+
 	/* Print prefix */
 	if (flags & __PRINTF_FLAG_PREFIX) {
@@ -507,5 +507,5 @@
 		}
 	}
-	
+
 	/* Print leading zeroes */
 	precision -= number_size;
@@ -514,17 +514,17 @@
 			counter++;
 	}
-	
+
 	/* Print the number itself */
 	int retval;
 	if ((retval = printf_putstr(++ptr, ps)) > 0)
 		counter += retval;
-	
+
 	/* Print trailing spaces */
-	
+
 	while (width-- > 0) {
 		if (printf_putchar(' ', ps) == 1)
 			counter++;
 	}
-	
+
 	return ((int) counter);
 }
@@ -624,15 +624,15 @@
 	size_t nxt = 0;  /* Index of the next character from fmt */
 	size_t j = 0;    /* Index to the first not printed nonformating character */
-	
+
 	size_t counter = 0;   /* Number of characters printed */
 	int retval;           /* Return values from nested functions */
-	
+
 	while (true) {
 		i = nxt;
 		wchar_t uc = str_decode(fmt, &nxt, STR_NO_LIMIT);
-		
+
 		if (uc == 0)
 			break;
-		
+
 		/* Control character */
 		if (uc == '%') {
@@ -646,11 +646,11 @@
 				counter += retval;
 			}
-			
+
 			j = i;
-			
+
 			/* Parse modifiers */
 			uint32_t flags = 0;
 			bool end = false;
-			
+
 			do {
 				i = nxt;
@@ -676,5 +676,5 @@
 				};
 			} while (!end);
-			
+
 			/* Width & '*' operator */
 			int width = 0;
@@ -683,5 +683,5 @@
 					width *= 10;
 					width += uc - '0';
-					
+
 					i = nxt;
 					uc = str_decode(fmt, &nxt, STR_NO_LIMIT);
@@ -702,5 +702,5 @@
 				}
 			}
-			
+
 			/* Precision and '*' operator */
 			int precision = 0;
@@ -712,5 +712,5 @@
 						precision *= 10;
 						precision += uc - '0';
-						
+
 						i = nxt;
 						uc = str_decode(fmt, &nxt, STR_NO_LIMIT);
@@ -731,7 +731,7 @@
 				}
 			}
-			
+
 			qualifier_t qualifier;
-			
+
 			switch (uc) {
 			case 't':
@@ -780,7 +780,7 @@
 				qualifier = PrintfQualifierInt;
 			}
-			
+
 			unsigned int base = 10;
-			
+
 			switch (uc) {
 			/*
@@ -792,10 +792,10 @@
 				else
 					retval = print_str(va_arg(ap, char *), width, precision, flags, ps);
-				
+
 				if (retval < 0) {
 					counter = -counter;
 					goto out;
 				}
-				
+
 				counter += retval;
 				j = nxt;
@@ -806,14 +806,14 @@
 				else
 					retval = print_char(va_arg(ap, unsigned int), width, flags, ps);
-				
+
 				if (retval < 0) {
 					counter = -counter;
 					goto out;
 				};
-				
+
 				counter += retval;
 				j = nxt;
 				goto next_char;
-			
+
 			/*
 			 * Integer values
@@ -847,10 +847,10 @@
 				base = 16;
 				break;
-			
+
 			/* Percentile itself */
 			case '%':
 				j = i;
 				goto next_char;
-			
+
 			/*
 			 * Bad formatting.
@@ -863,9 +863,9 @@
 				goto next_char;
 			}
-			
+
 			/* Print integers */
 			size_t size;
 			uint64_t number;
-			
+
 			switch (qualifier) {
 			case PrintfQualifierByte:
@@ -907,5 +907,5 @@
 				goto out;
 			}
-			
+
 			if ((retval = print_number(number, width, precision,
 			    base, flags, ps)) < 0) {
@@ -913,5 +913,5 @@
 				goto out;
 			}
-			
+
 			counter += retval;
 			j = nxt;
@@ -920,5 +920,5 @@
 		;
 	}
-	
+
 	if (i > j) {
 		if ((retval = printf_putnchars(&fmt[j], i - j, ps)) < 0) {
@@ -929,5 +929,5 @@
 		counter += retval;
 	}
-	
+
 out:
 	return ((int) counter);
Index: kernel/generic/src/printf/snprintf.c
===================================================================
--- kernel/generic/src/printf/snprintf.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/printf/snprintf.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -40,10 +40,10 @@
 	int ret;
 	va_list args;
-	
+
 	va_start(args, fmt);
 	ret = vsnprintf(str, size, fmt, args);
-	
+
 	va_end(args);
-	
+
 	return ret;
 }
Index: kernel/generic/src/printf/vprintf.c
===================================================================
--- kernel/generic/src/printf/vprintf.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/printf/vprintf.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -45,10 +45,10 @@
 	size_t offset = 0;
 	size_t chars = 0;
-	
+
 	while (offset < size) {
 		putchar(str_decode(str, &offset, size));
 		chars++;
 	}
-	
+
 	return chars;
 }
@@ -58,5 +58,5 @@
 	size_t offset = 0;
 	size_t chars = 0;
-	
+
 	while (offset < size) {
 		putchar(str[chars]);
@@ -64,5 +64,5 @@
 		offset += sizeof(wchar_t);
 	}
-	
+
 	return chars;
 }
@@ -73,10 +73,10 @@
 	size_t chars = 0;
 	wchar_t uc;
-	
+
 	while ((uc = str_decode(str, &offset, STR_NO_LIMIT)) != 0) {
 		putchar(uc);
 		chars++;
 	}
-	
+
 	return chars;
 }
@@ -89,5 +89,5 @@
 		NULL
 	};
-	
+
 	return printf_core(fmt, &ps, ap);
 }
Index: kernel/generic/src/printf/vsnprintf.c
===================================================================
--- kernel/generic/src/printf/vsnprintf.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/printf/vsnprintf.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -65,8 +65,8 @@
 {
 	size_t left = data->size - data->len;
-	
+
 	if (left == 0)
 		return ((int) size);
-	
+
 	if (left == 1) {
 		/* We have only one free byte left in buffer
@@ -77,5 +77,5 @@
 		return ((int) size);
 	}
-	
+
 	if (left <= size) {
 		/* We do not have enough space for the whole string
@@ -84,29 +84,29 @@
 		 */
 		size_t index = 0;
-		
+
 		while (index < size) {
 			wchar_t uc = str_decode(str, &index, size);
-			
+
 			if (chr_encode(uc, data->dst, &data->len, data->size - 1) != EOK)
 				break;
 		}
-		
+
 		/* Put trailing zero at end, but not count it
 		 * into data->len so it could be rewritten next time
 		 */
 		data->dst[data->len] = 0;
-		
+
 		return ((int) size);
 	}
-	
+
 	/* Buffer is big enough to print the whole string */
 	memcpy((void *)(data->dst + data->len), (void *) str, size);
 	data->len += size;
-	
+
 	/* Put trailing zero at end, but not count it
 	 * into data->len so it could be rewritten next time
 	 */
 	data->dst[data->len] = 0;
-	
+
 	return ((int) size);
 }
@@ -132,11 +132,11 @@
 {
 	size_t index = 0;
-	
+
 	while (index < (size / sizeof(wchar_t))) {
 		size_t left = data->size - data->len;
-		
+
 		if (left == 0)
 			return ((int) size);
-		
+
 		if (left == 1) {
 			/* We have only one free byte left in buffer
@@ -147,16 +147,16 @@
 			return ((int) size);
 		}
-		
+
 		if (chr_encode(str[index], data->dst, &data->len, data->size - 1) != EOK)
 			break;
-		
+
 		index++;
 	}
-	
+
 	/* Put trailing zero at end, but not count it
 	 * into data->len so it could be rewritten next time
 	 */
 	data->dst[data->len] = 0;
-	
+
 	return ((int) size);
 }
@@ -174,9 +174,9 @@
 		&data
 	};
-	
+
 	/* Print 0 at end of string - fix the case that nothing will be printed */
 	if (size > 0)
 		str[0] = 0;
-	
+
 	/* vsnprintf_write ensures that str will be terminated by zero. */
 	return printf_core(fmt, &ps, ap);
Index: kernel/generic/src/proc/program.c
===================================================================
--- kernel/generic/src/proc/program.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/proc/program.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -75,5 +75,5 @@
 	if (!prg->task)
 		return ELIMIT;
-	
+
 	/*
 	 * Create the stack address space area.
@@ -94,8 +94,8 @@
 		return ENOMEM;
 	}
-	
+
 	uspace_arg_t *kernel_uarg = (uspace_arg_t *)
 	    malloc(sizeof(uspace_arg_t), 0);
-	
+
 	kernel_uarg->uspace_entry = (void *) entry_addr;
 	kernel_uarg->uspace_stack = (void *) virt;
@@ -104,5 +104,5 @@
 	kernel_uarg->uspace_thread_arg = NULL;
 	kernel_uarg->uspace_uarg = NULL;
-	
+
 	/*
 	 * Create the main thread.
@@ -117,5 +117,5 @@
 		return ELIMIT;
 	}
-	
+
 	return EOK;
 }
@@ -142,5 +142,5 @@
 	if (!as)
 		return ENOMEM;
-	
+
 	prg->loader_status = elf_load((elf_header_t *) image_addr, as, 0);
 	if (prg->loader_status != EE_OK) {
@@ -148,18 +148,18 @@
 		prg->task = NULL;
 		prg->main_thread = NULL;
-		
+
 		if (prg->loader_status != EE_LOADER)
 			return ENOTSUP;
-		
+
 		/* Register image as the program loader */
 		if (program_loader != NULL)
 			return ELIMIT;
-		
+
 		program_loader = image_addr;
 		log(LF_OTHER, LVL_NOTE, "Program loader at %p", (void *) image_addr);
-		
+
 		return EOK;
 	}
-	
+
 	return program_create(as, ((elf_header_t *) image_addr)->e_entry,
 	    name, prg);
@@ -179,5 +179,5 @@
 	if (!as)
 		return ENOMEM;
-	
+
 	void *loader = program_loader;
 	if (!loader) {
@@ -187,5 +187,5 @@
 		return ENOENT;
 	}
-	
+
 	prg->loader_status = elf_load((elf_header_t *) program_loader, as,
 	    ELD_F_LOADER);
@@ -196,5 +196,5 @@
 		return ENOENT;
 	}
-	
+
 	return program_create(as, ((elf_header_t *) program_loader)->e_entry,
 	    name, prg);
@@ -230,12 +230,12 @@
 	if (name_len > TASK_NAME_BUFLEN - 1)
 		name_len = TASK_NAME_BUFLEN - 1;
-	
+
 	char namebuf[TASK_NAME_BUFLEN];
 	errno_t rc = copy_from_uspace(namebuf, uspace_name, name_len);
 	if (rc != EOK)
 		return (sys_errno_t) rc;
-	
+
 	namebuf[name_len] = 0;
-	
+
 	/* Spawn the new task. */
 	program_t prg;
@@ -243,9 +243,9 @@
 	if (rc != EOK)
 		return rc;
-	
+
 	// FIXME: control the permissions
 	perm_set(prg.task, perm_get(TASK));
 	program_ready(&prg);
-	
+
 	return EOK;
 }
Index: kernel/generic/src/proc/scheduler.c
===================================================================
--- kernel/generic/src/proc/scheduler.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/proc/scheduler.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -90,5 +90,5 @@
 	before_thread_runs_arch();
 	rcu_before_thread_runs();
-	
+
 #ifdef CONFIG_FPU_LAZY
 	if (THREAD == CPU->fpu_owner)
@@ -105,5 +105,5 @@
 	}
 #endif
-	
+
 #ifdef CONFIG_UDEBUG
 	if (THREAD->btrace) {
@@ -113,5 +113,5 @@
 			stack_trace_istate(istate);
 		}
-		
+
 		THREAD->btrace = false;
 	}
@@ -141,10 +141,10 @@
 	fpu_enable();
 	irq_spinlock_lock(&CPU->lock, false);
-	
+
 	/* Save old context */
 	if (CPU->fpu_owner != NULL) {
 		irq_spinlock_lock(&CPU->fpu_owner->lock, false);
 		fpu_context_save(CPU->fpu_owner->saved_fpu_context);
-		
+
 		/* Don't prevent migration */
 		CPU->fpu_owner->fpu_context_engaged = false;
@@ -152,5 +152,5 @@
 		CPU->fpu_owner = NULL;
 	}
-	
+
 	irq_spinlock_lock(&THREAD->lock, false);
 	if (THREAD->fpu_context_exists) {
@@ -164,5 +164,5 @@
 			THREAD->saved_fpu_context =
 			    (fpu_context_t *) slab_alloc(fpu_context_cache, 0);
-			
+
 			/* We may have switched CPUs during slab_alloc */
 			goto restart;
@@ -171,9 +171,9 @@
 		THREAD->fpu_context_exists = true;
 	}
-	
+
 	CPU->fpu_owner = THREAD;
 	THREAD->fpu_context_engaged = true;
 	irq_spinlock_unlock(&THREAD->lock, false);
-	
+
 	irq_spinlock_unlock(&CPU->lock, false);
 }
@@ -201,7 +201,7 @@
 {
 	assert(CPU != NULL);
-	
+
 loop:
-	
+
 	if (atomic_get(&CPU->nrdy) == 0) {
 		/*
@@ -214,5 +214,5 @@
 		irq_spinlock_unlock(&CPU->lock, false);
 		interrupts_enable();
-		
+
 		/*
 		 * An interrupt might occur right now and wake up a thread.
@@ -226,5 +226,5 @@
 
 	assert(!CPU->idle);
-	
+
 	unsigned int i;
 	for (i = 0; i < RQ_COUNT; i++) {
@@ -237,9 +237,9 @@
 			continue;
 		}
-		
+
 		atomic_dec(&CPU->nrdy);
 		atomic_dec(&nrdy);
 		CPU->rq[i].n--;
-		
+
 		/*
 		 * Take the first thread from the queue.
@@ -248,11 +248,11 @@
 		    list_first(&CPU->rq[i].rq), thread_t, rq_link);
 		list_remove(&thread->rq_link);
-		
+
 		irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);
-		
+
 		thread->cpu = CPU;
 		thread->ticks = us2ticks((i + 1) * 10000);
 		thread->priority = i;  /* Correct rq index */
-		
+
 		/*
 		 * Clear the stolen flag so that it can be migrated
@@ -261,8 +261,8 @@
 		thread->stolen = false;
 		irq_spinlock_unlock(&thread->lock, false);
-		
+
 		return thread;
 	}
-	
+
 	goto loop;
 }
@@ -282,13 +282,13 @@
 {
 	list_t list;
-	
+
 	list_initialize(&list);
 	irq_spinlock_lock(&CPU->lock, false);
-	
+
 	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
 		int i;
 		for (i = start; i < RQ_COUNT - 1; i++) {
 			/* Remember and empty rq[i + 1] */
-			
+
 			irq_spinlock_lock(&CPU->rq[i + 1].lock, false);
 			list_concat(&list, &CPU->rq[i + 1].rq);
@@ -296,7 +296,7 @@
 			CPU->rq[i + 1].n = 0;
 			irq_spinlock_unlock(&CPU->rq[i + 1].lock, false);
-			
+
 			/* Append rq[i + 1] to rq[i] */
-			
+
 			irq_spinlock_lock(&CPU->rq[i].lock, false);
 			list_concat(&CPU->rq[i].rq, &list);
@@ -304,8 +304,8 @@
 			irq_spinlock_unlock(&CPU->rq[i].lock, false);
 		}
-		
+
 		CPU->needs_relink = 0;
 	}
-	
+
 	irq_spinlock_unlock(&CPU->lock, false);
 }
@@ -321,18 +321,18 @@
 {
 	volatile ipl_t ipl;
-	
+
 	assert(CPU != NULL);
-	
+
 	ipl = interrupts_disable();
-	
+
 	if (atomic_get(&haltstate))
 		halt();
-	
+
 	if (THREAD) {
 		irq_spinlock_lock(&THREAD->lock, false);
-		
+
 		/* Update thread kernel accounting */
 		THREAD->kcycles += get_cycle() - THREAD->last_cycle;
-		
+
 #if (defined CONFIG_FPU) && (!defined CONFIG_FPU_LAZY)
 		fpu_context_save(THREAD->saved_fpu_context);
@@ -342,14 +342,14 @@
 			 * This is the place where threads leave scheduler();
 			 */
-			
+
 			/* Save current CPU cycle */
 			THREAD->last_cycle = get_cycle();
-			
+
 			irq_spinlock_unlock(&THREAD->lock, false);
 			interrupts_restore(THREAD->saved_context.ipl);
-			
+
 			return;
 		}
-		
+
 		/*
 		 * Interrupt priority level of preempted thread is recorded
@@ -360,5 +360,5 @@
 		THREAD->saved_context.ipl = ipl;
 	}
-	
+
 	/*
 	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, AS
@@ -368,5 +368,5 @@
 	 */
 	the_copy(THE, (the_t *) CPU->stack);
-	
+
 	/*
 	 * We may not keep the old stack.
@@ -386,5 +386,5 @@
 	    (uintptr_t) CPU->stack, STACK_SIZE);
 	context_restore(&CPU->saved_context);
-	
+
 	/* Not reached */
 }
@@ -402,9 +402,9 @@
 	task_t *old_task = TASK;
 	as_t *old_as = AS;
-	
+
 	assert((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
 	assert(CPU != NULL);
 	assert(interrupts_disabled());
-	
+
 	/*
 	 * Hold the current task and the address space to prevent their
@@ -414,12 +414,12 @@
 	if (old_task)
 		task_hold(old_task);
-	
+
 	if (old_as)
 		as_hold(old_as);
-	
+
 	if (THREAD) {
 		/* Must be run after the switch to scheduler stack */
 		after_thread_ran();
-		
+
 		switch (THREAD->state) {
 		case Running:
@@ -427,5 +427,5 @@
 			thread_ready(THREAD);
 			break;
-		
+
 		case Exiting:
 			rcu_thread_exiting();
@@ -452,10 +452,10 @@
 				    WAKEUP_FIRST);
 				irq_spinlock_unlock(&THREAD->join_wq.lock, false);
-				
+
 				THREAD->state = Lingering;
 				irq_spinlock_unlock(&THREAD->lock, false);
 			}
 			break;
-			
+
 		case Sleeping:
 			/*
@@ -463,5 +463,5 @@
 			 */
 			THREAD->priority = -1;
-			
+
 			/*
 			 * We need to release wq->lock which we locked in
@@ -470,8 +470,8 @@
 			 */
 			irq_spinlock_unlock(&THREAD->sleep_queue->lock, false);
-			
+
 			irq_spinlock_unlock(&THREAD->lock, false);
 			break;
-		
+
 		default:
 			/*
@@ -482,16 +482,16 @@
 			break;
 		}
-		
+
 		THREAD = NULL;
 	}
-	
+
 	THREAD = find_best_thread();
-	
+
 	irq_spinlock_lock(&THREAD->lock, false);
 	int priority = THREAD->priority;
 	irq_spinlock_unlock(&THREAD->lock, false);
-	
+
 	relink_rq(priority);
-	
+
 	/*
 	 * If both the old and the new task are the same,
@@ -500,5 +500,5 @@
 	if (TASK != THREAD->task) {
 		as_t *new_as = THREAD->task->as;
-		
+
 		/*
 		 * Note that it is possible for two tasks
@@ -512,18 +512,18 @@
 			as_switch(old_as, new_as);
 		}
-		
+
 		TASK = THREAD->task;
 		before_task_runs();
 	}
-	
+
 	if (old_task)
 		task_release(old_task);
-	
+
 	if (old_as)
 		as_release(old_as);
-	
+
 	irq_spinlock_lock(&THREAD->lock, false);
 	THREAD->state = Running;
-	
+
 #ifdef SCHEDULER_VERBOSE
 	log(LF_OTHER, LVL_DEBUG,
@@ -532,5 +532,5 @@
 	    THREAD->ticks, atomic_get(&CPU->nrdy));
 #endif
-	
+
 	/*
 	 * Some architectures provide late kernel PA2KA(identity)
@@ -542,5 +542,5 @@
 	 */
 	before_thread_runs();
-	
+
 	/*
 	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
@@ -548,7 +548,7 @@
 	 */
 	the_copy(THE, (the_t *) THREAD->kstack);
-	
+
 	context_restore(&THREAD->saved_context);
-	
+
 	/* Not reached */
 }
@@ -567,10 +567,10 @@
 	atomic_count_t average;
 	atomic_count_t rdy;
-	
+
 	/*
 	 * Detach kcpulb as nobody will call thread_join_timeout() on it.
 	 */
 	thread_detach(THREAD);
-	
+
 loop:
 	/*
@@ -578,5 +578,5 @@
 	 */
 	thread_sleep(1);
-	
+
 not_satisfied:
 	/*
@@ -588,10 +588,10 @@
 	average = atomic_get(&nrdy) / config.cpu_active + 1;
 	rdy = atomic_get(&CPU->nrdy);
-	
+
 	if (average <= rdy)
 		goto satisfied;
-	
+
 	atomic_count_t count = average - rdy;
-	
+
 	/*
 	 * Searching least priority queues on all CPU's first and most priority
@@ -601,9 +601,9 @@
 	size_t acpu_bias = 0;
 	int rq;
-	
+
 	for (rq = RQ_COUNT - 1; rq >= 0; rq--) {
 		for (acpu = 0; acpu < config.cpu_active; acpu++) {
 			cpu_t *cpu = &cpus[(acpu + acpu_bias) % config.cpu_active];
-			
+
 			/*
 			 * Not interested in ourselves.
@@ -614,8 +614,8 @@
 			if (CPU == cpu)
 				continue;
-			
+
 			if (atomic_get(&cpu->nrdy) <= average)
 				continue;
-			
+
 			irq_spinlock_lock(&(cpu->rq[rq].lock), true);
 			if (cpu->rq[rq].n == 0) {
@@ -623,14 +623,14 @@
 				continue;
 			}
-			
+
 			thread_t *thread = NULL;
-			
+
 			/* Search rq from the back */
 			link_t *link = cpu->rq[rq].rq.head.prev;
-			
+
 			while (link != &(cpu->rq[rq].rq.head)) {
 				thread = (thread_t *) list_get_instance(link,
 				    thread_t, rq_link);
-				
+
 				/*
 				 * Do not steal CPU-wired threads, threads
@@ -640,5 +640,5 @@
 				 */
 				irq_spinlock_lock(&thread->lock, false);
-				
+
 				if ((!thread->wired) && (!thread->stolen) &&
 				    (!thread->nomigrate) &&
@@ -649,28 +649,28 @@
 					irq_spinlock_unlock(&thread->lock,
 					    false);
-					
+
 					atomic_dec(&cpu->nrdy);
 					atomic_dec(&nrdy);
-					
+
 					cpu->rq[rq].n--;
 					list_remove(&thread->rq_link);
-					
+
 					break;
 				}
-				
+
 				irq_spinlock_unlock(&thread->lock, false);
-				
+
 				link = link->prev;
 				thread = NULL;
 			}
-			
+
 			if (thread) {
 				/*
 				 * Ready thread on local CPU
 				 */
-				
+
 				irq_spinlock_pass(&(cpu->rq[rq].lock),
 				    &thread->lock);
-				
+
 #ifdef KCPULB_VERBOSE
 				log(LF_OTHER, LVL_DEBUG,
@@ -680,14 +680,14 @@
 				    atomic_get(&nrdy) / config.cpu_active);
 #endif
-				
+
 				thread->stolen = true;
 				thread->state = Entering;
-				
+
 				irq_spinlock_unlock(&thread->lock, true);
 				thread_ready(thread);
-				
+
 				if (--count == 0)
 					goto satisfied;
-				
+
 				/*
 				 * We are not satisfied yet, focus on another
@@ -696,12 +696,12 @@
 				 */
 				acpu_bias++;
-				
+
 				continue;
 			} else
 				irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
-			
-		}
-	}
-	
+
+		}
+	}
+
 	if (atomic_get(&CPU->nrdy)) {
 		/*
@@ -718,7 +718,7 @@
 		goto loop;
 	}
-	
+
 	goto not_satisfied;
-	
+
 satisfied:
 	goto loop;
@@ -735,11 +735,11 @@
 		if (!cpus[cpu].active)
 			continue;
-		
+
 		irq_spinlock_lock(&cpus[cpu].lock, true);
-		
+
 		printf("cpu%u: address=%p, nrdy=%" PRIua ", needs_relink=%zu\n",
 		    cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
 		    cpus[cpu].needs_relink);
-		
+
 		unsigned int i;
 		for (i = 0; i < RQ_COUNT; i++) {
@@ -749,5 +749,5 @@
 				continue;
 			}
-			
+
 			printf("\trq[%u]: ", i);
 			list_foreach(cpus[cpu].rq[i].rq, rq_link, thread_t,
@@ -757,8 +757,8 @@
 			}
 			printf("\n");
-			
+
 			irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
 		}
-		
+
 		irq_spinlock_unlock(&cpus[cpu].lock, true);
 	}
Index: kernel/generic/src/proc/task.c
===================================================================
--- kernel/generic/src/proc/task.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/proc/task.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -107,15 +107,15 @@
 	task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);
 	size_t *cnt = (size_t *) arg;
-	
+
 	if (task != TASK) {
 		(*cnt)++;
-		
+
 #ifdef CONFIG_DEBUG
 		printf("[%"PRIu64"] ", task->taskid);
 #endif
-		
+
 		task_kill_internal(task);
 	}
-	
+
 	/* Continue the walk */
 	return true;
@@ -138,5 +138,5 @@
 		task_release(task_0);
 	}
-	
+
 	/* Repeat until there are any tasks except TASK */
 	do {
@@ -144,12 +144,12 @@
 		printf("Killing tasks... ");
 #endif
-		
+
 		irq_spinlock_lock(&tasks_lock, true);
 		tasks_left = 0;
 		avltree_walk(&tasks_tree, task_done_walker, &tasks_left);
 		irq_spinlock_unlock(&tasks_lock, true);
-		
+
 		thread_sleep(1);
-		
+
 #ifdef CONFIG_DEBUG
 		printf("\n");
@@ -165,17 +165,17 @@
 	if (rc != EOK)
 		return rc;
-	
+
 	atomic_set(&task->refcount, 0);
 	atomic_set(&task->lifecount, 0);
-	
+
 	irq_spinlock_initialize(&task->lock, "task_t_lock");
-	
+
 	list_initialize(&task->threads);
-	
+
 	ipc_answerbox_init(&task->answerbox, task);
-	
+
 	spinlock_initialize(&task->active_calls_lock, "active_calls_lock");
 	list_initialize(&task->active_calls);
-		
+
 #ifdef CONFIG_UDEBUG
 	/* Init kbox stuff */
@@ -184,5 +184,5 @@
 	mutex_initialize(&task->kb.cleanup_lock, MUTEX_PASSIVE);
 #endif
-	
+
 	return EOK;
 }
@@ -191,5 +191,5 @@
 {
 	task_t *task = (task_t *) obj;
-	
+
 	caps_task_free(task);
 	return 0;
@@ -210,10 +210,10 @@
 		return NULL;
 	}
-	
+
 	task_create_arch(task);
-	
+
 	task->as = as;
 	str_cpy(task->name, TASK_NAME_BUFLEN, name);
-	
+
 	task->container = CONTAINER;
 	task->perms = 0;
@@ -231,5 +231,5 @@
 
 	event_task_init(task);
-	
+
 	task->answerbox.active = true;
 
@@ -237,10 +237,10 @@
 	/* Init debugging stuff */
 	udebug_task_init(&task->udebug);
-	
+
 	/* Init kbox stuff */
 	task->kb.box.active = true;
 	task->kb.finished = false;
 #endif
-	
+
 	if ((ipc_phone_0) &&
 	    (container_check(ipc_phone_0->task->container, task->container))) {
@@ -253,26 +253,26 @@
 			return NULL;
 		}
-		
+
 		kobject_t *phone_obj = kobject_get(task, phone_handle,
 		    KOBJECT_TYPE_PHONE);
 		(void) ipc_phone_connect(phone_obj->phone, ipc_phone_0);
 	}
-	
+
 	futex_task_init(task);
-	
+
 	/*
 	 * Get a reference to the address space.
 	 */
 	as_hold(task->as);
-	
+
 	irq_spinlock_lock(&tasks_lock, true);
-	
+
 	task->taskid = ++task_counter;
 	avltree_node_initialize(&task->tasks_tree_node);
 	task->tasks_tree_node.key = task->taskid;
 	avltree_insert(&tasks_tree, &task->tasks_tree_node);
-	
+
 	irq_spinlock_unlock(&tasks_lock, true);
-	
+
 	return task;
 }
@@ -291,20 +291,20 @@
 	avltree_delete(&tasks_tree, &task->tasks_tree_node);
 	irq_spinlock_unlock(&tasks_lock, true);
-	
+
 	/*
 	 * Perform architecture specific task destruction.
 	 */
 	task_destroy_arch(task);
-	
+
 	/*
 	 * Free up dynamically allocated state.
 	 */
 	futex_task_deinit(task);
-	
+
 	/*
 	 * Drop our reference to the address space.
 	 */
 	as_release(task->as);
-	
+
 	slab_free(task_cache, task);
 }
@@ -388,15 +388,15 @@
 {
 	char namebuf[TASK_NAME_BUFLEN];
-	
+
 	/* Cap length of name and copy it from userspace. */
 	if (name_len > TASK_NAME_BUFLEN - 1)
 		name_len = TASK_NAME_BUFLEN - 1;
-	
+
 	errno_t rc = copy_from_uspace(namebuf, uspace_name, name_len);
 	if (rc != EOK)
 		return (sys_errno_t) rc;
-	
+
 	namebuf[name_len] = '\0';
-	
+
 	/*
 	 * As the task name is referenced also from the
@@ -404,16 +404,16 @@
 	 * of the update.
 	 */
-	
+
 	irq_spinlock_lock(&tasks_lock, true);
 	irq_spinlock_lock(&TASK->lock, false);
 	irq_spinlock_lock(&threads_lock, false);
-	
+
 	/* Set task name */
 	str_cpy(TASK->name, TASK_NAME_BUFLEN, namebuf);
-	
+
 	irq_spinlock_unlock(&threads_lock, false);
 	irq_spinlock_unlock(&TASK->lock, false);
 	irq_spinlock_unlock(&tasks_lock, true);
-	
+
 	return EOK;
 }
@@ -432,5 +432,5 @@
 	if (rc != EOK)
 		return (sys_errno_t) rc;
-	
+
 	return (sys_errno_t) task_kill(taskid);
 }
@@ -453,8 +453,8 @@
 	avltree_node_t *node =
 	    avltree_search(&tasks_tree, (avltree_key_t) id);
-	
+
 	if (node)
 		return avltree_get_instance(node, task_t, tasks_tree_node);
-	
+
 	return NULL;
 }
@@ -478,9 +478,9 @@
 	uint64_t uret = task->ucycles;
 	uint64_t kret = task->kcycles;
-	
+
 	/* Current values of threads */
 	list_foreach(task->threads, th_link, thread_t, thread) {
 		irq_spinlock_lock(&thread->lock, false);
-		
+
 		/* Process only counted threads */
 		if (!thread->uncounted) {
@@ -489,12 +489,12 @@
 				thread_update_accounting(false);
 			}
-			
+
 			uret += thread->ucycles;
 			kret += thread->kcycles;
 		}
-		
+
 		irq_spinlock_unlock(&thread->lock, false);
 	}
-	
+
 	*ucycles = uret;
 	*kcycles = kret;
@@ -505,24 +505,24 @@
 	irq_spinlock_lock(&task->lock, false);
 	irq_spinlock_lock(&threads_lock, false);
-	
+
 	/*
 	 * Interrupt all threads.
 	 */
-	
+
 	list_foreach(task->threads, th_link, thread_t, thread) {
 		bool sleeping = false;
-		
+
 		irq_spinlock_lock(&thread->lock, false);
-		
+
 		thread->interrupted = true;
 		if (thread->state == Sleeping)
 			sleeping = true;
-		
+
 		irq_spinlock_unlock(&thread->lock, false);
-		
+
 		if (sleeping)
 			waitq_interrupt_sleep(thread);
 	}
-	
+
 	irq_spinlock_unlock(&threads_lock, false);
 	irq_spinlock_unlock(&task->lock, false);
@@ -543,7 +543,7 @@
 	if (id == 1)
 		return EPERM;
-	
+
 	irq_spinlock_lock(&tasks_lock, true);
-	
+
 	task_t *task = task_find_by_id(id);
 	if (!task) {
@@ -551,8 +551,8 @@
 		return ENOENT;
 	}
-	
+
 	task_kill_internal(task);
 	irq_spinlock_unlock(&tasks_lock, true);
-	
+
 	return EOK;
 }
@@ -583,9 +583,9 @@
 		}
 	}
-	
+
 	irq_spinlock_lock(&tasks_lock, true);
 	task_kill_internal(TASK);
 	irq_spinlock_unlock(&tasks_lock, true);
-	
+
 	thread_exit();
 }
@@ -599,5 +599,5 @@
 {
 	task_kill_self(notify);
-	
+
 	/* Unreachable */
 	return EOK;
@@ -609,5 +609,5 @@
 	task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);
 	irq_spinlock_lock(&task->lock, false);
-	
+
 	uint64_t ucycles;
 	uint64_t kcycles;
@@ -616,5 +616,5 @@
 	order_suffix(ucycles, &ucycles, &usuffix);
 	order_suffix(kcycles, &kcycles, &ksuffix);
-	
+
 #ifdef __32_BITS__
 	if (*additional)
@@ -627,5 +627,5 @@
 		    ucycles, usuffix, kcycles, ksuffix);
 #endif
-	
+
 #ifdef __64_BITS__
 	if (*additional)
@@ -637,5 +637,5 @@
 		    task->taskid, task->name, task->container, task, task->as);
 #endif
-	
+
 	irq_spinlock_unlock(&task->lock, false);
 	return true;
@@ -651,5 +651,5 @@
 	/* Messing with task structures, avoid deadlock */
 	irq_spinlock_lock(&tasks_lock, true);
-	
+
 #ifdef __32_BITS__
 	if (additional)
@@ -659,5 +659,5 @@
 		    " [ucycles ] [kcycles ]\n");
 #endif
-	
+
 #ifdef __64_BITS__
 	if (additional)
@@ -668,7 +668,7 @@
 		    " [as              ]\n");
 #endif
-	
+
 	avltree_walk(&tasks_tree, task_print_walker, &additional);
-	
+
 	irq_spinlock_unlock(&tasks_lock, true);
 }
Index: kernel/generic/src/proc/thread.c
===================================================================
--- kernel/generic/src/proc/thread.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/proc/thread.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -122,11 +122,11 @@
 	void *arg = THREAD->thread_arg;
 	THREAD->last_cycle = get_cycle();
-	
+
 	/* This is where each thread wakes up after its creation */
 	irq_spinlock_unlock(&THREAD->lock, false);
 	interrupts_enable();
-	
+
 	f(arg);
-	
+
 	/* Accumulate accounting to the task */
 	irq_spinlock_lock(&THREAD->lock, true);
@@ -137,5 +137,5 @@
 		uint64_t kcycles = THREAD->kcycles;
 		THREAD->kcycles = 0;
-		
+
 		irq_spinlock_pass(&THREAD->lock, &TASK->lock);
 		TASK->ucycles += ucycles;
@@ -144,7 +144,7 @@
 	} else
 		irq_spinlock_unlock(&THREAD->lock, true);
-	
+
 	thread_exit();
-	
+
 	/* Not reached */
 }
@@ -156,13 +156,13 @@
 {
 	thread_t *thread = (thread_t *) obj;
-	
+
 	irq_spinlock_initialize(&thread->lock, "thread_t_lock");
 	link_initialize(&thread->rq_link);
 	link_initialize(&thread->wq_link);
 	link_initialize(&thread->th_link);
-	
+
 	/* call the architecture-specific part of the constructor */
 	thr_constructor_arch(thread);
-	
+
 #ifdef CONFIG_FPU
 #ifdef CONFIG_FPU_LAZY
@@ -174,5 +174,5 @@
 #endif /* CONFIG_FPU_LAZY */
 #endif /* CONFIG_FPU */
-	
+
 	/*
 	 * Allocate the kernel stack from the low-memory to prevent an infinite
@@ -193,5 +193,5 @@
 	kmflags |= FRAME_LOWMEM;
 	kmflags &= ~FRAME_HIGHMEM;
-	
+
 	uintptr_t stack_phys =
 	    frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);
@@ -203,11 +203,11 @@
 		return ENOMEM;
 	}
-	
+
 	thread->kstack = (uint8_t *) PA2KA(stack_phys);
-	
+
 #ifdef CONFIG_UDEBUG
 	mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
 #endif
-	
+
 	return EOK;
 }
@@ -217,15 +217,15 @@
 {
 	thread_t *thread = (thread_t *) obj;
-	
+
 	/* call the architecture-specific part of the destructor */
 	thr_destructor_arch(thread);
-	
+
 	frame_free(KA2PA(thread->kstack), STACK_FRAMES);
-	
+
 #ifdef CONFIG_FPU
 	if (thread->saved_fpu_context)
 		slab_free(fpu_context_cache, thread->saved_fpu_context);
 #endif
-	
+
 	return STACK_FRAMES;  /* number of frames freed */
 }
@@ -239,14 +239,14 @@
 {
 	THREAD = NULL;
-	
+
 	atomic_set(&nrdy, 0);
 	thread_cache = slab_cache_create("thread_t", sizeof(thread_t), 0,
 	    thr_constructor, thr_destructor, 0);
-	
+
 #ifdef CONFIG_FPU
 	fpu_context_cache = slab_cache_create("fpu_context_t",
 	    sizeof(fpu_context_t), FPU_CONTEXT_ALIGN, NULL, NULL, 0);
 #endif
-	
+
 	avltree_create(&threads_tree);
 }
@@ -282,9 +282,9 @@
 {
 	irq_spinlock_lock(&thread->lock, true);
-	
+
 	assert(thread->state != Ready);
 
 	before_thread_is_ready(thread);
-	
+
 	int i = (thread->priority < RQ_COUNT - 1) ?
 	    ++thread->priority : thread->priority;
@@ -305,18 +305,18 @@
 		cpu = CPU;
 	}
-	
+
 	thread->state = Ready;
-	
+
 	irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
-	
+
 	/*
 	 * Append thread to respective ready queue
 	 * on respective processor.
 	 */
-	
+
 	list_append(&thread->rq_link, &cpu->rq[i].rq);
 	cpu->rq[i].n++;
 	irq_spinlock_unlock(&(cpu->rq[i].lock), true);
-	
+
 	atomic_inc(&nrdy);
 	atomic_inc(&cpu->nrdy);
@@ -344,24 +344,24 @@
 	if (!thread)
 		return NULL;
-	
+
 	/* Not needed, but good for debugging */
 	memsetb(thread->kstack, STACK_SIZE, 0);
-	
+
 	irq_spinlock_lock(&tidlock, true);
 	thread->tid = ++last_tid;
 	irq_spinlock_unlock(&tidlock, true);
-	
+
 	context_save(&thread->saved_context);
 	context_set(&thread->saved_context, FADDR(cushion),
 	    (uintptr_t) thread->kstack, STACK_SIZE);
-	
+
 	the_initialize((the_t *) thread->kstack);
-	
+
 	ipl_t ipl = interrupts_disable();
 	thread->saved_context.ipl = interrupts_read();
 	interrupts_restore(ipl);
-	
+
 	str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
-	
+
 	thread->thread_code = func;
 	thread->thread_arg = arg;
@@ -377,30 +377,30 @@
 	thread->uspace =
 	    ((flags & THREAD_FLAG_USPACE) == THREAD_FLAG_USPACE);
-	
+
 	thread->nomigrate = 0;
 	thread->state = Entering;
-	
+
 	timeout_initialize(&thread->sleep_timeout);
 	thread->sleep_interruptible = false;
 	thread->sleep_queue = NULL;
 	thread->timeout_pending = false;
-	
+
 	thread->in_copy_from_uspace = false;
 	thread->in_copy_to_uspace = false;
-	
+
 	thread->interrupted = false;
 	thread->detached = false;
 	waitq_initialize(&thread->join_wq);
-	
+
 	thread->task = task;
-	
+
 	thread->workq = NULL;
-	
+
 	thread->fpu_context_exists = false;
 	thread->fpu_context_engaged = false;
-	
+
 	avltree_node_initialize(&thread->threads_tree_node);
 	thread->threads_tree_node.key = (uintptr_t) thread;
-	
+
 #ifdef CONFIG_UDEBUG
 	/* Initialize debugging stuff */
@@ -408,13 +408,13 @@
 	udebug_thread_initialize(&thread->udebug);
 #endif
-	
+
 	/* Might depend on previous initialization */
 	thread_create_arch(thread);
-	
+
 	rcu_thread_init(thread);
-	
+
 	if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
 		thread_attach(thread, task);
-	
+
 	return thread;
 }
@@ -435,16 +435,16 @@
 	assert(thread->task);
 	assert(thread->cpu);
-	
+
 	irq_spinlock_lock(&thread->cpu->lock, false);
 	if (thread->cpu->fpu_owner == thread)
 		thread->cpu->fpu_owner = NULL;
 	irq_spinlock_unlock(&thread->cpu->lock, false);
-	
+
 	irq_spinlock_pass(&thread->lock, &threads_lock);
-	
+
 	avltree_delete(&threads_tree, &thread->threads_tree_node);
-	
+
 	irq_spinlock_pass(&threads_lock, &thread->task->lock);
-	
+
 	/*
 	 * Detach from the containing task.
@@ -452,5 +452,5 @@
 	list_remove(&thread->th_link);
 	irq_spinlock_unlock(&thread->task->lock, irq_res);
-	
+
 	/*
 	 * Drop the reference to the containing task.
@@ -475,16 +475,16 @@
 	 */
 	irq_spinlock_lock(&task->lock, true);
-	
+
 	/* Hold a reference to the task. */
 	task_hold(task);
-	
+
 	/* Must not count kbox thread into lifecount */
 	if (thread->uspace)
 		atomic_inc(&task->lifecount);
-	
+
 	list_append(&thread->th_link, &task->threads);
-	
+
 	irq_spinlock_pass(&task->lock, &threads_lock);
-	
+
 	/*
 	 * Register this thread in the system-wide list.
@@ -506,5 +506,5 @@
 		/* Generate udebug THREAD_E event */
 		udebug_thread_e_event();
-		
+
 		/*
 		 * This thread will not execute any code or system calls from
@@ -527,5 +527,5 @@
 		}
 	}
-	
+
 restart:
 	irq_spinlock_lock(&THREAD->lock, true);
@@ -535,10 +535,10 @@
 		goto restart;
 	}
-	
+
 	THREAD->state = Exiting;
 	irq_spinlock_unlock(&THREAD->lock, true);
-	
+
 	scheduler();
-	
+
 	/* Not reached */
 	while (true);
@@ -562,12 +562,12 @@
 {
 	assert(thread != NULL);
-	
+
 	irq_spinlock_lock(&thread->lock, true);
-	
+
 	thread->interrupted = true;
 	bool sleeping = (thread->state == Sleeping);
-	
+
 	irq_spinlock_unlock(&thread->lock, true);
-	
+
 	if (sleeping)
 		waitq_interrupt_sleep(thread);
@@ -583,11 +583,11 @@
 {
 	assert(thread != NULL);
-	
+
 	bool interrupted;
-	
+
 	irq_spinlock_lock(&thread->lock, true);
 	interrupted = thread->interrupted;
 	irq_spinlock_unlock(&thread->lock, true);
-	
+
 	return interrupted;
 }
@@ -597,5 +597,5 @@
 {
 	assert(THREAD);
-	
+
 	THREAD->nomigrate++;
 }
@@ -606,5 +606,5 @@
 	assert(THREAD);
 	assert(THREAD->nomigrate > 0);
-	
+
 	if (THREAD->nomigrate > 0)
 		THREAD->nomigrate--;
@@ -624,5 +624,5 @@
 	while (sec > 0) {
 		uint32_t period = (sec > 1000) ? 1000 : sec;
-		
+
 		thread_usleep(period * 1000000);
 		sec -= period;
@@ -643,14 +643,14 @@
 	if (thread == THREAD)
 		return EINVAL;
-	
+
 	/*
 	 * Since thread join can only be called once on an undetached thread,
 	 * the thread pointer is guaranteed to be still valid.
 	 */
-	
+
 	irq_spinlock_lock(&thread->lock, true);
 	assert(!thread->detached);
 	irq_spinlock_unlock(&thread->lock, true);
-	
+
 	return waitq_sleep_timeout(&thread->join_wq, usec, flags, NULL);
 }
@@ -672,5 +672,5 @@
 	irq_spinlock_lock(&thread->lock, true);
 	assert(!thread->detached);
-	
+
 	if (thread->state == Lingering) {
 		/*
@@ -683,5 +683,5 @@
 		thread->detached = true;
 	}
-	
+
 	irq_spinlock_unlock(&thread->lock, true);
 }
@@ -697,7 +697,7 @@
 {
 	waitq_t wq;
-	
+
 	waitq_initialize(&wq);
-	
+
 	(void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
 }
@@ -707,10 +707,10 @@
 	bool *additional = (bool *) arg;
 	thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);
-	
+
 	uint64_t ucycles, kcycles;
 	char usuffix, ksuffix;
 	order_suffix(thread->ucycles, &ucycles, &usuffix);
 	order_suffix(thread->kcycles, &kcycles, &ksuffix);
-	
+
 	char *name;
 	if (str_cmp(thread->name, "uinit") == 0)
@@ -718,5 +718,5 @@
 	else
 		name = thread->name;
-	
+
 #ifdef __32_BITS__
 	if (*additional)
@@ -729,5 +729,5 @@
 		    thread->task, thread->task->container);
 #endif
-	
+
 #ifdef __64_BITS__
 	if (*additional)
@@ -741,5 +741,5 @@
 		    thread->task, thread->task->container);
 #endif
-	
+
 	if (*additional) {
 		if (thread->cpu)
@@ -747,18 +747,18 @@
 		else
 			printf("none ");
-		
+
 		if (thread->state == Sleeping) {
 #ifdef __32_BITS__
 			printf(" %10p", thread->sleep_queue);
 #endif
-			
+
 #ifdef __64_BITS__
 			printf(" %18p", thread->sleep_queue);
 #endif
 		}
-		
+
 		printf("\n");
 	}
-	
+
 	return true;
 }
@@ -773,5 +773,5 @@
 	/* Messing with thread structures, avoid deadlock */
 	irq_spinlock_lock(&threads_lock, true);
-	
+
 #ifdef __32_BITS__
 	if (additional)
@@ -782,5 +782,5 @@
 		    " [ctn]\n");
 #endif
-	
+
 #ifdef __64_BITS__
 	if (additional) {
@@ -791,7 +791,7 @@
 		    " [task            ] [ctn]\n");
 #endif
-	
+
 	avltree_walk(&threads_tree, thread_walker, &additional);
-	
+
 	irq_spinlock_unlock(&threads_lock, true);
 }
@@ -814,5 +814,5 @@
 	avltree_node_t *node =
 	    avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) thread));
-	
+
 	return node != NULL;
 }
@@ -832,10 +832,10 @@
 	assert(interrupts_disabled());
 	assert(irq_spinlock_locked(&THREAD->lock));
-	
+
 	if (user)
 		THREAD->ucycles += time - THREAD->last_cycle;
 	else
 		THREAD->kcycles += time - THREAD->last_cycle;
-	
+
 	THREAD->last_cycle = time;
 }
@@ -846,10 +846,10 @@
 	    (thread_t *) avltree_get_instance(node, thread_t, threads_tree_node);
 	thread_iterator_t *iterator = (thread_iterator_t *) arg;
-	
+
 	if (thread->tid == iterator->thread_id) {
 		iterator->thread = thread;
 		return false;
 	}
-	
+
 	return true;
 }
@@ -869,12 +869,12 @@
 	assert(interrupts_disabled());
 	assert(irq_spinlock_locked(&threads_lock));
-	
+
 	thread_iterator_t iterator;
-	
+
 	iterator.thread_id = thread_id;
 	iterator.thread = NULL;
-	
+
 	avltree_walk(&threads_tree, thread_search_walker, (void *) &iterator);
-	
+
 	return iterator.thread;
 }
@@ -885,5 +885,5 @@
 {
 	irq_spinlock_lock(&threads_lock, true);
-	
+
 	thread_t *thread = thread_find_by_id(thread_id);
 	if (thread == NULL) {
@@ -892,7 +892,7 @@
 		return;
 	}
-	
+
 	irq_spinlock_lock(&thread->lock, false);
-	
+
 	/*
 	 * Schedule a stack trace to be printed
@@ -906,5 +906,5 @@
 	 * is probably justifiable.
 	 */
-	
+
 	bool sleeping = false;
 	istate_t *istate = thread->udebug.uspace_state;
@@ -916,10 +916,10 @@
 	} else
 		printf("Thread interrupt state not available.\n");
-	
+
 	irq_spinlock_unlock(&thread->lock, false);
-	
+
 	if (sleeping)
 		waitq_interrupt_sleep(thread);
-	
+
 	irq_spinlock_unlock(&threads_lock, true);
 }
@@ -935,12 +935,12 @@
 	if (name_len > THREAD_NAME_BUFLEN - 1)
 		name_len = THREAD_NAME_BUFLEN - 1;
-	
+
 	char namebuf[THREAD_NAME_BUFLEN];
 	errno_t rc = copy_from_uspace(namebuf, uspace_name, name_len);
 	if (rc != EOK)
 		return (sys_errno_t) rc;
-	
+
 	namebuf[name_len] = 0;
-	
+
 	/*
 	 * In case of failure, kernel_uarg will be deallocated in this function.
@@ -949,5 +949,5 @@
 	uspace_arg_t *kernel_uarg =
 	    (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
-	
+
 	rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
 	if (rc != EOK) {
@@ -955,5 +955,5 @@
 		return (sys_errno_t) rc;
 	}
-	
+
 	thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
 	    THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf);
@@ -968,5 +968,5 @@
 				 * creation now.
 				 */
-				
+
 				/*
 				 * The new thread structure is initialized, but
@@ -976,9 +976,9 @@
 				slab_free(thread_cache, thread);
 				free(kernel_uarg);
-				
+
 				return (sys_errno_t) rc;
 			 }
 		}
-		
+
 #ifdef CONFIG_UDEBUG
 		/*
@@ -994,9 +994,9 @@
 #endif
 		thread_ready(thread);
-		
+
 		return 0;
 	} else
 		free(kernel_uarg);
-	
+
 	return (sys_errno_t) ENOMEM;
 }
Index: kernel/generic/src/security/perm.c
===================================================================
--- kernel/generic/src/security/perm.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/security/perm.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -70,5 +70,5 @@
 	perm_t perms = task->perms;
 	irq_spinlock_unlock(&task->lock, true);
-	
+
 	return perms;
 }
@@ -88,17 +88,17 @@
 	if (!(perm_get(TASK) & PERM_PERM))
 		return EPERM;
-	
+
 	irq_spinlock_lock(&tasks_lock, true);
 	task_t *task = task_find_by_id(taskid);
-	
+
 	if ((!task) || (!container_check(CONTAINER, task->container))) {
 		irq_spinlock_unlock(&tasks_lock, true);
 		return ENOENT;
 	}
-	
+
 	irq_spinlock_lock(&task->lock, false);
 	task->perms |= perms;
 	irq_spinlock_unlock(&task->lock, false);
-	
+
 	irq_spinlock_unlock(&tasks_lock, true);
 	return EOK;
@@ -119,5 +119,5 @@
 {
 	irq_spinlock_lock(&tasks_lock, true);
-	
+
 	task_t *task = task_find_by_id(taskid);
 	if ((!task) || (!container_check(CONTAINER, task->container))) {
@@ -125,5 +125,5 @@
 		return ENOENT;
 	}
-	
+
 	/*
 	 * Revoking permissions is different from granting them in that
@@ -132,5 +132,5 @@
 	 */
 	irq_spinlock_unlock(&TASK->lock, false);
-	
+
 	if ((!(TASK->perms & PERM_PERM)) || (task != TASK)) {
 		irq_spinlock_unlock(&TASK->lock, false);
@@ -138,8 +138,8 @@
 		return EPERM;
 	}
-	
+
 	task->perms &= ~perms;
 	irq_spinlock_unlock(&TASK->lock, false);
-	
+
 	irq_spinlock_unlock(&tasks_lock, true);
 	return EOK;
@@ -164,5 +164,5 @@
 	if (rc != EOK)
 		return (sys_errno_t) rc;
-	
+
 	return perm_grant((task_id_t) taskid, perms);
 }
@@ -185,5 +185,5 @@
 	if (rc != EOK)
 		return (sys_errno_t) rc;
-	
+
 	return perm_revoke((task_id_t) taskid, perms);
 }
Index: kernel/generic/src/smp/ipi.c
===================================================================
--- kernel/generic/src/smp/ipi.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/smp/ipi.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -55,5 +55,5 @@
 	 * - if there is only one CPU but the kernel was compiled with CONFIG_SMP
 	 */
-	
+
 	if (config.cpu_count > 1)
 		ipi_broadcast_arch(ipi);
Index: kernel/generic/src/smp/smp.c
===================================================================
--- kernel/generic/src/smp/smp.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/smp/smp.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -34,5 +34,5 @@
  * @file
  */
- 
+
 #include <smp/smp.h>
 
Index: kernel/generic/src/smp/smp_call.c
===================================================================
--- kernel/generic/src/smp/smp_call.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/smp/smp_call.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -55,5 +55,5 @@
 	assert(CPU);
 	assert(PREEMPTION_DISABLED || interrupts_disabled());
-	
+
 	spinlock_initialize(&CPU->smp_calls_lock, "cpu[].smp_calls_lock");
 	list_initialize(&CPU->smp_pending_calls);
@@ -133,5 +133,5 @@
 	assert(!interrupts_disabled());
 	assert(call_info != NULL);
-	
+
 	/* Discard invalid calls. */
 	if (config.cpu_count <= cpu_id || !cpus[cpu_id].active) {
@@ -140,10 +140,10 @@
 		return;
 	}
-	
+
 	/* Protect cpu->id against migration. */
 	preemption_disable();
 
 	call_start(call_info, func, arg);
-	
+
 	if (cpu_id != CPU->id) {
 #ifdef CONFIG_SMP
@@ -169,8 +169,8 @@
 		func(arg);
 		interrupts_restore(ipl);
-		
+
 		call_done(call_info);
 	}
-	
+
 	preemption_enable();
 }
@@ -209,8 +209,8 @@
 	assert(interrupts_disabled());
 	assert(CPU);
-	
+
 	list_t calls_list;
 	list_initialize(&calls_list);
-	
+
 	/*
 	 * Acts as a load memory barrier. Any changes made by the cpu that
@@ -224,8 +224,8 @@
 	for (link_t *cur = calls_list.head.next, *next = cur->next;
 		!list_empty(&calls_list); cur = next, next = cur->next) {
-		
+
 		smp_call_t *call_info = list_get_instance(cur, smp_call_t, calls_link);
 		list_remove(cur);
-		
+
 		call_info->func(call_info->arg);
 		call_done(call_info);
@@ -240,5 +240,5 @@
 	call_info->func = func;
 	call_info->arg = arg;
-	
+
 	/*
 	 * We can't use standard spinlocks here because we want to lock
@@ -247,5 +247,5 @@
 	 */
 	atomic_set(&call_info->pending, 1);
-	
+
 	/* Let initialization complete before continuing. */
 	memory_barrier();
Index: kernel/generic/src/synch/condvar.c
===================================================================
--- kernel/generic/src/synch/condvar.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/synch/condvar.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -143,5 +143,5 @@
 	/* Lock only after releasing the waitq to avoid a possible deadlock. */
 	spinlock_lock(lock);
-	
+
 	return rc;
 }
@@ -168,7 +168,7 @@
 	ipl_t ipl = irq_lock->ipl;
 	bool guard = irq_lock->guard;
-	
+
 	irq_lock->guard = false;
-	
+
 	/*
 	 * waitq_prepare() restores interrupts to the current state,
@@ -182,8 +182,8 @@
 	 */
 	rc = _condvar_wait_timeout_spinlock(cv, &irq_lock->lock, usec, flags);
-	
+
 	irq_lock->guard = guard;
 	irq_lock->ipl = ipl;
-	
+
 	return rc;
 }
Index: kernel/generic/src/synch/futex.c
===================================================================
--- kernel/generic/src/synch/futex.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/synch/futex.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -157,7 +157,7 @@
 {
 	task->futexes = malloc(sizeof(struct futex_cache), 0);
-	
+
 	cht_create(&task->futexes->ht, 0, 0, 0, true, &task_futex_ht_ops);
-	
+
 	list_initialize(&task->futexes->list);
 	spinlock_initialize(&task->futexes->list_lock, "futex-list-lock");
@@ -183,5 +183,5 @@
 	struct futex_cache *cache =
 		member_to_inst(work, struct futex_cache, destroy_work);
-	
+
 	/*
 	 * Destroy the cache before manually freeing items of the cache in case
@@ -189,5 +189,5 @@
 	 */
 	cht_destroy_unsafe(&cache->ht);
-	
+
 	/* Manually free futex_ptr cache items. */
 	list_foreach_safe(cache->list, cur_link, next_link) {
@@ -197,5 +197,5 @@
 		free(fut_ptr);
 	}
-	
+
 	free(cache);
 }
@@ -205,8 +205,8 @@
 {
 	struct futex_cache *futexes = TASK->futexes;
-	
+
 	/* All threads of this task have terminated. This is the last thread. */
 	spinlock_lock(&futexes->list_lock);
-	
+
 	list_foreach_safe(futexes->list, cur_link, next_link) {
 		futex_ptr_t *fut_ptr = member_to_inst(cur_link, futex_ptr_t, all_link);
@@ -222,5 +222,5 @@
 		futex_release_ref_locked(fut_ptr->futex);
 	}
-	
+
 	spinlock_unlock(&futexes->list_lock);
 }
@@ -252,7 +252,7 @@
 	assert(spinlock_locked(&futex_ht_lock));
 	assert(0 < futex->refcount);
-	
+
 	--futex->refcount;
-	
+
 	if (0 == futex->refcount) {
 		hash_table_remove(&futex_ht, &futex->paddr);
@@ -272,5 +272,5 @@
 {
 	futex_t *futex = find_cached_futex(uaddr);
-	
+
 	if (futex)
 		return futex;
@@ -303,8 +303,8 @@
 		    (uaddr - ALIGN_DOWN(uaddr, PAGE_SIZE));
 	}
-	
+
 	spinlock_unlock(&futex_ht_lock);
 	page_table_unlock(AS, false);
-	
+
 	return success;
 }
@@ -314,5 +314,5 @@
 {
 	cht_read_lock();
-	
+
 	futex_t *futex;
 	cht_link_t *futex_ptr_link = cht_find_lazy(&TASK->futexes->ht, &uaddr);
@@ -321,12 +321,12 @@
 		futex_ptr_t *futex_ptr
 			= member_to_inst(futex_ptr_link, futex_ptr_t, cht_link);
-		
+
 		futex = futex_ptr->futex;
 	} else {
 		futex = NULL;
 	}
-	
+
 	cht_read_unlock();
-	
+
 	return futex;
 }
@@ -340,5 +340,5 @@
 {
 	futex_t *futex = malloc(sizeof(futex_t), 0);
-	
+
 	/*
 	 * Find the futex object in the global futex table (or insert it
@@ -346,7 +346,7 @@
 	 */
 	spinlock_lock(&futex_ht_lock);
-	
+
 	ht_link_t *fut_link = hash_table_find(&futex_ht, &phys_addr);
-	
+
 	if (fut_link) {
 		free(futex);
@@ -357,7 +357,7 @@
 		hash_table_insert(&futex_ht, &futex->ht_link);
 	}
-	
+
 	spinlock_unlock(&futex_ht_lock);
-	
+
 	/*
 	 * Cache the link to the futex object for this task.
@@ -365,10 +365,10 @@
 	futex_ptr_t *fut_ptr = malloc(sizeof(futex_ptr_t), 0);
 	cht_link_t *dup_link;
-	
+
 	fut_ptr->futex = futex;
 	fut_ptr->uaddr = uaddr;
-	
+
 	cht_read_lock();
-	
+
 	/* Cache the mapping from the virtual address to the futex for this task. */
 	if (cht_insert_unique(&TASK->futexes->ht, &fut_ptr->cht_link, &dup_link)) {
@@ -380,5 +380,5 @@
 		free(fut_ptr);
 		futex_release_ref_locked(futex);
-		
+
 		futex_ptr_t *dup = member_to_inst(dup_link, futex_ptr_t, cht_link);
 		futex = dup->futex;
@@ -386,5 +386,5 @@
 
 	cht_read_unlock();
-	
+
 	return futex;
 }
@@ -401,5 +401,5 @@
 {
 	futex_t *futex = get_futex(uaddr);
-	
+
 	if (!futex)
 		return (sys_errno_t) ENOENT;
@@ -428,5 +428,5 @@
 {
 	futex_t *futex = get_futex(uaddr);
-	
+
 	if (futex) {
 		waitq_wakeup(&futex->wq, WAKEUP_FIRST);
@@ -492,5 +492,5 @@
 	const futex_ptr_t *fut_ptr1 = member_to_inst(item1, futex_ptr_t, cht_link);
 	const futex_ptr_t *fut_ptr2 = member_to_inst(item2, futex_ptr_t, cht_link);
-	
+
 	return fut_ptr1->uaddr == fut_ptr2->uaddr;
 }
@@ -500,5 +500,5 @@
 	const futex_ptr_t *fut_ptr = member_to_inst(item, futex_ptr_t, cht_link);
 	uintptr_t uaddr = *(uintptr_t*)key;
-	
+
 	return fut_ptr->uaddr == uaddr;
 }
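
The futex.c hunks touch the two-level lookup: get_futex() first consults the task-local concurrent hash table keyed by the userspace virtual address (find_cached_futex()) and only on a miss falls back to the global futex_ht keyed by the physical address, taking a reference under futex_ht_lock. A reduced sketch of the reference-release discipline visible above, assuming the caller already holds the protecting lock (the real code also unlinks the object from futex_ht before freeing it):

    #include <assert.h>
    #include <stddef.h>
    #include <stdlib.h>

    typedef struct futex {
        size_t refcount;
        /* ... wait queue, hash table links ... */
    } futex_t;

    /* Caller holds the lock protecting the table and the refcount. */
    static void futex_release_ref(futex_t *futex)
    {
        assert(0 < futex->refcount);

        --futex->refcount;

        /* Last reference dropped: the object leaves the table and is freed. */
        if (0 == futex->refcount)
            free(futex);
    }
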
Index: kernel/generic/src/synch/mutex.c
===================================================================
--- kernel/generic/src/synch/mutex.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/synch/mutex.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -108,5 +108,5 @@
 		assert(usec == SYNCH_NO_TIMEOUT);
 		assert(!(flags & SYNCH_FLAGS_INTERRUPTIBLE));
-		
+
 		unsigned int cnt = 0;
 		bool deadlock_reported = false;
Index: kernel/generic/src/synch/rcu.c
===================================================================
--- kernel/generic/src/synch/rcu.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/synch/rcu.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -26,6 +26,6 @@
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
- 
- 
+
+
 /** @addtogroup sync
  * @{
@@ -182,5 +182,5 @@
 	 */
 	rcu_gp_t completed_gp;
-	
+
 	/** Protects the following 3 fields. */
 	IRQ_SPINLOCK_DECLARE(preempt_lock);
@@ -195,7 +195,7 @@
 	 */
 	bool preempt_blocking_det;
-	
+
 #ifdef RCU_PREEMPT_A
-	
+
 	/**
 	 * The detector waits on this semaphore for any preempted readers
@@ -205,5 +205,5 @@
 
 #elif defined(RCU_PREEMPT_PODZIMEK)
-	
+
 	/** Reclaimers notify the detector when they request more grace periods.*/
 	condvar_t req_gp_changed;
@@ -228,5 +228,5 @@
 	semaphore_t remaining_readers;
 #endif
-	
+
 	/** Excludes simultaneous rcu_barrier() calls. */
 	mutex_t barrier_mtx;
@@ -235,8 +235,8 @@
 	/** rcu_barrier() waits for the completion of barrier callbacks on this wq.*/
 	waitq_t barrier_wq;
-	
+
 	/** Interruptible attached detector thread pointer. */
 	thread_t *detector_thr;
-	
+
 	/* Some statistics. */
 	size_t stat_expedited_cnt;
@@ -305,10 +305,10 @@
 	_rcu_cur_gp = 0;
 	rcu.completed_gp = 0;
-	
+
 	irq_spinlock_initialize(&rcu.preempt_lock, "rcu.preempt_lock");
 	list_initialize(&rcu.cur_preempted);
 	list_initialize(&rcu.next_preempted);
 	rcu.preempt_blocking_det = false;
-	
+
 	mutex_initialize(&rcu.barrier_mtx, MUTEX_PASSIVE);
 	atomic_set(&rcu.barrier_wait_cnt, 0);
@@ -316,15 +316,15 @@
 
 	semaphore_initialize(&rcu.remaining_readers, 0);
-	
+
 #ifdef RCU_PREEMPT_PODZIMEK
 	condvar_initialize(&rcu.req_gp_changed);
-	
+
 	rcu.req_gp_end_cnt = 0;
 	rcu.req_expedited_cnt = 0;
 	atomic_set(&rcu.delaying_cpu_cnt, 0);
 #endif
-	
+
 	rcu.detector_thr = NULL;
-	
+
 	rcu.stat_expedited_cnt = 0;
 	rcu.stat_delayed_cnt = 0;
@@ -347,5 +347,5 @@
 	CPU->rcu.signal_unlock = false;
 #endif
-	
+
 	CPU->rcu.cur_cbs = NULL;
 	CPU->rcu.cur_cbs_cnt = 0;
@@ -358,5 +358,5 @@
 	CPU->rcu.cur_cbs_gp = 0;
 	CPU->rcu.next_cbs_gp = 0;
-	
+
 	semaphore_initialize(&CPU->rcu.arrived_flag, 0);
 
@@ -364,5 +364,5 @@
 	if (config.cpu_active == 1)
 		CPU->rcu.reclaimer_thr = NULL;
-	
+
 	CPU->rcu.stat_max_cbs = 0;
 	CPU->rcu.stat_avg_cbs = 0;
@@ -379,5 +379,5 @@
 	start_detector();
 #endif
-	
+
 	start_reclaimers();
 }
@@ -391,5 +391,5 @@
 	thread->rcu.was_preempted = false;
 #endif
-	
+
 	link_initialize(&thread->rcu.preempt_link);
 }
@@ -406,5 +406,5 @@
 	for (unsigned int cpu_id = 0; cpu_id < config.cpu_active; ++cpu_id) {
 		assert(cpus[cpu_id].rcu.reclaimer_thr != NULL);
-	
+
 		if (cpus[cpu_id].rcu.reclaimer_thr) {
 			thread_interrupt(cpus[cpu_id].rcu.reclaimer_thr);
@@ -432,5 +432,5 @@
 	uint64_t completed = rcu.completed_gp;
 	spinlock_unlock(&rcu.gp_lock);
-	
+
 	return completed;
 }
@@ -441,7 +441,7 @@
 	for (unsigned int cpu_id = 0; cpu_id < config.cpu_count; ++cpu_id) {
 		char name[THREAD_NAME_BUFLEN] = {0};
-		
+
 		snprintf(name, THREAD_NAME_BUFLEN - 1, "rcu-rec/%u", cpu_id);
-		
+
 		cpus[cpu_id].rcu.reclaimer_thr =
 			thread_create(reclaimer, NULL, TASK, THREAD_FLAG_NONE, name);
@@ -462,8 +462,8 @@
 	rcu.detector_thr =
 		thread_create(detector, NULL, TASK, THREAD_FLAG_NONE, "rcu-det");
-	
+
 	if (!rcu.detector_thr)
 		panic("Failed to create RCU detector thread.");
-	
+
 	thread_ready(rcu.detector_thr);
 }
@@ -475,5 +475,5 @@
 	bool locked = 0 < CPU->rcu.nesting_cnt;
 	preemption_enable();
-	
+
 	return locked;
 }
@@ -489,8 +489,8 @@
 {
 	assert(PREEMPTION_DISABLED || interrupts_disabled());
-	
+
 	if (0 == --(*pnesting_cnt)) {
 		_rcu_record_qs();
-		
+
 		/*
 		 * The thread was preempted while in a critical section or
@@ -511,5 +511,5 @@
 {
 	assert(PREEMPTION_DISABLED || interrupts_disabled());
-	
+
 	/*
 	 * If an interrupt occurs here (even a NMI) it may beat us to
@@ -517,5 +517,5 @@
 	 * for us.
 	 */
-	
+
 	/*
 	 * If the detector is eagerly waiting for this cpu's reader to unlock,
@@ -525,5 +525,5 @@
 		semaphore_up(&rcu.remaining_readers);
 	}
-	
+
 	/*
 	 * This reader was preempted while in a reader section.
@@ -536,5 +536,5 @@
 		rm_preempted_reader();
 	}
-	
+
 	/* If there was something to signal to the detector we have done so. */
 	CPU->rcu.signal_unlock = false;
@@ -565,5 +565,5 @@
 	/* Calling from a reader section will deadlock. */
 	assert(!rcu_read_locked());
-	
+
 	synch_item_t completion;
 
@@ -589,5 +589,5 @@
 	 */
 	mutex_lock(&rcu.barrier_mtx);
-	
+
 	/*
 	 * Ensure we queue a barrier callback on all cpus before the already
@@ -598,13 +598,13 @@
 	DEFINE_CPU_MASK(cpu_mask);
 	cpu_mask_active(cpu_mask);
-	
+
 	cpu_mask_for_each(*cpu_mask, cpu_id) {
 		smp_call(cpu_id, add_barrier_cb, NULL);
 	}
-	
+
 	if (0 < atomic_predec(&rcu.barrier_wait_cnt)) {
 		waitq_sleep(&rcu.barrier_wq);
 	}
-	
+
 	mutex_unlock(&rcu.barrier_mtx);
 }
@@ -659,8 +659,8 @@
 {
 	assert(rcu_item);
-	
+
 	rcu_item->func = func;
 	rcu_item->next = NULL;
-	
+
 	preemption_disable();
 
@@ -670,19 +670,19 @@
 		= local_atomic_exchange(&r->parriving_cbs_tail, &rcu_item->next);
 	*prev_tail = rcu_item;
-	
+
 	/* Approximate the number of callbacks present. */
 	++r->arriving_cbs_cnt;
-	
+
 	if (expedite) {
 		r->expedite_arriving = true;
 	}
-	
+
 	bool first_cb = (prev_tail == &CPU->rcu.arriving_cbs);
-	
+
 	/* Added first callback - notify the reclaimer. */
 	if (first_cb && !semaphore_count_get(&r->arrived_flag)) {
 		semaphore_up(&r->arrived_flag);
 	}
-	
+
 	preemption_enable();
 }
@@ -725,12 +725,12 @@
 	rcu_gp_t last_compl_gp = 0;
 	bool ok = true;
-	
+
 	while (ok && wait_for_pending_cbs()) {
 		assert(CPU->rcu.reclaimer_thr == THREAD);
-		
+
 		exec_completed_cbs(last_compl_gp);
 
 		bool expedite = advance_cbs();
-		
+
 		ok = wait_for_cur_cbs_gp_end(expedite, &last_compl_gp);
 	}
@@ -744,9 +744,9 @@
 
 	bool ok = true;
-	
+
 	while (arriving_cbs_empty() && ok) {
 		ok = semaphore_down_interruptable(&CPU->rcu.arrived_flag);
 	}
-	
+
 	return ok;
 }
@@ -763,11 +763,11 @@
 {
 	upd_stat_missed_gp(last_completed_gp);
-	
+
 	/* Both next_cbs and cur_cbs GP elapsed. */
 	if (CPU->rcu.next_cbs_gp <= last_completed_gp) {
 		assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
-		
+
 		size_t exec_cnt = CPU->rcu.cur_cbs_cnt + CPU->rcu.next_cbs_cnt;
-		
+
 		if (exec_cnt < CRITICAL_THRESHOLD) {
 			exec_cbs(&CPU->rcu.cur_cbs);
@@ -784,5 +784,5 @@
 			preemption_enable();
 		}
-		
+
 		CPU->rcu.cur_cbs_cnt = 0;
 		CPU->rcu.next_cbs_cnt = 0;
@@ -815,10 +815,10 @@
 		rcu_item_t *next = rcu_item->next;
 		rcu_func_t func = rcu_item->func;
-		
+
 		func(rcu_item);
-		
+
 		rcu_item = next;
 	}
-	
+
 	*phead = NULL;
 }
@@ -843,10 +843,10 @@
 	CPU->rcu.cur_cbs_cnt = CPU->rcu.next_cbs_cnt;
 	CPU->rcu.cur_cbs_gp = CPU->rcu.next_cbs_gp;
-	
+
 	/* Move arriving_cbs to next_cbs. */
-	
+
 	CPU->rcu.next_cbs_cnt = CPU->rcu.arriving_cbs_cnt;
 	CPU->rcu.arriving_cbs_cnt = 0;
-	
+
 	/*
 	 * Too many callbacks queued. Better speed up the detection
@@ -859,5 +859,5 @@
 	/* Start moving the arriving_cbs list to next_cbs. */
 	CPU->rcu.next_cbs = CPU->rcu.arriving_cbs;
-	
+
 	/*
 	 * At least one callback arrived. The tail therefore does not point
@@ -866,5 +866,5 @@
 	if (CPU->rcu.next_cbs) {
 		assert(CPU->rcu.parriving_cbs_tail != &CPU->rcu.arriving_cbs);
-		
+
 		CPU->rcu.arriving_cbs = NULL;
 		/* Reset arriving_cbs before updating the tail pointer. */
@@ -883,5 +883,5 @@
 	/* Update statistics of arrived callbacks. */
 	upd_stat_cb_cnts(CPU->rcu.next_cbs_cnt);
-	
+
 	/*
 	 * Make changes prior to queuing next_cbs visible to readers.
@@ -891,11 +891,11 @@
 
 	/* At the end of next_cbs_gp, exec next_cbs. Determine what GP that is. */
-	
+
 	if (!next_cbs_empty()) {
 		spinlock_lock(&rcu.gp_lock);
-	
+
 		/* Exec next_cbs at the end of the next GP. */
 		CPU->rcu.next_cbs_gp = _rcu_cur_gp + 1;
-		
+
 		/*
 		 * There are no callbacks to invoke before next_cbs. Instruct
@@ -908,12 +908,12 @@
 			CPU->rcu.cur_cbs_gp = rcu.completed_gp + 1;
 		}
-		
+
 		spinlock_unlock(&rcu.gp_lock);
 	} else {
 		CPU->rcu.next_cbs_gp = CPU->rcu.cur_cbs_gp;
 	}
-	
+
 	assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
-	
+
 	return expedite;
 }
@@ -936,5 +936,5 @@
 	assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
 	assert(CPU->rcu.cur_cbs_gp <= _rcu_cur_gp + 1);
-	
+
 	while (rcu.completed_gp < CPU->rcu.cur_cbs_gp) {
 		/* GP has not yet started - start a new one. */
@@ -952,12 +952,12 @@
 		} else {
 			/* GP detection is in progress.*/
-			
+
 			if (expedite)
 				condvar_signal(&rcu.expedite_now);
-			
+
 			/* Wait for the GP to complete. */
 			errno_t ret = _condvar_wait_timeout_spinlock(&rcu.gp_ended, &rcu.gp_lock,
 				SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);
-			
+
 			if (ret == EINTR) {
 				spinlock_unlock(&rcu.gp_lock);
@@ -966,10 +966,10 @@
 		}
 	}
-	
+
 	upd_missed_gp_in_wait(rcu.completed_gp);
-	
+
 	*completed_gp = rcu.completed_gp;
 	spinlock_unlock(&rcu.gp_lock);
-	
+
 	return true;
 }
@@ -978,22 +978,22 @@
 {
 	DEFINE_CPU_MASK(reader_cpus);
-	
+
 	cpu_mask_active(reader_cpus);
 	rm_quiescent_cpus(reader_cpus);
-	
+
 	while (!cpu_mask_is_none(reader_cpus)) {
 		/* Give cpus a chance to context switch (a QS) and batch callbacks. */
 		if(!gp_sleep(&expedite))
 			return false;
-		
+
 		rm_quiescent_cpus(reader_cpus);
 		sample_cpus(reader_cpus, reader_cpus);
 	}
-	
+
 	/* Update statistic. */
 	if (expedite) {
 		++rcu.stat_expedited_cnt;
 	}
-	
+
 	/*
 	 * All cpus have passed through a QS and see the most recent _rcu_cur_gp.
@@ -1032,9 +1032,9 @@
 	assert(interrupts_disabled());
 	cpu_mask_t *reader_cpus = (cpu_mask_t *)arg;
-	
+
 	bool locked = RCU_CNT_INC <= THE->rcu_nesting;
 	/* smp_call machinery makes the most current _rcu_cur_gp visible. */
 	bool passed_qs = (CPU->rcu.last_seen_gp == _rcu_cur_gp);
-		
+
 	if (locked && !passed_qs) {
 		/*
@@ -1062,5 +1062,5 @@
 	 */
 	size_t nesting_cnt = local_atomic_exchange(&THE->rcu_nesting, 0);
-	
+
 	/*
 	 * Ensures NMIs see .rcu_nesting without the WAS_PREEMPTED mark and
@@ -1068,5 +1068,5 @@
 	 */
 	compiler_barrier();
-	
+
 	/* Preempted a reader critical section for the first time. */
 	if (RCU_CNT_INC <= nesting_cnt && !(nesting_cnt & RCU_WAS_PREEMPTED)) {
@@ -1074,5 +1074,5 @@
 		note_preempted_reader();
 	}
-	
+
 	/* Save the thread's nesting count when it is not running. */
 	THREAD->rcu.nesting_cnt = nesting_cnt;
@@ -1110,5 +1110,5 @@
 		THREAD->priority = -1;
 	}
-	
+
 	upd_max_cbs_in_slice(CPU->rcu.arriving_cbs_cnt);
 }
@@ -1118,5 +1118,5 @@
 {
 	assert(!rcu_read_locked());
-	
+
 	/* Load the thread's saved nesting count from before it was preempted. */
 	THE->rcu_nesting = THREAD->rcu.nesting_cnt;
@@ -1131,5 +1131,5 @@
 {
 	assert(THE->rcu_nesting == 0);
-	
+
 	/*
 	 * The thread forgot to exit its reader critical section.
@@ -1159,5 +1159,5 @@
 {
 	assert(0 == THE->rcu_nesting || RCU_WAS_PREEMPTED == THE->rcu_nesting);
-	
+
 	size_t prev = local_atomic_exchange(&THE->rcu_nesting, 0);
 	if (prev == RCU_WAS_PREEMPTED) {
@@ -1212,7 +1212,7 @@
 		return true;
 	}
-	
+
 	spinlock_lock(&rcu.gp_lock);
-	
+
 	if (CPU->rcu.cur_cbs_gp <= rcu.completed_gp) {
 		*completed_gp = rcu.completed_gp;
@@ -1220,8 +1220,8 @@
 		return true;
 	}
-	
+
 	assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
 	assert(_rcu_cur_gp <= CPU->rcu.cur_cbs_gp);
-	
+
 	/*
 	 * Notify the detector of how many GP ends we intend to wait for, so
@@ -1231,5 +1231,5 @@
 	size_t remaining_gp_ends = (size_t) (CPU->rcu.next_cbs_gp - _rcu_cur_gp);
 	req_detection(remaining_gp_ends + (arriving_cbs_empty() ? 0 : 1));
-	
+
 	/*
 	 * Ask the detector to speed up GP detection if there are too many
@@ -1239,5 +1239,5 @@
 		if(0 == rcu.req_expedited_cnt)
 			condvar_signal(&rcu.expedite_now);
-		
+
 		/*
 		 * Expedite only cub_cbs. If there really is a surge of callbacks
@@ -1250,11 +1250,11 @@
 	/* Wait for cur_cbs_gp to end. */
 	bool interrupted = cv_wait_for_gp(CPU->rcu.cur_cbs_gp);
-	
+
 	*completed_gp = rcu.completed_gp;
 	spinlock_unlock(&rcu.gp_lock);
-	
+
 	if (!interrupted)
 		upd_missed_gp_in_wait(*completed_gp);
-	
+
 	return !interrupted;
 }
@@ -1264,7 +1264,7 @@
 {
 	assert(spinlock_locked(&rcu.gp_lock));
-	
+
 	bool interrupted = false;
-	
+
 	/* Wait until wait_on_gp ends. */
 	while (rcu.completed_gp < wait_on_gp && !interrupted) {
@@ -1273,5 +1273,5 @@
 		interrupted = (ret == EINTR);
 	}
-	
+
 	return interrupted;
 }
@@ -1296,5 +1296,5 @@
 {
 	spinlock_lock(&rcu.gp_lock);
-	
+
 	while (wait_for_detect_req()) {
 		/*
@@ -1303,10 +1303,10 @@
 		 */
 		start_new_gp();
-		
+
 		spinlock_unlock(&rcu.gp_lock);
-		
+
 		if (!wait_for_readers())
 			goto unlocked_out;
-		
+
 		spinlock_lock(&rcu.gp_lock);
 
@@ -1314,7 +1314,7 @@
 		end_cur_gp();
 	}
-	
+
 	spinlock_unlock(&rcu.gp_lock);
-	
+
 unlocked_out:
 	return;
@@ -1325,14 +1325,14 @@
 {
 	assert(spinlock_locked(&rcu.gp_lock));
-	
+
 	bool interrupted = false;
-	
+
 	while (0 == rcu.req_gp_end_cnt && !interrupted) {
 		int ret = _condvar_wait_timeout_spinlock(&rcu.req_gp_changed,
 			&rcu.gp_lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);
-		
+
 		interrupted = (ret == EINTR);
 	}
-	
+
 	return !interrupted;
 }
@@ -1342,8 +1342,8 @@
 {
 	assert(spinlock_locked(&rcu.gp_lock));
-	
+
 	rcu.completed_gp = _rcu_cur_gp;
 	--rcu.req_gp_end_cnt;
-	
+
 	condvar_broadcast(&rcu.gp_ended);
 }
@@ -1353,5 +1353,5 @@
 {
 	DEFINE_CPU_MASK(reading_cpus);
-	
+
 	/* All running cpus have potential readers. */
 	cpu_mask_active(reading_cpus);
@@ -1363,11 +1363,11 @@
 	if (!gp_sleep())
 		return false;
-	
+
 	/* Non-intrusively determine which cpus have yet to pass a QS. */
 	rm_quiescent_cpus(reading_cpus);
-	
+
 	/* Actively interrupt cpus delaying the current GP and demand a QS. */
 	interrupt_delaying_cpus(reading_cpus);
-	
+
 	/* Wait for the interrupted cpus to notify us that they reached a QS. */
 	if (!wait_for_delaying_cpus())
@@ -1378,9 +1378,9 @@
 	 * monotonically descreases.
 	 */
-	
+
 	/* Wait for the last reader in cur_preempted to notify us it is done. */
 	if (!wait_for_preempt_reader())
 		return false;
-	
+
 	return true;
 }
@@ -1397,5 +1397,5 @@
 			DETECT_SLEEP_MS * 1000, SYNCH_FLAGS_INTERRUPTIBLE);
 	}
-	
+
 	if (0 < rcu.req_expedited_cnt) {
 		--rcu.req_expedited_cnt;
@@ -1403,7 +1403,7 @@
 		++rcu.stat_expedited_cnt;
 	}
-	
+
 	spinlock_unlock(&rcu.gp_lock);
-	
+
 	return (ret != EINTR);
 }
@@ -1413,5 +1413,5 @@
 {
 	atomic_set(&rcu.delaying_cpu_cnt, 0);
-	
+
 	sample_cpus(cpu_mask, NULL);
 }
@@ -1426,5 +1426,5 @@
 	assert(interrupts_disabled());
 	assert(!CPU->rcu.is_delaying_gp);
-	
+
 	/* Cpu did not pass a quiescent state yet. */
 	if (CPU->rcu.last_seen_gp != _rcu_cur_gp) {
@@ -1440,5 +1440,5 @@
 			ACCESS_ONCE(CPU->rcu.is_delaying_gp) = true;
 			CPU->rcu.signal_unlock = true;
-			
+
 			atomic_inc(&rcu.delaying_cpu_cnt);
 		} else {
@@ -1466,5 +1466,5 @@
 		 */
 	}
-	
+
 	/*
 	 * smp_call() makes sure any changes propagate back to the caller.
@@ -1483,8 +1483,8 @@
 			return false;
 	}
-	
+
 	/* Update statistic. */
 	rcu.stat_delayed_cnt += delaying_cpu_cnt;
-	
+
 	return true;
 }
@@ -1506,8 +1506,8 @@
 	 */
 	compiler_barrier();
-	
+
 	/* Save the thread's nesting count when it is not running. */
 	THREAD->rcu.nesting_cnt = CPU->rcu.nesting_cnt;
-	
+
 	/* Preempted a reader critical section for the first time. */
 	if (0 < THREAD->rcu.nesting_cnt && !THREAD->rcu.was_preempted) {
@@ -1515,5 +1515,5 @@
 		note_preempted_reader();
 	}
-	
+
 	/*
 	 * The preempted reader has been noted globally. There are therefore
@@ -1528,5 +1528,5 @@
 	 */
 	CPU->rcu.nesting_cnt = 0;
-	
+
 	/*
 	 * This cpu is holding up the current GP. Let the detector know
@@ -1553,5 +1553,5 @@
 		THREAD->priority = -1;
 	}
-	
+
 	upd_max_cbs_in_slice(CPU->rcu.arriving_cbs_cnt);
 }
@@ -1562,8 +1562,8 @@
 	assert(PREEMPTION_DISABLED || interrupts_disabled());
 	assert(0 == CPU->rcu.nesting_cnt);
-	
+
 	/* Load the thread's saved nesting count from before it was preempted. */
 	CPU->rcu.nesting_cnt = THREAD->rcu.nesting_cnt;
-	
+
 	/*
 	 * Ensures NMI see the proper nesting count before .signal_unlock.
@@ -1572,5 +1572,5 @@
 	 */
 	compiler_barrier();
-	
+
 	/*
 	 * In the unlikely event that a NMI occurs between the loading of the
@@ -1594,5 +1594,5 @@
 	assert(THREAD->state == Exiting);
 	assert(PREEMPTION_DISABLED || interrupts_disabled());
-	
+
 	/*
 	 * The thread forgot to exit its reader critical section.
@@ -1617,10 +1617,10 @@
 {
 	assert(spinlock_locked(&rcu.gp_lock));
-	
+
 	irq_spinlock_lock(&rcu.preempt_lock, true);
-	
+
 	/* Start a new GP. Announce to readers that a quiescent state is needed. */
 	++_rcu_cur_gp;
-	
+
 	/*
 	 * Readers preempted before the start of this GP (next_preempted)
@@ -1632,5 +1632,5 @@
 	 */
 	list_concat(&rcu.cur_preempted, &rcu.next_preempted);
-	
+
 	irq_spinlock_unlock(&rcu.preempt_lock, true);
 }
@@ -1694,5 +1694,5 @@
 	 */
 	memory_barrier(); /* MB C */
-	
+
 	cpu_mask_for_each(*cpu_mask, cpu_id) {
 		/*
@@ -1707,5 +1707,5 @@
 		 */
 		bool cpu_acked_gp = (cpus[cpu_id].rcu.last_seen_gp == _rcu_cur_gp);
-		
+
 		/*
 		 * Either the cpu is idle or it is exiting away from idle mode
@@ -1714,5 +1714,5 @@
 		 */
 		bool cpu_idle = cpus[cpu_id].idle;
-		
+
 		if (cpu_acked_gp || cpu_idle) {
 			cpu_mask_reset(cpu_mask, cpu_id);
@@ -1736,5 +1736,5 @@
 {
 	assert(CPU->rcu.cur_cbs_gp <= completed_gp);
-	
+
 	size_t delta = (size_t)(completed_gp - CPU->rcu.cur_cbs_gp);
 	CPU->rcu.stat_missed_gp_in_wait += delta;
@@ -1764,5 +1764,5 @@
 {
 	irq_spinlock_lock(&rcu.preempt_lock, true);
-	
+
 	assert(link_used(&THREAD->rcu.preempt_link));
 
@@ -1793,14 +1793,14 @@
 	bool reader_exists = !list_empty(&rcu.cur_preempted);
 	rcu.preempt_blocking_det = reader_exists;
-	
+
 	irq_spinlock_unlock(&rcu.preempt_lock, true);
-	
+
 	if (reader_exists) {
 		/* Update statistic. */
 		++rcu.stat_preempt_blocking_cnt;
-		
+
 		return semaphore_down_interruptable(&rcu.remaining_readers);
 	}
-	
+
 	return true;
 }
@@ -1809,10 +1809,10 @@
 {
 	rcu_cpu_data_t *cr = &CPU->rcu;
-	
+
 	if (arriving_cbs_cnt > cr->last_arriving_cnt) {
 		size_t arrived_cnt = arriving_cbs_cnt - cr->last_arriving_cnt;
 		cr->stat_max_slice_cbs = max(arrived_cnt, cr->stat_max_slice_cbs);
 	}
-	
+
 	cr->last_arriving_cnt = arriving_cbs_cnt;
 }
@@ -1826,5 +1826,5 @@
 	 * are no locks to lock in order to get up-to-date values.
 	 */
-	
+
 #ifdef RCU_PREEMPT_PODZIMEK
 	const char *algo = "podzimek-preempt-rcu";
@@ -1832,5 +1832,5 @@
 	const char *algo = "a-preempt-rcu";
 #endif
-	
+
 	printf("Config: expedite_threshold=%d, critical_threshold=%d,"
 		" detect_sleep=%dms, %s\n",
@@ -1843,5 +1843,5 @@
 		"running or not)\n", rcu.stat_preempt_blocking_cnt);
 	printf("Smp calls:     %zu\n", rcu.stat_smp_call_cnt);
-	
+
 	printf("Max arrived callbacks per GP and CPU:\n");
 	for (unsigned int i = 0; i < config.cpu_count; ++i) {
@@ -1853,5 +1853,5 @@
 		printf(" %zu", cpus[i].rcu.stat_avg_cbs);
 	}
-	
+
 	printf("\nMax arrived callbacks per time slice and CPU:\n");
 	for (unsigned int i = 0; i < config.cpu_count; ++i) {
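
Among the rcu.c hunks, rcu_call()/_rcu_call() queue a callback by exchanging the per-CPU tail pointer (parriving_cbs_tail) for the address of the new item's next field and then linking the item from the previous tail slot; the reclaimer later swaps the whole list out and runs the callbacks. A minimal userspace sketch of that append step, assuming C11 atomics (the kernel uses local_atomic_exchange with preemption disabled, so the brief window in which the new item is not yet linked is harmless there):

    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct item {
        struct item *next;
        void (*func)(struct item *);
    } item_t;

    typedef struct {
        item_t *head;              /* walked only by the single consumer */
        _Atomic(item_t **) tail;   /* slot where the next item gets linked */
    } cb_list_t;

    static void cb_list_init(cb_list_t *l)
    {
        l->head = NULL;
        atomic_init(&l->tail, &l->head);
    }

    static void cb_append(cb_list_t *l, item_t *it)
    {
        it->next = NULL;
        /* Claim the current tail slot and advance the tail to our 'next'. */
        item_t **prev_tail = atomic_exchange(&l->tail, &it->next);
        /* Publish the item by linking it from the previous tail slot. */
        *prev_tail = it;
    }
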
Index: kernel/generic/src/synch/smp_memory_barrier.c
===================================================================
--- kernel/generic/src/synch/smp_memory_barrier.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/synch/smp_memory_barrier.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -56,5 +56,5 @@
 		smp_call(cpu_id, issue_mem_bar, NULL);
 	}
-	
+
 	return 0;
 }
Index: kernel/generic/src/synch/spinlock.c
===================================================================
--- kernel/generic/src/synch/spinlock.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/synch/spinlock.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -77,5 +77,5 @@
 	size_t i = 0;
 	bool deadlock_reported = false;
-	
+
 	preemption_disable();
 	while (test_and_set(&lock->val)) {
@@ -101,5 +101,5 @@
 		if (lock->name[0] == '*')
 			continue;
-		
+
 		if (i++ > DEADLOCK_THRESHOLD) {
 			printf("cpu%u: looping on spinlock %p:%s, "
@@ -107,13 +107,13 @@
 			    (void *) CALLER, symtab_fmt_name_lookup(CALLER));
 			stack_trace();
-			
+
 			i = 0;
 			deadlock_reported = true;
 		}
 	}
-	
+
 	if (deadlock_reported)
 		printf("cpu%u: not deadlocked\n", CPU->id);
-	
+
 	/*
 	 * Prevent critical section code from bleeding out this way up.
@@ -131,10 +131,10 @@
 {
 	ASSERT_SPINLOCK(spinlock_locked(lock), lock);
-	
+
 	/*
 	 * Prevent critical section code from bleeding out this way down.
 	 */
 	CS_LEAVE_BARRIER();
-	
+
 	atomic_set(&lock->val, 0);
 	preemption_enable();
@@ -157,13 +157,13 @@
 	preemption_disable();
 	bool ret = !test_and_set(&lock->val);
-	
+
 	/*
 	 * Prevent critical section code from bleeding out this way up.
 	 */
 	CS_ENTER_BARRIER();
-	
+
 	if (!ret)
 		preemption_enable();
-	
+
 	return ret;
 }
@@ -208,10 +208,10 @@
 		ipl_t ipl = interrupts_disable();
 		spinlock_lock(&(lock->lock));
-		
+
 		lock->guard = true;
 		lock->ipl = ipl;
 	} else {
 		ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
-		
+
 		spinlock_lock(&(lock->lock));
 		ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
@@ -231,11 +231,11 @@
 {
 	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
-	
+
 	if (irq_res) {
 		ASSERT_IRQ_SPINLOCK(lock->guard, lock);
-		
+
 		lock->guard = false;
 		ipl_t ipl = lock->ipl;
-		
+
 		spinlock_unlock(&(lock->lock));
 		interrupts_restore(ipl);
@@ -261,5 +261,5 @@
 	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
 	bool ret = spinlock_trylock(&(lock->lock));
-	
+
 	ASSERT_IRQ_SPINLOCK((!ret) || (!lock->guard), lock);
 	return ret;
@@ -280,15 +280,15 @@
 {
 	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);
-	
+
 	/* Pass guard from unlock to lock */
 	bool guard = unlock->guard;
 	ipl_t ipl = unlock->ipl;
 	unlock->guard = false;
-	
+
 	spinlock_unlock(&(unlock->lock));
 	spinlock_lock(&(lock->lock));
-	
+
 	ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
-	
+
 	if (guard) {
 		lock->guard = true;
@@ -311,8 +311,8 @@
 {
 	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);
-	
+
 	spinlock_lock(&(lock->lock));
 	ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
-	
+
 	/* Pass guard from unlock to lock */
 	if (unlock->guard) {
@@ -321,5 +321,5 @@
 		unlock->guard = false;
 	}
-	
+
 	spinlock_unlock(&(unlock->lock));
 }
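
spinlock.c's lock path spins on test_and_set and, past DEADLOCK_THRESHOLD iterations, prints the looping CPU, the lock name and a stack trace, then keeps spinning; once the lock is finally obtained it reports that no deadlock occurred after all. A userspace analogue of that spin-and-report loop, assuming C11 atomics (threshold value and output are illustrative; the kernel additionally disables preemption and issues CS_ENTER_BARRIER() after acquiring):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define DEADLOCK_THRESHOLD 100000000UL   /* illustrative value */

    typedef struct {
        atomic_flag val;
        const char *name;
    } spinlock_t;

    static void spinlock_lock(spinlock_t *lock)
    {
        unsigned long i = 0;
        bool deadlock_reported = false;

        while (atomic_flag_test_and_set_explicit(&lock->val,
            memory_order_acquire)) {
            if (++i > DEADLOCK_THRESHOLD) {
                fprintf(stderr, "looping on spinlock %s, possible deadlock\n",
                    lock->name);
                i = 0;
                deadlock_reported = true;
            }
        }

        if (deadlock_reported)
            fprintf(stderr, "spinlock %s: not deadlocked\n", lock->name);
    }
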
Index: kernel/generic/src/synch/waitq.c
===================================================================
--- kernel/generic/src/synch/waitq.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/synch/waitq.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -94,12 +94,12 @@
 	bool do_wakeup = false;
 	DEADLOCK_PROBE_INIT(p_wqlock);
-	
+
 	irq_spinlock_lock(&threads_lock, false);
 	if (!thread_exists(thread))
 		goto out;
-	
+
 grab_locks:
 	irq_spinlock_lock(&thread->lock, false);
-	
+
 	waitq_t *wq;
 	if ((wq = thread->sleep_queue)) {  /* Assignment */
@@ -110,5 +110,5 @@
 			goto grab_locks;
 		}
-		
+
 		list_remove(&thread->wq_link);
 		thread->saved_context = thread->sleep_timeout_context;
@@ -117,11 +117,11 @@
 		irq_spinlock_unlock(&wq->lock, false);
 	}
-	
+
 	thread->timeout_pending = false;
 	irq_spinlock_unlock(&thread->lock, false);
-	
+
 	if (do_wakeup)
 		thread_ready(thread);
-	
+
 out:
 	irq_spinlock_unlock(&threads_lock, false);
@@ -144,13 +144,13 @@
 	bool do_wakeup = false;
 	DEADLOCK_PROBE_INIT(p_wqlock);
-	
+
 	/*
 	 * The thread is quaranteed to exist because
 	 * threads_lock is held.
 	 */
-	
+
 grab_locks:
 	irq_spinlock_lock(&thread->lock, false);
-	
+
 	waitq_t *wq;
 	if ((wq = thread->sleep_queue)) {  /* Assignment */
@@ -162,5 +162,5 @@
 			return;
 		}
-		
+
 		if (!irq_spinlock_trylock(&wq->lock)) {
 			/* Avoid deadlock */
@@ -169,9 +169,9 @@
 			goto grab_locks;
 		}
-		
+
 		if ((thread->timeout_pending) &&
 		    (timeout_unregister(&thread->sleep_timeout)))
 			thread->timeout_pending = false;
-		
+
 		list_remove(&thread->wq_link);
 		thread->saved_context = thread->sleep_interruption_context;
@@ -180,7 +180,7 @@
 		irq_spinlock_unlock(&wq->lock, false);
 	}
-	
+
 	irq_spinlock_unlock(&thread->lock, false);
-	
+
 	if (do_wakeup)
 		thread_ready(thread);
@@ -198,25 +198,25 @@
 {
 	irq_spinlock_lock(&wq->lock, true);
-	
+
 	if (!list_empty(&wq->sleepers)) {
 		thread_t *thread = list_get_instance(list_first(&wq->sleepers),
 		    thread_t, wq_link);
-		
+
 		irq_spinlock_lock(&thread->lock, false);
-		
+
 		assert(thread->sleep_interruptible);
-		
+
 		if ((thread->timeout_pending) &&
 		    (timeout_unregister(&thread->sleep_timeout)))
 			thread->timeout_pending = false;
-		
+
 		list_remove(&thread->wq_link);
 		thread->saved_context = thread->sleep_interruption_context;
 		thread->sleep_queue = NULL;
-		
+
 		irq_spinlock_unlock(&thread->lock, false);
 		thread_ready(thread);
 	}
-	
+
 	irq_spinlock_unlock(&wq->lock, true);
 }
@@ -271,5 +271,5 @@
 {
 	assert((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));
-	
+
 	ipl_t ipl = waitq_sleep_prepare(wq);
 	bool nblocked;
@@ -296,8 +296,8 @@
 {
 	ipl_t ipl;
-	
+
 restart:
 	ipl = interrupts_disable();
-	
+
 	if (THREAD) {  /* Needed during system initiailzation */
 		/*
@@ -310,5 +310,5 @@
 		 */
 		irq_spinlock_lock(&THREAD->lock, false);
-		
+
 		if (THREAD->timeout_pending) {
 			irq_spinlock_unlock(&THREAD->lock, false);
@@ -316,8 +316,8 @@
 			goto restart;
 		}
-		
+
 		irq_spinlock_unlock(&THREAD->lock, false);
 	}
-	
+
 	irq_spinlock_lock(&wq->lock, false);
 	return ipl;
@@ -354,5 +354,5 @@
 		irq_spinlock_unlock(&wq->lock, false);
 	}
-	
+
 	interrupts_restore(ipl);
 }
@@ -387,5 +387,5 @@
 		}
 	}
-	
+
 	/*
 	 * Now we are firmly decided to go to sleep.
@@ -393,5 +393,5 @@
 	 */
 	irq_spinlock_lock(&THREAD->lock, false);
-	
+
 	if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {
 		/*
@@ -403,5 +403,5 @@
 			return EINTR;
 		}
-		
+
 		/*
 		 * Set context that will be restored if the sleep
@@ -417,5 +417,5 @@
 	} else
 		THREAD->sleep_interruptible = false;
-	
+
 	if (usec) {
 		/* We use the timeout variant. */
@@ -426,12 +426,12 @@
 			return ETIMEOUT;
 		}
-		
+
 		THREAD->timeout_pending = true;
 		timeout_register(&THREAD->sleep_timeout, (uint64_t) usec,
 		    waitq_sleep_timed_out, THREAD);
 	}
-	
+
 	list_append(&THREAD->wq_link, &wq->sleepers);
-	
+
 	/*
 	 * Suspend execution.
@@ -440,15 +440,15 @@
 	THREAD->state = Sleeping;
 	THREAD->sleep_queue = wq;
-	
+
 	/* Must be before entry to scheduler, because there are multiple
 	 * return vectors.
 	 */
 	*blocked = true;
-	
+
 	irq_spinlock_unlock(&THREAD->lock, false);
-	
+
 	/* wq->lock is released in scheduler_separated_stack() */
 	scheduler();
-	
+
 	return EOK;
 }
@@ -511,5 +511,5 @@
 {
 	assert(interrupts_disabled());
-	
+
 	irq_spinlock_lock(&wq->lock, false);
 	irq_spinlock_unlock(&wq->lock, false);
@@ -536,5 +536,5 @@
 	assert(interrupts_disabled());
 	assert(irq_spinlock_locked(&wq->lock));
-	
+
 loop:
 	if (list_empty(&wq->sleepers)) {
@@ -542,12 +542,12 @@
 		if ((count) && (mode == WAKEUP_ALL))
 			wq->missed_wakeups--;
-		
+
 		return;
 	}
-	
+
 	count++;
 	thread_t *thread = list_get_instance(list_first(&wq->sleepers),
 	    thread_t, wq_link);
-	
+
 	/*
 	 * Lock the thread prior to removing it from the wq.
@@ -569,14 +569,14 @@
 	irq_spinlock_lock(&thread->lock, false);
 	list_remove(&thread->wq_link);
-	
+
 	if ((thread->timeout_pending) &&
 	    (timeout_unregister(&thread->sleep_timeout)))
 		thread->timeout_pending = false;
-	
+
 	thread->sleep_queue = NULL;
 	irq_spinlock_unlock(&thread->lock, false);
-	
+
 	thread_ready(thread);
-	
+
 	if (mode == WAKEUP_ALL)
 		goto loop;
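
The waitq.c hunks revolve around the missed-wakeup bookkeeping: a wakeup that arrives while nobody sleeps is recorded in missed_wakeups so that a later sleeper does not block, and WAKEUP_ALL keeps readying sleepers in a loop. A deliberately simplified userspace model of that "never lose a wakeup" property, assuming POSIX threads (timeouts, interruption and the thread/wq lock ordering handled by the kernel code are omitted):

    #include <pthread.h>

    typedef struct {
        pthread_mutex_t lock;
        pthread_cond_t cv;
        unsigned missed_wakeups;   /* wakeups delivered with no sleeper */
    } waitq_t;

    static void waitq_sleep(waitq_t *wq)
    {
        pthread_mutex_lock(&wq->lock);
        /* Consume a wakeup; if one already arrived, return immediately. */
        while (wq->missed_wakeups == 0)
            pthread_cond_wait(&wq->cv, &wq->lock);
        wq->missed_wakeups--;
        pthread_mutex_unlock(&wq->lock);
    }

    static void waitq_wakeup(waitq_t *wq)
    {
        pthread_mutex_lock(&wq->lock);
        wq->missed_wakeups++;
        pthread_cond_signal(&wq->cv);
        pthread_mutex_unlock(&wq->lock);
    }
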
Index: kernel/generic/src/synch/workqueue.c
===================================================================
--- kernel/generic/src/synch/workqueue.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/synch/workqueue.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -59,17 +59,17 @@
 	 */
 	IRQ_SPINLOCK_DECLARE(lock);
-	
+
 	/* Activates a worker if new work arrives or if shutting down the queue. */
 	condvar_t activate_worker;
-	
+
 	/* Queue of work_items ready to be dispatched. */
 	list_t queue;
-	
+
 	/* List of worker threads. */
 	list_t workers;
-	
+
 	/* Number of work items queued. */
 	size_t item_cnt;
-	
+
 	/* Indicates the work queue is shutting down. */
 	bool stopping;
@@ -84,10 +84,10 @@
 	/* Number of blocked workers sleeping in work func() (ie not idle). */
 	size_t blocked_worker_cnt;
-	
+
 	/* Number of pending signal_worker_op() operations. */
 	size_t pending_op_cnt;
-	
+
 	link_t nb_link;
-	
+
 #ifdef CONFIG_DEBUG
 	/* Magic cookie for integrity checks. Immutable. Accessed without lock. */
@@ -105,5 +105,5 @@
 /** Max number of work items per active worker before a new worker is activated.*/
 static const size_t max_items_per_worker = 8;
-	
+
 /** System wide work queue. */
 static struct work_queue g_work_queue;
@@ -157,10 +157,10 @@
 	 */
 	booting = false;
-	
+
 	nonblock_init();
-	
+
 	if (!add_worker(&g_work_queue))
 		panic("Could not create a single global work queue worker!\n");
-	
+
 }
 
@@ -174,5 +174,5 @@
 	/* Maximum concurrency without slowing down the system. */
 	max_concurrent_workers = max(2, config.cpu_count);
-	
+
 	workq_preinit(&g_work_queue, "kworkq");
 }
@@ -188,5 +188,5 @@
 {
 	struct work_queue *workq = malloc(sizeof(struct work_queue), 0);
-	
+
 	if (workq) {
 		if (workq_init(workq, name)) {
@@ -194,8 +194,8 @@
 			return workq;
 		}
-		
+
 		free(workq);
 	}
-	
+
 	return NULL;
 }
@@ -205,5 +205,5 @@
 {
 	assert(!workq_corrupted(workq));
-	
+
 	irq_spinlock_lock(&workq->lock, true);
 	bool stopped = workq->stopping;
@@ -212,5 +212,5 @@
 #endif
 	irq_spinlock_unlock(&workq->lock, true);
-	
+
 	if (!stopped) {
 		workq_stop(workq);
@@ -218,9 +218,9 @@
 		assert(0 == running_workers);
 	}
-	
+
 #ifdef CONFIG_DEBUG
 	workq->cookie = 0;
 #endif
-	
+
 	free(workq);
 }
@@ -232,20 +232,20 @@
 	workq->cookie = WORKQ_MAGIC;
 #endif
-	
+
 	irq_spinlock_initialize(&workq->lock, name);
 	condvar_initialize(&workq->activate_worker);
-	
+
 	list_initialize(&workq->queue);
 	list_initialize(&workq->workers);
-	
+
 	workq->item_cnt = 0;
 	workq->stopping = false;
 	workq->name = name;
-	
+
 	workq->cur_worker_cnt = 1;
 	workq->idle_worker_cnt = 0;
 	workq->activate_pending = 0;
 	workq->blocked_worker_cnt = 0;
-	
+
 	workq->pending_op_cnt = 0;
 	link_initialize(&workq->nb_link);
@@ -270,16 +270,16 @@
 	thread_t *thread = thread_create(worker_thread, workq, TASK,
 		THREAD_FLAG_NONE, workq->name);
-	
+
 	if (!thread) {
 		irq_spinlock_lock(&workq->lock, true);
-		
+
 		/* cur_worker_cnt proactively increased in signal_worker_logic() .*/
 		assert(0 < workq->cur_worker_cnt);
 		--workq->cur_worker_cnt;
-		
+
 		irq_spinlock_unlock(&workq->lock, true);
 		return false;
 	}
-	
+
 	/* Respect lock ordering. */
 	irq_spinlock_lock(&thread->lock, true);
@@ -290,8 +290,8 @@
 	if (!workq->stopping) {
 		success = true;
-		
+
 		/* Try to distribute workers among cpus right away. */
 		unsigned int cpu_id = (workq->cur_worker_cnt) % config.cpu_active;
-		
+
 		if (!cpus[cpu_id].active)
 			cpu_id = CPU->id;
@@ -312,10 +312,10 @@
 		 */
 		success = false;
-		
+
 		/* cur_worker_cnt proactively increased in signal_worker() .*/
 		assert(0 < workq->cur_worker_cnt);
 		--workq->cur_worker_cnt;
 	}
-	
+
 	irq_spinlock_unlock(&workq->lock, false);
 	irq_spinlock_unlock(&thread->lock, true);
@@ -324,7 +324,7 @@
 		thread_interrupt(thread);
 	}
-		
+
 	thread_ready(thread);
-	
+
 	return success;
 }
@@ -337,5 +337,5 @@
 {
 	assert(!workq_corrupted(workq));
-	
+
 	interrupt_workers(workq);
 	wait_for_workers(workq);
@@ -350,8 +350,8 @@
 	assert(!workq->stopping);
 	workq->stopping = true;
-	
+
 	/* Respect lock ordering - do not hold workq->lock during broadcast. */
 	irq_spinlock_unlock(&workq->lock, true);
-	
+
 	condvar_broadcast(&workq->activate_worker);
 }
@@ -361,7 +361,7 @@
 {
 	assert(!PREEMPTION_DISABLED);
-	
+
 	irq_spinlock_lock(&workq->lock, true);
-	
+
 	list_foreach_safe(workq->workers, cur_worker, next_worker) {
 		thread_t *worker = list_get_instance(cur_worker, thread_t, workq_link);
@@ -370,22 +370,22 @@
 		/* Wait without the lock. */
 		irq_spinlock_unlock(&workq->lock, true);
-		
+
 		thread_join(worker);
 		thread_detach(worker);
-		
+
 		irq_spinlock_lock(&workq->lock, true);
 	}
-	
+
 	assert(list_empty(&workq->workers));
-	
+
 	/* Wait for deferred add_worker_op(), signal_worker_op() to finish. */
 	while (0 < workq->cur_worker_cnt || 0 < workq->pending_op_cnt) {
 		irq_spinlock_unlock(&workq->lock, true);
-		
+
 		scheduler();
-		
+
 		irq_spinlock_lock(&workq->lock, true);
 	}
-	
+
 	irq_spinlock_unlock(&workq->lock, true);
 }
@@ -422,5 +422,5 @@
  *                  until func() is entered.
  * @param func      User supplied function to invoke in a worker thread.
- 
+
  * @return false if work queue is shutting down; function is not
  *               queued for further processing.
@@ -442,5 +442,5 @@
  *                  until func() is entered.
  * @param func      User supplied function to invoke in a worker thread.
- 
+
  * @return false if work queue is shutting down; function is not
  *               queued for further processing.
@@ -467,5 +467,5 @@
  * @param func      User supplied function to invoke in a worker thread.
  * @param can_block May adding this work item block?
- 
+
  * @return false if work queue is shutting down; function is not
  *               queued for further processing.
@@ -476,10 +476,10 @@
 {
 	assert(!workq_corrupted(workq));
-	
+
 	bool success = true;
 	signal_op_t signal_op = NULL;
-	
+
 	irq_spinlock_lock(&workq->lock, true);
-	
+
 	if (workq->stopping) {
 		success = false;
@@ -489,5 +489,5 @@
 		++workq->item_cnt;
 		success = true;
-		
+
 		if (!booting) {
 			signal_op = signal_worker_logic(workq, can_block);
@@ -499,5 +499,5 @@
 		}
 	}
-	
+
 	irq_spinlock_unlock(&workq->lock, true);
 
@@ -505,5 +505,5 @@
 		signal_op(workq);
 	}
-	
+
 	return success;
 }
@@ -515,5 +515,5 @@
 	work_item->cookie = WORK_ITEM_MAGIC;
 #endif
-	
+
 	link_initialize(&work_item->queue_link);
 	work_item->func = func;
@@ -524,17 +524,17 @@
 {
 	assert(irq_spinlock_locked(&workq->lock));
-	
+
 	/* Workers blocked are sleeping in the work function (ie not idle). */
 	assert(workq->blocked_worker_cnt <= workq->cur_worker_cnt);
 	/* Idle workers are waiting for more work to arrive in condvar_wait. */
 	assert(workq->idle_worker_cnt <= workq->cur_worker_cnt);
-	
+
 	/* Idle + blocked workers == sleeping worker threads. */
 	size_t sleeping_workers = workq->blocked_worker_cnt + workq->idle_worker_cnt;
-	
+
 	assert(sleeping_workers	<= workq->cur_worker_cnt);
 	/* Workers pending activation are idle workers not yet given a time slice. */
 	assert(workq->activate_pending <= workq->idle_worker_cnt);
-	
+
 	/*
 	 * Workers actively running the work func() this very moment and
@@ -553,5 +553,5 @@
 {
 	assert(irq_spinlock_locked(&workq->lock));
-	
+
 	/*
 	 * Workers actively running the work func() and are neither blocked nor
@@ -578,5 +578,5 @@
 
 	condvar_signal(&workq->activate_worker);
-	
+
 	irq_spinlock_lock(&workq->lock, true);
 	assert(0 < workq->pending_op_cnt);
@@ -597,5 +597,5 @@
 	assert(!workq_corrupted(workq));
 	assert(irq_spinlock_locked(&workq->lock));
-	
+
 	/* Only signal workers if really necessary. */
 	signal_op_t signal_op = NULL;
@@ -630,5 +630,5 @@
 			bool need_worker = (active < max_concurrent_workers)
 				&& (workq->cur_worker_cnt < max_worker_cnt);
-			
+
 			if (need_worker && can_block) {
 				signal_op = add_worker_op;
@@ -641,5 +641,5 @@
 				++workq->cur_worker_cnt;
 			}
-			
+
 			/*
 			 * We cannot create a new worker but we need one desperately
@@ -648,5 +648,5 @@
 			if (need_worker && !can_block && 0 == active) {
 				assert(0 == workq->idle_worker_cnt);
-				
+
 				irq_spinlock_lock(&nonblock_adder.lock, true);
 
@@ -667,5 +667,5 @@
 		signal_op = NULL;
 	}
-	
+
 	return signal_op;
 }
@@ -682,10 +682,10 @@
 		return;
 	}
-	
+
 	assert(arg != NULL);
-	
+
 	struct work_queue *workq = arg;
 	work_t *work_item;
-	
+
 	while (dequeue_work(workq, &work_item)) {
 		/* Copy the func field so func() can safely free work_item. */
@@ -700,7 +700,7 @@
 {
 	assert(!workq_corrupted(workq));
-	
+
 	irq_spinlock_lock(&workq->lock, true);
-	
+
 	/* Check if we should exit if load is low. */
 	if (!workq->stopping && worker_unnecessary(workq)) {
@@ -710,15 +710,15 @@
 		list_remove(&THREAD->workq_link);
 		irq_spinlock_unlock(&workq->lock, true);
-		
+
 		thread_detach(THREAD);
 		return false;
 	}
-	
+
 	bool stop = false;
-	
+
 	/* Wait for work to arrive. */
 	while (list_empty(&workq->queue) && !workq->stopping) {
 		cv_wait(workq);
-		
+
 		if (0 < workq->activate_pending)
 			--workq->activate_pending;
@@ -729,5 +729,5 @@
 		link_t *work_link = list_first(&workq->queue);
 		*pwork_item = list_get_instance(work_link, work_t, queue_link);
-		
+
 #ifdef CONFIG_DEBUG
 		assert(!work_item_corrupted(*pwork_item));
@@ -736,5 +736,5 @@
 		list_remove(work_link);
 		--workq->item_cnt;
-		
+
 		stop = false;
 	} else {
@@ -744,7 +744,7 @@
 		stop = true;
 	}
-	
+
 	irq_spinlock_unlock(&workq->lock, true);
-	
+
 	return !stop;
 }
@@ -754,5 +754,5 @@
 {
 	assert(irq_spinlock_locked(&workq->lock));
-	
+
 	/* No work is pending. We don't need too many idle threads. */
 	if (list_empty(&workq->queue)) {
@@ -775,8 +775,8 @@
 	++workq->idle_worker_cnt;
 	THREAD->workq_idling = true;
-	
+
 	/* Ignore lock ordering just here. */
 	assert(irq_spinlock_locked(&workq->lock));
-	
+
 	_condvar_wait_timeout_irq_spinlock(&workq->activate_worker,
 		&workq->lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
@@ -784,5 +784,5 @@
 	assert(!workq_corrupted(workq));
 	assert(irq_spinlock_locked(&workq->lock));
-	
+
 	THREAD->workq_idling = false;
 	--workq->idle_worker_cnt;
@@ -803,8 +803,8 @@
 		assert(THREAD != thread);
 		assert(!workq_corrupted(thread->workq));
-		
+
 		/* Protected by thread->lock */
 		thread->workq_blocked = false;
-		
+
 		irq_spinlock_lock(&thread->workq->lock, true);
 		--thread->workq->blocked_worker_cnt;
@@ -823,16 +823,16 @@
 		assert(!THREAD->workq_blocked);
 		assert(!workq_corrupted(THREAD->workq));
-		
+
 		THREAD->workq_blocked = true;
-		
+
 		irq_spinlock_lock(&THREAD->workq->lock, false);
 
 		++THREAD->workq->blocked_worker_cnt;
-		
+
 		bool can_block = false;
 		signal_op_t op = signal_worker_logic(THREAD->workq, can_block);
-		
+
 		irq_spinlock_unlock(&THREAD->workq->lock, false);
-		
+
 		if (op) {
 			assert(add_worker_noblock_op == op || signal_worker_op == op);
@@ -856,7 +856,7 @@
 	const char *load_str = worker_surplus ? "decreasing" :
 		(0 < workq->activate_pending) ? "increasing" : "stable";
-	
+
 	irq_spinlock_unlock(&workq->lock, true);
-	
+
 	printf(
 		"Configuration: max_worker_cnt=%zu, min_worker_cnt=%zu,\n"
@@ -893,12 +893,12 @@
 
 	irq_spinlock_lock(&info->lock, true);
-	
+
 	while (list_empty(&info->work_queues) && !stop) {
 		errno_t ret = _condvar_wait_timeout_irq_spinlock(&info->req_cv,
 			&info->lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);
-		
+
 		stop = (ret == EINTR);
 	}
-	
+
 	if (!stop) {
 		*pworkq = list_get_instance(list_first(&info->work_queues),
@@ -906,10 +906,10 @@
 
 		assert(!workq_corrupted(*pworkq));
-		
+
 		list_remove(&(*pworkq)->nb_link);
 	}
-	
+
 	irq_spinlock_unlock(&info->lock, true);
-	
+
 	return !stop;
 }
@@ -919,5 +919,5 @@
 	nonblock_adder_t *info = arg;
 	struct work_queue *workq;
-	
+
 	while (dequeue_add_req(info, &workq)) {
 		add_worker(workq);
@@ -931,8 +931,8 @@
 	condvar_initialize(&nonblock_adder.req_cv);
 	list_initialize(&nonblock_adder.work_queues);
-	
+
 	nonblock_adder.thread = thread_create(thr_nonblock_add_worker,
 		&nonblock_adder, TASK, THREAD_FLAG_NONE, "kworkq-nb");
-	
+
 	if (nonblock_adder.thread) {
 		thread_ready(nonblock_adder.thread);
Index: kernel/generic/src/syscall/copy.c
===================================================================
--- kernel/generic/src/syscall/copy.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/syscall/copy.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -26,5 +26,5 @@
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
-    
+
 /** @addtogroup generic
  * @{
@@ -63,8 +63,8 @@
 	ipl_t ipl;
 	errno_t rc;
-	
+
 	assert(THREAD);
 	assert(!THREAD->in_copy_from_uspace);
-	
+
 	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
 		if (overlaps((uintptr_t) uspace_src, size,
@@ -86,8 +86,8 @@
 		return EPERM;
 #endif
-	
+
 	ipl = interrupts_disable();
 	THREAD->in_copy_from_uspace = true;
-	
+
 	rc = memcpy_from_uspace(dst, uspace_src, size);
 
@@ -114,8 +114,8 @@
 	ipl_t ipl;
 	errno_t rc;
-	
+
 	assert(THREAD);
 	assert(!THREAD->in_copy_to_uspace);
-	
+
 	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
 		if (overlaps((uintptr_t) uspace_dst, size,
@@ -137,8 +137,8 @@
 		return EPERM;
 #endif
-	
+
 	ipl = interrupts_disable();
 	THREAD->in_copy_to_uspace = true;
-	
+
 	rc = memcpy_to_uspace(uspace_dst, src, size);
 
Index: kernel/generic/src/syscall/syscall.c
===================================================================
--- kernel/generic/src/syscall/syscall.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/syscall/syscall.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -66,5 +66,5 @@
 	thread_update_accounting(true);
 	irq_spinlock_unlock(&THREAD->lock, true);
-	
+
 #ifdef CONFIG_UDEBUG
 	/*
@@ -82,5 +82,5 @@
 		udebug_syscall_event(a1, a2, a3, a4, a5, a6, id, 0, false);
 #endif
-	
+
 	sysarg_t rc;
 	if (id < SYSCALL_END) {
@@ -91,12 +91,12 @@
 		task_kill_self(true);
 	}
-	
+
 	if (THREAD->interrupted)
 		thread_exit();
-	
+
 #ifdef CONFIG_UDEBUG
 	if (THREAD->udebug.active) {
 		udebug_syscall_event(a1, a2, a3, a4, a5, a6, id, rc, true);
-		
+
 		/*
 		 * Stopping point needed for tasks that only invoke
@@ -111,10 +111,10 @@
 	THREAD->udebug.uspace_state = NULL;
 #endif
-	
+
 	/* Do kernel accounting */
 	irq_spinlock_lock(&THREAD->lock, true);
 	thread_update_accounting(false);
 	irq_spinlock_unlock(&THREAD->lock, true);
-	
+
 	return rc;
 }
@@ -123,5 +123,5 @@
 	/* System management syscalls. */
 	[SYS_KIO] = (syshandler_t) sys_kio,
-	
+
 	/* Thread and task related syscalls. */
 	[SYS_THREAD_CREATE] = (syshandler_t) sys_thread_create,
@@ -130,5 +130,5 @@
 	[SYS_THREAD_USLEEP] = (syshandler_t) sys_thread_usleep,
 	[SYS_THREAD_UDELAY] = (syshandler_t) sys_thread_udelay,
-	
+
 	[SYS_TASK_GET_ID] = (syshandler_t) sys_task_get_id,
 	[SYS_TASK_SET_NAME] = (syshandler_t) sys_task_set_name,
@@ -136,5 +136,5 @@
 	[SYS_TASK_EXIT] = (syshandler_t) sys_task_exit,
 	[SYS_PROGRAM_SPAWN_LOADER] = (syshandler_t) sys_program_spawn_loader,
-	
+
 	/* Synchronization related syscalls. */
 	[SYS_FUTEX_SLEEP] = (syshandler_t) sys_futex_sleep,
@@ -142,5 +142,5 @@
 	[SYS_SMC_COHERENCE] = (syshandler_t) sys_smc_coherence,
 	[SYS_SMP_MEMORY_BARRIER] = (syshandler_t) sys_smp_memory_barrier,
-	
+
 	/* Address space related syscalls. */
 	[SYS_AS_AREA_CREATE] = (syshandler_t) sys_as_area_create,
@@ -148,8 +148,8 @@
 	[SYS_AS_AREA_CHANGE_FLAGS] = (syshandler_t) sys_as_area_change_flags,
 	[SYS_AS_AREA_DESTROY] = (syshandler_t) sys_as_area_destroy,
-	
+
 	/* Page mapping related syscalls. */
 	[SYS_PAGE_FIND_MAPPING] = (syshandler_t) sys_page_find_mapping,
-	
+
 	/* IPC related syscalls. */
 	[SYS_IPC_CALL_ASYNC_FAST] = (syshandler_t) sys_ipc_call_async_fast,
@@ -163,14 +163,14 @@
 	[SYS_IPC_HANGUP] = (syshandler_t) sys_ipc_hangup,
 	[SYS_IPC_CONNECT_KBOX] = (syshandler_t) sys_ipc_connect_kbox,
-	
+
 	/* Event notification syscalls. */
 	[SYS_IPC_EVENT_SUBSCRIBE] = (syshandler_t) sys_ipc_event_subscribe,
 	[SYS_IPC_EVENT_UNSUBSCRIBE] = (syshandler_t) sys_ipc_event_unsubscribe,
 	[SYS_IPC_EVENT_UNMASK] = (syshandler_t) sys_ipc_event_unmask,
-	
+
 	/* Permission related syscalls. */
 	[SYS_PERM_GRANT] = (syshandler_t) sys_perm_grant,
 	[SYS_PERM_REVOKE] = (syshandler_t) sys_perm_revoke,
-	
+
 	/* DDI related syscalls. */
 	[SYS_PHYSMEM_MAP] = (syshandler_t) sys_physmem_map,
@@ -180,8 +180,8 @@
 	[SYS_IOSPACE_ENABLE] = (syshandler_t) sys_iospace_enable,
 	[SYS_IOSPACE_DISABLE] = (syshandler_t) sys_iospace_disable,
-	
+
 	[SYS_IPC_IRQ_SUBSCRIBE] = (syshandler_t) sys_ipc_irq_subscribe,
 	[SYS_IPC_IRQ_UNSUBSCRIBE] = (syshandler_t) sys_ipc_irq_unsubscribe,
-	
+
 	/* Sysinfo syscalls. */
 	[SYS_SYSINFO_GET_KEYS_SIZE] = (syshandler_t) sys_sysinfo_get_keys_size,
@@ -191,8 +191,8 @@
 	[SYS_SYSINFO_GET_DATA_SIZE] = (syshandler_t) sys_sysinfo_get_data_size,
 	[SYS_SYSINFO_GET_DATA] = (syshandler_t) sys_sysinfo_get_data,
-	
+
 	/* Kernel console syscalls. */
 	[SYS_DEBUG_CONSOLE] = (syshandler_t) sys_debug_console,
-	
+
 	[SYS_KLOG] = (syshandler_t) sys_klog,
 };
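
syscall.c dispatches through syscall_table[], an array of handler pointers indexed by syscall number with a bounds check before the indirect call (an out-of-range id makes the kernel kill the calling task). A standalone sketch of the same dispatch pattern; the handler, enum values and return convention below are hypothetical:

    #include <stddef.h>
    #include <stdint.h>

    typedef intptr_t (*syshandler_t)(intptr_t, intptr_t);

    static intptr_t sys_noop(intptr_t a1, intptr_t a2)
    {
        (void) a1;
        (void) a2;
        return 0;
    }

    enum { SYS_NOOP = 0, SYSCALL_END };

    static const syshandler_t syscall_table[SYSCALL_END] = {
        [SYS_NOOP] = sys_noop,
    };

    static intptr_t syscall_dispatch(unsigned id, intptr_t a1, intptr_t a2)
    {
        if (id < SYSCALL_END && syscall_table[id] != NULL)
            return syscall_table[id](a1, a2);

        /* Out of range: the real kernel kills the offending task here. */
        return -1;
    }
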
Index: kernel/generic/src/sysinfo/stats.c
===================================================================
--- kernel/generic/src/sysinfo/stats.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/sysinfo/stats.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -99,5 +99,5 @@
 	if (dry_run)
 		return NULL;
-	
+
 	/* Assumption: config.cpu_count is constant */
 	stats_cpu_t *stats_cpus = (stats_cpu_t *) malloc(*size, FRAME_ATOMIC);
@@ -106,9 +106,9 @@
 		return NULL;
 	}
-	
+
 	size_t i;
 	for (i = 0; i < config.cpu_count; i++) {
 		irq_spinlock_lock(&cpus[i].lock, true);
-		
+
 		stats_cpus[i].id = cpus[i].id;
 		stats_cpus[i].active = cpus[i].active;
@@ -116,8 +116,8 @@
 		stats_cpus[i].busy_cycles = cpus[i].busy_cycles;
 		stats_cpus[i].idle_cycles = cpus[i].idle_cycles;
-		
+
 		irq_spinlock_unlock(&cpus[i].lock, true);
 	}
-	
+
 	return ((void *) stats_cpus);
 }
@@ -137,5 +137,5 @@
 	size_t *count = (size_t *) arg;
 	(*count)++;
-	
+
 	return true;
 }
@@ -156,10 +156,10 @@
 	 * object, return inexact statistics by skipping the respective object.
 	 */
-	
+
 	if (mutex_trylock(&as->lock) != EOK)
 		return 0;
-	
+
 	size_t pages = 0;
-	
+
 	/* Walk the B+ tree and count pages */
 	list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
@@ -168,15 +168,15 @@
 		for (i = 0; i < node->keys; i++) {
 			as_area_t *area = node->value[i];
-			
+
 			if (mutex_trylock(&area->lock) != EOK)
 				continue;
-			
+
 			pages += area->pages;
 			mutex_unlock(&area->lock);
 		}
 	}
-	
+
 	mutex_unlock(&as->lock);
-	
+
 	return (pages << PAGE_WIDTH);
 }
@@ -197,10 +197,10 @@
 	 * object, return inexact statistics by skipping the respective object.
 	 */
-	
+
 	if (mutex_trylock(&as->lock) != EOK)
 		return 0;
-	
+
 	size_t pages = 0;
-	
+
 	/* Walk the B+ tree and count pages */
 	list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, node) {
@@ -208,15 +208,15 @@
 		for (i = 0; i < node->keys; i++) {
 			as_area_t *area = node->value[i];
-			
+
 			if (mutex_trylock(&area->lock) != EOK)
 				continue;
-			
+
 			pages += area->resident;
 			mutex_unlock(&area->lock);
 		}
 	}
-	
+
 	mutex_unlock(&as->lock);
-	
+
 	return (pages << PAGE_WIDTH);
 }
@@ -234,5 +234,5 @@
 	assert(interrupts_disabled());
 	assert(irq_spinlock_locked(&task->lock));
-	
+
 	stats_task->task_id = task->taskid;
 	str_cpy(stats_task->name, TASK_NAME_BUFLEN, task->name);
@@ -260,14 +260,14 @@
 	stats_task_t **iterator = (stats_task_t **) arg;
 	task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);
-	
+
 	/* Interrupts are already disabled */
 	irq_spinlock_lock(&(task->lock), false);
-	
+
 	/* Record the statistics and increment the iterator */
 	produce_stats_task(task, *iterator);
 	(*iterator)++;
-	
+
 	irq_spinlock_unlock(&(task->lock), false);
-	
+
 	return true;
 }
@@ -289,9 +289,9 @@
 	/* Messing with task structures, avoid deadlock */
 	irq_spinlock_lock(&tasks_lock, true);
-	
+
 	/* First walk the task tree to count the tasks */
 	size_t count = 0;
 	avltree_walk(&tasks_tree, avl_count_walker, (void *) &count);
-	
+
 	if (count == 0) {
 		/* No tasks found (strange) */
@@ -300,5 +300,5 @@
 		return NULL;
 	}
-	
+
 	*size = sizeof(stats_task_t) * count;
 	if (dry_run) {
@@ -306,5 +306,5 @@
 		return NULL;
 	}
-	
+
 	stats_task_t *stats_tasks = (stats_task_t *) malloc(*size, FRAME_ATOMIC);
 	if (stats_tasks == NULL) {
@@ -314,11 +314,11 @@
 		return NULL;
 	}
-	
+
 	/* Walk tha task tree again to gather the statistics */
 	stats_task_t *iterator = stats_tasks;
 	avltree_walk(&tasks_tree, task_serialize_walker, (void *) &iterator);
-	
+
 	irq_spinlock_unlock(&tasks_lock, true);
-	
+
 	return ((void *) stats_tasks);
 }
@@ -336,5 +336,5 @@
 	assert(interrupts_disabled());
 	assert(irq_spinlock_locked(&thread->lock));
-	
+
 	stats_thread->thread_id = thread->tid;
 	stats_thread->task_id = thread->task->taskid;
@@ -343,5 +343,5 @@
 	stats_thread->ucycles = thread->ucycles;
 	stats_thread->kcycles = thread->kcycles;
-	
+
 	if (thread->cpu != NULL) {
 		stats_thread->on_cpu = true;
@@ -366,14 +366,14 @@
 	stats_thread_t **iterator = (stats_thread_t **) arg;
 	thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);
-	
+
 	/* Interrupts are already disabled */
 	irq_spinlock_lock(&thread->lock, false);
-	
+
 	/* Record the statistics and increment the iterator */
 	produce_stats_thread(thread, *iterator);
 	(*iterator)++;
-	
+
 	irq_spinlock_unlock(&thread->lock, false);
-	
+
 	return true;
 }
@@ -395,9 +395,9 @@
 	/* Messing with threads structures, avoid deadlock */
 	irq_spinlock_lock(&threads_lock, true);
-	
+
 	/* First walk the thread tree to count the threads */
 	size_t count = 0;
 	avltree_walk(&threads_tree, avl_count_walker, (void *) &count);
-	
+
 	if (count == 0) {
 		/* No threads found (strange) */
@@ -406,5 +406,5 @@
 		return NULL;
 	}
-	
+
 	*size = sizeof(stats_thread_t) * count;
 	if (dry_run) {
@@ -412,5 +412,5 @@
 		return NULL;
 	}
-	
+
 	stats_thread_t *stats_threads = (stats_thread_t *) malloc(*size, FRAME_ATOMIC);
 	if (stats_threads == NULL) {
@@ -420,11 +420,11 @@
 		return NULL;
 	}
-	
+
 	/* Walk tha thread tree again to gather the statistics */
 	stats_thread_t *iterator = stats_threads;
 	avltree_walk(&threads_tree, thread_serialize_walker, (void *) &iterator);
-	
+
 	irq_spinlock_unlock(&threads_lock, true);
-	
+
 	return ((void *) stats_threads);
 }
@@ -454,13 +454,13 @@
 	sysinfo_return_t ret;
 	ret.tag = SYSINFO_VAL_UNDEFINED;
-	
+
 	/* Parse the task ID */
 	task_id_t task_id;
 	if (str_uint64_t(name, NULL, 0, true, &task_id) != EOK)
 		return ret;
-	
+
 	/* Messing with task structures, avoid deadlock */
 	irq_spinlock_lock(&tasks_lock, true);
-	
+
 	task_t *task = task_find_by_id(task_id);
 	if (task == NULL) {
@@ -469,10 +469,10 @@
 		return ret;
 	}
-	
+
 	if (dry_run) {
 		ret.tag = SYSINFO_VAL_FUNCTION_DATA;
 		ret.data.data = NULL;
 		ret.data.size = sizeof(stats_task_t);
-		
+
 		irq_spinlock_unlock(&tasks_lock, true);
 	} else {
@@ -484,18 +484,18 @@
 			return ret;
 		}
-		
+
 		/* Correct return value */
 		ret.tag = SYSINFO_VAL_FUNCTION_DATA;
 		ret.data.data = (void *) stats_task;
 		ret.data.size = sizeof(stats_task_t);
-		
+
 		/* Hand-over-hand locking */
 		irq_spinlock_exchange(&tasks_lock, &task->lock);
-		
+
 		produce_stats_task(task, stats_task);
-		
+
 		irq_spinlock_unlock(&task->lock, true);
 	}
-	
+
 	return ret;
 }
@@ -525,13 +525,13 @@
 	sysinfo_return_t ret;
 	ret.tag = SYSINFO_VAL_UNDEFINED;
-	
+
 	/* Parse the thread ID */
 	thread_id_t thread_id;
 	if (str_uint64_t(name, NULL, 0, true, &thread_id) != EOK)
 		return ret;
-	
+
 	/* Messing with threads structures, avoid deadlock */
 	irq_spinlock_lock(&threads_lock, true);
-	
+
 	thread_t *thread = thread_find_by_id(thread_id);
 	if (thread == NULL) {
@@ -540,10 +540,10 @@
 		return ret;
 	}
-	
+
 	if (dry_run) {
 		ret.tag = SYSINFO_VAL_FUNCTION_DATA;
 		ret.data.data = NULL;
 		ret.data.size = sizeof(stats_thread_t);
-		
+
 		irq_spinlock_unlock(&threads_lock, true);
 	} else {
@@ -555,18 +555,18 @@
 			return ret;
 		}
-		
+
 		/* Correct return value */
 		ret.tag = SYSINFO_VAL_FUNCTION_DATA;
 		ret.data.data = (void *) stats_thread;
 		ret.data.size = sizeof(stats_thread_t);
-		
+
 		/* Hand-over-hand locking */
 		irq_spinlock_exchange(&threads_lock, &thread->lock);
-		
+
 		produce_stats_thread(thread, stats_thread);
-		
+
 		irq_spinlock_unlock(&thread->lock, true);
 	}
-	
+
 	return ret;
 }
@@ -587,8 +587,8 @@
 {
 	*size = sizeof(stats_exc_t) * IVT_ITEMS;
-	
+
 	if ((dry_run) || (IVT_ITEMS == 0))
 		return NULL;
-	
+
 	stats_exc_t *stats_exceptions =
 	    (stats_exc_t *) malloc(*size, FRAME_ATOMIC);
@@ -598,9 +598,9 @@
 		return NULL;
 	}
-	
+
 #if (IVT_ITEMS > 0)
 	/* Messing with exception table, avoid deadlock */
 	irq_spinlock_lock(&exctbl_lock, true);
-	
+
 	unsigned int i;
 	for (i = 0; i < IVT_ITEMS; i++) {
@@ -611,8 +611,8 @@
 		stats_exceptions[i].count = exc_table[i].count;
 	}
-	
+
 	irq_spinlock_unlock(&exctbl_lock, true);
 #endif
-	
+
 	return ((void *) stats_exceptions);
 }
@@ -642,15 +642,15 @@
 	sysinfo_return_t ret;
 	ret.tag = SYSINFO_VAL_UNDEFINED;
-	
+
 	/* Parse the exception number */
 	uint64_t excn;
 	if (str_uint64_t(name, NULL, 0, true, &excn) != EOK)
 		return ret;
-	
+
 #if (IVT_FIRST > 0)
 	if (excn < IVT_FIRST)
 		return ret;
 #endif
-	
+
 #if (IVT_ITEMS + IVT_FIRST == 0)
 	return ret;
@@ -659,5 +659,5 @@
 		return ret;
 #endif
-	
+
 	if (dry_run) {
 		ret.tag = SYSINFO_VAL_FUNCTION_DATA;
@@ -667,5 +667,5 @@
 		/* Update excn index for accessing exc_table */
 		excn -= IVT_FIRST;
-		
+
 		/* Allocate stats_exc_t structure */
 		stats_exc_t *stats_exception =
@@ -673,13 +673,13 @@
 		if (stats_exception == NULL)
 			return ret;
-		
+
 		/* Messing with exception table, avoid deadlock */
 		irq_spinlock_lock(&exctbl_lock, true);
-		
+
 		/* Correct return value */
 		ret.tag = SYSINFO_VAL_FUNCTION_DATA;
 		ret.data.data = (void *) stats_exception;
 		ret.data.size = sizeof(stats_exc_t);
-		
+
 		stats_exception->id = excn;
 		str_cpy(stats_exception->desc, EXC_NAME_BUFLEN, exc_table[excn].name);
@@ -687,8 +687,8 @@
 		stats_exception->cycles = exc_table[excn].cycles;
 		stats_exception->count = exc_table[excn].count;
-		
+
 		irq_spinlock_unlock(&exctbl_lock, true);
 	}
-	
+
 	return ret;
 }
@@ -711,5 +711,5 @@
 	if (dry_run)
 		return NULL;
-	
+
 	stats_physmem_t *stats_physmem =
 	    (stats_physmem_t *) malloc(*size, FRAME_ATOMIC);
@@ -718,8 +718,8 @@
 		return NULL;
 	}
-	
+
 	zones_stats(&(stats_physmem->total), &(stats_physmem->unavail),
 	    &(stats_physmem->used), &(stats_physmem->free));
-	
+
 	return ((void *) stats_physmem);
 }
@@ -742,5 +742,5 @@
 	if (dry_run)
 		return NULL;
-	
+
 	load_t *stats_load = (load_t *) malloc(*size, FRAME_ATOMIC);
 	if (stats_load == NULL) {
@@ -748,14 +748,14 @@
 		return NULL;
 	}
-	
+
 	/* To always get consistent values acquire the mutex */
 	mutex_lock(&load_lock);
-	
+
 	unsigned int i;
 	for (i = 0; i < LOAD_STEPS; i++)
 		stats_load[i] = avenrdy[i] << LOAD_KERNEL_SHIFT;
-	
+
 	mutex_unlock(&load_lock);
-	
+
 	return ((void *) stats_load);
 }
@@ -768,5 +768,5 @@
 	load *= exp;
 	load += (ready << LOAD_FIXED_SHIFT) * (LOAD_FIXED_1 - exp);
-	
+
 	return (load >> LOAD_FIXED_SHIFT);
 }
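
load_calc() is a fixed-point exponential moving average: the previous average is weighted by exp and the current ready count by (LOAD_FIXED_1 - exp), both expressed in units of 1 << LOAD_FIXED_SHIFT. The standalone restatement below uses assumed constants (an 11-bit shift and the conventional one-minute decay factor for a 5-second sampling interval); the kernel's actual values may differ.

#include <stdint.h>
#include <stdio.h>

/* Assumed fixed-point parameters; the kernel's actual values may differ. */
#define FIXED_SHIFT	11
#define FIXED_1		(1 << FIXED_SHIFT)	/* 1.0 in fixed point */
#define EXP_1MIN	1884			/* ~exp(-5s/60s) * FIXED_1 */

static uint64_t load_calc(uint64_t load, uint64_t exp_factor, uint64_t ready)
{
	/* new = old * w + ready * (1 - w), with w = exp_factor / FIXED_1 */
	load *= exp_factor;
	load += (ready << FIXED_SHIFT) * (FIXED_1 - exp_factor);
	return load >> FIXED_SHIFT;
}

int main(void)
{
	uint64_t avg = 0;	/* stored as ready-count << FIXED_SHIFT */

	/* Feed a constant ready count of 3; the average creeps toward 3.0. */
	for (int i = 0; i < 20; i++) {
		avg = load_calc(avg, EXP_1MIN, 3);
		printf("step %2d: %.3f\n", i, (double) avg / FIXED_1);
	}

	return 0;
}
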
@@ -782,17 +782,17 @@
 {
 	thread_detach(THREAD);
-	
+
 	while (true) {
 		atomic_count_t ready = atomic_get(&nrdy);
-		
+
 		/* Mutually exclude with get_stats_load() */
 		mutex_lock(&load_lock);
-		
+
 		unsigned int i;
 		for (i = 0; i < LOAD_STEPS; i++)
 			avenrdy[i] = load_calc(avenrdy[i], load_exp[i], ready);
-		
+
 		mutex_unlock(&load_lock);
-		
+
 		thread_sleep(LOAD_INTERVAL);
 	}
@@ -805,5 +805,5 @@
 {
 	mutex_initialize(&load_lock, MUTEX_PASSIVE);
-	
+
 	sysinfo_set_item_gen_data("system.cpus", NULL, get_stats_cpus, NULL);
 	sysinfo_set_item_gen_data("system.physmem", NULL, get_stats_physmem, NULL);
Index: kernel/generic/src/sysinfo/sysinfo.c
===================================================================
--- kernel/generic/src/sysinfo/sysinfo.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/sysinfo/sysinfo.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -64,5 +64,5 @@
 {
 	sysinfo_item_t *item = (sysinfo_item_t *) obj;
-	
+
 	item->name = NULL;
 	item->val_type = SYSINFO_VAL_UNDEFINED;
@@ -70,5 +70,5 @@
 	item->subtree.table = NULL;
 	item->next = NULL;
-	
+
 	return EOK;
 }
@@ -84,8 +84,8 @@
 {
 	sysinfo_item_t *item = (sysinfo_item_t *) obj;
-	
+
 	if (item->name != NULL)
 		free(item->name);
-	
+
 	return 0;
 }
@@ -101,5 +101,5 @@
 	    sizeof(sysinfo_item_t), 0, sysinfo_item_constructor,
 	    sysinfo_item_destructor, SLAB_CACHE_MAGDEFERRED);
-	
+
 	mutex_initialize(&sysinfo_lock, MUTEX_ACTIVE);
 }
@@ -127,19 +127,19 @@
 {
 	assert(subtree != NULL);
-	
+
 	sysinfo_item_t *cur = subtree;
-	
+
 	/* Walk all siblings */
 	while (cur != NULL) {
 		size_t i = 0;
-		
+
 		/* Compare name with path */
 		while ((cur->name[i] != 0) && (name[i] == cur->name[i]))
 			i++;
-		
+
 		/* Check for perfect name and path match */
 		if ((name[i] == 0) && (cur->name[i] == 0))
 			return cur;
-		
+
 		/* Partial match up to the delimiter */
 		if ((name[i] == '.') && (cur->name[i] == 0)) {
@@ -155,5 +155,5 @@
 					**ret = cur->subtree.generator.fn(name + i + 1,
 					    dry_run, cur->subtree.generator.data);
-				
+
 				return NULL;
 			default:
@@ -161,16 +161,16 @@
 				if (ret != NULL)
 					*ret = NULL;
-				
+
 				return NULL;
 			}
 		}
-		
+
 		cur = cur->next;
 	}
-	
+
 	/* Not found, no data generated */
 	if (ret != NULL)
 		*ret = NULL;
-	
+
 	return NULL;
 }
@@ -193,22 +193,22 @@
 {
 	assert(psubtree != NULL);
-	
+
 	if (*psubtree == NULL) {
 		/* No parent */
-		
+
 		size_t i = 0;
-		
+
 		/* Find the first delimiter in name */
 		while ((name[i] != 0) && (name[i] != '.'))
 			i++;
-		
+
 		*psubtree =
 		    (sysinfo_item_t *) slab_alloc(sysinfo_item_cache, 0);
 		assert(*psubtree);
-		
+
 		/* Fill in item name up to the delimiter */
 		(*psubtree)->name = str_ndup(name, i);
 		assert((*psubtree)->name);
-		
+
 		/* Create subtree items */
 		if (name[i] == '.') {
@@ -217,19 +217,19 @@
 			    &((*psubtree)->subtree.table));
 		}
-		
+
 		/* No subtree needs to be created */
 		return *psubtree;
 	}
-	
+
 	sysinfo_item_t *cur = *psubtree;
-	
+
 	/* Walk all siblings */
 	while (cur != NULL) {
 		size_t i = 0;
-		
+
 		/* Compare name with path */
 		while ((cur->name[i] != 0) && (name[i] == cur->name[i]))
 			i++;
-		
+
 		/* Check for perfect name and path match
 		 * -> item is already present.
@@ -237,5 +237,5 @@
 		if ((name[i] == 0) && (cur->name[i] == 0))
 			return cur;
-		
+
 		/* Partial match up to the delimiter */
 		if ((name[i] == '.') && (cur->name[i] == 0)) {
@@ -257,5 +257,5 @@
 			}
 		}
-		
+
 		/* No match and no more siblings to check
 		 * -> create a new sibling item.
@@ -266,15 +266,15 @@
 			while ((name[i] != 0) && (name[i] != '.'))
 				i++;
-			
+
 			sysinfo_item_t *item =
 			    (sysinfo_item_t *) slab_alloc(sysinfo_item_cache, 0);
 			assert(item);
-			
+
 			cur->next = item;
-			
+
 			/* Fill in item name up to the delimiter */
 			item->name = str_ndup(name, i);
 			assert(item->name);
-			
+
 			/* Create subtree items */
 			if (name[i] == '.') {
@@ -283,12 +283,12 @@
 				    &(item->subtree.table));
 			}
-			
+
 			/* No subtree needs to be created */
 			return item;
 		}
-		
+
 		cur = cur->next;
 	}
-	
+
 	/* Unreachable */
 	assert(false);
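
sysinfo_create_path() above and sysinfo_find_item() earlier both match one dotted-path component at a time against a sibling's name: an exact match ends the search, a match that stops at a '.' descends into the subtree, and anything else moves on to the next sibling. The helper below isolates that comparison; it is illustrative only and not a kernel interface.

#include <stddef.h>

/* Match one dotted-path component against an item name, the way the
 * sibling walk above does: 0 means an exact match, a positive value is
 * the offset just past the '.' when only the leading component matches,
 * and -1 means no match at all. */
static int match_component(const char *name, const char *item)
{
	size_t i = 0;

	while (item[i] != 0 && name[i] == item[i])
		i++;

	if (name[i] == 0 && item[i] == 0)
		return 0;		/* whole path consumed: this is the item */

	if (name[i] == '.' && item[i] == 0)
		return (int) (i + 1);	/* descend into the subtree after the dot */

	return -1;			/* no match: try the next sibling */
}

int main(void)
{
	/* "system.cpus" against item "system": partial match, offset 7. */
	return (match_component("system.cpus", "system") == 7) ? 0 : 1;
}
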
@@ -309,8 +309,8 @@
 	/* Protect sysinfo tree consistency */
 	mutex_lock(&sysinfo_lock);
-	
+
 	if (root == NULL)
 		root = &global_root;
-	
+
 	sysinfo_item_t *item = sysinfo_create_path(name, root);
 	if (item != NULL) {
@@ -318,5 +318,5 @@
 		item->val.val = val;
 	}
-	
+
 	mutex_unlock(&sysinfo_lock);
 }
@@ -340,8 +340,8 @@
 	/* Protect sysinfo tree consistency */
 	mutex_lock(&sysinfo_lock);
-	
+
 	if (root == NULL)
 		root = &global_root;
-	
+
 	sysinfo_item_t *item = sysinfo_create_path(name, root);
 	if (item != NULL) {
@@ -350,5 +350,5 @@
 		item->val.data.size = size;
 	}
-	
+
 	mutex_unlock(&sysinfo_lock);
 }
@@ -368,8 +368,8 @@
 	/* Protect sysinfo tree consistency */
 	mutex_lock(&sysinfo_lock);
-	
+
 	if (root == NULL)
 		root = &global_root;
-	
+
 	sysinfo_item_t *item = sysinfo_create_path(name, root);
 	if (item != NULL) {
@@ -378,5 +378,5 @@
 		item->val.gen_val.data = data;
 	}
-	
+
 	mutex_unlock(&sysinfo_lock);
 }
@@ -401,8 +401,8 @@
 	/* Protect sysinfo tree consistency */
 	mutex_lock(&sysinfo_lock);
-	
+
 	if (root == NULL)
 		root = &global_root;
-	
+
 	sysinfo_item_t *item = sysinfo_create_path(name, root);
 	if (item != NULL) {
@@ -411,5 +411,5 @@
 		item->val.gen_data.data = data;
 	}
-	
+
 	mutex_unlock(&sysinfo_lock);
 }
@@ -426,12 +426,12 @@
 	/* Protect sysinfo tree consistency */
 	mutex_lock(&sysinfo_lock);
-	
+
 	if (root == NULL)
 		root = &global_root;
-	
+
 	sysinfo_item_t *item = sysinfo_create_path(name, root);
 	if (item != NULL)
 		item->val_type = SYSINFO_VAL_UNDEFINED;
-	
+
 	mutex_unlock(&sysinfo_lock);
 }
@@ -451,10 +451,10 @@
 	/* Protect sysinfo tree consistency */
 	mutex_lock(&sysinfo_lock);
-	
+
 	if (root == NULL)
 		root = &global_root;
-	
+
 	sysinfo_item_t *item = sysinfo_create_path(name, root);
-	
+
 	/* Change the type of the subtree only if it is not already
 	   a fixed subtree */
@@ -464,5 +464,5 @@
 		item->subtree.generator.data = data;
 	}
-	
+
 	mutex_unlock(&sysinfo_lock);
 }
@@ -492,5 +492,5 @@
 	for (sysinfo_item_t *cur = root; cur; cur = cur->next) {
 		size_t length;
-		
+
 		if (spaces == 0) {
 			printf("%s", cur->name);
@@ -501,8 +501,8 @@
 			length = str_length(cur->name) + 1;
 		}
-		
+
 		sysarg_t val;
 		size_t size;
-		
+
 		/* Display node value and type */
 		switch (cur->val_type) {
@@ -531,5 +531,5 @@
 			printf("+ %s [unknown]\n", cur->name);
 		}
-		
+
 		/* Recursively nest into the subtree */
 		switch (cur->subtree_type) {
@@ -562,10 +562,10 @@
 	   while we are dumping it */
 	mutex_lock(&sysinfo_lock);
-	
+
 	if (root == NULL)
 		sysinfo_dump_internal(global_root, 0);
 	else
 		sysinfo_dump_internal(root, 0);
-	
+
 	mutex_unlock(&sysinfo_lock);
 }
@@ -590,5 +590,5 @@
 	if (root == NULL)
 		root = &global_root;
-	
+
 	/* Try to find the item or generate data */
 	sysinfo_return_t ret;
@@ -596,8 +596,8 @@
 	sysinfo_item_t *item = sysinfo_find_item(name, *root, &ret_ptr,
 	    dry_run);
-	
+
 	if (item != NULL) {
 		/* Item found in the fixed sysinfo tree */
-		
+
 		ret.tag = item->val_type;
 		switch (item->val_type) {
@@ -625,5 +625,5 @@
 		}
 	}
-	
+
 	return ret;
 }
@@ -645,11 +645,11 @@
 	sysinfo_return_t ret;
 	ret.tag = SYSINFO_VAL_UNDEFINED;
-	
+
 	if (size > SYSINFO_MAX_PATH)
 		return ret;
-	
+
 	char *path = (char *) malloc(size + 1, 0);
 	assert(path);
-	
+
 	if ((copy_from_uspace(path, ptr, size + 1) == 0) &&
 	    (path[size] == 0)) {
@@ -662,5 +662,5 @@
 		mutex_unlock(&sysinfo_lock);
 	}
-	
+
 	free(path);
 	return ret;
@@ -686,7 +686,7 @@
 	if (root == NULL)
 		root = &global_root;
-	
+
 	sysinfo_item_t *subtree = NULL;
-	
+
 	if (name[0] != 0) {
 		/* Try to find the item */
@@ -698,8 +698,8 @@
 	} else
 		subtree = *root;
-	
+
 	sysinfo_return_t ret;
 	ret.tag = SYSINFO_VAL_UNDEFINED;
-	
+
 	if (subtree != NULL) {
 		/*
@@ -709,5 +709,5 @@
 		for (sysinfo_item_t *cur = subtree; cur; cur = cur->next)
 			size += str_size(cur->name) + 1;
-		
+
 		if (dry_run) {
 			ret.tag = SYSINFO_VAL_DATA;
@@ -719,5 +719,5 @@
 			if (names == NULL)
 				return ret;
-			
+
 			size_t pos = 0;
 			for (sysinfo_item_t *cur = subtree; cur; cur = cur->next) {
@@ -725,5 +725,5 @@
 				pos += str_size(cur->name) + 1;
 			}
-			
+
 			/* Correct return value */
 			ret.tag = SYSINFO_VAL_DATA;
@@ -732,5 +732,5 @@
 		}
 	}
-	
+
 	return ret;
 }
@@ -754,11 +754,11 @@
 	ret.data.data = NULL;
 	ret.data.size = 0;
-	
+
 	if (size > SYSINFO_MAX_PATH)
 		return ret;
-	
+
 	char *path = (char *) malloc(size + 1, 0);
 	assert(path);
-	
+
 	if ((copy_from_uspace(path, ptr, size + 1) == 0) &&
 	    (path[size] == 0)) {
@@ -771,5 +771,5 @@
 		mutex_unlock(&sysinfo_lock);
 	}
-	
+
 	free(path);
 	return ret;
@@ -794,5 +794,5 @@
 {
 	errno_t rc;
-	
+
 	/*
 	 * Get the keys.
@@ -803,5 +803,5 @@
 	sysinfo_return_t ret =
 	    sysinfo_get_keys_uspace(path_ptr, path_size, true);
-	
+
 	/* Check return data tag */
 	if (ret.tag == SYSINFO_VAL_DATA)
@@ -810,5 +810,5 @@
 	else
 		rc = EINVAL;
-	
+
 	return (sys_errno_t) rc;
 }
@@ -842,9 +842,9 @@
 {
 	errno_t rc;
-	
+
 	/* Get the keys */
 	sysinfo_return_t ret = sysinfo_get_keys_uspace(path_ptr, path_size,
 	    false);
-	
+
 	/* Check return data tag */
 	if (ret.tag == SYSINFO_VAL_DATA) {
@@ -853,9 +853,9 @@
 		if (rc == EOK)
 			rc = copy_to_uspace(size_ptr, &size, sizeof(size));
-		
+
 		free(ret.data.data);
 	} else
 		rc = EINVAL;
-	
+
 	return (sys_errno_t) rc;
 }
@@ -882,5 +882,5 @@
 	 */
 	sysinfo_return_t ret = sysinfo_get_item_uspace(path_ptr, path_size, true);
-	
+
 	/*
 	 * Map generated value types to constant types (user space does
@@ -891,5 +891,5 @@
 	else if (ret.tag == SYSINFO_VAL_FUNCTION_DATA)
 		ret.tag = SYSINFO_VAL_DATA;
-	
+
 	return (sysarg_t) ret.tag;
 }
@@ -913,5 +913,5 @@
 {
 	errno_t rc;
-	
+
 	/*
 	 * Get the item.
@@ -921,5 +921,5 @@
 	 */
 	sysinfo_return_t ret = sysinfo_get_item_uspace(path_ptr, path_size, true);
-	
+
 	/* Only constant or generated numerical value is returned */
 	if ((ret.tag == SYSINFO_VAL_VAL) || (ret.tag == SYSINFO_VAL_FUNCTION_VAL))
@@ -927,5 +927,5 @@
 	else
 		rc = EINVAL;
-	
+
 	return (sys_errno_t) rc;
 }
@@ -949,5 +949,5 @@
 {
 	errno_t rc;
-	
+
 	/*
 	 * Get the item.
@@ -957,5 +957,5 @@
 	 */
 	sysinfo_return_t ret = sysinfo_get_item_uspace(path_ptr, path_size, true);
-	
+
 	/* Only the size of constant or generated binary data is considered */
 	if ((ret.tag == SYSINFO_VAL_DATA) || (ret.tag == SYSINFO_VAL_FUNCTION_DATA))
@@ -964,5 +964,5 @@
 	else
 		rc = EINVAL;
-	
+
 	return (sys_errno_t) rc;
 }
@@ -999,9 +999,9 @@
 {
 	errno_t rc;
-	
+
 	/* Get the item */
 	sysinfo_return_t ret = sysinfo_get_item_uspace(path_ptr, path_size,
 	    false);
-	
+
 	/* Only constant or generated binary data is considered */
 	if ((ret.tag == SYSINFO_VAL_DATA) ||
@@ -1013,9 +1013,9 @@
 	} else
 		rc = EINVAL;
-	
+
 	/* N.B.: The generated binary data should be freed */
 	if ((ret.tag == SYSINFO_VAL_FUNCTION_DATA) && (ret.data.data != NULL))
 		free(ret.data.data);
-	
+
 	return (sys_errno_t) rc;
 }
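
From user space, the *_size() syscalls above pair up with the data-returning ones into a two-phase fetch: query the size (served by the kernel as a dry run), allocate a buffer, then request the data itself. A hedged sketch of that consumer pattern follows; query_size() and query_data() are invented stand-ins for whatever wrappers a libc provides.

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

/* Stand-ins for the size/data call pair; the names and signatures here
 * are invented for the sketch. */
static int query_size(const char *path, size_t *size)
{
	(void) path;
	*size = 16;
	return 0;
}

static int query_data(const char *path, void *buf, size_t size)
{
	(void) path;
	memset(buf, 0, size);
	return 0;
}

/* Two-phase fetch: ask for the size first (the kernel's dry run),
 * allocate a buffer of that size, then request the data itself. */
static void *fetch(const char *path, size_t *size)
{
	if (query_size(path, size) != 0)
		return NULL;

	void *buf = malloc(*size);
	if (buf == NULL)
		return NULL;

	if (query_data(path, buf, *size) != 0) {
		free(buf);
		return NULL;
	}

	return buf;
}

int main(void)
{
	size_t size;
	void *data = fetch("system.uptime", &size);

	free(data);
	return 0;
}
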
Index: kernel/generic/src/time/clock.c
===================================================================
--- kernel/generic/src/time/clock.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/time/clock.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -84,11 +84,11 @@
 	if (faddr == 0)
 		panic("Cannot allocate page for clock.");
-	
+
 	uptime = (uptime_t *) PA2KA(faddr);
-	
+
 	uptime->seconds1 = 0;
 	uptime->seconds2 = 0;
 	uptime->useconds = 0;
-	
+
 	clock_parea.pbase = faddr;
 	clock_parea.frames = 1;
@@ -96,5 +96,5 @@
 	clock_parea.mapped = false;
 	ddi_parea_register(&clock_parea);
-	
+
 	/*
 	 * Prepare information for the userspace so that it can successfully
@@ -146,8 +146,8 @@
 {
 	size_t missed_clock_ticks = CPU->missed_clock_ticks;
-	
+
 	/* Account CPU usage */
 	cpu_update_accounting();
-	
+
 	/*
 	 * To avoid lock ordering problems,
@@ -160,12 +160,12 @@
 		clock_update_counters();
 		cpu_update_accounting();
-		
+
 		irq_spinlock_lock(&CPU->timeoutlock, false);
-		
+
 		link_t *cur;
 		while ((cur = list_first(&CPU->timeout_active_list)) != NULL) {
 			timeout_t *timeout = list_get_instance(cur, timeout_t,
 			    link);
-			
+
 			irq_spinlock_lock(&timeout->lock, false);
 			if (timeout->ticks-- != 0) {
@@ -173,34 +173,34 @@
 				break;
 			}
-			
+
 			list_remove(cur);
 			timeout_handler_t handler = timeout->handler;
 			void *arg = timeout->arg;
 			timeout_reinitialize(timeout);
-			
+
 			irq_spinlock_unlock(&timeout->lock, false);
 			irq_spinlock_unlock(&CPU->timeoutlock, false);
-			
+
 			handler(arg);
-			
+
 			irq_spinlock_lock(&CPU->timeoutlock, false);
 		}
-		
+
 		irq_spinlock_unlock(&CPU->timeoutlock, false);
 	}
 	CPU->missed_clock_ticks = 0;
-	
+
 	/*
 	 * Do CPU usage accounting and find out whether to preempt THREAD.
 	 *
 	 */
-	
+
 	if (THREAD) {
 		uint64_t ticks;
-		
+
 		irq_spinlock_lock(&CPU->lock, false);
 		CPU->needs_relink += 1 + missed_clock_ticks;
 		irq_spinlock_unlock(&CPU->lock, false);
-		
+
 		irq_spinlock_lock(&THREAD->lock, false);
 		if ((ticks = THREAD->ticks)) {
@@ -211,5 +211,5 @@
 		}
 		irq_spinlock_unlock(&THREAD->lock, false);
-		
+
 		if (ticks == 0 && PREEMPTION_ENABLED) {
 			scheduler();
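
clock() above invokes each expired timeout's handler only after dropping both the timeout's lock and the CPU's timeoutlock, so handlers are free to take locks or re-register themselves. The sketch below shows the same unlock-call-relock shape on an ordinary mutex-protected list; the types and names are illustrative.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

typedef void (*handler_t)(void *arg);

typedef struct timeout {
	struct timeout *next;
	handler_t handler;
	void *arg;
} timeout_t;

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static timeout_t *expired_head;

/* Drain expired timeouts, invoking each handler with the queue lock
 * dropped so the handler may itself take other locks or re-arm. */
static void drain_expired(void)
{
	pthread_mutex_lock(&queue_lock);

	timeout_t *t;
	while ((t = expired_head) != NULL) {
		expired_head = t->next;

		handler_t handler = t->handler;	/* copy out before unlocking */
		void *arg = t->arg;

		pthread_mutex_unlock(&queue_lock);
		handler(arg);			/* no list lock held here */
		pthread_mutex_lock(&queue_lock);
	}

	pthread_mutex_unlock(&queue_lock);
}

static void say_hello(void *arg)
{
	printf("handler: %s\n", (const char *) arg);
}

int main(void)
{
	timeout_t t = { .next = NULL, .handler = say_hello, .arg = "expired" };

	pthread_mutex_lock(&queue_lock);
	expired_head = &t;
	pthread_mutex_unlock(&queue_lock);

	drain_expired();
	return 0;
}
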
Index: kernel/generic/src/time/delay.c
===================================================================
--- kernel/generic/src/time/delay.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/time/delay.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -35,5 +35,5 @@
  * @brief	Active delay function.
  */
- 
+
 #include <time/delay.h>
 #include <proc/thread.h>
Index: kernel/generic/src/time/timeout.c
===================================================================
--- kernel/generic/src/time/timeout.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/time/timeout.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -103,14 +103,14 @@
 	irq_spinlock_lock(&CPU->timeoutlock, true);
 	irq_spinlock_lock(&timeout->lock, false);
-	
+
 	if (timeout->cpu)
 		panic("Unexpected: timeout->cpu != 0.");
-	
+
 	timeout->cpu = CPU;
 	timeout->ticks = us2ticks(time);
-	
+
 	timeout->handler = handler;
 	timeout->arg = arg;
-	
+
 	/*
 	 * Insert timeout into the active timeouts list according to timeout->ticks.
@@ -123,18 +123,18 @@
 		target = list_get_instance(cur, timeout_t, link);
 		irq_spinlock_lock(&target->lock, false);
-		
+
 		if (timeout->ticks < sum + target->ticks) {
 			irq_spinlock_unlock(&target->lock, false);
 			break;
 		}
-		
+
 		sum += target->ticks;
 		irq_spinlock_unlock(&target->lock, false);
 	}
-	
+
 	/* Avoid using cur->prev directly */
 	link_t *prev = cur->prev;
 	list_insert_after(&timeout->link, prev);
-	
+
 	/*
 	 * Adjust timeout->ticks according to ticks
@@ -142,5 +142,5 @@
 	 */
 	timeout->ticks -= sum;
-	
+
 	/*
 	 * Decrease ticks of timeout's immediate successor by timeout->ticks.
@@ -151,5 +151,5 @@
 		irq_spinlock_unlock(&target->lock, false);
 	}
-	
+
 	irq_spinlock_unlock(&timeout->lock, false);
 	irq_spinlock_unlock(&CPU->timeoutlock, true);
@@ -168,5 +168,5 @@
 {
 	DEADLOCK_PROBE_INIT(p_tolock);
-	
+
 grab_locks:
 	irq_spinlock_lock(&timeout->lock, true);
@@ -175,5 +175,5 @@
 		return false;
 	}
-	
+
 	if (!irq_spinlock_trylock(&timeout->cpu->timeoutlock)) {
 		irq_spinlock_unlock(&timeout->lock, true);
@@ -181,10 +181,10 @@
 		goto grab_locks;
 	}
-	
+
 	/*
 	 * Now we know for sure that timeout hasn't been activated yet
 	 * and is lurking in timeout->cpu->timeout_active_list.
 	 */
-	
+
 	link_t *cur = timeout->link.next;
 	if (cur != &timeout->cpu->timeout_active_list.head) {
@@ -194,11 +194,11 @@
 		irq_spinlock_unlock(&tmp->lock, false);
 	}
-	
+
 	list_remove(&timeout->link);
 	irq_spinlock_unlock(&timeout->cpu->timeoutlock, false);
-	
+
 	timeout_reinitialize(timeout);
 	irq_spinlock_unlock(&timeout->lock, true);
-	
+
 	return true;
 }
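
timeout_register() above stores each timeout's ticks as a delta against the sum of its predecessors, which is why the insert subtracts the running sum and then shrinks the successor's delta; the clock then only ever has to decrement the head of the list. A small self-contained delta-list insert is sketched below with hypothetical types; the final printf documents the expected deltas for absolute expiries 5, 8 and 12.

#include <stdio.h>
#include <stdlib.h>

/* Each node stores ticks *relative* to the sum of its predecessors. */
typedef struct node {
	struct node *next;
	unsigned ticks;		/* delta against the previous node */
} node_t;

/* Insert a timeout that should fire 'abs_ticks' from now. */
static void insert(node_t **head, unsigned abs_ticks)
{
	node_t *n = malloc(sizeof(node_t));
	if (n == NULL)
		abort();

	unsigned sum = 0;
	node_t **link = head;

	/* Find the first successor whose absolute expiry is later. */
	while (*link != NULL && abs_ticks >= sum + (*link)->ticks) {
		sum += (*link)->ticks;
		link = &(*link)->next;
	}

	n->ticks = abs_ticks - sum;	/* store only the remaining delta */
	n->next = *link;
	*link = n;

	/* The immediate successor now only needs to wait the difference. */
	if (n->next != NULL)
		n->next->ticks -= n->ticks;
}

int main(void)
{
	node_t *head = NULL;

	insert(&head, 5);
	insert(&head, 12);
	insert(&head, 8);	/* lands between the two */

	for (node_t *cur = head; cur != NULL; cur = cur->next)
		printf("%u ", cur->ticks);
	printf("\n");		/* prints: 5 3 4 */

	return 0;
}
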
Index: kernel/generic/src/udebug/udebug.c
===================================================================
--- kernel/generic/src/udebug/udebug.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/udebug/udebug.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -75,5 +75,5 @@
 	waitq_initialize(&ut->go_wq);
 	condvar_initialize(&ut->active_cv);
-	
+
 	ut->go_call = NULL;
 	ut->uspace_state = NULL;
@@ -96,5 +96,5 @@
 {
 	ipl_t ipl = waitq_sleep_prepare(wq);
-	
+
 	wq->missed_wakeups = 0;  /* Enforce blocking. */
 	bool blocked;
@@ -119,14 +119,14 @@
 	assert(THREAD);
 	assert(TASK);
-	
+
 	mutex_lock(&TASK->udebug.lock);
-	
+
 	int nsc = --TASK->udebug.not_stoppable_count;
-	
+
 	/* Lock order OK, THREAD->udebug.lock is after TASK->udebug.lock */
 	mutex_lock(&THREAD->udebug.lock);
 	assert(THREAD->udebug.stoppable == false);
 	THREAD->udebug.stoppable = true;
-	
+
 	if ((TASK->udebug.dt_state == UDEBUG_TS_BEGINNING) && (nsc == 0)) {
 		/*
@@ -135,11 +135,11 @@
 		 *
 		 */
-		
+
 		call_t *db_call = TASK->udebug.begin_call;
 		assert(db_call);
-		
+
 		TASK->udebug.dt_state = UDEBUG_TS_ACTIVE;
 		TASK->udebug.begin_call = NULL;
-		
+
 		IPC_SET_RETVAL(db_call->data, 0);
 		ipc_answer(&TASK->answerbox, db_call);
@@ -148,5 +148,5 @@
 		 * Active debugging session
 		 */
-		
+
 		if (THREAD->udebug.active == true &&
 		    THREAD->udebug.go == false) {
@@ -155,18 +155,18 @@
 			 *
 			 */
-			
+
 			/* Make sure nobody takes this call away from us */
 			call_t *go_call = THREAD->udebug.go_call;
 			THREAD->udebug.go_call = NULL;
 			assert(go_call);
-			
+
 			IPC_SET_RETVAL(go_call->data, 0);
 			IPC_SET_ARG1(go_call->data, UDEBUG_EVENT_STOP);
-			
+
 			THREAD->udebug.cur_event = UDEBUG_EVENT_STOP;
 			ipc_answer(&TASK->answerbox, go_call);
 		}
 	}
-	
+
 	mutex_unlock(&THREAD->udebug.lock);
 	mutex_unlock(&TASK->udebug.lock);
@@ -185,11 +185,11 @@
 	mutex_lock(&TASK->udebug.lock);
 	mutex_lock(&THREAD->udebug.lock);
-	
+
 	if ((THREAD->udebug.active) && (THREAD->udebug.go == false)) {
 		mutex_unlock(&THREAD->udebug.lock);
 		mutex_unlock(&TASK->udebug.lock);
-		
+
 		udebug_wait_for_go(&THREAD->udebug.go_wq);
-		
+
 		goto restart;
 		/* Must try again - have to lose stoppability atomically. */
@@ -198,5 +198,5 @@
 		assert(THREAD->udebug.stoppable == true);
 		THREAD->udebug.stoppable = false;
-		
+
 		mutex_unlock(&THREAD->udebug.lock);
 		mutex_unlock(&TASK->udebug.lock);
@@ -228,8 +228,8 @@
 	udebug_event_t etype =
 	    end_variant ? UDEBUG_EVENT_SYSCALL_E : UDEBUG_EVENT_SYSCALL_B;
-	
+
 	mutex_lock(&TASK->udebug.lock);
 	mutex_lock(&THREAD->udebug.lock);
-	
+
 	/* Must only generate events when in debugging session and is go. */
 	if (THREAD->udebug.active != true || THREAD->udebug.go == false ||
@@ -239,14 +239,14 @@
 		return;
 	}
-	
+
 	/* Fill in the GO response. */
 	call_t *call = THREAD->udebug.go_call;
 	THREAD->udebug.go_call = NULL;
-	
+
 	IPC_SET_RETVAL(call->data, 0);
 	IPC_SET_ARG1(call->data, etype);
 	IPC_SET_ARG2(call->data, id);
 	IPC_SET_ARG3(call->data, rc);
-	
+
 	THREAD->udebug.syscall_args[0] = a1;
 	THREAD->udebug.syscall_args[1] = a2;
@@ -255,5 +255,5 @@
 	THREAD->udebug.syscall_args[4] = a5;
 	THREAD->udebug.syscall_args[5] = a6;
-	
+
 	/*
 	 * Make sure udebug.go is false when going to sleep
@@ -264,10 +264,10 @@
 	THREAD->udebug.go = false;
 	THREAD->udebug.cur_event = etype;
-	
+
 	ipc_answer(&TASK->answerbox, call);
-	
+
 	mutex_unlock(&THREAD->udebug.lock);
 	mutex_unlock(&TASK->udebug.lock);
-	
+
 	udebug_wait_for_go(&THREAD->udebug.go_wq);
 }
@@ -294,9 +294,9 @@
 	mutex_lock(&TASK->udebug.lock);
 	mutex_lock(&THREAD->udebug.lock);
-	
+
 	thread_attach(thread, task);
-	
+
 	LOG("Check state");
-	
+
 	/* Must only generate events when in debugging session */
 	if (THREAD->udebug.active != true) {
@@ -304,19 +304,19 @@
 		    THREAD->udebug.active ? "Yes(+)" : "No",
 		    THREAD->udebug.go ? "Yes(-)" : "No");
-		
+
 		mutex_unlock(&THREAD->udebug.lock);
 		mutex_unlock(&TASK->udebug.lock);
 		return;
 	}
-	
+
 	LOG("Trigger event");
-	
+
 	call_t *call = THREAD->udebug.go_call;
-	
+
 	THREAD->udebug.go_call = NULL;
 	IPC_SET_RETVAL(call->data, 0);
 	IPC_SET_ARG1(call->data, UDEBUG_EVENT_THREAD_B);
 	IPC_SET_ARG2(call->data, (sysarg_t) thread);
-	
+
 	/*
 	 * Make sure udebug.go is false when going to sleep
@@ -327,10 +327,10 @@
 	THREAD->udebug.go = false;
 	THREAD->udebug.cur_event = UDEBUG_EVENT_THREAD_B;
-	
+
 	ipc_answer(&TASK->answerbox, call);
-	
+
 	mutex_unlock(&THREAD->udebug.lock);
 	mutex_unlock(&TASK->udebug.lock);
-	
+
 	LOG("Wait for Go");
 	udebug_wait_for_go(&THREAD->udebug.go_wq);
@@ -347,7 +347,7 @@
 	mutex_lock(&TASK->udebug.lock);
 	mutex_lock(&THREAD->udebug.lock);
-	
+
 	LOG("Check state");
-	
+
 	/* Must only generate events when in debugging session. */
 	if (THREAD->udebug.active != true) {
@@ -355,28 +355,28 @@
 		    THREAD->udebug.active ? "Yes" : "No",
 		    THREAD->udebug.go ? "Yes" : "No");
-		
+
 		mutex_unlock(&THREAD->udebug.lock);
 		mutex_unlock(&TASK->udebug.lock);
 		return;
 	}
-	
+
 	LOG("Trigger event");
-	
+
 	call_t *call = THREAD->udebug.go_call;
-	
+
 	THREAD->udebug.go_call = NULL;
 	IPC_SET_RETVAL(call->data, 0);
 	IPC_SET_ARG1(call->data, UDEBUG_EVENT_THREAD_E);
-	
+
 	/* Prevent any further debug activity in thread. */
 	THREAD->udebug.active = false;
 	THREAD->udebug.cur_event = 0;   /* None */
 	THREAD->udebug.go = false;      /* Set to initial value */
-	
+
 	ipc_answer(&TASK->answerbox, call);
-	
+
 	mutex_unlock(&THREAD->udebug.lock);
 	mutex_unlock(&TASK->udebug.lock);
-	
+
 	/*
 	 * This event does not sleep - debugging has finished
@@ -405,11 +405,11 @@
 		return EINVAL;
 	}
-	
+
 	LOG("Task %" PRIu64, task->taskid);
-	
+
 	/* Finish debugging of all userspace threads */
 	list_foreach(task->threads, th_link, thread_t, thread) {
 		mutex_lock(&thread->udebug.lock);
-		
+
 		/* Only process userspace threads. */
 		if (thread->uspace) {
@@ -417,5 +417,5 @@
 			thread->udebug.active = false;
 			thread->udebug.cur_event = 0;   /* None */
-			
+
 			/* Is the thread still go? */
 			if (thread->udebug.go == true) {
@@ -426,12 +426,12 @@
 				 */
 				thread->udebug.go = false;
-				
+
 				/* Answer GO call */
 				LOG("Answer GO call with EVENT_FINISHED.");
-				
+
 				IPC_SET_RETVAL(thread->udebug.go_call->data, 0);
 				IPC_SET_ARG1(thread->udebug.go_call->data,
 				    UDEBUG_EVENT_FINISHED);
-				
+
 				ipc_answer(&task->answerbox, thread->udebug.go_call);
 				thread->udebug.go_call = NULL;
@@ -442,5 +442,5 @@
 				 *
 				 */
-				
+
 				/*
 				 * thread's lock must not be held when calling
@@ -450,5 +450,5 @@
 				waitq_wakeup(&thread->udebug.go_wq, WAKEUP_FIRST);
 			}
-			
+
 			mutex_unlock(&thread->udebug.lock);
 			condvar_broadcast(&thread->udebug.active_cv);
@@ -456,8 +456,8 @@
 			mutex_unlock(&thread->udebug.lock);
 	}
-	
+
 	task->udebug.dt_state = UDEBUG_TS_INACTIVE;
 	task->udebug.debugger = NULL;
-	
+
 	return 0;
 }
@@ -474,5 +474,5 @@
 {
 	udebug_stoppable_begin();
-	
+
 	/* Wait until a debugger attends to us. */
 	mutex_lock(&THREAD->udebug.lock);
@@ -480,5 +480,5 @@
 		condvar_wait(&THREAD->udebug.active_cv, &THREAD->udebug.lock);
 	mutex_unlock(&THREAD->udebug.lock);
-	
+
 	/* Make sure the debugging session is over before proceeding. */
 	mutex_lock(&THREAD->udebug.lock);
@@ -486,5 +486,5 @@
 		condvar_wait(&THREAD->udebug.active_cv, &THREAD->udebug.lock);
 	mutex_unlock(&THREAD->udebug.lock);
-	
+
 	udebug_stoppable_end();
 }
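
The waiting in the last hunk above uses the standard condition-variable idiom: re-check the predicate in a loop under the mutex after every wakeup. A POSIX-threads rendition of that loop follows; the flag name and the one-second delay are invented only to make the example observable.

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static bool session_active = true;

/* Block until the flag is cleared; the predicate is re-checked after
 * every wakeup, exactly like the condvar_wait() loops above. */
static void wait_until_inactive(void)
{
	pthread_mutex_lock(&lock);
	while (session_active)
		pthread_cond_wait(&cv, &lock);
	pthread_mutex_unlock(&lock);
}

/* The other side clears the flag under the same mutex and broadcasts. */
static void *end_session(void *arg)
{
	(void) arg;
	sleep(1);

	pthread_mutex_lock(&lock);
	session_active = false;
	pthread_cond_broadcast(&cv);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, end_session, NULL);
	wait_until_inactive();
	pthread_join(t, NULL);
	return 0;
}
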
Index: kernel/generic/src/udebug/udebug_ops.c
===================================================================
--- kernel/generic/src/udebug/udebug_ops.c	(revision 1b20da07baaa3e3c424f62c927274e676e4295cd)
+++ kernel/generic/src/udebug/udebug_ops.c	(revision 0aa06cbe8a6965cc7f1ebfa7236bcd8d5316da16)
@@ -82,8 +82,8 @@
 {
 	mutex_lock(&TASK->udebug.lock);
-	
+
 	/* thread_exists() must be called with threads_lock held */
 	irq_spinlock_lock(&threads_lock, true);
-	
+
 	if (!thread_exists(thread)) {
 		irq_spinlock_unlock(&threads_lock, true);
@@ -91,8 +91,8 @@
 		return ENOENT;
 	}
-	
+
 	/* thread->lock is enough to ensure the thread's existence */
 	irq_spinlock_exchange(&threads_lock, &thread->lock);
-	
+
 	/* Verify that 'thread' is a userspace thread. */
 	if (!thread->uspace) {
@@ -102,5 +102,5 @@
 		return ENOENT;
 	}
-	
+
 	/* Verify debugging state. */
 	if (thread->udebug.active != true) {
@@ -110,5 +110,5 @@
 		return ENOENT;
 	}
-	
+
 	/*
 	 * Since the thread has active == true, TASK->udebug.lock
@@ -118,7 +118,7 @@
 	 */
 	irq_spinlock_unlock(&thread->lock, true);
-	
+
 	/* Only mutex TASK->udebug.lock left. */
-	
+
 	/* Now verify that the thread belongs to the current task. */
 	if (thread->task != TASK) {
@@ -127,5 +127,5 @@
 		return ENOENT;
 	}
-	
+
 	/*
 	 * Now we need to grab the thread's debug lock for synchronization
@@ -134,8 +134,8 @@
 	 */
 	mutex_lock(&thread->udebug.lock);
-	
+
 	/* The big task mutex is no longer needed. */
 	mutex_unlock(&TASK->udebug.lock);
-	
+
 	if (thread->udebug.go != being_go) {
 		/* Not in debugging session or undesired GO state. */
@@ -143,7 +143,7 @@
 		return EINVAL;
 	}
-	
+
 	/* Only thread->udebug.lock left. */
-	
+
 	return EOK;  /* All went well. */
 }
@@ -177,16 +177,16 @@
 {
 	LOG("Debugging task %" PRIu64, TASK->taskid);
-	
-	mutex_lock(&TASK->udebug.lock);
-	
+
+	mutex_lock(&TASK->udebug.lock);
+
 	if (TASK->udebug.dt_state != UDEBUG_TS_INACTIVE) {
 		mutex_unlock(&TASK->udebug.lock);
 		return EBUSY;
 	}
-	
+
 	TASK->udebug.dt_state = UDEBUG_TS_BEGINNING;
 	TASK->udebug.begin_call = call;
 	TASK->udebug.debugger = call->sender;
-	
+
 	if (TASK->udebug.not_stoppable_count == 0) {
 		TASK->udebug.dt_state = UDEBUG_TS_ACTIVE;
@@ -195,7 +195,7 @@
 	} else
 		*active = false;  /* only in beginning state */
-	
+
 	/* Set udebug.active on all of the task's userspace threads. */
-	
+
 	list_foreach(TASK->threads, th_link, thread_t, thread) {
 		mutex_lock(&thread->udebug.lock);
@@ -207,5 +207,5 @@
 			mutex_unlock(&thread->udebug.lock);
 	}
-	
+
 	mutex_unlock(&TASK->udebug.lock);
 	return EOK;
@@ -222,9 +222,9 @@
 {
 	LOG("Task %" PRIu64, TASK->taskid);
-	
+
 	mutex_lock(&TASK->udebug.lock);
 	errno_t rc = udebug_task_cleanup(TASK);
 	mutex_unlock(&TASK->udebug.lock);
-	
+
 	return rc;
 }
@@ -242,15 +242,15 @@
 {
 	LOG("mask = 0x%x", mask);
-	
-	mutex_lock(&TASK->udebug.lock);
-	
+
+	mutex_lock(&TASK->udebug.lock);
+
 	if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
 		mutex_unlock(&TASK->udebug.lock);
 		return EINVAL;
 	}
-	
+
 	TASK->udebug.evmask = mask;
 	mutex_unlock(&TASK->udebug.lock);
-	
+
 	return EOK;
 }
@@ -272,9 +272,9 @@
 	if (rc != EOK)
 		return rc;
-	
+
 	thread->udebug.go_call = call;
 	thread->udebug.go = true;
 	thread->udebug.cur_event = 0;  /* none */
-	
+
 	/*
 	 * Neither thread's lock nor threads_lock may be held during wakeup.
@@ -282,7 +282,7 @@
 	 */
 	waitq_wakeup(&thread->udebug.go_wq, WAKEUP_FIRST);
-	
+
 	_thread_op_end(thread);
-	
+
 	return EOK;
 }
@@ -300,5 +300,5 @@
 {
 	LOG("udebug_stop()");
-	
+
 	/*
 	 * On success, this will lock thread->udebug.lock. Note that this
@@ -309,8 +309,8 @@
 	if (rc != EOK)
 		return rc;
-	
+
 	/* Take GO away from the thread. */
 	thread->udebug.go = false;
-	
+
 	if (thread->udebug.stoppable != true) {
 		/* Answer will be sent when the thread becomes stoppable. */
@@ -318,25 +318,25 @@
 		return EOK;
 	}
-	
+
 	/*
 	 * Answer GO call.
 	 *
 	 */
-	
+
 	/* Make sure nobody takes this call away from us. */
 	call = thread->udebug.go_call;
 	thread->udebug.go_call = NULL;
-	
+
 	IPC_SET_RETVAL(call->data, 0);
 	IPC_SET_ARG1(call->data, UDEBUG_EVENT_STOP);
-	
+
 	THREAD->udebug.cur_event = UDEBUG_EVENT_STOP;
-	
+
 	_thread_op_end(thread);
-	
+
 	mutex_lock(&TASK->udebug.lock);
 	ipc_answer(&TASK->answerbox, call);
 	mutex_unlock(&TASK->udebug.lock);
-	
+
 	return EOK;
 }
@@ -368,10 +368,10 @@
 {
 	LOG("udebug_thread_read()");
-	
+
 	/* Allocate a buffer to hold thread IDs */
 	sysarg_t *id_buffer = malloc(buf_size + 1, 0);
-	
-	mutex_lock(&TASK->udebug.lock);
-	
+
+	mutex_lock(&TASK->udebug.lock);
+
 	/* Verify task state */
 	if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
@@ -380,13 +380,13 @@
 		return EINVAL;
 	}
-	
+
 	irq_spinlock_lock(&TASK->lock, true);
-	
+
 	/* Copy down the thread IDs */
-	
+
 	size_t max_ids = buf_size / sizeof(sysarg_t);
 	size_t copied_ids = 0;
 	size_t extra_ids = 0;
-	
+
 	/* FIXME: make sure the thread isn't past debug shutdown... */
 	list_foreach(TASK->threads, th_link, thread_t, thread) {
@@ -394,9 +394,9 @@
 		bool uspace = thread->uspace;
 		irq_spinlock_unlock(&thread->lock, false);
-		
+
 		/* Not interested in kernel threads. */
 		if (!uspace)
 			continue;
-		
+
 		if (copied_ids < max_ids) {
 			/* Using thread struct pointer as identification hash */
@@ -405,13 +405,13 @@
 			extra_ids++;
 	}
-	
+
 	irq_spinlock_unlock(&TASK->lock, true);
-	
-	mutex_unlock(&TASK->udebug.lock);
-	
+
+	mutex_unlock(&TASK->udebug.lock);
+
 	*buffer = id_buffer;
 	*stored = copied_ids * sizeof(sysarg_t);
 	*needed = (copied_ids + extra_ids) * sizeof(sysarg_t);
-	
+
 	return EOK;
 }
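
udebug_thread_read() above copies as many thread IDs as fit into the supplied buffer while still counting the rest, so the caller learns both how much was stored and how much it would need for everything. The sketch below reproduces that stored/needed bookkeeping over a plain array; the function name and types are illustrative.

#include <stddef.h>
#include <string.h>

/* Copy as many source items as fit into the caller's buffer and report
 * both how much was stored and how much a complete copy would need,
 * mirroring the copied_ids/extra_ids bookkeeping above. */
static void copy_partial(const int *src, size_t src_count,
    int *buf, size_t buf_count, size_t *stored, size_t *needed)
{
	size_t copied = (src_count < buf_count) ? src_count : buf_count;

	memcpy(buf, src, copied * sizeof(int));

	*stored = copied * sizeof(int);
	*needed = src_count * sizeof(int);	/* caller can retry with a larger buffer */
}

int main(void)
{
	int src[5] = { 1, 2, 3, 4, 5 };
	int buf[3];
	size_t stored, needed;

	copy_partial(src, 5, buf, 3, &stored, &needed);
	/* stored == 3 * sizeof(int), needed == 5 * sizeof(int) */
	return (stored == 3 * sizeof(int) && needed == 5 * sizeof(int)) ? 0 : 1;
}
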
@@ -431,10 +431,10 @@
 {
 	size_t name_size = str_size(TASK->name) + 1;
-	
+
 	*data = malloc(name_size, 0);
 	*data_size = name_size;
-	
+
 	memcpy(*data, TASK->name, name_size);
-	
+
 	return EOK;
 }
@@ -463,5 +463,5 @@
 	if (rc != EOK)
 		return rc;
-	
+
 	/* Additionally we need to verify that we are inside a syscall. */
 	if ((thread->udebug.cur_event != UDEBUG_EVENT_SYSCALL_B) &&
@@ -470,13 +470,13 @@
 		return EINVAL;
 	}
-	
+
 	/* Prepare a buffer to hold the arguments. */
 	sysarg_t *arg_buffer = malloc(6 * sizeof(sysarg_t), 0);
-	
+
 	/* Copy to a local buffer before releasing the lock. */
 	memcpy(arg_buffer, thread->udebug.syscall_args, 6 * sizeof(sysarg_t));
-	
+
 	_thread_op_end(thread);
-	
+
 	*buffer = arg_buffer;
 	return EOK;
@@ -506,5 +506,5 @@
 	if (rc != EOK)
 		return rc;
-	
+
 	istate_t *state = thread->udebug.uspace_state;
 	if (state == NULL) {
@@ -512,13 +512,13 @@
 		return EBUSY;
 	}
-	
+
 	/* Prepare a buffer to hold the data. */
 	istate_t *state_buf = malloc(sizeof(istate_t), 0);
-	
+
 	/* Copy to the allocated buffer */
 	memcpy(state_buf, state, sizeof(istate_t));
-	
+
 	_thread_op_end(thread);
-	
+
 	*buffer = (void *) state_buf;
 	return EOK;
@@ -540,12 +540,12 @@
 	/* Verify task state */
 	mutex_lock(&TASK->udebug.lock);
-	
+
 	if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
 		mutex_unlock(&TASK->udebug.lock);
 		return EBUSY;
 	}
-	
+
 	void *data_buffer = malloc(n, 0);
-	
+
 	/*
 	 * NOTE: this is not strictly from a syscall... but that shouldn't
@@ -555,8 +555,8 @@
 	errno_t rc = copy_from_uspace(data_buffer, (void *) uspace_addr, n);
 	mutex_unlock(&TASK->udebug.lock);
-	
+
 	if (rc != EOK)
 		return rc;
-	
+
 	*buffer = data_buffer;
 	return EOK;
