Index: kernel/generic/include/ddi/irq.h
===================================================================
--- kernel/generic/include/ddi/irq.h	(revision d9ec808b547e5990edc7280d9d8399acd2a5d86f)
+++ kernel/generic/include/ddi/irq.h	(revision 82d515e917f3eeeff253e4aa30e493f5f632c950)
@@ -132,5 +132,5 @@
 extern hash_table_t irq_uspace_hash_table;
 
-extern slab_cache_t *irq_slab;
+extern slab_cache_t *irq_cache;
 
 extern inr_t last_inr;
Index: kernel/generic/include/ipc/ipc.h
===================================================================
--- kernel/generic/include/ipc/ipc.h	(revision d9ec808b547e5990edc7280d9d8399acd2a5d86f)
+++ kernel/generic/include/ipc/ipc.h	(revision 82d515e917f3eeeff253e4aa30e493f5f632c950)
@@ -173,5 +173,5 @@
 } call_t;
 
-extern slab_cache_t *phone_slab;
+extern slab_cache_t *phone_cache;
 
 extern answerbox_t *ipc_phone_0;
Index: kernel/generic/include/proc/thread.h
===================================================================
--- kernel/generic/include/proc/thread.h	(revision d9ec808b547e5990edc7280d9d8399acd2a5d86f)
+++ kernel/generic/include/proc/thread.h	(revision 82d515e917f3eeeff253e4aa30e493f5f632c950)
@@ -272,5 +272,5 @@
 
 /** Fpu context slab cache. */
-extern slab_cache_t *fpu_context_slab;
+extern slab_cache_t *fpu_context_cache;
 
 /* Thread syscall prototypes. */
Index: kernel/generic/src/adt/btree.c
===================================================================
--- kernel/generic/src/adt/btree.c	(revision d9ec808b547e5990edc7280d9d8399acd2a5d86f)
+++ kernel/generic/src/adt/btree.c	(revision 82d515e917f3eeeff253e4aa30e493f5f632c950)
@@ -55,5 +55,5 @@
 #include <trace.h>
 
-static slab_cache_t *btree_node_slab;
+static slab_cache_t *btree_node_cache;
 
 #define ROOT_NODE(n)   (!(n)->parent)
@@ -71,5 +71,5 @@
 void btree_init(void)
 {
-	btree_node_slab = slab_cache_create("btree_node_t",
+	btree_node_cache = slab_cache_create("btree_node_t",
 	    sizeof(btree_node_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
 }
@@ -109,5 +109,5 @@
 {
 	list_initialize(&t->leaf_list);
-	t->root = (btree_node_t *) slab_alloc(btree_node_slab, 0);
+	t->root = (btree_node_t *) slab_alloc(btree_node_cache, 0);
 	node_initialize(t->root);
 	list_append(&t->root->leaf_link, &t->leaf_list);
@@ -130,5 +130,5 @@
 	}
 	
-	slab_free(btree_node_slab, root);
+	slab_free(btree_node_cache, root);
 }
 
@@ -516,5 +516,5 @@
 	 * Allocate and initialize new right sibling.
 	 */
-	rnode = (btree_node_t *) slab_alloc(btree_node_slab, 0);
+	rnode = (btree_node_t *) slab_alloc(btree_node_cache, 0);
 	node_initialize(rnode);
 	rnode->parent = node->parent;
@@ -595,5 +595,5 @@
 			 * We split the root node. Create new root.
 			 */
-			t->root = (btree_node_t *) slab_alloc(btree_node_slab, 0);
+			t->root = (btree_node_t *) slab_alloc(btree_node_cache, 0);
 			node->parent = t->root;
 			rnode->parent = t->root;
@@ -779,5 +779,5 @@
 			t->root = node->subtree[0];
 			t->root->parent = NULL;
-			slab_free(btree_node_slab, node);
+			slab_free(btree_node_cache, node);
 		} else {
 			/*
@@ -838,5 +838,5 @@
 		idx = find_key_by_subtree(parent, rnode, true);
 		assert((int) idx != -1);
-		slab_free(btree_node_slab, rnode);
+		slab_free(btree_node_cache, rnode);
 		_btree_remove(t, parent->key[idx], parent);
 	}
Index: kernel/generic/src/cap/cap.c
===================================================================
--- kernel/generic/src/cap/cap.c	(revision d9ec808b547e5990edc7280d9d8399acd2a5d86f)
+++ kernel/generic/src/cap/cap.c	(revision 82d515e917f3eeeff253e4aa30e493f5f632c950)
@@ -87,5 +87,5 @@
 #define CAPS_LAST	(CAPS_SIZE - 1)
 
-static slab_cache_t *cap_slab;
+static slab_cache_t *cap_cache;
 
 static size_t caps_hash(const ht_link_t *item)
@@ -116,5 +116,5 @@
 void caps_init(void)
 {
-	cap_slab = slab_cache_create("cap_t", sizeof(cap_t), 0, NULL,
+	cap_cache = slab_cache_create("cap_t", sizeof(cap_t), 0, NULL,
 	    NULL, 0);
 }
@@ -277,5 +277,5 @@
 	 */
 	if (!cap) {
-		cap = slab_alloc(cap_slab, FRAME_ATOMIC);
+		cap = slab_alloc(cap_cache, FRAME_ATOMIC);
 		if (!cap) {
 			mutex_unlock(&task->cap_info->lock);
@@ -284,5 +284,5 @@
 		uintptr_t hbase;
 		if (!ra_alloc(task->cap_info->handles, 1, 1, &hbase)) {
-			slab_free(cap_slab, cap);
+			slab_free(cap_cache, cap);
 			mutex_unlock(&task->cap_info->lock);
 			return ENOMEM;
@@ -371,5 +371,5 @@
 	hash_table_remove_item(&task->cap_info->caps, &cap->caps_link);
 	ra_free(task->cap_info->handles, handle, 1);
-	slab_free(cap_slab, cap);
+	slab_free(cap_cache, cap);
 	mutex_unlock(&task->cap_info->lock);
 }
Index: kernel/generic/src/console/cmd.c
===================================================================
--- kernel/generic/src/console/cmd.c	(revision d9ec808b547e5990edc7280d9d8399acd2a5d86f)
+++ kernel/generic/src/console/cmd.c	(revision 82d515e917f3eeeff253e4aa30e493f5f632c950)
@@ -497,9 +497,9 @@
 };
 
-static int cmd_slabs(cmd_arg_t *argv);
-static cmd_info_t slabs_info = {
-	.name = "slabs",
+static int cmd_caches(cmd_arg_t *argv);
+static cmd_info_t caches_info = {
+	.name = "caches",
 	.description = "List slab caches.",
-	.func = cmd_slabs,
+	.func = cmd_caches,
 	.argc = 0
 };
@@ -605,4 +605,5 @@
 	&call0_info,
 	&mcall0_info,
+	&caches_info,
 	&call1_info,
 	&call2_info,
@@ -620,5 +621,4 @@
 	&sched_info,
 	&set4_info,
-	&slabs_info,
 	&symaddr_info,
 	&sysinfo_info,
@@ -1214,11 +1214,11 @@
 }
 
-/** Command for listings SLAB caches
- *
- * @param argv Ignores
+/** Command for listing slab allocator caches
+ *
+ * @param argv Ignored
  *
  * @return Always 1
  */
-int cmd_slabs(cmd_arg_t *argv)
+int cmd_caches(cmd_arg_t *argv)
 {
 	slab_print_list();
Index: kernel/generic/src/ddi/irq.c
===================================================================
--- kernel/generic/src/ddi/irq.c	(revision d9ec808b547e5990edc7280d9d8399acd2a5d86f)
+++ kernel/generic/src/ddi/irq.c	(revision 82d515e917f3eeeff253e4aa30e493f5f632c950)
@@ -50,5 +50,5 @@
 #include <arch.h>
 
-slab_cache_t *irq_slab = NULL;
+slab_cache_t *irq_cache = NULL;
 
 /** Spinlock protecting the kernel IRQ hash table
@@ -97,7 +97,7 @@
 	last_inr = inrs - 1;
 
-	irq_slab = slab_cache_create("irq_t", sizeof(irq_t), 0, NULL, NULL,
+	irq_cache = slab_cache_create("irq_t", sizeof(irq_t), 0, NULL, NULL,
 	    FRAME_ATOMIC);
-	assert(irq_slab);
+	assert(irq_cache);
 
 	hash_table_create(&irq_uspace_hash_table, chains, 0, &irq_ht_ops);
Index: kernel/generic/src/ipc/ipc.c
===================================================================
--- kernel/generic/src/ipc/ipc.c	(revision d9ec808b547e5990edc7280d9d8399acd2a5d86f)
+++ kernel/generic/src/ipc/ipc.c	(revision 82d515e917f3eeeff253e4aa30e493f5f632c950)
@@ -66,8 +66,8 @@
 answerbox_t *ipc_phone_0 = NULL;
 
-static slab_cache_t *call_slab;
-static slab_cache_t *answerbox_slab;
-
-slab_cache_t *phone_slab = NULL; 
+static slab_cache_t *call_cache;
+static slab_cache_t *answerbox_cache;
+
+slab_cache_t *phone_cache = NULL;
 
 /** Initialize a call structure.
@@ -95,5 +95,5 @@
 	if (call->caller_phone)
 		kobject_put(call->caller_phone->kobject);
-	slab_free(call_slab, call);
+	slab_free(call_cache, call);
 }
 
@@ -115,10 +115,10 @@
 call_t *ipc_call_alloc(unsigned int flags)
 {
-	call_t *call = slab_alloc(call_slab, flags);
+	call_t *call = slab_alloc(call_cache, flags);
 	if (!call)
 		return NULL;
 	kobject_t *kobj = (kobject_t *) malloc(sizeof(kobject_t), flags);
 	if (!kobj) {
-		slab_free(call_slab, call);
+		slab_free(call_cache, call);
 		return NULL;
 	}
@@ -210,5 +210,5 @@
 int ipc_call_sync(phone_t *phone, call_t *request)
 {
-	answerbox_t *mybox = slab_alloc(answerbox_slab, 0);
+	answerbox_t *mybox = slab_alloc(answerbox_cache, 0);
 	ipc_answerbox_init(mybox, TASK);
 	
@@ -218,5 +218,5 @@
 	int rc = ipc_call(phone, request);
 	if (rc != EOK) {
-		slab_free(answerbox_slab, mybox);
+		slab_free(answerbox_cache, mybox);
 		return rc;
 	}
@@ -265,5 +265,5 @@
 	assert(!answer || request == answer);
 	
-	slab_free(answerbox_slab, mybox);
+	slab_free(answerbox_cache, mybox);
 	return rc;
 }
@@ -906,9 +906,9 @@
 void ipc_init(void)
 {
-	call_slab = slab_cache_create("call_t", sizeof(call_t), 0, NULL,
+	call_cache = slab_cache_create("call_t", sizeof(call_t), 0, NULL,
 	    NULL, 0);
-	phone_slab = slab_cache_create("phone_t", sizeof(phone_t), 0, NULL,
+	phone_cache = slab_cache_create("phone_t", sizeof(phone_t), 0, NULL,
 	    NULL, 0);
-	answerbox_slab = slab_cache_create("answerbox_t", sizeof(answerbox_t),
+	answerbox_cache = slab_cache_create("answerbox_t", sizeof(answerbox_t),
 	    0, NULL, NULL, 0);
 }
Index: kernel/generic/src/ipc/ipcrsc.c
===================================================================
--- kernel/generic/src/ipc/ipcrsc.c	(revision d9ec808b547e5990edc7280d9d8399acd2a5d86f)
+++ kernel/generic/src/ipc/ipcrsc.c	(revision 82d515e917f3eeeff253e4aa30e493f5f632c950)
@@ -153,5 +153,5 @@
 {
 	phone_t *phone = (phone_t *) arg;
-	slab_free(phone_slab, phone);
+	slab_free(phone_cache, phone);
 }
 
@@ -173,5 +173,5 @@
 	cap_handle_t handle = cap_alloc(task);
 	if (handle >= 0) {
-		phone_t *phone = slab_alloc(phone_slab, FRAME_ATOMIC);
+		phone_t *phone = slab_alloc(phone_cache, FRAME_ATOMIC);
 		if (!phone) {
 			cap_free(TASK, handle);
@@ -181,5 +181,5 @@
 		if (!kobject) {
 			cap_free(TASK, handle);
-			slab_free(phone_slab, phone);
+			slab_free(phone_cache, phone);
 			return ENOMEM;
 		}
Index: kernel/generic/src/ipc/irq.c
===================================================================
--- kernel/generic/src/ipc/irq.c	(revision d9ec808b547e5990edc7280d9d8399acd2a5d86f)
+++ kernel/generic/src/ipc/irq.c	(revision 82d515e917f3eeeff253e4aa30e493f5f632c950)
@@ -294,5 +294,5 @@
 	/* Free up the IRQ code and associated structures. */
 	code_free(irq->notif_cfg.code);
-	slab_free(irq_slab, irq);
+	slab_free(irq_cache, irq);
 }
 
@@ -333,5 +333,5 @@
 		return handle;
 	
-	irq_t *irq = (irq_t *) slab_alloc(irq_slab, FRAME_ATOMIC);
+	irq_t *irq = (irq_t *) slab_alloc(irq_cache, FRAME_ATOMIC);
 	if (!irq) {
 		cap_free(TASK, handle);
@@ -342,5 +342,5 @@
 	if (!kobject) {
 		cap_free(TASK, handle);
-		slab_free(irq_slab, irq);
+		slab_free(irq_cache, irq);
 		return ENOMEM;
 	}
Index: kernel/generic/src/mm/as.c
===================================================================
--- kernel/generic/src/mm/as.c	(revision d9ec808b547e5990edc7280d9d8399acd2a5d86f)
+++ kernel/generic/src/mm/as.c	(revision 82d515e917f3eeeff253e4aa30e493f5f632c950)
@@ -90,5 +90,5 @@
  *
  */
-static slab_cache_t *as_slab;
+static slab_cache_t *as_cache;
 
 /** ASID subsystem lock.
@@ -131,5 +131,5 @@
 	as_arch_init();
 	
-	as_slab = slab_cache_create("as_t", sizeof(as_t), 0,
+	as_cache = slab_cache_create("as_t", sizeof(as_t), 0,
 	    as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
 	
@@ -153,5 +153,5 @@
 as_t *as_create(unsigned int flags)
 {
-	as_t *as = (as_t *) slab_alloc(as_slab, 0);
+	as_t *as = (as_t *) slab_alloc(as_cache, 0);
 	(void) as_create_arch(as, 0);
 	
@@ -254,5 +254,5 @@
 #endif
 	
-	slab_free(as_slab, as);
+	slab_free(as_cache, as);
 }
 
Index: kernel/generic/src/mm/slab.c
===================================================================
--- kernel/generic/src/mm/slab.c	(revision d9ec808b547e5990edc7280d9d8399acd2a5d86f)
+++ kernel/generic/src/mm/slab.c	(revision 82d515e917f3eeeff253e4aa30e493f5f632c950)
@@ -818,10 +818,8 @@
 }
 
-/* Print list of slabs
- *
- */
+/* Print list of caches */
 void slab_print_list(void)
 {
-	printf("[slab name       ] [size  ] [pages ] [obj/pg] [slabs ]"
+	printf("[cache name      ] [size  ] [pages ] [obj/pg] [slabs ]"
 	    " [cached] [alloc ] [ctl]\n");
 	
Index: kernel/generic/src/proc/scheduler.c
===================================================================
--- kernel/generic/src/proc/scheduler.c	(revision d9ec808b547e5990edc7280d9d8399acd2a5d86f)
+++ kernel/generic/src/proc/scheduler.c	(revision 82d515e917f3eeeff253e4aa30e493f5f632c950)
@@ -163,5 +163,5 @@
 			irq_spinlock_unlock(&CPU->lock, false);
 			THREAD->saved_fpu_context =
-			    (fpu_context_t *) slab_alloc(fpu_context_slab, 0);
+			    (fpu_context_t *) slab_alloc(fpu_context_cache, 0);
 			
 			/* We may have switched CPUs during slab_alloc */
Index: kernel/generic/src/proc/task.c
===================================================================
--- kernel/generic/src/proc/task.c	(revision d9ec808b547e5990edc7280d9d8399acd2a5d86f)
+++ kernel/generic/src/proc/task.c	(revision 82d515e917f3eeeff253e4aa30e493f5f632c950)
@@ -79,5 +79,5 @@
 static task_id_t task_counter = 0;
 
-static slab_cache_t *task_slab;
+static slab_cache_t *task_cache;
 
 /* Forward declarations. */
@@ -93,5 +93,5 @@
 	TASK = NULL;
 	avltree_create(&tasks_tree);
-	task_slab = slab_cache_create("task_t", sizeof(task_t), 0,
+	task_cache = slab_cache_create("task_t", sizeof(task_t), 0,
 	    tsk_constructor, tsk_destructor, 0);
 }
@@ -206,5 +206,5 @@
 task_t *task_create(as_t *as, const char *name)
 {
-	task_t *task = (task_t *) slab_alloc(task_slab, 0);
+	task_t *task = (task_t *) slab_alloc(task_cache, 0);
 	task_create_arch(task);
 	
@@ -295,5 +295,5 @@
 	as_release(task->as);
 	
-	slab_free(task_slab, task);
+	slab_free(task_cache, task);
 }
 
Index: kernel/generic/src/proc/thread.c
===================================================================
--- kernel/generic/src/proc/thread.c	(revision d9ec808b547e5990edc7280d9d8399acd2a5d86f)
+++ kernel/generic/src/proc/thread.c	(revision 82d515e917f3eeeff253e4aa30e493f5f632c950)
@@ -103,8 +103,8 @@
 static thread_id_t last_tid = 0;
 
-static slab_cache_t *thread_slab;
+static slab_cache_t *thread_cache;
 
 #ifdef CONFIG_FPU
-slab_cache_t *fpu_context_slab;
+slab_cache_t *fpu_context_cache;
 #endif
 
@@ -169,5 +169,5 @@
 	thread->saved_fpu_context = NULL;
 #else /* CONFIG_FPU_LAZY */
-	thread->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
+	thread->saved_fpu_context = slab_alloc(fpu_context_cache, kmflags);
 	if (!thread->saved_fpu_context)
 		return -1;
@@ -199,5 +199,5 @@
 #ifdef CONFIG_FPU
 		if (thread->saved_fpu_context)
-			slab_free(fpu_context_slab, thread->saved_fpu_context);
+			slab_free(fpu_context_cache, thread->saved_fpu_context);
 #endif
 		return -1;
@@ -225,5 +225,5 @@
 #ifdef CONFIG_FPU
 	if (thread->saved_fpu_context)
-		slab_free(fpu_context_slab, thread->saved_fpu_context);
+		slab_free(fpu_context_cache, thread->saved_fpu_context);
 #endif
 	
@@ -241,9 +241,9 @@
 	
 	atomic_set(&nrdy, 0);
-	thread_slab = slab_cache_create("thread_t", sizeof(thread_t), 0,
+	thread_cache = slab_cache_create("thread_t", sizeof(thread_t), 0,
 	    thr_constructor, thr_destructor, 0);
 	
 #ifdef CONFIG_FPU
-	fpu_context_slab = slab_cache_create("fpu_context_t",
+	fpu_context_cache = slab_cache_create("fpu_context_t",
 	    sizeof(fpu_context_t), FPU_CONTEXT_ALIGN, NULL, NULL, 0);
 #endif
@@ -341,5 +341,5 @@
     thread_flags_t flags, const char *name)
 {
-	thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0);
+	thread_t *thread = (thread_t *) slab_alloc(thread_cache, 0);
 	if (!thread)
 		return NULL;
@@ -457,5 +457,5 @@
 	 */
 	task_release(thread->task);
-	slab_free(thread_slab, thread);
+	slab_free(thread_cache, thread);
 }
 
@@ -974,5 +974,5 @@
 				 * We can safely deallocate it.
 				 */
-				slab_free(thread_slab, thread);
+				slab_free(thread_cache, thread);
 				free(kernel_uarg);
 				
Index: kernel/generic/src/sysinfo/sysinfo.c
===================================================================
--- kernel/generic/src/sysinfo/sysinfo.c	(revision d9ec808b547e5990edc7280d9d8399acd2a5d86f)
+++ kernel/generic/src/sysinfo/sysinfo.c	(revision 82d515e917f3eeeff253e4aa30e493f5f632c950)
@@ -53,5 +53,5 @@
 
 /** Sysinfo SLAB cache */
-static slab_cache_t *sysinfo_item_slab;
+static slab_cache_t *sysinfo_item_cache;
 
 /** Sysinfo lock */
@@ -98,5 +98,5 @@
 void sysinfo_init(void)
 {
-	sysinfo_item_slab = slab_cache_create("sysinfo_item_t",
+	sysinfo_item_cache = slab_cache_create("sysinfo_item_t",
 	    sizeof(sysinfo_item_t), 0, sysinfo_item_constructor,
 	    sysinfo_item_destructor, SLAB_CACHE_MAGDEFERRED);
@@ -204,5 +204,5 @@
 		
 		*psubtree =
-		    (sysinfo_item_t *) slab_alloc(sysinfo_item_slab, 0);
+		    (sysinfo_item_t *) slab_alloc(sysinfo_item_cache, 0);
 		assert(*psubtree);
 		
@@ -268,5 +268,5 @@
 			
 			sysinfo_item_t *item =
-			    (sysinfo_item_t *) slab_alloc(sysinfo_item_slab, 0);
+			    (sysinfo_item_t *) slab_alloc(sysinfo_item_cache, 0);
 			assert(item);
 			
