Index: kernel/arch/sparc64/include/arch/mm/sun4u/tsb.h
===================================================================
--- kernel/arch/sparc64/include/arch/mm/sun4u/tsb.h	(revision 7254df6b9c028277faef1784d448e4999d6330ab)
+++ kernel/arch/sparc64/include/arch/mm/sun4u/tsb.h	(revision 8f5e80be83eee36bd8401a84ab2b05b5235c1228)
@@ -43,8 +43,14 @@
  * in TLBs - only one TLB entry will do.
  */
-#define TSB_SIZE			2	/* when changing this, change
-						 * as.c as well */
-#define ITSB_ENTRY_COUNT		(512 * (1 << TSB_SIZE))
-#define DTSB_ENTRY_COUNT		(512 * (1 << TSB_SIZE))
+#define TSB_BASE_REG_SIZE	2	/* keep in sync with as.c */
+#define ITSB_ENTRY_COUNT	(512 * (1 << TSB_BASE_REG_SIZE))
+#define DTSB_ENTRY_COUNT	(512 * (1 << TSB_BASE_REG_SIZE))
+
+#define ITSB_ENTRY_MASK		(ITSB_ENTRY_COUNT - 1)
+#define DTSB_ENTRY_MASK		(DTSB_ENTRY_COUNT - 1)
+
+#define TSB_ENTRY_COUNT		(ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT)
+#define TSB_SIZE		(TSB_ENTRY_COUNT * sizeof(tsb_entry_t))
+#define TSB_FRAMES		SIZE2FRAMES(TSB_SIZE)
 
 #define TSB_TAG_TARGET_CONTEXT_SHIFT	48
Index: kernel/arch/sparc64/include/arch/mm/sun4v/tsb.h
===================================================================
--- kernel/arch/sparc64/include/arch/mm/sun4v/tsb.h	(revision 7254df6b9c028277faef1784d448e4999d6330ab)
+++ kernel/arch/sparc64/include/arch/mm/sun4v/tsb.h	(revision 8f5e80be83eee36bd8401a84ab2b05b5235c1228)
@@ -44,7 +44,8 @@
  * in TLBs - only one TLB entry will do.
  */
-#define TSB_SIZE			3	/* when changing this, change
-						 * as.c as well */
-#define TSB_ENTRY_COUNT			(512 * (1 << TSB_SIZE))
+#define TSB_ENTRY_COUNT			4096
+#define TSB_ENTRY_MASK			(TSB_ENTRY_COUNT - 1)
+#define TSB_SIZE			(TSB_ENTRY_COUNT * sizeof(tsb_entry_t))
+#define TSB_FRAMES			SIZE2FRAMES(TSB_SIZE)
 
 #ifndef __ASM__
Index: kernel/arch/sparc64/src/mm/sun4u/as.c
===================================================================
--- kernel/arch/sparc64/src/mm/sun4u/as.c	(revision 7254df6b9c028277faef1784d448e4999d6330ab)
+++ kernel/arch/sparc64/src/mm/sun4u/as.c	(revision 8f5e80be83eee36bd8401a84ab2b05b5235c1228)
@@ -63,17 +63,13 @@
 {
 #ifdef CONFIG_TSB
-	uintptr_t tsb_phys =
-	    frame_alloc(SIZE2FRAMES((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
-	    sizeof(tsb_entry_t)), flags, 0);
-	if (!tsb_phys)
+	uintptr_t tsb_base = frame_alloc(TSB_FRAMES, flags, TSB_SIZE - 1);
+	if (!tsb_base)
 		return -1;
-	
-	tsb_entry_t *tsb = (tsb_entry_t *) PA2KA(tsb_phys);
+
+	tsb_entry_t *tsb = (tsb_entry_t *) PA2KA(tsb_base);
+	memsetb(tsb, TSB_SIZE, 0);
 	
 	as->arch.itsb = tsb;
 	as->arch.dtsb = tsb + ITSB_ENTRY_COUNT;
-	
-	memsetb(as->arch.itsb, (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
-	    sizeof(tsb_entry_t), 0);
 #endif
 	
@@ -84,9 +80,7 @@
 {
 #ifdef CONFIG_TSB
-	size_t frames = SIZE2FRAMES((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
-	    sizeof(tsb_entry_t));
-	frame_free(KA2PA((uintptr_t) as->arch.itsb), frames);
-	
-	return frames;
+	frame_free(KA2PA((uintptr_t) as->arch.itsb), TSB_FRAMES);
+	
+	return TSB_FRAMES;
 #else
 	return 0;
@@ -136,9 +130,10 @@
 	uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
 	
-	ASSERT(as->arch.itsb && as->arch.dtsb);
+	ASSERT(as->arch.itsb);
+	ASSERT(as->arch.dtsb);
 	
 	uintptr_t tsb = (uintptr_t) as->arch.itsb;
 	
-	if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
+	if (!overlaps(tsb, TSB_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
 		/*
 		 * TSBs were allocated from memory not covered
@@ -155,14 +150,14 @@
 	 *
 	 */
-	tsb_base_reg_t tsb_base;
-	
-	tsb_base.value = 0;
-	tsb_base.size = TSB_SIZE;
-	tsb_base.split = 0;
-	
-	tsb_base.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH;
-	itsb_base_write(tsb_base.value);
-	tsb_base.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH;
-	dtsb_base_write(tsb_base.value);
+	tsb_base_reg_t tsb_base_reg;
+	
+	tsb_base_reg.value = 0;
+	tsb_base_reg.size = TSB_BASE_REG_SIZE;
+	tsb_base_reg.split = 0;
+	
+	tsb_base_reg.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH;
+	itsb_base_write(tsb_base_reg.value);
+	tsb_base_reg.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH;
+	dtsb_base_write(tsb_base_reg.value);
 	
 #if defined (US3)
@@ -207,9 +202,10 @@
 	uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
 	
-	ASSERT(as->arch.itsb && as->arch.dtsb);
+	ASSERT(as->arch.itsb);
+	ASSERT(as->arch.dtsb);
 	
 	uintptr_t tsb = (uintptr_t) as->arch.itsb;
 	
-	if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
+	if (!overlaps(tsb, TSB_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
 		/*
 		 * TSBs were allocated from memory not covered
Index: kernel/arch/sparc64/src/mm/sun4u/tsb.c
===================================================================
--- kernel/arch/sparc64/src/mm/sun4u/tsb.c	(revision 7254df6b9c028277faef1784d448e4999d6330ab)
+++ kernel/arch/sparc64/src/mm/sun4u/tsb.c	(revision 8f5e80be83eee36bd8401a84ab2b05b5235c1228)
@@ -42,6 +42,4 @@
 #include <debug.h>
 
-#define TSB_INDEX_MASK	((1 << (21 + 1 + TSB_SIZE - MMU_PAGE_WIDTH)) - 1)
-
 /** Invalidate portion of TSB.
  *
@@ -60,8 +58,8 @@
 	size_t cnt;
 	
-	ASSERT(as->arch.itsb && as->arch.dtsb);
+	ASSERT(as->arch.itsb);
+	ASSERT(as->arch.dtsb);
 	
-	i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
-	ASSERT(i0 < ITSB_ENTRY_COUNT && i0 < DTSB_ENTRY_COUNT);
+	i0 = (page >> MMU_PAGE_WIDTH) & ITSB_ENTRY_MASK;
 
 	if (pages == (size_t) -1 || (pages * 2) > ITSB_ENTRY_COUNT)
@@ -71,8 +69,6 @@
 	
 	for (i = 0; i < cnt; i++) {
-		as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT - 1)].tag.invalid =
-		    true;
-		as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT - 1)].tag.invalid =
-		    true;
+		as->arch.itsb[(i0 + i) & ITSB_ENTRY_MASK].tag.invalid = true;
+		as->arch.dtsb[(i0 + i) & DTSB_ENTRY_MASK].tag.invalid = true;
 	}
 }
@@ -86,5 +82,5 @@
 {
 	as_t *as;
-	tsb_entry_t *tsb;
+	tsb_entry_t *tte;
 	size_t entry;
 
@@ -92,7 +88,6 @@
 	
 	as = t->as;
-	entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK; 
-	ASSERT(entry < ITSB_ENTRY_COUNT);
-	tsb = &as->arch.itsb[entry];
+	entry = ((t->page >> MMU_PAGE_WIDTH) + index) & ITSB_ENTRY_MASK;
+	tte = &as->arch.itsb[entry];
 
 	/*
@@ -102,5 +97,5 @@
 	 */
 
-	tsb->tag.invalid = true;	/* invalidate the entry
+	tte->tag.invalid = true;	/* invalidate the entry
 					 * (tag target has this
 					 * set to 0) */
@@ -108,17 +103,17 @@
 	write_barrier();
 
-	tsb->tag.context = as->asid;
+	tte->tag.context = as->asid;
 	/* the shift is bigger than PAGE_WIDTH, do not bother with index  */
-	tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
-	tsb->data.value = 0;
-	tsb->data.size = PAGESIZE_8K;
-	tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
-	tsb->data.cp = t->c;	/* cp as cache in phys.-idxed, c as cacheable */
-	tsb->data.p = t->k;	/* p as privileged, k as kernel */
-	tsb->data.v = t->p;	/* v as valid, p as present */
+	tte->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
+	tte->data.value = 0;
+	tte->data.size = PAGESIZE_8K;
+	tte->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
+	tte->data.cp = t->c;	/* cp as cache in phys.-idxed, c as cacheable */
+	tte->data.p = t->k;	/* p as privileged, k as kernel */
+	tte->data.v = t->p;	/* v as valid, p as present */
 	
 	write_barrier();
 	
-	tsb->tag.invalid = false;	/* mark the entry as valid */
+	tte->tag.invalid = false;	/* mark the entry as valid */
 }
 
@@ -132,5 +127,5 @@
 {
 	as_t *as;
-	tsb_entry_t *tsb;
+	tsb_entry_t *tte;
 	size_t entry;
 	
@@ -138,7 +133,6 @@
 
 	as = t->as;
-	entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
-	ASSERT(entry < DTSB_ENTRY_COUNT);
-	tsb = &as->arch.dtsb[entry];
+	entry = ((t->page >> MMU_PAGE_WIDTH) + index) & DTSB_ENTRY_MASK;
+	tte = &as->arch.dtsb[entry];
 
 	/*
@@ -148,5 +142,5 @@
 	 */
 
-	tsb->tag.invalid = true;	/* invalidate the entry
+	tte->tag.invalid = true;	/* invalidate the entry
 					 * (tag target has this
 					 * set to 0) */
@@ -154,21 +148,21 @@
 	write_barrier();
 
-	tsb->tag.context = as->asid;
+	tte->tag.context = as->asid;
 	/* the shift is bigger than PAGE_WIDTH, do not bother with index */
-	tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
-	tsb->data.value = 0;
-	tsb->data.size = PAGESIZE_8K;
-	tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
-	tsb->data.cp = t->c;
+	tte->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
+	tte->data.value = 0;
+	tte->data.size = PAGESIZE_8K;
+	tte->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
+	tte->data.cp = t->c;
 #ifdef CONFIG_VIRT_IDX_DCACHE
-	tsb->data.cv = t->c;
+	tte->data.cv = t->c;
 #endif /* CONFIG_VIRT_IDX_DCACHE */
-	tsb->data.p = t->k;		/* p as privileged */
-	tsb->data.w = ro ? false : t->w;
-	tsb->data.v = t->p;
+	tte->data.p = t->k;		/* p as privileged */
+	tte->data.w = ro ? false : t->w;
+	tte->data.v = t->p;
 	
 	write_barrier();
 	
-	tsb->tag.invalid = false;	/* mark the entry as valid */
+	tte->tag.invalid = false;	/* mark the entry as valid */
 }
 
Index: kernel/arch/sparc64/src/mm/sun4v/as.c
===================================================================
--- kernel/arch/sparc64/src/mm/sun4v/as.c	(revision 7254df6b9c028277faef1784d448e4999d6330ab)
+++ kernel/arch/sparc64/src/mm/sun4v/as.c	(revision 8f5e80be83eee36bd8401a84ab2b05b5235c1228)
@@ -66,20 +66,19 @@
 {
 #ifdef CONFIG_TSB
-	uintptr_t tsb =
-	    frame_alloc(SIZE2FRAMES(TSB_ENTRY_COUNT * sizeof(tsb_entry_t)),
-	    flags, 0);
-	if (!tsb)
+	uintptr_t tsb_base = frame_alloc(TSB_FRAMES, flags, TSB_SIZE - 1);
+	if (!tsb_base)
 		return -1;
-	
+
+	tsb_entry_t *tsb = (tsb_entry_t *) PA2KA(tsb_base);
+
 	as->arch.tsb_description.page_size = PAGESIZE_8K;
 	as->arch.tsb_description.associativity = 1;
 	as->arch.tsb_description.num_ttes = TSB_ENTRY_COUNT;
 	as->arch.tsb_description.pgsize_mask = 1 << PAGESIZE_8K;
-	as->arch.tsb_description.tsb_base = tsb;
+	as->arch.tsb_description.tsb_base = tsb_base;
 	as->arch.tsb_description.reserved = 0;
 	as->arch.tsb_description.context = 0;
 	
-	memsetb((void *) PA2KA(as->arch.tsb_description.tsb_base),
-		TSB_ENTRY_COUNT * sizeof(tsb_entry_t), 0);
+	memsetb(tsb, TSB_SIZE, 0);
 #endif
 	
@@ -90,8 +89,7 @@
 {
 #ifdef CONFIG_TSB
-	size_t frames = SIZE2FRAMES(TSB_ENTRY_COUNT * sizeof(tsb_entry_t));
-	frame_free(as->arch.tsb_description.tsb_base, frames);
+	frame_free(as->arch.tsb_description.tsb_base, TSB_FRAMES);
 	
-	return frames;
+	return TSB_FRAMES;
 #else
 	return 0;
@@ -126,5 +124,5 @@
 	uintptr_t tsb = PA2KA(as->arch.tsb_description.tsb_base);
 	
-	if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
+	if (!overlaps(tsb, TSB_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
 		/*
 		 * TSBs were allocated from memory not covered
@@ -137,5 +135,5 @@
 	}
 	
-	__hypercall_fast2(MMU_TSB_CTXNON0, 1, KA2PA(&(as->arch.tsb_description)));
+	__hypercall_fast2(MMU_TSB_CTXNON0, 1, KA2PA(&as->arch.tsb_description));
 #endif
 }
@@ -166,5 +164,5 @@
 	uintptr_t tsb = PA2KA(as->arch.tsb_description.tsb_base);
 	
-	if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
+	if (!overlaps(tsb, TSB_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
 		/*
 		 * TSBs were allocated from memory not covered
Index: kernel/arch/sparc64/src/mm/sun4v/tsb.c
===================================================================
--- kernel/arch/sparc64/src/mm/sun4v/tsb.c	(revision 7254df6b9c028277faef1784d448e4999d6330ab)
+++ kernel/arch/sparc64/src/mm/sun4v/tsb.c	(revision 8f5e80be83eee36bd8401a84ab2b05b5235c1228)
@@ -44,6 +44,4 @@
 #include <debug.h>
 
-#define TSB_INDEX_MASK	((1 << (21 + 1 + TSB_SIZE - MMU_PAGE_WIDTH)) - 1)
-
 /** Invalidate portion of TSB.
  *
@@ -58,4 +56,5 @@
 void tsb_invalidate(as_t *as, uintptr_t page, size_t pages)
 {
+	tsb_entry_t *tsb;
 	size_t i0, i;
 	size_t cnt;
@@ -63,16 +62,14 @@
 	ASSERT(as->arch.tsb_description.tsb_base);
 	
-	i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
-	ASSERT(i0 < TSB_ENTRY_COUNT);
+	i0 = (page >> MMU_PAGE_WIDTH) & TSB_ENTRY_MASK;
 
-	if (pages == (size_t) - 1 || (pages) > TSB_ENTRY_COUNT)
+	if (pages == (size_t) -1 || pages > TSB_ENTRY_COUNT)
 		cnt = TSB_ENTRY_COUNT;
 	else
 		cnt = pages;
 	
-	for (i = 0; i < cnt; i++) {
-		((tsb_entry_t *) PA2KA(as->arch.tsb_description.tsb_base))[
-			(i0 + i) & (TSB_ENTRY_COUNT - 1)].data.v = false;
-	}
+	tsb = (tsb_entry_t *) PA2KA(as->arch.tsb_description.tsb_base);
+	for (i = 0; i < cnt; i++)
+		tsb[(i0 + i) & TSB_ENTRY_MASK].data.v = false;
 }
 
@@ -85,10 +82,12 @@
 	as_t *as;
 	tsb_entry_t *tsb;
-	size_t entry;
+	tsb_entry_t *tte;
+	size_t index;
 
 	as = t->as;
-	entry = (t->page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK; 
-	ASSERT(entry < TSB_ENTRY_COUNT);
-	tsb = &((tsb_entry_t *) PA2KA(as->arch.tsb_description.tsb_base))[entry];
+	index = (t->page >> MMU_PAGE_WIDTH) & TSB_ENTRY_MASK;
+	
+	tsb = (tsb_entry_t *) PA2KA(as->arch.tsb_description.tsb_base);
+	tte = &tsb[index];
 
 	/*
@@ -98,25 +97,25 @@
 	 */
 
-	tsb->data.v = false;
+	tte->data.v = false;
 
 	write_barrier();
 
-	tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
+	tte->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
 
-	tsb->data.value = 0;
-	tsb->data.nfo = false;
-	tsb->data.ra = t->frame >> MMU_FRAME_WIDTH;
-	tsb->data.ie = false;
-	tsb->data.e = false;
-	tsb->data.cp = t->c;	/* cp as cache in phys.-idxed, c as cacheable */
-	tsb->data.cv = false;
-	tsb->data.p = t->k;	/* p as privileged, k as kernel */
-	tsb->data.x = true;
-	tsb->data.w = false;
-	tsb->data.size = PAGESIZE_8K;
+	tte->data.value = 0;
+	tte->data.nfo = false;
+	tte->data.ra = t->frame >> MMU_FRAME_WIDTH;
+	tte->data.ie = false;
+	tte->data.e = false;
+	tte->data.cp = t->c;	/* cp as cache in phys.-idxed, c as cacheable */
+	tte->data.cv = false;
+	tte->data.p = t->k;	/* p as privileged, k as kernel */
+	tte->data.x = true;
+	tte->data.w = false;
+	tte->data.size = PAGESIZE_8K;
 	
 	write_barrier();
 	
-	tsb->data.v = t->p;	/* v as valid, p as present */
+	tte->data.v = t->p;	/* v as valid, p as present */
 }
 
@@ -130,10 +129,11 @@
 	as_t *as;
 	tsb_entry_t *tsb;
-	size_t entry;
+	tsb_entry_t *tte;
+	size_t index;
 
 	as = t->as;
-	entry = (t->page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK; 
-	ASSERT(entry < TSB_ENTRY_COUNT);
-	tsb = &((tsb_entry_t *) PA2KA(as->arch.tsb_description.tsb_base))[entry];
+	index = (t->page >> MMU_PAGE_WIDTH) & TSB_ENTRY_MASK;
+	tsb = (tsb_entry_t *) PA2KA(as->arch.tsb_description.tsb_base);
+	tte = &tsb[index];
 
 	/*
@@ -143,27 +143,27 @@
 	 */
 
-	tsb->data.v = false;
+	tte->data.v = false;
 
 	write_barrier();
 
-	tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
+	tte->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
 
-	tsb->data.value = 0;
-	tsb->data.nfo = false;
-	tsb->data.ra = t->frame >> MMU_FRAME_WIDTH;
-	tsb->data.ie = false;
-	tsb->data.e = false;
-	tsb->data.cp = t->c;	/* cp as cache in phys.-idxed, c as cacheable */
+	tte->data.value = 0;
+	tte->data.nfo = false;
+	tte->data.ra = t->frame >> MMU_FRAME_WIDTH;
+	tte->data.ie = false;
+	tte->data.e = false;
+	tte->data.cp = t->c;	/* cp as cache in phys.-idxed, c as cacheable */
 #ifdef CONFIG_VIRT_IDX_DCACHE
-	tsb->data.cv = t->c;
+	tte->data.cv = t->c;
 #endif /* CONFIG_VIRT_IDX_DCACHE */
-	tsb->data.p = t->k;	/* p as privileged, k as kernel */
-	tsb->data.x = true;
-	tsb->data.w = ro ? false : t->w;
-	tsb->data.size = PAGESIZE_8K;
+	tte->data.p = t->k;	/* p as privileged, k as kernel */
+	tte->data.x = true;
+	tte->data.w = ro ? false : t->w;
+	tte->data.size = PAGESIZE_8K;
 	
 	write_barrier();
 	
-	tsb->data.v = t->p;	/* v as valid, p as present */
+	tte->data.v = t->p;	/* v as valid, p as present */
 }
 
