Index: kernel/arch/sparc64/include/arch/mm/sun4v/tlb.h
===================================================================
--- kernel/arch/sparc64/include/arch/mm/sun4v/tlb.h	(revision d70ebffee4c682353339148a2a7591029a0ed40c)
+++ kernel/arch/sparc64/include/arch/mm/sun4v/tlb.h	(revision 21373712dcb166726b19fe28f109a3aa73a8e68e)
@@ -141,7 +141,7 @@
 }
 
-extern void fast_instruction_access_mmu_miss(sysarg_t, istate_t *);
-extern void fast_data_access_mmu_miss(sysarg_t, istate_t *);
-extern void fast_data_access_protection(sysarg_t, istate_t *);
+extern void fast_instruction_access_mmu_miss(unsigned int, istate_t *);
+extern void fast_data_access_mmu_miss(unsigned int, istate_t *);
+extern void fast_data_access_protection(unsigned int, istate_t *);
 
 extern void dtlb_insert_mapping(uintptr_t, uintptr_t, int, bool, bool);
Index: kernel/arch/sparc64/include/arch/trap/sun4v/interrupt.h
===================================================================
--- kernel/arch/sparc64/include/arch/trap/sun4v/interrupt.h	(revision d70ebffee4c682353339148a2a7591029a0ed40c)
+++ kernel/arch/sparc64/include/arch/trap/sun4v/interrupt.h	(revision 21373712dcb166726b19fe28f109a3aa73a8e68e)
@@ -40,6 +40,8 @@
 #ifndef __ASM__
 
+#include <arch/istate_struct.h>
+
 extern void sun4v_ipi_init(void);
-extern void cpu_mondo(void);
+extern void cpu_mondo(unsigned int, istate_t *);
 
 #endif
Index: kernel/arch/sparc64/include/arch/trap/sun4v/mmu.h
===================================================================
--- kernel/arch/sparc64/include/arch/trap/sun4v/mmu.h	(revision d70ebffee4c682353339148a2a7591029a0ed40c)
+++ kernel/arch/sparc64/include/arch/trap/sun4v/mmu.h	(revision 21373712dcb166726b19fe28f109a3aa73a8e68e)
@@ -73,5 +73,7 @@
 
 .macro FAST_INSTRUCTION_ACCESS_MMU_MISS_HANDLER
-	PREEMPTIBLE_HANDLER fast_instruction_access_mmu_miss
+	mov TT_FAST_INSTRUCTION_ACCESS_MMU_MISS, %g2
+	clr %g5		! XXX
+	PREEMPTIBLE_HANDLER exc_dispatch
 .endm
 
@@ -123,7 +125,7 @@
 	 * mapped. In such a case, this handler will be called from TL = 1.
 	 * We handle the situation by pretending that the MMU miss occurred
-	 * on TL = 0. Once the MMU miss trap is services, the instruction which
+	 * on TL = 0. Once the MMU miss trap is serviced, the instruction which
 	 * caused the spill/fill trap is restarted, the spill/fill trap occurs,
-	 * but this time its handler accesse memory which IS mapped.
+	 * but this time its handler accesses memory which is mapped.
 	 */
 	.if (\tl > 0)
@@ -131,16 +133,18 @@
 	.endif
 
+	mov TT_FAST_DATA_ACCESS_MMU_MISS, %g2
+
 	/*
-	 * Save the faulting virtual page and faulting context to the %g2
-	 * register. The most significant 51 bits of the %g2 register will
+	 * Save the faulting virtual page and faulting context to the %g5
+	 * register. The most significant 51 bits of the %g5 register will
 	 * contain the virtual address which caused the fault truncated to the
-	 * page boundary. The least significant 13 bits of the %g2 register
+	 * page boundary. The least significant 13 bits of the %g5 register
 	 * will contain the number of the context in which the fault occurred.
-	 * The value of the %g2 register will be passed as a parameter to the
-	 * higher level service routine.
+	 * The value of the %g5 register will be stored in the istate structure
+	 * for inspection by the higher level service routine.
 	 */
-	or %g1, %g3, %g2
+	or %g1, %g3, %g5
 
-	PREEMPTIBLE_HANDLER fast_data_access_mmu_miss
+	PREEMPTIBLE_HANDLER exc_dispatch
 .endm
 
@@ -170,8 +174,10 @@
 	sllx %g1, TTE_DATA_TADDR_OFFSET, %g1
 
+	mov TT_FAST_DATA_ACCESS_PROTECTION, %g2
+
 	/* the same as for FAST_DATA_ACCESS_MMU_MISS_HANDLER */
-	or %g1, %g3, %g2
+	or %g1, %g3, %g5
 
-	PREEMPTIBLE_HANDLER fast_data_access_protection
+	PREEMPTIBLE_HANDLER exc_dispatch
 .endm
 #endif /* __ASM__ */
