Index: kernel/arch/sparc64/Makefile.inc
===================================================================
--- kernel/arch/sparc64/Makefile.inc	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ kernel/arch/sparc64/Makefile.inc	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -46,15 +46,27 @@
 ifeq ($(PROCESSOR),us)
 	DEFS += -DUS
+	DEFS += -DSUN4U
+	USARCH = sun4u
 endif
 
 ifeq ($(PROCESSOR),us3)
 	DEFS += -DUS3
+	DEFS += -DSUN4U
+	USARCH = sun4u
+endif
+
+ifeq ($(PROCESSOR),sun4v)
+	DEFS += -DSUN4V
+	USARCH = sun4v
+	# FIXME (MH): temporary hack -- also define US on sun4v, because some
+	DEFS += -DUS
 endif
 
 ARCH_SOURCES = \
-	arch/$(KARCH)/src/cpu/cpu.c \
+	arch/$(KARCH)/src/cpu/$(USARCH)/cpu.c \
 	arch/$(KARCH)/src/debug/stacktrace.c \
 	arch/$(KARCH)/src/debug/stacktrace_asm.S \
 	arch/$(KARCH)/src/asm.S \
+	arch/$(KARCH)/src/$(USARCH)/asm.S \
 	arch/$(KARCH)/src/panic.S \
 	arch/$(KARCH)/src/console.c \
@@ -62,15 +74,15 @@
 	arch/$(KARCH)/src/fpu_context.c \
 	arch/$(KARCH)/src/dummy.s \
-	arch/$(KARCH)/src/mm/as.c \
+	arch/$(KARCH)/src/mm/$(USARCH)/as.c \
 	arch/$(KARCH)/src/mm/cache.S \
-	arch/$(KARCH)/src/mm/frame.c \
+	arch/$(KARCH)/src/mm/$(USARCH)/frame.c \
 	arch/$(KARCH)/src/mm/page.c \
-	arch/$(KARCH)/src/mm/tlb.c \
-	arch/$(KARCH)/src/sparc64.c \
-	arch/$(KARCH)/src/start.S \
-	arch/$(KARCH)/src/proc/scheduler.c \
+	arch/$(KARCH)/src/mm/$(USARCH)/tlb.c \
+	arch/$(KARCH)/src/$(USARCH)/sparc64.c \
+	arch/$(KARCH)/src/$(USARCH)/start.S \
+	arch/$(KARCH)/src/proc/$(USARCH)/scheduler.c \
 	arch/$(KARCH)/src/proc/thread.c \
-	arch/$(KARCH)/src/trap/mmu.S \
-	arch/$(KARCH)/src/trap/trap_table.S \
+	arch/$(KARCH)/src/trap/$(USARCH)/mmu.S \
+	arch/$(KARCH)/src/trap/$(USARCH)/trap_table.S \
 	arch/$(KARCH)/src/trap/trap.c \
 	arch/$(KARCH)/src/trap/exception.c \
@@ -82,4 +94,10 @@
 	arch/$(KARCH)/src/drivers/pci.c \
 	arch/$(KARCH)/src/drivers/fhc.c
+
+ifeq ($(USARCH),sun4v)
+	ARCH_SOURCES += \
+		arch/$(KARCH)/src/drivers/niagara.c \
+		arch/$(KARCH)/src/sun4v/md.c
+endif
 
 ifeq ($(CONFIG_FB),y)
@@ -96,4 +114,4 @@
 ifeq ($(CONFIG_TSB),y)
 	ARCH_SOURCES += \
-		arch/$(KARCH)/src/mm/tsb.c
+		arch/$(KARCH)/src/mm/$(USARCH)/tsb.c
 endif
Index: kernel/arch/sparc64/include/arch.h
===================================================================
--- kernel/arch/sparc64/include/arch.h	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ kernel/arch/sparc64/include/arch.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -38,9 +38,12 @@
 #define KERN_sparc64_ARCH_H_
 
+#if defined (SUN4U)
+#include <arch/sun4u/arch.h>
+#elif defined (SUN4V)
+#include <arch/sun4v/arch.h>
+#endif
+
 #define ASI_AIUP		0x10	/** Access to primary context with user privileges. */
 #define ASI_AIUS		0x11	/** Access to secondary context with user privileges. */
-#define ASI_NUCLEUS_QUAD_LDD	0x24	/** ASI for 16-byte atomic loads. */
-#define ASI_DCACHE_TAG		0x47	/** ASI D-Cache Tag. */
-#define ASI_ICBUS_CONFIG		0x4a	/** ASI of the UPA_CONFIG/FIREPLANE_CONFIG register. */
 
 #define NWINDOWS		8	/** Number of register window sets. */
@@ -52,4 +55,5 @@
 #endif /* __ASM__ */
 
+
 #endif
 
Index: kernel/arch/sparc64/include/cpu.h
===================================================================
--- kernel/arch/sparc64/include/cpu.h	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ kernel/arch/sparc64/include/cpu.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -64,31 +64,11 @@
 #endif
 
-typedef struct {
-	uint32_t mid;			/**< Processor ID as read from
-					     UPA_CONFIG/FIREPLANE_CONFIG. */
-	ver_reg_t ver;
-	uint32_t clock_frequency;	/**< Processor frequency in Hz. */
-	uint64_t next_tick_cmpr;	/**< Next clock interrupt should be
-					     generated when the TICK register
-					     matches this value. */
-} cpu_arch_t;
 
+#if defined (SUN4U)
+#include <arch/sun4u/cpu.h>
+#elif defined (SUN4V)
+#include <arch/sun4v/cpu.h>
+#endif
 
-/**
- * Reads the module ID (agent ID/CPUID) of the current CPU.
- */
-static inline uint32_t read_mid(void)
-{
-	uint64_t icbus_config = asi_u64_read(ASI_ICBUS_CONFIG, 0);
-	icbus_config = icbus_config >> ICBUS_CONFIG_MID_SHIFT;
-#if defined (US)
-	return icbus_config & 0x1f;
-#elif defined (US3)
-	if (((ver_reg_t) ver_read()).impl == IMPL_ULTRASPARCIII_I)
-		return icbus_config & 0x1f;
-	else
-		return icbus_config & 0x3ff;
-#endif
-}
 
 #endif	
Index: kernel/arch/sparc64/include/drivers/niagara.h
===================================================================
--- kernel/arch/sparc64/include/drivers/niagara.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/drivers/niagara.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2008 Pavel Rimsky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64	
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_sparc64_NIAGARA_H_
+#define KERN_sparc64_NIAGARA_H_
+
+#include <proc/thread.h>
+#include <console/chardev.h>
+
+typedef struct {
+	thread_t *thread;
+	indev_t *srlnin;
+} niagara_instance_t;
+
+char niagara_getc(void);
+void niagara_grab(void);
+void niagara_release(void);
+niagara_instance_t *niagarain_init(void);
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/drivers/tick.h
===================================================================
--- kernel/arch/sparc64/include/drivers/tick.h	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ kernel/arch/sparc64/include/drivers/tick.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -36,8 +36,20 @@
 #define KERN_sparc64_TICK_H_
 
+#include <arch/asm.h>
 #include <arch/interrupt.h>
+
+/* mask of the "counter" field of the Tick register */
+#define TICK_COUNTER_MASK	(~(1ull << 63))
 
 extern void tick_init(void);
 extern void tick_interrupt(int n, istate_t *istate);
+
+/**
+ * Reads the Tick register counter.
+ */
+static inline uint64_t tick_counter_read(void)
+{
+	return TICK_COUNTER_MASK & tick_read();
+}
 
 #endif
Index: kernel/arch/sparc64/include/mm/frame.h
===================================================================
--- kernel/arch/sparc64/include/mm/frame.h	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ kernel/arch/sparc64/include/mm/frame.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -36,48 +36,8 @@
 #define KERN_sparc64_FRAME_H_
 
-/*
- * Page size supported by the MMU.
- * For 8K there is the nasty illegal virtual aliasing problem.
- * Therefore, the kernel uses 8K only internally on the TLB and TSB levels.
- */
-#define MMU_FRAME_WIDTH		13	/* 8K */
-#define MMU_FRAME_SIZE		(1 << MMU_FRAME_WIDTH)
-
-/*
- * Page size exported to the generic memory management subsystems.
- * This page size is not directly supported by the MMU, but we can emulate
- * each 16K page with a pair of adjacent 8K pages.
- */
-#define FRAME_WIDTH		14	/* 16K */
-#define FRAME_SIZE		(1 << FRAME_WIDTH)
-
-#ifdef KERNEL
-#ifndef __ASM__
-
-#include <arch/types.h>
-
-union frame_address {
-	uintptr_t address;
-	struct {
-#if defined (US)
-		unsigned : 23;
-		uint64_t pfn : 28;		/**< Physical Frame Number. */
-#elif defined (US3)
-		unsigned : 21;
-		uint64_t pfn : 30;		/**< Physical Frame Number. */
-#endif
-		unsigned offset : 13;		/**< Offset. */
-	} __attribute__ ((packed));
-};
-
-typedef union frame_address frame_address_t;
-
-extern uintptr_t last_frame;
-extern uintptr_t end_of_identity;
-
-extern void frame_arch_init(void);
-#define physmem_print()
-
-#endif
+#if defined (SUN4U)
+#include <arch/mm/sun4u/frame.h>
+#elif defined (SUN4V)
+#include <arch/mm/sun4v/frame.h>
 #endif
 
Index: kernel/arch/sparc64/include/mm/mmu.h
===================================================================
--- kernel/arch/sparc64/include/mm/mmu.h	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ kernel/arch/sparc64/include/mm/mmu.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -36,85 +36,10 @@
 #define KERN_sparc64_MMU_H_
 
-#if defined(US)
-/* LSU Control Register ASI. */
-#define ASI_LSU_CONTROL_REG		0x45	/**< Load/Store Unit Control Register. */
+#if defined (SUN4U)
+#include <arch/mm/sun4u/mmu.h>
+#elif defined (SUN4V)
+#include <arch/mm/sun4v/mmu.h>
 #endif
 
-/* I-MMU ASIs. */
-#define ASI_IMMU			0x50
-#define ASI_IMMU_TSB_8KB_PTR_REG	0x51	
-#define ASI_IMMU_TSB_64KB_PTR_REG	0x52
-#define ASI_ITLB_DATA_IN_REG		0x54
-#define ASI_ITLB_DATA_ACCESS_REG	0x55
-#define ASI_ITLB_TAG_READ_REG		0x56
-#define ASI_IMMU_DEMAP			0x57
-
-/* Virtual Addresses within ASI_IMMU. */
-#define VA_IMMU_TSB_TAG_TARGET		0x0	/**< IMMU TSB tag target register. */
-#define VA_IMMU_SFSR			0x18	/**< IMMU sync fault status register. */
-#define VA_IMMU_TSB_BASE		0x28	/**< IMMU TSB base register. */
-#define VA_IMMU_TAG_ACCESS		0x30	/**< IMMU TLB tag access register. */
-#if defined (US3)
-#define VA_IMMU_PRIMARY_EXTENSION	0x48	/**< IMMU TSB primary extension register */
-#define VA_IMMU_NUCLEUS_EXTENSION	0x58	/**< IMMU TSB nucleus extension register */
-#endif
-
-
-/* D-MMU ASIs. */
-#define ASI_DMMU			0x58
-#define ASI_DMMU_TSB_8KB_PTR_REG	0x59	
-#define ASI_DMMU_TSB_64KB_PTR_REG	0x5a
-#define ASI_DMMU_TSB_DIRECT_PTR_REG	0x5b
-#define ASI_DTLB_DATA_IN_REG		0x5c
-#define ASI_DTLB_DATA_ACCESS_REG	0x5d
-#define ASI_DTLB_TAG_READ_REG		0x5e
-#define ASI_DMMU_DEMAP			0x5f
-
-/* Virtual Addresses within ASI_DMMU. */
-#define VA_DMMU_TSB_TAG_TARGET		0x0	/**< DMMU TSB tag target register. */
-#define VA_PRIMARY_CONTEXT_REG		0x8	/**< DMMU primary context register. */
-#define VA_SECONDARY_CONTEXT_REG	0x10	/**< DMMU secondary context register. */
-#define VA_DMMU_SFSR			0x18	/**< DMMU sync fault status register. */
-#define VA_DMMU_SFAR			0x20	/**< DMMU sync fault address register. */
-#define VA_DMMU_TSB_BASE		0x28	/**< DMMU TSB base register. */
-#define VA_DMMU_TAG_ACCESS		0x30	/**< DMMU TLB tag access register. */
-#define VA_DMMU_VA_WATCHPOINT_REG	0x38	/**< DMMU VA data watchpoint register. */
-#define VA_DMMU_PA_WATCHPOINT_REG	0x40	/**< DMMU PA data watchpoint register. */
-#if defined (US3)
-#define VA_DMMU_PRIMARY_EXTENSION	0x48	/**< DMMU TSB primary extension register */
-#define VA_DMMU_SECONDARY_EXTENSION	0x50	/**< DMMU TSB secondary extension register */
-#define VA_DMMU_NUCLEUS_EXTENSION	0x58	/**< DMMU TSB nucleus extension register */
-#endif
-
-#ifndef __ASM__
-
-#include <arch/asm.h>
-#include <arch/barrier.h>
-#include <arch/types.h>
-
-#if defined(US)
-/** LSU Control Register. */
-typedef union {
-	uint64_t value;
-	struct {
-		unsigned : 23;
-		unsigned pm : 8;
-		unsigned vm : 8;
-		unsigned pr : 1;
-		unsigned pw : 1;
-		unsigned vr : 1;
-		unsigned vw : 1;
-		unsigned : 1;
-		unsigned fm : 16;	
-		unsigned dm : 1;	/**< D-MMU enable. */
-		unsigned im : 1;	/**< I-MMU enable. */
-		unsigned dc : 1;	/**< D-Cache enable. */
-		unsigned ic : 1;	/**< I-Cache enable. */
-		
-	} __attribute__ ((packed));
-} lsu_cr_reg_t;
-#endif /* US */
-
-#endif /* !def __ASM__ */
 
 #endif
Index: kernel/arch/sparc64/include/mm/pagesize.h
===================================================================
--- kernel/arch/sparc64/include/mm/pagesize.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/mm/pagesize.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * Copyright (c) 2008 Pavel Rimsky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64mm	
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_sparc64_PAGESIZE_H_
+#define KERN_sparc64_PAGESIZE_H_
+
+/** Page sizes. */
+#define PAGESIZE_8K	0
+#define PAGESIZE_64K	1
+#define PAGESIZE_512K	2
+#define PAGESIZE_4M	3
+
+#endif
Index: kernel/arch/sparc64/include/mm/sun4u/frame.h
===================================================================
--- kernel/arch/sparc64/include/mm/sun4u/frame.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/mm/sun4u/frame.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64mm	
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_sparc64_SUN4U_FRAME_H_
+#define KERN_sparc64_SUN4U_FRAME_H_
+
+/*
+ * Page size supported by the MMU.
+ * For 8K there is the nasty illegal virtual aliasing problem.
+ * Therefore, the kernel uses 8K only internally on the TLB and TSB levels.
+ */
+#define MMU_FRAME_WIDTH		13	/* 8K */
+#define MMU_FRAME_SIZE		(1 << MMU_FRAME_WIDTH)
+
+/*
+ * Page size exported to the generic memory management subsystems.
+ * This page size is not directly supported by the MMU, but we can emulate
+ * each 16K page with a pair of adjacent 8K pages.
+ */
+#define FRAME_WIDTH		14	/* 16K */
+#define FRAME_SIZE		(1 << FRAME_WIDTH)
+
+#ifdef KERNEL
+#ifndef __ASM__
+
+#include <arch/types.h>
+
+union frame_address {
+	uintptr_t address;
+	struct {
+#if defined (US)
+		unsigned : 23;
+		uint64_t pfn : 28;		/**< Physical Frame Number. */
+#elif defined (US3)
+		unsigned : 21;
+		uint64_t pfn : 30;		/**< Physical Frame Number. */
+#endif
+		unsigned offset : 13;		/**< Offset. */
+	} __attribute__ ((packed));
+};
+
+typedef union frame_address frame_address_t;
+
+extern uintptr_t last_frame;
+extern uintptr_t end_of_identity;
+
+extern void frame_arch_init(void);
+#define physmem_print()
+
+#endif
+#endif
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/mm/sun4u/mmu.h
===================================================================
--- kernel/arch/sparc64/include/mm/sun4u/mmu.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/mm/sun4u/mmu.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64mm	
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_sparc64_SUN4U_MMU_H_
+#define KERN_sparc64_SUN4U_MMU_H_
+
+#if defined(US)
+/* LSU Control Register ASI. */
+#define ASI_LSU_CONTROL_REG		0x45	/**< Load/Store Unit Control Register. */
+#endif
+
+/* I-MMU ASIs. */
+#define ASI_IMMU			0x50
+#define ASI_IMMU_TSB_8KB_PTR_REG	0x51	
+#define ASI_IMMU_TSB_64KB_PTR_REG	0x52
+#define ASI_ITLB_DATA_IN_REG		0x54
+#define ASI_ITLB_DATA_ACCESS_REG	0x55
+#define ASI_ITLB_TAG_READ_REG		0x56
+#define ASI_IMMU_DEMAP			0x57
+
+/* Virtual Addresses within ASI_IMMU. */
+#define VA_IMMU_TSB_TAG_TARGET		0x0	/**< IMMU TSB tag target register. */
+#define VA_IMMU_SFSR			0x18	/**< IMMU sync fault status register. */
+#define VA_IMMU_TSB_BASE		0x28	/**< IMMU TSB base register. */
+#define VA_IMMU_TAG_ACCESS		0x30	/**< IMMU TLB tag access register. */
+#if defined (US3)
+#define VA_IMMU_PRIMARY_EXTENSION	0x48	/**< IMMU TSB primary extension register */
+#define VA_IMMU_NUCLEUS_EXTENSION	0x58	/**< IMMU TSB nucleus extension register */
+#endif
+
+
+/* D-MMU ASIs. */
+#define ASI_DMMU			0x58
+#define ASI_DMMU_TSB_8KB_PTR_REG	0x59	
+#define ASI_DMMU_TSB_64KB_PTR_REG	0x5a
+#define ASI_DMMU_TSB_DIRECT_PTR_REG	0x5b
+#define ASI_DTLB_DATA_IN_REG		0x5c
+#define ASI_DTLB_DATA_ACCESS_REG	0x5d
+#define ASI_DTLB_TAG_READ_REG		0x5e
+#define ASI_DMMU_DEMAP			0x5f
+
+/* Virtual Addresses within ASI_DMMU. */
+#define VA_DMMU_TSB_TAG_TARGET		0x0	/**< DMMU TSB tag target register. */
+#define VA_PRIMARY_CONTEXT_REG		0x8	/**< DMMU primary context register. */
+#define VA_SECONDARY_CONTEXT_REG	0x10	/**< DMMU secondary context register. */
+#define VA_DMMU_SFSR			0x18	/**< DMMU sync fault status register. */
+#define VA_DMMU_SFAR			0x20	/**< DMMU sync fault address register. */
+#define VA_DMMU_TSB_BASE		0x28	/**< DMMU TSB base register. */
+#define VA_DMMU_TAG_ACCESS		0x30	/**< DMMU TLB tag access register. */
+#define VA_DMMU_VA_WATCHPOINT_REG	0x38	/**< DMMU VA data watchpoint register. */
+#define VA_DMMU_PA_WATCHPOINT_REG	0x40	/**< DMMU PA data watchpoint register. */
+#if defined (US3)
+#define VA_DMMU_PRIMARY_EXTENSION	0x48	/**< DMMU TSB primary extension register */
+#define VA_DMMU_SECONDARY_EXTENSION	0x50	/**< DMMU TSB secondary extension register */
+#define VA_DMMU_NUCLEUS_EXTENSION	0x58	/**< DMMU TSB nucleus extension register */
+#endif
+
+#ifndef __ASM__
+
+#include <arch/asm.h>
+#include <arch/barrier.h>
+#include <arch/types.h>
+
+#if defined(US)
+/** LSU Control Register. */
+typedef union {
+	uint64_t value;
+	struct {
+		unsigned : 23;
+		unsigned pm : 8;
+		unsigned vm : 8;
+		unsigned pr : 1;
+		unsigned pw : 1;
+		unsigned vr : 1;
+		unsigned vw : 1;
+		unsigned : 1;
+		unsigned fm : 16;	
+		unsigned dm : 1;	/**< D-MMU enable. */
+		unsigned im : 1;	/**< I-MMU enable. */
+		unsigned dc : 1;	/**< D-Cache enable. */
+		unsigned ic : 1;	/**< I-Cache enable. */
+		
+	} __attribute__ ((packed));
+} lsu_cr_reg_t;
+#endif /* US */
+
+#endif /* !def __ASM__ */
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/mm/sun4u/tlb.h
===================================================================
--- kernel/arch/sparc64/include/mm/sun4u/tlb.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/mm/sun4u/tlb.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,692 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64mm	
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_sparc64_SUN4U_TLB_H_
+#define KERN_sparc64_SUN4U_TLB_H_
+
+#if defined (US)
+#define ITLB_ENTRY_COUNT		64
+#define DTLB_ENTRY_COUNT		64
+#define DTLB_MAX_LOCKED_ENTRIES		DTLB_ENTRY_COUNT
+#endif
+
+/** TLB_DSMALL is the only of the three DMMUs that can hold locked entries. */
+#if defined (US3)
+#define DTLB_MAX_LOCKED_ENTRIES		16
+#endif
+
+#define MEM_CONTEXT_KERNEL		0
+#define MEM_CONTEXT_TEMP		1
+
+/** Page sizes. */
+#define PAGESIZE_8K	0
+#define PAGESIZE_64K	1
+#define PAGESIZE_512K	2
+#define PAGESIZE_4M	3
+
+/** Bit width of the TLB-locked portion of kernel address space. */
+#define KERNEL_PAGE_WIDTH       22	/* 4M */
+
+/* TLB Demap Operation types. */
+#define TLB_DEMAP_PAGE		0
+#define TLB_DEMAP_CONTEXT	1
+#if defined (US3)
+#define TLB_DEMAP_ALL		2
+#endif
+
+#define TLB_DEMAP_TYPE_SHIFT	6
+
+/* TLB Demap Operation Context register encodings. */
+#define TLB_DEMAP_PRIMARY	0
+#define TLB_DEMAP_SECONDARY	1
+#define TLB_DEMAP_NUCLEUS	2
+
+/* There are more TLBs in one MMU in US3, their codes are defined here. */
+#if defined (US3)
+/* D-MMU: one small (16-entry) TLB and two big (512-entry) TLBs */
+#define TLB_DSMALL	0
+#define TLB_DBIG_0	2
+#define TLB_DBIG_1	3
+	
+/* I-MMU: one small (16-entry) TLB and one big TLB */
+#define TLB_ISMALL	0
+#define TLB_IBIG	2
+#endif
+
+#define TLB_DEMAP_CONTEXT_SHIFT	4
+
+/* TLB Tag Access shifts */
+#define TLB_TAG_ACCESS_CONTEXT_SHIFT	0
+#define TLB_TAG_ACCESS_CONTEXT_MASK	((1 << 13) - 1)
+#define TLB_TAG_ACCESS_VPN_SHIFT	13
+
+#ifndef __ASM__
+
+#include <arch/mm/tte.h>
+#include <arch/mm/mmu.h>
+#include <arch/mm/page.h>
+#include <arch/asm.h>
+#include <arch/barrier.h>
+#include <arch/types.h>
+#include <arch/register.h>
+#include <arch/cpu.h>
+
+union tlb_context_reg {
+	uint64_t v;
+	struct {
+		unsigned long : 51;
+		unsigned context : 13;		/**< Context/ASID. */
+	} __attribute__ ((packed));
+};
+typedef union tlb_context_reg tlb_context_reg_t;
+
+/** I-/D-TLB Data In/Access Register type. */
+typedef tte_data_t tlb_data_t;
+
+/** I-/D-TLB Data Access Address in Alternate Space. */
+
+#if defined (US)
+
+union tlb_data_access_addr {
+	uint64_t value;
+	struct {
+		uint64_t : 55;
+		unsigned tlb_entry : 6;
+		unsigned : 3;
+	} __attribute__ ((packed));
+};
+typedef union tlb_data_access_addr dtlb_data_access_addr_t;
+typedef union tlb_data_access_addr dtlb_tag_read_addr_t;
+typedef union tlb_data_access_addr itlb_data_access_addr_t;
+typedef union tlb_data_access_addr itlb_tag_read_addr_t;
+
+#elif defined (US3)
+
+/*
+ * In US3, I-MMU and D-MMU have different formats of the data
+ * access register virtual address. In the corresponding
+ * structures the member variable for the entry number is
+ * called "local_tlb_entry" - it contrasts with the "tlb_entry"
+ * for the US data access register VA structure. The rationale
+ * behind this is to prevent careless mistakes in the code
+ * caused by setting only the entry number and not the TLB
+ * number in the US3 code (when taking the code from US). 
+ */
+
+union dtlb_data_access_addr {
+	uint64_t value;
+	struct {
+		uint64_t : 45;
+		unsigned : 1;
+		unsigned tlb_number : 2;
+		unsigned : 4;
+		unsigned local_tlb_entry : 9;
+		unsigned : 3;
+	} __attribute__ ((packed));
+};
+typedef union dtlb_data_access_addr dtlb_data_access_addr_t;
+typedef union dtlb_data_access_addr dtlb_tag_read_addr_t;
+
+union itlb_data_access_addr {
+	uint64_t value;
+	struct {
+		uint64_t : 45;
+		unsigned : 1;
+		unsigned tlb_number : 2;
+		unsigned : 6;
+		unsigned local_tlb_entry : 7;
+		unsigned : 3;
+	} __attribute__ ((packed));
+};
+typedef union itlb_data_access_addr itlb_data_access_addr_t;
+typedef union itlb_data_access_addr itlb_tag_read_addr_t;
+
+#endif
+
+/** I-/D-TLB Tag Read Register. */
+union tlb_tag_read_reg {
+	uint64_t value;
+	struct {
+		uint64_t vpn : 51;	/**< Virtual Address bits 63:13. */
+		unsigned context : 13;	/**< Context identifier. */
+	} __attribute__ ((packed));
+};
+typedef union tlb_tag_read_reg tlb_tag_read_reg_t;
+typedef union tlb_tag_read_reg tlb_tag_access_reg_t;
+
+
+/** TLB Demap Operation Address. */
+union tlb_demap_addr {
+	uint64_t value;
+	struct {
+		uint64_t vpn: 51;	/**< Virtual Address bits 63:13. */
+#if defined (US)
+		unsigned : 6;		/**< Ignored. */
+		unsigned type : 1;	/**< The type of demap operation. */
+#elif defined (US3)
+		unsigned : 5;		/**< Ignored. */
+		unsigned type: 2;	/**< The type of demap operation. */
+#endif
+		unsigned context : 2;	/**< Context register selection. */
+		unsigned : 4;		/**< Zero. */
+	} __attribute__ ((packed));
+};
+typedef union tlb_demap_addr tlb_demap_addr_t;
+
+/** TLB Synchronous Fault Status Register. */
+union tlb_sfsr_reg {
+	uint64_t value;
+	struct {
+#if defined (US)
+		unsigned long : 40;	/**< Implementation dependent. */
+		unsigned asi : 8;	/**< ASI. */
+		unsigned : 2;
+		unsigned ft : 7;	/**< Fault type. */
+#elif defined (US3)
+		unsigned long : 39;	/**< Implementation dependent. */
+		unsigned nf : 1;	/**< Non-faulting load. */
+		unsigned asi : 8;	/**< ASI. */
+		unsigned tm : 1;	/**< I-TLB miss. */
+		unsigned : 3;		/**< Reserved. */
+		unsigned ft : 5;	/**< Fault type. */
+#endif
+		unsigned e : 1;		/**< Side-effect bit. */
+		unsigned ct : 2;	/**< Context Register selection. */
+		unsigned pr : 1;	/**< Privilege bit. */
+		unsigned w : 1;		/**< Write bit. */
+		unsigned ow : 1;	/**< Overwrite bit. */
+		unsigned fv : 1;	/**< Fault Valid bit. */
+	} __attribute__ ((packed));
+};
+typedef union tlb_sfsr_reg tlb_sfsr_reg_t;
+
+#if defined (US3)
+
+/*
+ * Functions for determining the number of entries in TLBs. They either return
+ * a constant value or a value based on the CPU autodetection.
+ */
+
+/**
+ * Determine the number of entries in the DMMU's small TLB. 
+ */
+static inline uint16_t tlb_dsmall_size(void)
+{
+	return 16;
+}
+
+/**
+ * Determine the number of entries in each DMMU's big TLB. 
+ */
+static inline uint16_t tlb_dbig_size(void)
+{
+	return 512;
+}
+
+/**
+ * Determine the number of entries in the IMMU's small TLB. 
+ */
+static inline uint16_t tlb_ismall_size(void)
+{
+	return 16;
+}
+
+/**
+ * Determine the number of entries in the IMMU's big TLB. 
+ */
+static inline uint16_t tlb_ibig_size(void)
+{
+	if (((ver_reg_t) ver_read()).impl == IMPL_ULTRASPARCIV_PLUS)
+		return 512;
+	else
+		return 128;
+}
+
+#endif
+
+/** Read MMU Primary Context Register.
+ *
+ * @return		Current value of Primary Context Register.
+ */
+static inline uint64_t mmu_primary_context_read(void)
+{
+	return asi_u64_read(ASI_DMMU, VA_PRIMARY_CONTEXT_REG);
+}
+
+/** Write MMU Primary Context Register.
+ *
+ * @param v		New value of Primary Context Register.
+ */
+static inline void mmu_primary_context_write(uint64_t v)
+{
+	asi_u64_write(ASI_DMMU, VA_PRIMARY_CONTEXT_REG, v);
+	flush_pipeline();
+}
+
+/** Read MMU Secondary Context Register.
+ *
+ * @return		Current value of Secondary Context Register.
+ */
+static inline uint64_t mmu_secondary_context_read(void)
+{
+	return asi_u64_read(ASI_DMMU, VA_SECONDARY_CONTEXT_REG);
+}
+
+/** Write MMU Secondary Context Register.
+ *
+ * @param v		New value of Secondary Context Register.
+ */
+static inline void mmu_secondary_context_write(uint64_t v)
+{
+	asi_u64_write(ASI_DMMU, VA_SECONDARY_CONTEXT_REG, v);
+	flush_pipeline();
+}
+
+#if defined (US)
+
+/** Read IMMU TLB Data Access Register.
+ *
+ * @param entry		TLB Entry index.
+ *
+ * @return		Current value of specified IMMU TLB Data Access
+ * 			Register.
+ */
+static inline uint64_t itlb_data_access_read(size_t entry)
+{
+	itlb_data_access_addr_t reg;
+	
+	reg.value = 0;
+	reg.tlb_entry = entry;
+	return asi_u64_read(ASI_ITLB_DATA_ACCESS_REG, reg.value);
+}
+
+/** Write IMMU TLB Data Access Register.
+ *
+ * @param entry		TLB Entry index.
+ * @param value		Value to be written.
+ */
+static inline void itlb_data_access_write(size_t entry, uint64_t value)
+{
+	itlb_data_access_addr_t reg;
+	
+	reg.value = 0;
+	reg.tlb_entry = entry;
+	asi_u64_write(ASI_ITLB_DATA_ACCESS_REG, reg.value, value);
+	flush_pipeline();
+}
+
+/** Read DMMU TLB Data Access Register.
+ *
+ * @param entry		TLB Entry index.
+ *
+ * @return		Current value of specified DMMU TLB Data Access
+ * 			Register.
+ */
+static inline uint64_t dtlb_data_access_read(size_t entry)
+{
+	dtlb_data_access_addr_t reg;
+	
+	reg.value = 0;
+	reg.tlb_entry = entry;
+	return asi_u64_read(ASI_DTLB_DATA_ACCESS_REG, reg.value);
+}
+
+/** Write DMMU TLB Data Access Register.
+ *
+ * @param entry		TLB Entry index.
+ * @param value		Value to be written.
+ */
+static inline void dtlb_data_access_write(size_t entry, uint64_t value)
+{
+	dtlb_data_access_addr_t reg;
+	
+	reg.value = 0;
+	reg.tlb_entry = entry;
+	asi_u64_write(ASI_DTLB_DATA_ACCESS_REG, reg.value, value);
+	membar();
+}
+
+/** Read IMMU TLB Tag Read Register.
+ *
+ * @param entry		TLB Entry index.
+ *
+ * @return		Current value of specified IMMU TLB Tag Read Register.
+ */
+static inline uint64_t itlb_tag_read_read(size_t entry)
+{
+	itlb_tag_read_addr_t tag;
+
+	tag.value = 0;
+	tag.tlb_entry =	entry;
+	return asi_u64_read(ASI_ITLB_TAG_READ_REG, tag.value);
+}
+
+/** Read DMMU TLB Tag Read Register.
+ *
+ * @param entry		TLB Entry index.
+ *
+ * @return		Current value of specified DMMU TLB Tag Read Register.
+ */
+static inline uint64_t dtlb_tag_read_read(size_t entry)
+{
+	dtlb_tag_read_addr_t tag;
+
+	tag.value = 0;
+	tag.tlb_entry =	entry;
+	return asi_u64_read(ASI_DTLB_TAG_READ_REG, tag.value);
+}
+
+#elif defined (US3)
+
+
+/** Read IMMU TLB Data Access Register.
+ *
+ * @param tlb		TLB number (one of TLB_ISMALL or TLB_IBIG)
+ * @param entry		TLB Entry index.
+ *
+ * @return		Current value of specified IMMU TLB Data Access
+ * 			Register.
+ */
+static inline uint64_t itlb_data_access_read(int tlb, size_t entry)
+{
+	itlb_data_access_addr_t reg;
+	
+	reg.value = 0;
+	reg.tlb_number = tlb;
+	reg.local_tlb_entry = entry;
+	return asi_u64_read(ASI_ITLB_DATA_ACCESS_REG, reg.value);
+}
+
+/** Write IMMU TLB Data Access Register.
+ * @param tlb		TLB number (one of TLB_ISMALL or TLB_IBIG)
+ * @param entry		TLB Entry index.
+ * @param value		Value to be written.
+ */
+static inline void itlb_data_access_write(int tlb, size_t entry,
+	uint64_t value)
+{
+	itlb_data_access_addr_t reg;
+	
+	reg.value = 0;
+	reg.tlb_number = tlb;
+	reg.local_tlb_entry = entry;
+	asi_u64_write(ASI_ITLB_DATA_ACCESS_REG, reg.value, value);
+	flush_pipeline();
+}
+
+/** Read DMMU TLB Data Access Register.
+ *
+ * @param tlb		TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1)
+ * @param entry		TLB Entry index.
+ *
+ * @return		Current value of specified DMMU TLB Data Access
+ * 			Register.
+ */
+static inline uint64_t dtlb_data_access_read(int tlb, size_t entry)
+{
+	dtlb_data_access_addr_t reg;
+	
+	reg.value = 0;
+	reg.tlb_number = tlb;
+	reg.local_tlb_entry = entry;
+	return asi_u64_read(ASI_DTLB_DATA_ACCESS_REG, reg.value);
+}
+
+/** Write DMMU TLB Data Access Register.
+ *
+ * @param tlb		TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1)  
+ * @param entry		TLB Entry index.
+ * @param value		Value to be written.
+ */
+static inline void dtlb_data_access_write(int tlb, size_t entry,
+	uint64_t value)
+{
+	dtlb_data_access_addr_t reg;
+	
+	reg.value = 0;
+	reg.tlb_number = tlb;
+	reg.local_tlb_entry = entry;
+	asi_u64_write(ASI_DTLB_DATA_ACCESS_REG, reg.value, value);
+	membar();
+}
+
+/** Read IMMU TLB Tag Read Register.
+ *
+ * @param tlb		TLB number (one of TLB_ISMALL or TLB_IBIG) 
+ * @param entry		TLB Entry index.
+ *
+ * @return		Current value of specified IMMU TLB Tag Read Register.
+ */
+static inline uint64_t itlb_tag_read_read(int tlb, size_t entry)
+{
+	itlb_tag_read_addr_t tag;
+
+	tag.value = 0;
+	tag.tlb_number = tlb;
+	tag.local_tlb_entry = entry;
+	return asi_u64_read(ASI_ITLB_TAG_READ_REG, tag.value);
+}
+
+/** Read DMMU TLB Tag Read Register.
+ *
+ * @param tlb		TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1)
+ * @param entry		TLB Entry index.
+ *
+ * @return		Current value of specified DMMU TLB Tag Read Register.
+ */
+static inline uint64_t dtlb_tag_read_read(int tlb, size_t entry)
+{
+	dtlb_tag_read_addr_t tag;
+
+	tag.value = 0;
+	tag.tlb_number = tlb;
+	tag.local_tlb_entry = entry;
+	return asi_u64_read(ASI_DTLB_TAG_READ_REG, tag.value);
+}
+
+#endif
+
+
+/** Write IMMU TLB Tag Access Register.
+ *
+ * @param v		Value to be written.
+ */
+static inline void itlb_tag_access_write(uint64_t v)
+{
+	asi_u64_write(ASI_IMMU, VA_IMMU_TAG_ACCESS, v);
+	flush_pipeline();
+}
+
+/** Read IMMU TLB Tag Access Register.
+ *
+ * @return		Current value of IMMU TLB Tag Access Register.
+ */
+static inline uint64_t itlb_tag_access_read(void)
+{
+	return asi_u64_read(ASI_IMMU, VA_IMMU_TAG_ACCESS);
+}
+
+/** Write DMMU TLB Tag Access Register.
+ *
+ * @param v		Value to be written.
+ */
+static inline void dtlb_tag_access_write(uint64_t v)
+{
+	asi_u64_write(ASI_DMMU, VA_DMMU_TAG_ACCESS, v);
+	membar();
+}
+
+/** Read DMMU TLB Tag Access Register.
+ *
+ * @return 		Current value of DMMU TLB Tag Access Register.
+ */
+static inline uint64_t dtlb_tag_access_read(void)
+{
+	return asi_u64_read(ASI_DMMU, VA_DMMU_TAG_ACCESS);
+}
+
+
+/** Write IMMU TLB Data in Register.
+ *
+ * @param v		Value to be written.
+ */
+static inline void itlb_data_in_write(uint64_t v)
+{
+	asi_u64_write(ASI_ITLB_DATA_IN_REG, 0, v);
+	flush_pipeline();
+}
+
+/** Write DMMU TLB Data in Register.
+ *
+ * @param v		Value to be written.
+ */
+static inline void dtlb_data_in_write(uint64_t v)
+{
+	asi_u64_write(ASI_DTLB_DATA_IN_REG, 0, v);
+	membar();
+}
+
+/** Read ITLB Synchronous Fault Status Register.
+ *
+ * @return		Current content of I-SFSR register.
+ */
+static inline uint64_t itlb_sfsr_read(void)
+{
+	return asi_u64_read(ASI_IMMU, VA_IMMU_SFSR);
+}
+
+/** Write ITLB Synchronous Fault Status Register.
+ *
+ * @param v		New value of I-SFSR register.
+ */
+static inline void itlb_sfsr_write(uint64_t v)
+{
+	asi_u64_write(ASI_IMMU, VA_IMMU_SFSR, v);
+	flush_pipeline();
+}
+
+/** Read DTLB Synchronous Fault Status Register.
+ *
+ * @return		Current content of D-SFSR register.
+ */
+static inline uint64_t dtlb_sfsr_read(void)
+{
+	return asi_u64_read(ASI_DMMU, VA_DMMU_SFSR);
+}
+
+/** Write DTLB Synchronous Fault Status Register.
+ *
+ * @param v		New value of D-SFSR register.
+ */
+static inline void dtlb_sfsr_write(uint64_t v)
+{
+	asi_u64_write(ASI_DMMU, VA_DMMU_SFSR, v);
+	membar();
+}
+
+/** Read DTLB Synchronous Fault Address Register.
+ *
+ * @return		Current content of D-SFAR register.
+ */
+static inline uint64_t dtlb_sfar_read(void)
+{
+	return asi_u64_read(ASI_DMMU, VA_DMMU_SFAR);
+}
+
+/** Perform IMMU TLB Demap Operation.
+ *
+ * @param type		Selects between context and page demap (and entire MMU
+ * 			demap on US3).
+ * @param context_encoding Specifies which Context register has Context ID for
+ * 			demap.
+ * @param page		Address which is on the page to be demapped.
+ */
+static inline void itlb_demap(int type, int context_encoding, uintptr_t page)
+{
+	tlb_demap_addr_t da;
+	page_address_t pg;
+	
+	da.value = 0;
+	pg.address = page;
+	
+	da.type = type;
+	da.context = context_encoding;
+	da.vpn = pg.vpn;
+	
+	/* da.value is the address within the ASI */ 
+	asi_u64_write(ASI_IMMU_DEMAP, da.value, 0);
+
+	flush_pipeline();
+}
+
+/** Perform DMMU TLB Demap Operation.
+ *
+ * @param type		Selects between context and page demap (and entire MMU
+ * 			demap on US3).
+ * @param context_encoding Specifies which Context register has Context ID for
+ * 			demap.
+ * @param page		Address which is on the page to be demapped.
+ */
+static inline void dtlb_demap(int type, int context_encoding, uintptr_t page)
+{
+	tlb_demap_addr_t da;
+	page_address_t pg;
+	
+	da.value = 0;
+	pg.address = page;
+	
+	da.type = type;
+	da.context = context_encoding;
+	da.vpn = pg.vpn;
+	
+	/* da.value is the address within the ASI */ 
+	asi_u64_write(ASI_DMMU_DEMAP, da.value, 0);
+
+	membar();
+}
+
+extern void fast_instruction_access_mmu_miss(unative_t, istate_t *);
+extern void fast_data_access_mmu_miss(tlb_tag_access_reg_t, istate_t *);
+extern void fast_data_access_protection(tlb_tag_access_reg_t , istate_t *);
+
+extern void dtlb_insert_mapping(uintptr_t, uintptr_t, int, bool, bool);
+
+extern void dump_sfsr_and_sfar(void);
+
+#endif /* !def __ASM__ */
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/mm/sun4u/tte.h
===================================================================
--- kernel/arch/sparc64/include/mm/sun4u/tte.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/mm/sun4u/tte.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64mm	
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_sparc64_sun4u_TTE_H_
+#define KERN_sparc64_sun4u_TTE_H_
+
+#define TTE_G		(1 << 0)
+#define TTE_W		(1 << 1)
+#define TTE_P		(1 << 2)
+#define TTE_E		(1 << 3)
+#define TTE_CV		(1 << 4)
+#define TTE_CP		(1 << 5)
+#define TTE_L		(1 << 6)
+
+#define TTE_V_SHIFT	63
+#define TTE_SIZE_SHIFT	61
+
+#ifndef __ASM__
+
+#include <arch/types.h>
+
+/* TTE tag's VA_tag field contains bits <63:VA_TAG_PAGE_SHIFT> of the VA */
+#define VA_TAG_PAGE_SHIFT	22
+
+/** Translation Table Entry - Tag. */
+union tte_tag {
+	uint64_t value;
+	struct {
+		unsigned g : 1;		/**< Global. */
+		unsigned : 2;		/**< Reserved. */
+		unsigned context : 13;	/**< Context identifier. */
+		unsigned : 6;		/**< Reserved. */
+		uint64_t va_tag : 42;	/**< Virtual Address Tag, bits 63:22. */
+	} __attribute__ ((packed));
+};
+
+typedef union tte_tag tte_tag_t;
+
+/** Translation Table Entry - Data. */
+union tte_data {
+	uint64_t value;
+	struct {
+		unsigned v : 1;		/**< Valid. */
+		unsigned size : 2;	/**< Page size of this entry. */
+		unsigned nfo : 1;	/**< No-Fault-Only. */
+		unsigned ie : 1;	/**< Invert Endianness. */
+		unsigned soft2 : 9;	/**< Software defined field. */
+#if defined (US)
+		unsigned diag : 9;	/**< Diagnostic data. */
+		unsigned pfn : 28;	/**< Physical Address bits, bits 40:13. */
+#elif defined (US3)
+		unsigned : 7;		/**< Reserved. */
+		unsigned pfn : 30;	/**< Physical Address bits, bits 42:13 */
+#endif
+		unsigned soft : 6;	/**< Software defined field. */
+		unsigned l : 1;		/**< Lock. */
+		unsigned cp : 1;	/**< Cacheable in physically indexed cache. */
+		unsigned cv : 1;	/**< Cacheable in virtually indexed cache. */
+		unsigned e : 1;		/**< Side-effect. */
+		unsigned p : 1;		/**< Privileged. */
+		unsigned w : 1;		/**< Writable. */
+		unsigned g : 1;		/**< Global. */
+	} __attribute__ ((packed));
+};
+
+typedef union tte_data tte_data_t;
+
+#endif /* !def __ASM__ */
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/mm/sun4v/as.h
===================================================================
--- kernel/arch/sparc64/include/mm/sun4v/as.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/mm/sun4v/as.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * Copyright (c) 2009 Pavel Rimsky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64mm	
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_sparc64_sun4v_AS_H_
+#define KERN_sparc64_sun4v_AS_H_
+
+#include <arch/mm/tte.h>
+#include <arch/mm/tsb.h>
+
+#define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH	1
+
+#define KERNEL_ADDRESS_SPACE_START_ARCH		(unsigned long) 0x0000000000000000
+#define KERNEL_ADDRESS_SPACE_END_ARCH		(unsigned long) 0xffffffffffffffff
+#define USER_ADDRESS_SPACE_START_ARCH		(unsigned long) 0x0000000000000000
+#define USER_ADDRESS_SPACE_END_ARCH		(unsigned long) 0xffffffffffffffff
+
+#define USTACK_ADDRESS_ARCH	(0xffffffffffffffffULL - (PAGE_SIZE - 1))
+
+#ifdef CONFIG_TSB
+
+/**
+ * TTE Tag.
+ *
+ * Even though for sun4v the format of the TSB Tag states that the context
+ * field has 16 bits, the T1 CPU still only supports 13-bit contexts and the
+ * three most significant bits are always zero. 
+ */
+typedef union tte_tag {
+	uint64_t value;
+	struct {
+		unsigned : 3;
+		unsigned context : 13;	/**< Software ASID. */
+		unsigned : 6;
+		uint64_t va_tag : 42;	/**< Virtual address bits <63:22>. */
+	} __attribute__ ((packed));
+} tte_tag_t;
+
+/** TSB entry. */
+typedef struct tsb_entry {
+	tte_tag_t tag;
+	tte_data_t data;
+} __attribute__ ((packed)) tsb_entry_t;
+
+typedef struct {
+	tsb_descr_t tsb_description;
+} as_arch_t;
+
+#else
+
+typedef struct {
+} as_arch_t;
+
+#endif /* CONFIG_TSB */
+
+#include <genarch/mm/as_ht.h>
+
+#ifdef CONFIG_TSB
+#include <arch/mm/tsb.h>
+#define as_invalidate_translation_cache(as, page, cnt) \
+	tsb_invalidate((as), (page), (cnt))
+#else
+#define as_invalidate_translation_cache(as, page, cnt)
+#endif
+
+extern void as_arch_init(void);
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/mm/sun4v/frame.h
===================================================================
--- kernel/arch/sparc64/include/mm/sun4v/frame.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/mm/sun4v/frame.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64mm	
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_sparc64_SUN4V_FRAME_H_
+#define KERN_sparc64_SUN4V_FRAME_H_
+
+/*
+ * Page size supported by the MMU.
+ * For 8K there is the nasty illegal virtual aliasing problem.
+ * Therefore, the kernel uses 8K only internally on the TLB and TSB levels.
+ */
+#define MMU_FRAME_WIDTH		13	/* 8K */
+#define MMU_FRAME_SIZE		(1 << MMU_FRAME_WIDTH)
+
+#define FRAME_WIDTH		13
+#define FRAME_SIZE		(1 << FRAME_WIDTH)
+
+#ifdef KERNEL
+#ifndef __ASM__
+
+#include <arch/types.h>
+
+union frame_address {
+	uintptr_t address;
+	struct {
+#if defined (US)
+		unsigned : 23;
+		uint64_t pfn : 28;		/**< Physical Frame Number. */
+#elif defined (US3)
+		unsigned : 21;
+		uint64_t pfn : 30;		/**< Physical Frame Number. */
+#endif
+		unsigned offset : 13;		/**< Offset. */
+	} __attribute__ ((packed));
+};
+
+typedef union frame_address frame_address_t;
+
+extern uintptr_t last_frame;
+/* TODO (MH): end_of_identity is not used on sun4v yet; re-enable when ported. */
+//extern uintptr_t end_of_identity;
+
+extern void frame_arch_init(void);
+#define physmem_print()
+
+#endif
+#endif
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/mm/sun4v/mmu.h
===================================================================
--- kernel/arch/sparc64/include/mm/sun4v/mmu.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/mm/sun4v/mmu.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * Copyright (c) 2008 Pavel Rimsky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64mm	
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_sparc64_sun4v_MMU_H_
+#define KERN_sparc64_sun4v_MMU_H_
+
+#define ASI_REAL			0x14	/**< MMU bypass ASI */
+
+#define VA_PRIMARY_CONTEXT_REG		0x8	/**< primary context register VA. */
+#define ASI_PRIMARY_CONTEXT_REG		0x21	/**< primary context register ASI. */
+ 
+#define VA_SECONDARY_CONTEXT_REG	0x10	/**< secondary context register VA. */
+#define ASI_SECONDARY_CONTEXT_REG	0x21	/**< secondary context register ASI. */
+
+
+
+
+
+
+
+
+
+
+
+/* I-MMU ASIs. */
+#define ASI_IMMU			0x50
+#define ASI_IMMU_TSB_8KB_PTR_REG	0x51	
+#define ASI_IMMU_TSB_64KB_PTR_REG	0x52
+#define ASI_ITLB_DATA_IN_REG		0x54
+#define ASI_ITLB_DATA_ACCESS_REG	0x55
+#define ASI_ITLB_TAG_READ_REG		0x56
+#define ASI_IMMU_DEMAP			0x57
+
+/* Virtual Addresses within ASI_IMMU. */
+#define VA_IMMU_TSB_TAG_TARGET		0x0	/**< IMMU TSB tag target register. */
+#define VA_IMMU_SFSR			0x18	/**< IMMU sync fault status register. */
+#define VA_IMMU_TSB_BASE		0x28	/**< IMMU TSB base register. */
+#define VA_IMMU_TAG_ACCESS		0x30	/**< IMMU TLB tag access register. */
+#if defined (US3)
+#define VA_IMMU_PRIMARY_EXTENSION	0x48	/**< IMMU TSB primary extension register */
+#define VA_IMMU_NUCLEUS_EXTENSION	0x58	/**< IMMU TSB nucleus extension register */
+#endif
+
+
+/* D-MMU ASIs. */
+#define ASI_DMMU			0x58
+#define ASI_DMMU_TSB_8KB_PTR_REG	0x59	
+#define ASI_DMMU_TSB_64KB_PTR_REG	0x5a
+#define ASI_DMMU_TSB_DIRECT_PTR_REG	0x5b
+#define ASI_DTLB_DATA_IN_REG		0x5c
+#define ASI_DTLB_DATA_ACCESS_REG	0x5d
+#define ASI_DTLB_TAG_READ_REG		0x5e
+#define ASI_DMMU_DEMAP			0x5f
+
+/* Virtual Addresses within ASI_DMMU. */
+#define VA_DMMU_TSB_TAG_TARGET		0x0	/**< DMMU TSB tag target register. */
+#define VA_PRIMARY_CONTEXT_REG		0x8	/**< DMMU primary context register. */
+#define VA_SECONDARY_CONTEXT_REG	0x10	/**< DMMU secondary context register. */
+#define VA_DMMU_SFSR			0x18	/**< DMMU sync fault status register. */
+#define VA_DMMU_SFAR			0x20	/**< DMMU sync fault address register. */
+#define VA_DMMU_TSB_BASE		0x28	/**< DMMU TSB base register. */
+#define VA_DMMU_TAG_ACCESS		0x30	/**< DMMU TLB tag access register. */
+#define VA_DMMU_VA_WATCHPOINT_REG	0x38	/**< DMMU VA data watchpoint register. */
+#define VA_DMMU_PA_WATCHPOINT_REG	0x40	/**< DMMU PA data watchpoint register. */
+#if defined (US3)
+#define VA_DMMU_PRIMARY_EXTENSION	0x48	/**< DMMU TSB primary extension register */
+#define VA_DMMU_SECONDARY_EXTENSION	0x50	/**< DMMU TSB secondary extension register */
+#define VA_DMMU_NUCLEUS_EXTENSION	0x58	/**< DMMU TSB nucleus extension register */
+#endif
+
+#ifndef __ASM__
+
+#include <arch/asm.h>
+#include <arch/barrier.h>
+#include <arch/types.h>
+
+#if defined(US)
+/** LSU Control Register. */
+typedef union {
+	uint64_t value;
+	struct {
+		unsigned : 23;
+		unsigned pm : 8;
+		unsigned vm : 8;
+		unsigned pr : 1;
+		unsigned pw : 1;
+		unsigned vr : 1;
+		unsigned vw : 1;
+		unsigned : 1;
+		unsigned fm : 16;	
+		unsigned dm : 1;	/**< D-MMU enable. */
+		unsigned im : 1;	/**< I-MMU enable. */
+		unsigned dc : 1;	/**< D-Cache enable. */
+		unsigned ic : 1;	/**< I-Cache enable. */
+		
+	} __attribute__ ((packed));
+} lsu_cr_reg_t;
+#endif /* US */
+
+#endif /* !def __ASM__ */
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/mm/sun4v/page.h
===================================================================
--- kernel/arch/sparc64/include/mm/sun4v/page.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/mm/sun4v/page.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64mm	
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_sparc64_sun4v_PAGE_H_
+#define KERN_sparc64_sun4v_PAGE_H_
+
+#include <arch/mm/frame.h>
+
+#define MMU_PAGE_WIDTH	MMU_FRAME_WIDTH
+#define MMU_PAGE_SIZE	MMU_FRAME_SIZE
+
+#define PAGE_WIDTH	FRAME_WIDTH
+#define PAGE_SIZE	FRAME_SIZE
+
+#define MMU_PAGES_PER_PAGE	(1 << (PAGE_WIDTH - MMU_PAGE_WIDTH))
+
+#ifdef KERNEL
+
+#ifndef __ASM__
+
+#include <arch/interrupt.h>
+
+extern uintptr_t physmem_base;
+
+#define KA2PA(x)	(((uintptr_t) (x)) + physmem_base)
+#define PA2KA(x)	(((uintptr_t) (x)) - physmem_base)
+
+typedef union {
+	uintptr_t address;
+	struct {
+		uint64_t vpn : 51;		/**< Virtual Page Number. */
+		unsigned offset : 13;		/**< Offset. */
+	} __attribute__ ((packed));
+} page_address_t;
+
+extern void page_arch_init(void);
+
+#endif /* !def __ASM__ */
+
+#endif /* KERNEL */
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/mm/sun4v/tlb.h
===================================================================
--- kernel/arch/sparc64/include/mm/sun4v/tlb.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/mm/sun4v/tlb.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * Copyright (c) 2008 Pavel Rimsky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64mm	
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_sparc64_sun4v_TLB_H_
+#define KERN_sparc64_sun4v_TLB_H_
+
+#define MMU_FSA_ALIGNMENT	64
+#define MMU_FSA_SIZE		128
+
+#ifndef __ASM__
+
+#include <arch/mm/tte.h>
+#include <print.h>
+#include <arch/mm/mmu.h>
+#include <arch/mm/page.h>
+#include <arch/asm.h>
+#include <arch/barrier.h>
+#include <arch/types.h>
+#include <arch/register.h>
+#include <arch/cpu.h>
+#include <arch/sun4v/hypercall.h>
+
+/**
+ * Structure filled by hypervisor (or directly CPU, if implemented so) when
+ * a MMU fault occurs. The structure describes the exact condition which
+ * has caused the fault.
+ */
+typedef struct mmu_fault_status_area {
+	uint64_t ift;		/**< Instruction fault type (IFT) */
+	uint64_t ifa;		/**< Instruction fault address (IFA) */
+	uint64_t ifc;		/**< Instruction fault context (IFC) */
+	uint8_t reserved1[0x28];
+
+	uint64_t dft;		/**< Data fault type (DFT) */
+	uint64_t dfa;		/**< Data fault address (DFA) */
+	uint64_t dfc;		/**< Data fault context (DFC) */
+	uint8_t reserved2[0x28];
+} __attribute__ ((packed)) mmu_fault_status_area_t;
+
+#define DTLB_MAX_LOCKED_ENTRIES		8
+
+/** Bit width of the TLB-locked portion of kernel address space. */
+#define KERNEL_PAGE_WIDTH       22	/* 4M */
+
+/*
+ * Reading and writing context registers.
+ *
+ * Note that UltraSPARC Architecture-compatible processors do not require
+ * a MEMBAR #Sync, FLUSH, DONE, or RETRY instruction after a store to an
+ * MMU register for proper operation.
+ *
+ */
+
+/** Read MMU Primary Context Register.
+ *
+ * @return	Current value of Primary Context Register.
+ */
+static inline uint64_t mmu_primary_context_read(void)
+{
+	return asi_u64_read(ASI_PRIMARY_CONTEXT_REG, VA_PRIMARY_CONTEXT_REG);
+}
+ 
+/** Write MMU Primary Context Register.
+ *
+ * @param v	New value of Primary Context Register.
+ */
+static inline void mmu_primary_context_write(uint64_t v)
+{
+	asi_u64_write(ASI_PRIMARY_CONTEXT_REG, VA_PRIMARY_CONTEXT_REG, v);
+}
+ 
+/** Read MMU Secondary Context Register.
+ *
+ * @return	Current value of Secondary Context Register.
+ */
+static inline uint64_t mmu_secondary_context_read(void)
+{
+	return asi_u64_read(ASI_SECONDARY_CONTEXT_REG, VA_SECONDARY_CONTEXT_REG);
+}
+ 
+/** Write MMU Secondary Context Register.
+ *
+ * @param v	New value of Secondary Context Register.
+ */
+static inline void mmu_secondary_context_write(uint64_t v)
+{
+	asi_u64_write(ASI_SECONDARY_CONTEXT_REG, VA_SECONDARY_CONTEXT_REG, v);
+}
+
+/**
+ * Demaps all mappings in a context.
+ *
+ * @param context	number of the context
+ * @param mmu_flag	MMU_FLAG_DTLB, MMU_FLAG_ITLB or a combination of both
+ */
+static inline void mmu_demap_ctx(int context, int mmu_flag) {
+	__hypercall_fast4(MMU_DEMAP_CTX, 0, 0, context, mmu_flag);
+}
+
+/**
+ * Demaps given page.
+ *
+ * @param vaddr		VA of the page to be demapped
+ * @param context	number of the context
+ * @param mmu_flag	MMU_FLAG_DTLB, MMU_FLAG_ITLB or a combination of both
+ */
+static inline void mmu_demap_page(uintptr_t vaddr, int context, int mmu_flag) {
+	__hypercall_fast5(MMU_DEMAP_PAGE, 0, 0, vaddr, context, mmu_flag);
+}
+
+extern void fast_instruction_access_mmu_miss(unative_t, istate_t *);
+extern void fast_data_access_mmu_miss(unative_t, istate_t *);
+extern void fast_data_access_protection(unative_t, istate_t *);
+
+extern void dtlb_insert_mapping(uintptr_t, uintptr_t, int, bool, bool);
+
+extern void describe_dmmu_fault(void);
+
+#endif /* !def __ASM__ */
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/mm/sun4v/tsb.h
===================================================================
--- kernel/arch/sparc64/include/mm/sun4v/tsb.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/mm/sun4v/tsb.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2006 Jakub Jermar
+ * Copyright (c) 2009 Pavel Rimsky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64mm	
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_sparc64_sun4v_TSB_H_
+#define KERN_sparc64_sun4v_TSB_H_
+
+/*
+ * TSB will claim 64K of memory, which
+ * is a nice number considered that it is one of
+ * the page sizes supported by hardware, which,
+ * again, is nice because TSBs need to be locked
+ * in TLBs - only one TLB entry will do.
+ */
+#define TSB_SIZE			3	/* when changing this, change
+						 * as.c as well */
+#define TSB_ENTRY_COUNT			(512 * (1 << TSB_SIZE))
+
+#ifndef __ASM__
+
+#include <typedefs.h>
+#include <arch/mm/tte.h>
+#include <arch/mm/mmu.h>
+#include <arch/types.h>
+
+/** TSB description, used in hypercalls */
+typedef struct tsb_descr {
+	uint16_t page_size;	/**< Page size (0 = 8K, 1 = 64K,...). */
+	uint16_t associativity;	/**< TSB associativity (will be 1). */
+	uint32_t num_ttes;	/**< Number of TTEs. */
+	uint32_t context;	/**< Context number. */
+	uint32_t pgsize_mask;	/**< Equals "1 << page_size". */
+	uint64_t tsb_base;	/**< Real address of TSB base. */
+	uint64_t reserved;
+} __attribute__ ((packed)) tsb_descr_t;
+
+
+/* Forward declarations. */
+struct as;
+struct pte;
+
+extern void tsb_invalidate(struct as *as, uintptr_t page, count_t pages);
+extern void itsb_pte_copy(struct pte *t);
+extern void dtsb_pte_copy(struct pte *t, bool ro);
+
+#endif /* !def __ASM__ */
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/mm/sun4v/tte.h
===================================================================
--- kernel/arch/sparc64/include/mm/sun4v/tte.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/mm/sun4v/tte.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64mm	
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_sparc64_sun4v_TTE_H_
+#define KERN_sparc64_sun4v_TTE_H_
+
+#define TTE_V_SHIFT	63	/**< valid */
+#define TTE_TADDR_SHIFT	13	/**< target address */
+#define TTE_CP_SHIFT	10	/**< cacheable physically */
+#define TTE_CV_SHIFT	9	/**< cacheable virtually */
+#define TTE_P_SHIFT	8	/**< privileged */
+#define TTE_EP_SHIFT	7	/**< execute permission */
+#define TTE_W_SHIFT	6	/**< writable */
+#define TTE_SZ_SHIFT	0	/**< size */
+
+#define MMU_FLAG_ITLB	2	/**< operation applies to ITLB */
+#define MMU_FLAG_DTLB	1	/**< operation applies to DTLB */
+
+#ifndef __ASM__
+
+#include <arch/types.h>
+
+/** Translation Table Entry - Data. */
+union tte_data {
+	uint64_t value;
+	struct {
+		unsigned v : 1;		/**< Valid. */
+		unsigned nfo : 1;	/**< No-Fault-Only. */
+		unsigned soft : 6;	/**< Software defined field. */
+		unsigned long ra : 43;	/**< Real address. */
+		unsigned ie : 1;	/**< Invert endianness. */
+		unsigned e : 1;		/**< Side-effect. */
+		unsigned cp : 1;	/**< Cacheable in physically indexed cache. */
+		unsigned cv : 1;	/**< Cacheable in virtually indexed cache. */
+		unsigned p : 1;		/**< Privileged. */
+		unsigned x : 1;		/**< Executable. */
+		unsigned w : 1;		/**< Writable. */
+		unsigned soft2 : 2;	/**< Software defined field. */
+		unsigned size : 4;	/**< Page size. */
+	} __attribute__ ((packed));
+};
+
+typedef union tte_data tte_data_t;
+
+#define VA_TAG_PAGE_SHIFT	22
+
+#endif /* !def __ASM__ */
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/mm/tlb.h
===================================================================
--- kernel/arch/sparc64/include/mm/tlb.h	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ kernel/arch/sparc64/include/mm/tlb.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -36,654 +36,9 @@
 #define KERN_sparc64_TLB_H_
 
-#if defined (US)
-#define ITLB_ENTRY_COUNT		64
-#define DTLB_ENTRY_COUNT		64
-#define DTLB_MAX_LOCKED_ENTRIES		DTLB_ENTRY_COUNT
+#if defined (SUN4U)
+#include <arch/mm/sun4u/tlb.h>
+#elif defined (SUN4V)
+#include <arch/mm/sun4v/tlb.h>
 #endif
-
-/** TLB_DSMALL is the only of the three DMMUs that can hold locked entries. */
-#if defined (US3)
-#define DTLB_MAX_LOCKED_ENTRIES		16
-#endif
-
-#define MEM_CONTEXT_KERNEL		0
-#define MEM_CONTEXT_TEMP		1
-
-/** Page sizes. */
-#define PAGESIZE_8K	0
-#define PAGESIZE_64K	1
-#define PAGESIZE_512K	2
-#define PAGESIZE_4M	3
-
-/** Bit width of the TLB-locked portion of kernel address space. */
-#define KERNEL_PAGE_WIDTH       22	/* 4M */
-
-/* TLB Demap Operation types. */
-#define TLB_DEMAP_PAGE		0
-#define TLB_DEMAP_CONTEXT	1
-#if defined (US3)
-#define TLB_DEMAP_ALL		2
-#endif
-
-#define TLB_DEMAP_TYPE_SHIFT	6
-
-/* TLB Demap Operation Context register encodings. */
-#define TLB_DEMAP_PRIMARY	0
-#define TLB_DEMAP_SECONDARY	1
-#define TLB_DEMAP_NUCLEUS	2
-
-/* There are more TLBs in one MMU in US3, their codes are defined here. */
-#if defined (US3)
-/* D-MMU: one small (16-entry) TLB and two big (512-entry) TLBs */
-#define TLB_DSMALL	0
-#define TLB_DBIG_0	2
-#define TLB_DBIG_1	3
-	
-/* I-MMU: one small (16-entry) TLB and one big TLB */
-#define TLB_ISMALL	0
-#define TLB_IBIG	2
-#endif
-
-#define TLB_DEMAP_CONTEXT_SHIFT	4
-
-/* TLB Tag Access shifts */
-#define TLB_TAG_ACCESS_CONTEXT_SHIFT	0
-#define TLB_TAG_ACCESS_CONTEXT_MASK	((1 << 13) - 1)
-#define TLB_TAG_ACCESS_VPN_SHIFT	13
-
-#ifndef __ASM__
-
-#include <arch/mm/tte.h>
-#include <arch/mm/mmu.h>
-#include <arch/mm/page.h>
-#include <arch/asm.h>
-#include <arch/barrier.h>
-#include <arch/types.h>
-#include <arch/register.h>
-#include <arch/cpu.h>
-
-union tlb_context_reg {
-	uint64_t v;
-	struct {
-		unsigned long : 51;
-		unsigned context : 13;		/**< Context/ASID. */
-	} __attribute__ ((packed));
-};
-typedef union tlb_context_reg tlb_context_reg_t;
-
-/** I-/D-TLB Data In/Access Register type. */
-typedef tte_data_t tlb_data_t;
-
-/** I-/D-TLB Data Access Address in Alternate Space. */
-
-#if defined (US)
-
-union tlb_data_access_addr {
-	uint64_t value;
-	struct {
-		uint64_t : 55;
-		unsigned tlb_entry : 6;
-		unsigned : 3;
-	} __attribute__ ((packed));
-};
-typedef union tlb_data_access_addr dtlb_data_access_addr_t;
-typedef union tlb_data_access_addr dtlb_tag_read_addr_t;
-typedef union tlb_data_access_addr itlb_data_access_addr_t;
-typedef union tlb_data_access_addr itlb_tag_read_addr_t;
-
-#elif defined (US3)
-
-/*
- * In US3, I-MMU and D-MMU have different formats of the data
- * access register virtual address. In the corresponding
- * structures the member variable for the entry number is
- * called "local_tlb_entry" - it contrasts with the "tlb_entry"
- * for the US data access register VA structure. The rationale
- * behind this is to prevent careless mistakes in the code
- * caused by setting only the entry number and not the TLB
- * number in the US3 code (when taking the code from US). 
- */
-
-union dtlb_data_access_addr {
-	uint64_t value;
-	struct {
-		uint64_t : 45;
-		unsigned : 1;
-		unsigned tlb_number : 2;
-		unsigned : 4;
-		unsigned local_tlb_entry : 9;
-		unsigned : 3;
-	} __attribute__ ((packed));
-};
-typedef union dtlb_data_access_addr dtlb_data_access_addr_t;
-typedef union dtlb_data_access_addr dtlb_tag_read_addr_t;
-
-union itlb_data_access_addr {
-	uint64_t value;
-	struct {
-		uint64_t : 45;
-		unsigned : 1;
-		unsigned tlb_number : 2;
-		unsigned : 6;
-		unsigned local_tlb_entry : 7;
-		unsigned : 3;
-	} __attribute__ ((packed));
-};
-typedef union itlb_data_access_addr itlb_data_access_addr_t;
-typedef union itlb_data_access_addr itlb_tag_read_addr_t;
-
-#endif
-
-/** I-/D-TLB Tag Read Register. */
-union tlb_tag_read_reg {
-	uint64_t value;
-	struct {
-		uint64_t vpn : 51;	/**< Virtual Address bits 63:13. */
-		unsigned context : 13;	/**< Context identifier. */
-	} __attribute__ ((packed));
-};
-typedef union tlb_tag_read_reg tlb_tag_read_reg_t;
-typedef union tlb_tag_read_reg tlb_tag_access_reg_t;
-
-
-/** TLB Demap Operation Address. */
-union tlb_demap_addr {
-	uint64_t value;
-	struct {
-		uint64_t vpn: 51;	/**< Virtual Address bits 63:13. */
-#if defined (US)
-		unsigned : 6;		/**< Ignored. */
-		unsigned type : 1;	/**< The type of demap operation. */
-#elif defined (US3)
-		unsigned : 5;		/**< Ignored. */
-		unsigned type: 2;	/**< The type of demap operation. */
-#endif
-		unsigned context : 2;	/**< Context register selection. */
-		unsigned : 4;		/**< Zero. */
-	} __attribute__ ((packed));
-};
-typedef union tlb_demap_addr tlb_demap_addr_t;
-
-/** TLB Synchronous Fault Status Register. */
-union tlb_sfsr_reg {
-	uint64_t value;
-	struct {
-#if defined (US)
-		unsigned long : 40;	/**< Implementation dependent. */
-		unsigned asi : 8;	/**< ASI. */
-		unsigned : 2;
-		unsigned ft : 7;	/**< Fault type. */
-#elif defined (US3)
-		unsigned long : 39;	/**< Implementation dependent. */
-		unsigned nf : 1;	/**< Non-faulting load. */
-		unsigned asi : 8;	/**< ASI. */
-		unsigned tm : 1;	/**< I-TLB miss. */
-		unsigned : 3;		/**< Reserved. */
-		unsigned ft : 5;	/**< Fault type. */
-#endif
-		unsigned e : 1;		/**< Side-effect bit. */
-		unsigned ct : 2;	/**< Context Register selection. */
-		unsigned pr : 1;	/**< Privilege bit. */
-		unsigned w : 1;		/**< Write bit. */
-		unsigned ow : 1;	/**< Overwrite bit. */
-		unsigned fv : 1;	/**< Fault Valid bit. */
-	} __attribute__ ((packed));
-};
-typedef union tlb_sfsr_reg tlb_sfsr_reg_t;
-
-#if defined (US3)
-
-/*
- * Functions for determining the number of entries in TLBs. They either return
- * a constant value or a value based on the CPU autodetection.
- */
-
-/**
- * Determine the number of entries in the DMMU's small TLB. 
- */
-static inline uint16_t tlb_dsmall_size(void)
-{
-	return 16;
-}
-
-/**
- * Determine the number of entries in each DMMU's big TLB. 
- */
-static inline uint16_t tlb_dbig_size(void)
-{
-	return 512;
-}
-
-/**
- * Determine the number of entries in the IMMU's small TLB. 
- */
-static inline uint16_t tlb_ismall_size(void)
-{
-	return 16;
-}
-
-/**
- * Determine the number of entries in the IMMU's big TLB. 
- */
-static inline uint16_t tlb_ibig_size(void)
-{
-	if (((ver_reg_t) ver_read()).impl == IMPL_ULTRASPARCIV_PLUS)
-		return 512;
-	else
-		return 128;
-}
-
-#endif
-
-/** Read MMU Primary Context Register.
- *
- * @return		Current value of Primary Context Register.
- */
-static inline uint64_t mmu_primary_context_read(void)
-{
-	return asi_u64_read(ASI_DMMU, VA_PRIMARY_CONTEXT_REG);
-}
-
-/** Write MMU Primary Context Register.
- *
- * @param v		New value of Primary Context Register.
- */
-static inline void mmu_primary_context_write(uint64_t v)
-{
-	asi_u64_write(ASI_DMMU, VA_PRIMARY_CONTEXT_REG, v);
-	flush_pipeline();
-}
-
-/** Read MMU Secondary Context Register.
- *
- * @return		Current value of Secondary Context Register.
- */
-static inline uint64_t mmu_secondary_context_read(void)
-{
-	return asi_u64_read(ASI_DMMU, VA_SECONDARY_CONTEXT_REG);
-}
-
-/** Write MMU Primary Context Register.
- *
- * @param v		New value of Primary Context Register.
- */
-static inline void mmu_secondary_context_write(uint64_t v)
-{
-	asi_u64_write(ASI_DMMU, VA_SECONDARY_CONTEXT_REG, v);
-	flush_pipeline();
-}
-
-#if defined (US)
-
-/** Read IMMU TLB Data Access Register.
- *
- * @param entry		TLB Entry index.
- *
- * @return		Current value of specified IMMU TLB Data Access
- * 			Register.
- */
-static inline uint64_t itlb_data_access_read(size_t entry)
-{
-	itlb_data_access_addr_t reg;
-	
-	reg.value = 0;
-	reg.tlb_entry = entry;
-	return asi_u64_read(ASI_ITLB_DATA_ACCESS_REG, reg.value);
-}
-
-/** Write IMMU TLB Data Access Register.
- *
- * @param entry		TLB Entry index.
- * @param value		Value to be written.
- */
-static inline void itlb_data_access_write(size_t entry, uint64_t value)
-{
-	itlb_data_access_addr_t reg;
-	
-	reg.value = 0;
-	reg.tlb_entry = entry;
-	asi_u64_write(ASI_ITLB_DATA_ACCESS_REG, reg.value, value);
-	flush_pipeline();
-}
-
-/** Read DMMU TLB Data Access Register.
- *
- * @param entry		TLB Entry index.
- *
- * @return		Current value of specified DMMU TLB Data Access
- * 			Register.
- */
-static inline uint64_t dtlb_data_access_read(size_t entry)
-{
-	dtlb_data_access_addr_t reg;
-	
-	reg.value = 0;
-	reg.tlb_entry = entry;
-	return asi_u64_read(ASI_DTLB_DATA_ACCESS_REG, reg.value);
-}
-
-/** Write DMMU TLB Data Access Register.
- *
- * @param entry		TLB Entry index.
- * @param value		Value to be written.
- */
-static inline void dtlb_data_access_write(size_t entry, uint64_t value)
-{
-	dtlb_data_access_addr_t reg;
-	
-	reg.value = 0;
-	reg.tlb_entry = entry;
-	asi_u64_write(ASI_DTLB_DATA_ACCESS_REG, reg.value, value);
-	membar();
-}
-
-/** Read IMMU TLB Tag Read Register.
- *
- * @param entry		TLB Entry index.
- *
- * @return		Current value of specified IMMU TLB Tag Read Register.
- */
-static inline uint64_t itlb_tag_read_read(size_t entry)
-{
-	itlb_tag_read_addr_t tag;
-
-	tag.value = 0;
-	tag.tlb_entry =	entry;
-	return asi_u64_read(ASI_ITLB_TAG_READ_REG, tag.value);
-}
-
-/** Read DMMU TLB Tag Read Register.
- *
- * @param entry		TLB Entry index.
- *
- * @return		Current value of specified DMMU TLB Tag Read Register.
- */
-static inline uint64_t dtlb_tag_read_read(size_t entry)
-{
-	dtlb_tag_read_addr_t tag;
-
-	tag.value = 0;
-	tag.tlb_entry =	entry;
-	return asi_u64_read(ASI_DTLB_TAG_READ_REG, tag.value);
-}
-
-#elif defined (US3)
-
-
-/** Read IMMU TLB Data Access Register.
- *
- * @param tlb		TLB number (one of TLB_ISMALL or TLB_IBIG)
- * @param entry		TLB Entry index.
- *
- * @return		Current value of specified IMMU TLB Data Access
- * 			Register.
- */
-static inline uint64_t itlb_data_access_read(int tlb, size_t entry)
-{
-	itlb_data_access_addr_t reg;
-	
-	reg.value = 0;
-	reg.tlb_number = tlb;
-	reg.local_tlb_entry = entry;
-	return asi_u64_read(ASI_ITLB_DATA_ACCESS_REG, reg.value);
-}
-
-/** Write IMMU TLB Data Access Register.
- * @param tlb		TLB number (one of TLB_ISMALL or TLB_IBIG)
- * @param entry		TLB Entry index.
- * @param value		Value to be written.
- */
-static inline void itlb_data_access_write(int tlb, size_t entry,
-	uint64_t value)
-{
-	itlb_data_access_addr_t reg;
-	
-	reg.value = 0;
-	reg.tlb_number = tlb;
-	reg.local_tlb_entry = entry;
-	asi_u64_write(ASI_ITLB_DATA_ACCESS_REG, reg.value, value);
-	flush_pipeline();
-}
-
-/** Read DMMU TLB Data Access Register.
- *
- * @param tlb		TLB number (one of TLB_DSMALL, TLB_DBIG, TLB_DBIG) 
- * @param entry		TLB Entry index.
- *
- * @return		Current value of specified DMMU TLB Data Access
- * 			Register.
- */
-static inline uint64_t dtlb_data_access_read(int tlb, size_t entry)
-{
-	dtlb_data_access_addr_t reg;
-	
-	reg.value = 0;
-	reg.tlb_number = tlb;
-	reg.local_tlb_entry = entry;
-	return asi_u64_read(ASI_DTLB_DATA_ACCESS_REG, reg.value);
-}
-
-/** Write DMMU TLB Data Access Register.
- *
- * @param tlb		TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1)  
- * @param entry		TLB Entry index.
- * @param value		Value to be written.
- */
-static inline void dtlb_data_access_write(int tlb, size_t entry,
-	uint64_t value)
-{
-	dtlb_data_access_addr_t reg;
-	
-	reg.value = 0;
-	reg.tlb_number = tlb;
-	reg.local_tlb_entry = entry;
-	asi_u64_write(ASI_DTLB_DATA_ACCESS_REG, reg.value, value);
-	membar();
-}
-
-/** Read IMMU TLB Tag Read Register.
- *
- * @param tlb		TLB number (one of TLB_ISMALL or TLB_IBIG) 
- * @param entry		TLB Entry index.
- *
- * @return		Current value of specified IMMU TLB Tag Read Register.
- */
-static inline uint64_t itlb_tag_read_read(int tlb, size_t entry)
-{
-	itlb_tag_read_addr_t tag;
-
-	tag.value = 0;
-	tag.tlb_number = tlb;
-	tag.local_tlb_entry = entry;
-	return asi_u64_read(ASI_ITLB_TAG_READ_REG, tag.value);
-}
-
-/** Read DMMU TLB Tag Read Register.
- *
- * @param tlb		TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1)
- * @param entry		TLB Entry index.
- *
- * @return		Current value of specified DMMU TLB Tag Read Register.
- */
-static inline uint64_t dtlb_tag_read_read(int tlb, size_t entry)
-{
-	dtlb_tag_read_addr_t tag;
-
-	tag.value = 0;
-	tag.tlb_number = tlb;
-	tag.local_tlb_entry = entry;
-	return asi_u64_read(ASI_DTLB_TAG_READ_REG, tag.value);
-}
-
-#endif
-
-
-/** Write IMMU TLB Tag Access Register.
- *
- * @param v		Value to be written.
- */
-static inline void itlb_tag_access_write(uint64_t v)
-{
-	asi_u64_write(ASI_IMMU, VA_IMMU_TAG_ACCESS, v);
-	flush_pipeline();
-}
-
-/** Read IMMU TLB Tag Access Register.
- *
- * @return		Current value of IMMU TLB Tag Access Register.
- */
-static inline uint64_t itlb_tag_access_read(void)
-{
-	return asi_u64_read(ASI_IMMU, VA_IMMU_TAG_ACCESS);
-}
-
-/** Write DMMU TLB Tag Access Register.
- *
- * @param v		Value to be written.
- */
-static inline void dtlb_tag_access_write(uint64_t v)
-{
-	asi_u64_write(ASI_DMMU, VA_DMMU_TAG_ACCESS, v);
-	membar();
-}
-
-/** Read DMMU TLB Tag Access Register.
- *
- * @return 		Current value of DMMU TLB Tag Access Register.
- */
-static inline uint64_t dtlb_tag_access_read(void)
-{
-	return asi_u64_read(ASI_DMMU, VA_DMMU_TAG_ACCESS);
-}
-
-
-/** Write IMMU TLB Data in Register.
- *
- * @param v		Value to be written.
- */
-static inline void itlb_data_in_write(uint64_t v)
-{
-	asi_u64_write(ASI_ITLB_DATA_IN_REG, 0, v);
-	flush_pipeline();
-}
-
-/** Write DMMU TLB Data in Register.
- *
- * @param v		Value to be written.
- */
-static inline void dtlb_data_in_write(uint64_t v)
-{
-	asi_u64_write(ASI_DTLB_DATA_IN_REG, 0, v);
-	membar();
-}
-
-/** Read ITLB Synchronous Fault Status Register.
- *
- * @return		Current content of I-SFSR register.
- */
-static inline uint64_t itlb_sfsr_read(void)
-{
-	return asi_u64_read(ASI_IMMU, VA_IMMU_SFSR);
-}
-
-/** Write ITLB Synchronous Fault Status Register.
- *
- * @param v		New value of I-SFSR register.
- */
-static inline void itlb_sfsr_write(uint64_t v)
-{
-	asi_u64_write(ASI_IMMU, VA_IMMU_SFSR, v);
-	flush_pipeline();
-}
-
-/** Read DTLB Synchronous Fault Status Register.
- *
- * @return		Current content of D-SFSR register.
- */
-static inline uint64_t dtlb_sfsr_read(void)
-{
-	return asi_u64_read(ASI_DMMU, VA_DMMU_SFSR);
-}
-
-/** Write DTLB Synchronous Fault Status Register.
- *
- * @param v		New value of D-SFSR register.
- */
-static inline void dtlb_sfsr_write(uint64_t v)
-{
-	asi_u64_write(ASI_DMMU, VA_DMMU_SFSR, v);
-	membar();
-}
-
-/** Read DTLB Synchronous Fault Address Register.
- *
- * @return		Current content of D-SFAR register.
- */
-static inline uint64_t dtlb_sfar_read(void)
-{
-	return asi_u64_read(ASI_DMMU, VA_DMMU_SFAR);
-}
-
-/** Perform IMMU TLB Demap Operation.
- *
- * @param type		Selects between context and page demap (and entire MMU
- * 			demap on US3).
- * @param context_encoding Specifies which Context register has Context ID for
- * 			demap.
- * @param page		Address which is on the page to be demapped.
- */
-static inline void itlb_demap(int type, int context_encoding, uintptr_t page)
-{
-	tlb_demap_addr_t da;
-	page_address_t pg;
-	
-	da.value = 0;
-	pg.address = page;
-	
-	da.type = type;
-	da.context = context_encoding;
-	da.vpn = pg.vpn;
-	
-	/* da.value is the address within the ASI */ 
-	asi_u64_write(ASI_IMMU_DEMAP, da.value, 0);
-
-	flush_pipeline();
-}
-
-/** Perform DMMU TLB Demap Operation.
- *
- * @param type		Selects between context and page demap (and entire MMU
- * 			demap on US3).
- * @param context_encoding Specifies which Context register has Context ID for
- * 			demap.
- * @param page		Address which is on the page to be demapped.
- */
-static inline void dtlb_demap(int type, int context_encoding, uintptr_t page)
-{
-	tlb_demap_addr_t da;
-	page_address_t pg;
-	
-	da.value = 0;
-	pg.address = page;
-	
-	da.type = type;
-	da.context = context_encoding;
-	da.vpn = pg.vpn;
-	
-	/* da.value is the address within the ASI */ 
-	asi_u64_write(ASI_DMMU_DEMAP, da.value, 0);
-
-	membar();
-}
-
-extern void fast_instruction_access_mmu_miss(unative_t, istate_t *);
-extern void fast_data_access_mmu_miss(tlb_tag_access_reg_t, istate_t *);
-extern void fast_data_access_protection(tlb_tag_access_reg_t , istate_t *);
-
-extern void dtlb_insert_mapping(uintptr_t, uintptr_t, int, bool, bool);
-
-extern void dump_sfsr_and_sfar(void);
-
-#endif /* !def __ASM__ */
 
 #endif
Index: kernel/arch/sparc64/include/mm/tte.h
===================================================================
--- kernel/arch/sparc64/include/mm/tte.h	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ kernel/arch/sparc64/include/mm/tte.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -36,66 +36,9 @@
 #define KERN_sparc64_TTE_H_
 
-#define TTE_G		(1 << 0)
-#define TTE_W		(1 << 1)
-#define TTE_P		(1 << 2)
-#define TTE_E		(1 << 3)
-#define TTE_CV		(1 << 4)
-#define TTE_CP		(1 << 5)
-#define TTE_L		(1 << 6)
-
-#define TTE_V_SHIFT	63
-#define TTE_SIZE_SHIFT	61
-
-#ifndef __ASM__
-
-#include <arch/types.h>
-
-/* TTE tag's VA_tag field contains bits <63:VA_TAG_PAGE_SHIFT> of the VA */
-#define VA_TAG_PAGE_SHIFT	22
-
-/** Translation Table Entry - Tag. */
-union tte_tag {
-	uint64_t value;
-	struct {
-		unsigned g : 1;		/**< Global. */
-		unsigned : 2;		/**< Reserved. */
-		unsigned context : 13;	/**< Context identifier. */
-		unsigned : 6;		/**< Reserved. */
-		uint64_t va_tag : 42;	/**< Virtual Address Tag, bits 63:22. */
-	} __attribute__ ((packed));
-};
-
-typedef union tte_tag tte_tag_t;
-
-/** Translation Table Entry - Data. */
-union tte_data {
-	uint64_t value;
-	struct {
-		unsigned v : 1;		/**< Valid. */
-		unsigned size : 2;	/**< Page size of this entry. */
-		unsigned nfo : 1;	/**< No-Fault-Only. */
-		unsigned ie : 1;	/**< Invert Endianness. */
-		unsigned soft2 : 9;	/**< Software defined field. */
-#if defined (US)
-		unsigned diag : 9;	/**< Diagnostic data. */
-		unsigned pfn : 28;	/**< Physical Address bits, bits 40:13. */
-#elif defined (US3)
-		unsigned : 7;		/**< Reserved. */
-		unsigned pfn : 30;	/**< Physical Address bits, bits 42:13 */
+#if defined (SUN4U)
+#include <arch/mm/sun4u/tte.h>
+#elif defined (SUN4V)
+#include <arch/mm/sun4v/tte.h>
 #endif
-		unsigned soft : 6;	/**< Software defined field. */
-		unsigned l : 1;		/**< Lock. */
-		unsigned cp : 1;	/**< Cacheable in physically indexed cache. */
-		unsigned cv : 1;	/**< Cacheable in virtually indexed cache. */
-		unsigned e : 1;		/**< Side-effect. */
-		unsigned p : 1;		/**< Privileged. */
-		unsigned w : 1;		/**< Writable. */
-		unsigned g : 1;		/**< Global. */
-	} __attribute__ ((packed));
-};
-
-typedef union tte_data tte_data_t;
-
-#endif /* !def __ASM__ */
 
 #endif
Index: kernel/arch/sparc64/include/sun4u/arch.h
===================================================================
--- kernel/arch/sparc64/include/sun4u/arch.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/sun4u/arch.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64	
+ * @{
+ */
+/**
+ * @file
+ * @brief	Various sun4u-specific macros.
+ */
+
+#ifndef KERN_sparc64_sun4u_ARCH_H_
+#define KERN_sparc64_sun4u_ARCH_H_
+
+#define ASI_NUCLEUS_QUAD_LDD	0x24	/** ASI for 16-byte atomic loads. */
+#define ASI_DCACHE_TAG		0x47	/** ASI D-Cache Tag. */
+#define ASI_ICBUS_CONFIG	0x4a	/** ASI of the UPA_CONFIG/FIREPLANE_CONFIG register. */
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/sun4u/asm.h
===================================================================
--- kernel/arch/sparc64/include/sun4u/asm.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/sun4u/asm.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64	
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_sparc64_sun4u_ASM_H_
+#define KERN_sparc64_sun4u_ASM_H_
+
+extern uint64_t read_from_ag_g7(void);
+extern void write_to_ag_g6(uint64_t val);
+extern void write_to_ag_g7(uint64_t val);
+extern void write_to_ig_g6(uint64_t val);
+
+
+/** Read Version Register.
+ *
+ * @return Value of VER register.
+ */
+static inline uint64_t ver_read(void)
+{
+	uint64_t v;
+	
+	asm volatile ("rdpr %%ver, %0\n" : "=r" (v));
+	
+	return v;
+}
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/sun4u/cpu.h
===================================================================
--- kernel/arch/sparc64/include/sun4u/cpu.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/sun4u/cpu.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_sparc64_sun4u_CPU_H_
+#define KERN_sparc64_sun4u_CPU_H_
+
+#define MANUF_FUJITSU		0x04
+#define MANUF_ULTRASPARC	0x17	/**< UltraSPARC I, UltraSPARC II */
+#define MANUF_SUN		0x3e
+
+#define IMPL_ULTRASPARCI	0x10
+#define IMPL_ULTRASPARCII	0x11
+#define IMPL_ULTRASPARCII_I	0x12
+#define IMPL_ULTRASPARCII_E	0x13
+#define IMPL_ULTRASPARCIII	0x14
+#define IMPL_ULTRASPARCIII_PLUS	0x15
+#define IMPL_ULTRASPARCIII_I	0x16
+#define IMPL_ULTRASPARCIV	0x18
+#define IMPL_ULTRASPARCIV_PLUS	0x19
+
+#define IMPL_SPARC64V		0x5
+
+#ifndef __ASM__
+
+#include <arch/types.h>
+#include <typedefs.h>
+#include <arch/register.h>
+#include <arch/regdef.h>
+#include <arch/asm.h>
+
+#ifdef CONFIG_SMP
+#include <arch/mm/cache.h>
+#endif
+
+typedef struct {
+	uint32_t mid;			/**< Processor ID as read from
+					     UPA_CONFIG/FIREPLANE_CONFIG. */
+	ver_reg_t ver;
+	uint32_t clock_frequency;	/**< Processor frequency in Hz. */
+	uint64_t next_tick_cmpr;	/**< Next clock interrupt should be
+					     generated when the TICK register
+					     matches this value. */
+} cpu_arch_t;
+
+
+/**
+ * Reads the module ID (agent ID/CPUID) of the current CPU.
+ */
+static inline uint32_t read_mid(void)
+{
+	uint64_t icbus_config = asi_u64_read(ASI_ICBUS_CONFIG, 0);
+	icbus_config = icbus_config >> ICBUS_CONFIG_MID_SHIFT;
+#if defined (US)
+	return icbus_config & 0x1f;
+#elif defined (US3)
+	if (((ver_reg_t) ver_read()).impl == IMPL_ULTRASPARCIII_I)
+		return icbus_config & 0x1f;
+	else
+		return icbus_config & 0x3ff;
+#endif
+}
+
+#endif	
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/sun4v/arch.h
===================================================================
--- kernel/arch/sparc64/include/sun4v/arch.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/sun4v/arch.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2009 Pavel Rimsky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64	
+ * @{
+ */
+/**
+ * @file
+ * @brief	Various sun4v-specific macros.
+ */
+
+#ifndef KERN_sparc64_sun4v_ARCH_H_
+#define KERN_sparc64_sun4v_ARCH_H_
+
+/* scratch pad registers ASI */
+#define	ASI_SCRATCHPAD		0x20
+
+/*
+ * Assignment of scratchpad register virtual addresses. The same convention is
+ * used by both Linux and Solaris.
+ */
+
+/* register where the address of the MMU fault status area will be stored */
+#define SCRATCHPAD_MMU_FSA	0x00	
+
+/* register where the CPUID will be stored */
+#define SCRATCHPAD_CPUID	0x08
+
+/* register where the kernel stack address will be stored */
+#define SCRATCHPAD_KSTACK	0x10
+
+/* register where the userspace window buffer address will be stored */
+#define SCRATCHPAD_WBUF		0x18
+
+//MH - remove when cpu.h is forked
+#define ASI_NUCLEUS_QUAD_LDD	0x24	/**< ASI for 16-byte atomic loads. */
+#define ASI_DCACHE_TAG		0x47	/**< ASI D-Cache Tag. */
+#define ASI_ICBUS_CONFIG	0x4a	/**< ASI of the UPA_CONFIG/FIREPLANE_CONFIG register. */
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/sun4v/asm.h
===================================================================
--- kernel/arch/sparc64/include/sun4v/asm.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/sun4v/asm.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64	
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_sparc64_sun4v_ASM_H_
+#define KERN_sparc64_sun4v_ASM_H_
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/sun4v/cpu.h
===================================================================
--- kernel/arch/sparc64/include/sun4v/cpu.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/sun4v/cpu.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_sparc64_sun4v_CPU_H_
+#define KERN_sparc64_sun4v_CPU_H_
+
+/** Maximum number of virtual processors. */
+#define MAX_NUM_STRANDS		64
+
+/** Maximum number of logical processors in a processor core */
+#define MAX_CORE_STRANDS	8
+
+#ifndef __ASM__
+
+struct cpu;
+
+/*
+typedef struct {
+	uint64_t exec_unit_id;
+	uint8_t strand_count;
+	uint64_t cpuids[MAX_CORE_STRANDS];
+	struct cpu *cpus[MAX_CORE_STRANDS];
+	atomic_t nrdy;
+	SPINLOCK_DECLARE(proposed_nrdy_lock);
+} exec_unit_t;
+*/
+
+typedef struct cpu_arch {
+	uint64_t id;			/**< virtual processor ID */
+	uint32_t clock_frequency;	/**< Processor frequency in Hz. */
+	uint64_t next_tick_cmpr;	/**< Next clock interrupt should be
+					     generated when the TICK register
+					     matches this value. */
+	//exec_unit_t *exec_unit;		/**< Physical core. */
+	//unsigned long proposed_nrdy;	/**< Proposed No. of ready threads
+	//				     so that cores are equally balanced. */
+} cpu_arch_t;
+
+#endif	
+
+#ifdef __ASM__
+
+#endif
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/sun4v/hypercall.h
===================================================================
--- kernel/arch/sparc64/include/sun4v/hypercall.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/sun4v/hypercall.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2008 Pavel Rimsky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64	
+ * @{
+ */
+/**
+ * @file
+ * @brief	Macros, constants and functions needed to perform a call to the
+ * 		hypervisor API. For details and terminology see this document:
+ *		UltraSPARC Virtual Machine Specification (The Hypervisor API
+ *		specification for Logical Domains).
+ *
+ */
+
+#ifndef KERN_sparc64_sun4v_HYPERCALL_H_
+#define KERN_sparc64_sun4v_HYPERCALL_H_
+
+/* SW trap numbers for hyperfast traps */
+#define FAST_TRAP		0x80
+#define MMU_MAP_ADDR		0x83
+#define MMU_UNMAP_ADDR		0x84
+
+/* function codes for fast traps */
+#define MACH_DESC		0x01
+#define CPU_START		0x10
+#define CPU_STOP		0x11
+#define CPU_YIELD		0x12
+#define CPU_QCONF		0x14
+#define CPU_MYID		0x16
+#define CPU_STATE		0x17
+#define CPU_SET_RTBA		0x18
+#define CPU_GET_RTBA		0x19
+#define MMU_TSB_CTX0		0x20
+#define MMU_TSB_CTXNON0		0x21
+#define MMU_DEMAP_PAGE		0x22
+#define MMU_DEMAP_CTX		0x23
+#define MMU_DEMAP_ALL		0x24
+#define MMU_MAP_PERM_ADDR	0x25
+#define MMU_FAULT_AREA_CONF	0x26
+#define MMU_ENABLE		0x27
+#define MMU_UNMAP_PERM_ADDR	0x28
+#define MMU_TSB_CTX0_INFO	0x29
+#define MMU_TSB_CTXNON0_INFO	0x2a
+#define MMU_FAULT_AREA_INFO	0x2b
+#define CPU_MONDO_SEND		0x42
+#define CONS_GETCHAR		0x60
+#define CONS_PUTCHAR		0x61
+
+
+/* return codes */
+#define EOK		0	/**< Successful return */
+#define ENOCPU		1	/**< Invalid CPU id */
+#define ENORADDR	2	/**< Invalid real address */
+#define ENOINTR		3	/**< Invalid interrupt id */
+#define EBADPGSZ	4	/**< Invalid pagesize encoding */
+#define EBADTSB		5	/**< Invalid TSB description */
+#define	EINVAL		6	/**< Invalid argument */
+#define EBADTRAP	7	/**< Invalid function number */
+#define EBADALIGN	8	/**< Invalid address alignment */
+#define EWOULDBLOCK	9	/**< Cannot complete operation without blocking */
+#define ENOACCESS	10	/**< No access to specified resource */
+#define EIO		11	/**< I/O Error */
+#define ECPUERROR	12	/**< CPU is in error state */
+#define ENOTSUPPORTED	13	/**< Function not supported */
+#define ENOMAP		14	/**< No mapping found */
+#define ETOOMANY	15	/**< Too many items specified / limit reached */
+#define ECHANNEL	16	/**< Invalid LDC channel */
+#define EBUSY		17	/**< Operation failed as resource is otherwise busy */
+
+
+/**
+ * Performs a fast hypervisor API call from the assembly language code.
+ * Expects the registers %o1-%o4 are properly filled with the arguments of the
+ * call.
+ *
+ * @param function_number	fast call function number
+ */
+#define __HYPERCALL_FAST(function_number) \
+	set function_number, %o5; \
+	ta FAST_TRAP;
+	
+/**
+ * Performs a hyperfast hypervisor API call from the assembly language code.
+ * Expects the registers %o1-%o4 are properly filled with the arguments of the
+ * call.
+ *
+ * @param sw_trap_number	software trap number
+ */
+#define __HYPERCALL_HYPERFAST(sw_trap_number) \
+	ta (sw_trap_number);
+
+
+#ifndef __ASM__
+
+#include <typedefs.h>
+#include <arch/types.h>
+
+/*
+ * Macros to be used from the C-language code; __hypercall_fastN performs
+ * a fast hypervisor API call taking exactly N arguments.
+ */
+
+#define __hypercall_fast0(function_number) \
+	__hypercall_fast(0, 0, 0, 0, 0, function_number)
+#define __hypercall_fast1(function_number, p1) \
+	__hypercall_fast(p1, 0, 0, 0, 0, function_number)
+#define __hypercall_fast2(function_number, p1, p2) \
+	__hypercall_fast(p1, p2, 0, 0, 0, function_number)
+#define __hypercall_fast3(function_number, p1, p2, p3) \
+	__hypercall_fast(p1, p2, p3, 0, 0, function_number)
+#define __hypercall_fast4(function_number, p1, p2, p3, p4) \
+	__hypercall_fast(p1, p2, p3, p4, 0, function_number)
+#define __hypercall_fast5(function_number, p1, p2, p3, p4, p5) \
+	__hypercall_fast(p1, p2, p3, p4, p5, function_number)
+
+/**
+ * Performs a fast hypervisor API call which returns no value except for the
+ * error status.
+ *
+ * @param p1			the 1st argument of the hypervisor API call
+ * @param p2			the 2nd argument of the hypervisor API call
+ * @param p3			the 3rd argument of the hypervisor API call
+ * @param p4			the 4th argument of the hypervisor API call
+ * @param p5			the 5th argument of the hypervisor API call
+ * @param function_number	function number of the call
+ * @return			error status
+ */
+static inline uint64_t
+__hypercall_fast(const uint64_t p1, const uint64_t p2, const uint64_t p3,
+    const uint64_t p4, const uint64_t p5, const uint64_t function_number)
+{
+	register uint64_t a6 asm("o5") = function_number;
+	register uint64_t a1 asm("o0") = p1;
+	register uint64_t a2 asm("o1") = p2;
+	register uint64_t a3 asm("o2") = p3;
+	register uint64_t a4 asm("o3") = p4;
+	register uint64_t a5 asm("o4") = p5;
+
+	asm volatile (
+		"ta %7\n"
+		: "=r" (a1)
+		: "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6),
+		  "i" (FAST_TRAP)
+		: "memory"
+	);
+
+	return a1;
+}
+
+/**
+ * Performs a fast hypervisor API call which can return a value.
+ *
+ * @param p1			the 1st argument of the hypervisor API call
+ * @param p2			the 2nd argument of the hypervisor API call
+ * @param p3			the 3rd argument of the hypervisor API call
+ * @param p4			the 4th argument of the hypervisor API call
+ * @param p5			the 5th argument of the hypervisor API call
+ * @param function_number	function number of the call
+ * @param ret1			pointer to an address where the return value
+ * 				of the hypercall should be saved, or NULL
+ * @return			error status
+ */
+static inline uint64_t
+__hypercall_fast_ret1(const uint64_t p1, const uint64_t p2, const uint64_t p3,
+    const uint64_t p4, const uint64_t p5, const uint64_t function_number,
+    uint64_t * const ret1)
+{
+	uint64_t errno = __hypercall_fast(p1, p2, p3, p4, p5, function_number);
+	if (ret1 != NULL) {
+		asm volatile ("mov %%o1, %0\n" : "=r" (*ret1));
+	}
+	return errno;
+}
+
+/**
+ * Performs a hyperfast hypervisor API call.
+ *
+ * @param p1			the 1st argument of the hypervisor API call
+ * @param p2			the 2nd argument of the hypervisor API call
+ * @param p3			the 3rd argument of the hypervisor API call
+ * @param p4			the 4th argument of the hypervisor API call
+ * @param p5			the 5th argument of the hypervisor API call
+ * @param sw_trap_number	software trap number
+ */
+static inline uint64_t
+__hypercall_hyperfast(const uint64_t p1, const uint64_t p2, const uint64_t p3,
+    const uint64_t p4, const uint64_t p5, const uint64_t sw_trap_number)
+{
+	register uint64_t a1 asm("o0") = p1;
+	register uint64_t a2 asm("o1") = p2;
+	register uint64_t a3 asm("o2") = p3;
+	register uint64_t a4 asm("o3") = p4;
+	register uint64_t a5 asm("o4") = p5;
+
+	asm volatile (
+		"ta %6\n"
+		: "=r" (a1)
+		: "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5),
+		  "i" (sw_trap_number)
+		: "memory"
+	);
+	
+	return a1;
+}
+
+#endif /* !__ASM__ */
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/sun4v/ipi.h
===================================================================
--- kernel/arch/sparc64/include/sun4v/ipi.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/sun4v/ipi.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2009 Pavel Rimsky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64	
+ * @{
+ */
+/**
+ * @file
+ * @brief	sun4v-specific IPI functions
+ */
+
+#ifndef KERN_sparc64_sun4v_IPI_H_
+#define KERN_sparc64_sun4v_IPI_H_
+
+uint64_t ipi_brodcast_to(void (*func)(void), uint16_t cpu_list[MAX_NUM_STRANDS],
+		uint64_t list_size);
+uint64_t ipi_unicast_to(void (*func)(void), uint16_t cpu_id);
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/sun4v/md.h
===================================================================
--- kernel/arch/sparc64/include/sun4v/md.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/sun4v/md.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2009 Pavel Rimsky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_sparc64_sun4v_MD_H_
+#define KERN_sparc64_sun4v_MD_H_
+
+#include <typedefs.h>
+
+/**
+ * Data type used to iterate through MD nodes. Internally represented as
+ * an index to the first element of the node.
+ */
+typedef unsigned int md_node_t;
+
+/** used to iterate over children of a given node */
+typedef unsigned int md_child_iter_t;
+
+md_node_t md_get_root(void);
+md_node_t md_get_child(md_node_t node, char *name);
+md_child_iter_t md_get_child_iterator(md_node_t node);
+bool md_next_child(md_child_iter_t *it);
+md_node_t md_get_child_node(md_child_iter_t it);
+const char *md_get_node_name(md_node_t node);
+bool md_get_integer_property(md_node_t node, const char *key,
+	uint64_t *result);
+bool md_get_string_property(md_node_t node, const char *key,
+	const char **result);
+bool md_next_node(md_node_t *node, const char *name);
+void md_init(void);
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/sun4v/regdef.h
===================================================================
--- kernel/arch/sparc64/include/sun4v/regdef.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/sun4v/regdef.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * Copyright (c) 2008 Pavel Rimsky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64	
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_sparc64_sun4v_REGDEF_H_
+#define KERN_sparc64_sun4v_REGDEF_H_
+
+#define PSTATE_IE_BIT	(1 << 1)
+#define PSTATE_PRIV_BIT	(1 << 2)
+#define PSTATE_PEF_BIT	(1 << 4)
+
+#define TSTATE_PSTATE_SHIFT	8
+#define TSTATE_PRIV_BIT		(PSTATE_PRIV_BIT << TSTATE_PSTATE_SHIFT)
+#define TSTATE_CWP_MASK		0x1f
+#define TSTATE_IE_BIT		(PSTATE_IE_BIT << TSTATE_PSTATE_SHIFT)
+
+#define WSTATE_NORMAL(n)	(n)
+#define WSTATE_OTHER(n)		((n) << 3)
+
+#define TSTATE_PEF_BIT		(PSTATE_PEF_BIT << TSTATE_PSTATE_SHIFT)
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/sun4v/register.h
===================================================================
--- kernel/arch/sparc64/include/sun4v/register.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/sun4v/register.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64	
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_sparc64_sun4v_REGISTER_H_
+#define KERN_sparc64_sun4v_REGISTER_H_
+
+#include <arch/regdef.h>
+#include <arch/types.h>
+
+/** Processor State Register. */
+union pstate_reg {
+	uint64_t value;
+	struct {
+		uint64_t : 54;
+		unsigned cle : 1;	/**< Current Little Endian. */
+		unsigned tle : 1;	/**< Trap Little Endian. */
+		unsigned mm : 2;	/**< Memory Model. */
+		unsigned : 1;		/**< RED state. */
+		unsigned pef : 1;	/**< Enable floating-point. */
+		unsigned am : 1;	/**< 32-bit Address Mask. */
+		unsigned priv : 1;	/**< Privileged Mode. */
+		unsigned ie : 1;	/**< Interrupt Enable. */
+		unsigned : 1;
+	} __attribute__ ((packed));
+};
+typedef union pstate_reg pstate_reg_t;
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/trap/mmu.h
===================================================================
--- kernel/arch/sparc64/include/trap/mmu.h	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ kernel/arch/sparc64/include/trap/mmu.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -38,144 +38,9 @@
 #define KERN_sparc64_MMU_TRAP_H_
 
-#include <arch/stack.h>
-#include <arch/regdef.h>
-#include <arch/mm/tlb.h>
-#include <arch/mm/mmu.h>
-#include <arch/mm/tte.h>
-#include <arch/trap/regwin.h>
-
-#ifdef CONFIG_TSB
-#include <arch/mm/tsb.h>
+#if defined (SUN4U)
+#include <arch/trap/sun4u/mmu.h>
+#elif defined (SUN4V)
+#include <arch/trap/sun4v/mmu.h>
 #endif
-
-#define TT_FAST_INSTRUCTION_ACCESS_MMU_MISS	0x64
-#define TT_FAST_DATA_ACCESS_MMU_MISS		0x68
-#define TT_FAST_DATA_ACCESS_PROTECTION		0x6c
-
-#define FAST_MMU_HANDLER_SIZE			128
-
-#ifdef __ASM__
-
-.macro FAST_INSTRUCTION_ACCESS_MMU_MISS_HANDLER
-	/*
-	 * First, try to refill TLB from TSB.
-	 */
-#ifdef CONFIG_TSB
-	ldxa [%g0] ASI_IMMU, %g1			! read TSB Tag Target Register
-	ldxa [%g0] ASI_IMMU_TSB_8KB_PTR_REG, %g2	! read TSB 8K Pointer
-	ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4		! 16-byte atomic load into %g4 and %g5
-	cmp %g1, %g4					! is this the entry we are looking for?
-	bne,pn %xcc, 0f
-	nop
-	stxa %g5, [%g0] ASI_ITLB_DATA_IN_REG		! copy mapping from ITSB to ITLB
-	retry
-#endif
-
-0:
-	wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate
-	PREEMPTIBLE_HANDLER fast_instruction_access_mmu_miss
-.endm
-
-.macro FAST_DATA_ACCESS_MMU_MISS_HANDLER tl
-	/*
-	 * First, try to refill TLB from TSB.
-	 */
-
-#ifdef CONFIG_TSB
-	ldxa [%g0] ASI_DMMU, %g1			! read TSB Tag Target Register
-	srlx %g1, TSB_TAG_TARGET_CONTEXT_SHIFT, %g2	! is this a kernel miss?
-	brz,pn %g2, 0f
-	ldxa [%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g3	! read TSB 8K Pointer
-	ldda [%g3] ASI_NUCLEUS_QUAD_LDD, %g4		! 16-byte atomic load into %g4 and %g5
-	cmp %g1, %g4					! is this the entry we are looking for?
-	bne,pn %xcc, 0f
-	nop
-	stxa %g5, [%g0] ASI_DTLB_DATA_IN_REG		! copy mapping from DTSB to DTLB
-	retry
-#endif
-
-	/*
-	 * Second, test if it is the portion of the kernel address space
-	 * which is faulting. If that is the case, immediately create
-	 * identity mapping for that page in DTLB. VPN 0 is excluded from
-	 * this treatment.
-	 *
-	 * Note that branch-delay slots are used in order to save space.
-	 */
-0:
-	sethi %hi(fast_data_access_mmu_miss_data_hi), %g7
-	wr %g0, ASI_DMMU, %asi
-	ldxa [VA_DMMU_TAG_ACCESS] %asi, %g1		! read the faulting Context and VPN
-	set TLB_TAG_ACCESS_CONTEXT_MASK, %g2
-	andcc %g1, %g2, %g3				! get Context
-	bnz %xcc, 0f					! Context is non-zero
-	andncc %g1, %g2, %g3				! get page address into %g3
-	bz  %xcc, 0f					! page address is zero
-	ldx [%g7 + %lo(end_of_identity)], %g4
-	cmp %g3, %g4
-	bgeu %xcc, 0f
-
-	ldx [%g7 + %lo(kernel_8k_tlb_data_template)], %g2
-	add %g3, %g2, %g2
-	stxa %g2, [%g0] ASI_DTLB_DATA_IN_REG		! identity map the kernel page
-	retry
-
-	/*
-	 * Third, catch and handle special cases when the trap is caused by
-	 * the userspace register window spill or fill handler. In case
-	 * one of these two traps caused this trap, we just lower the trap
-	 * level and service the DTLB miss. In the end, we restart
-	 * the offending SAVE or RESTORE.
-	 */
-0:
-.if (\tl > 0)
-	wrpr %g0, 1, %tl
-.endif
-
-	/*
-	 * Switch from the MM globals.
-	 */
-	wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate
-
-	/*
-	 * Read the Tag Access register for the higher-level handler.
-	 * This is necessary to survive nested DTLB misses.
-	 */	
-	ldxa [VA_DMMU_TAG_ACCESS] %asi, %g2
-
-	/*
-	 * g2 will be passed as an argument to fast_data_access_mmu_miss().
-	 */
-	PREEMPTIBLE_HANDLER fast_data_access_mmu_miss
-.endm
-
-.macro FAST_DATA_ACCESS_PROTECTION_HANDLER tl
-	/*
-	 * The same special case as in FAST_DATA_ACCESS_MMU_MISS_HANDLER.
-	 */
-
-.if (\tl > 0)
-	wrpr %g0, 1, %tl
-.endif
-
-	/*
-	 * Switch from the MM globals.
-	 */
-	wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate
-
-	/*
-	 * Read the Tag Access register for the higher-level handler.
-	 * This is necessary to survive nested DTLB misses.
-	 */	
-	mov VA_DMMU_TAG_ACCESS, %g2
-	ldxa [%g2] ASI_DMMU, %g2
-
-	/*
-	 * g2 will be passed as an argument to fast_data_access_mmu_miss().
-	 */
-	PREEMPTIBLE_HANDLER fast_data_access_protection
-.endm
-
-#endif /* __ASM__ */
 
 #endif
Index: kernel/arch/sparc64/include/trap/regwin.h
===================================================================
--- kernel/arch/sparc64/include/trap/regwin.h	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ kernel/arch/sparc64/include/trap/regwin.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -131,33 +131,4 @@
 
 /*
- * Macro used to spill userspace window to userspace window buffer.
- * It can be either triggered from preemptible_handler doing SAVE
- * at (TL=1) or from normal kernel code doing SAVE when OTHERWIN>0
- * at (TL=0).
- */
-.macro SPILL_TO_USPACE_WINDOW_BUFFER
-	stx %l0, [%g7 + L0_OFFSET]	
-	stx %l1, [%g7 + L1_OFFSET]
-	stx %l2, [%g7 + L2_OFFSET]
-	stx %l3, [%g7 + L3_OFFSET]
-	stx %l4, [%g7 + L4_OFFSET]
-	stx %l5, [%g7 + L5_OFFSET]
-	stx %l6, [%g7 + L6_OFFSET]
-	stx %l7, [%g7 + L7_OFFSET]
-	stx %i0, [%g7 + I0_OFFSET]
-	stx %i1, [%g7 + I1_OFFSET]
-	stx %i2, [%g7 + I2_OFFSET]
-	stx %i3, [%g7 + I3_OFFSET]
-	stx %i4, [%g7 + I4_OFFSET]
-	stx %i5, [%g7 + I5_OFFSET]
-	stx %i6, [%g7 + I6_OFFSET]
-	stx %i7, [%g7 + I7_OFFSET]
-	add %g7, STACK_WINDOW_SAVE_AREA_SIZE, %g7
-	saved
-	retry
-.endm
-
-
-/*
  * Macro used by the nucleus and the primary context 0 during normal fills.
  */
@@ -232,6 +203,12 @@
 #endif /* __ASM__ */
 
+#if defined (SUN4U)
+#include <arch/trap/sun4u/regwin.h>
+#elif defined (SUN4V)
+#include <arch/trap/sun4v/regwin.h>
 #endif
 
+#endif
+
 /** @}
  */
Index: kernel/arch/sparc64/include/trap/sun4u/mmu.h
===================================================================
--- kernel/arch/sparc64/include/trap/sun4u/mmu.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/trap/sun4u/mmu.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2006 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64interrupt
+ * @{
+ */
+/**
+ * @file
+ * @brief This file contains fast MMU trap handlers.
+ */
+
+#ifndef KERN_sparc64_SUN4U_MMU_TRAP_H_
+#define KERN_sparc64_SUN4U_MMU_TRAP_H_
+
+#include <arch/stack.h>
+#include <arch/regdef.h>
+#include <arch/mm/tlb.h>
+#include <arch/mm/mmu.h>
+#include <arch/mm/tte.h>
+#include <arch/trap/regwin.h>
+
+#ifdef CONFIG_TSB
+#include <arch/mm/tsb.h>
+#endif
+
+#define TT_FAST_INSTRUCTION_ACCESS_MMU_MISS	0x64
+#define TT_FAST_DATA_ACCESS_MMU_MISS		0x68
+#define TT_FAST_DATA_ACCESS_PROTECTION		0x6c
+
+#define FAST_MMU_HANDLER_SIZE			128
+
+#ifdef __ASM__
+
+.macro FAST_INSTRUCTION_ACCESS_MMU_MISS_HANDLER
+	/*
+	 * First, try to refill TLB from TSB.
+	 */
+#ifdef CONFIG_TSB
+	ldxa [%g0] ASI_IMMU, %g1			! read TSB Tag Target Register
+	ldxa [%g0] ASI_IMMU_TSB_8KB_PTR_REG, %g2	! read TSB 8K Pointer
+	ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4		! 16-byte atomic load into %g4 and %g5
+	cmp %g1, %g4					! is this the entry we are looking for?
+	bne,pn %xcc, 0f
+	nop
+	stxa %g5, [%g0] ASI_ITLB_DATA_IN_REG		! copy mapping from ITSB to ITLB
+	retry
+#endif
+
+0:
+	wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate
+	PREEMPTIBLE_HANDLER fast_instruction_access_mmu_miss
+.endm
+
+.macro FAST_DATA_ACCESS_MMU_MISS_HANDLER tl
+	/*
+	 * First, try to refill TLB from TSB.
+	 */
+
+#ifdef CONFIG_TSB
+	ldxa [%g0] ASI_DMMU, %g1			! read TSB Tag Target Register
+	srlx %g1, TSB_TAG_TARGET_CONTEXT_SHIFT, %g2	! is this a kernel miss?
+	brz,pn %g2, 0f
+	ldxa [%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g3	! read TSB 8K Pointer
+	ldda [%g3] ASI_NUCLEUS_QUAD_LDD, %g4		! 16-byte atomic load into %g4 and %g5
+	cmp %g1, %g4					! is this the entry we are looking for?
+	bne,pn %xcc, 0f
+	nop
+	stxa %g5, [%g0] ASI_DTLB_DATA_IN_REG		! copy mapping from DTSB to DTLB
+	retry
+#endif
+
+	/*
+	 * Second, test if it is the portion of the kernel address space
+	 * which is faulting. If that is the case, immediately create
+	 * identity mapping for that page in DTLB. VPN 0 is excluded from
+	 * this treatment.
+	 *
+	 * Note that branch-delay slots are used in order to save space.
+	 */
+0:
+	sethi %hi(fast_data_access_mmu_miss_data_hi), %g7
+	wr %g0, ASI_DMMU, %asi
+	ldxa [VA_DMMU_TAG_ACCESS] %asi, %g1		! read the faulting Context and VPN
+	set TLB_TAG_ACCESS_CONTEXT_MASK, %g2
+	andcc %g1, %g2, %g3				! get Context
+	bnz %xcc, 0f					! Context is non-zero
+	andncc %g1, %g2, %g3				! get page address into %g3
+	bz  %xcc, 0f					! page address is zero
+	ldx [%g7 + %lo(end_of_identity)], %g4
+	cmp %g3, %g4
+	bgeu %xcc, 0f
+
+	ldx [%g7 + %lo(kernel_8k_tlb_data_template)], %g2
+	add %g3, %g2, %g2
+	stxa %g2, [%g0] ASI_DTLB_DATA_IN_REG		! identity map the kernel page
+	retry
+
+	/*
+	 * Third, catch and handle special cases when the trap is caused by
+	 * the userspace register window spill or fill handler. In case
+	 * one of these two traps caused this trap, we just lower the trap
+	 * level and service the DTLB miss. In the end, we restart
+	 * the offending SAVE or RESTORE.
+	 */
+0:
+.if (\tl > 0)
+	wrpr %g0, 1, %tl
+.endif
+
+	/*
+	 * Switch from the MM globals.
+	 */
+	wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate
+
+	/*
+	 * Read the Tag Access register for the higher-level handler.
+	 * This is necessary to survive nested DTLB misses.
+	 */	
+	ldxa [VA_DMMU_TAG_ACCESS] %asi, %g2
+
+	/*
+	 * g2 will be passed as an argument to fast_data_access_mmu_miss().
+	 */
+	PREEMPTIBLE_HANDLER fast_data_access_mmu_miss
+.endm
+
+.macro FAST_DATA_ACCESS_PROTECTION_HANDLER tl
+	/*
+	 * The same special case as in FAST_DATA_ACCESS_MMU_MISS_HANDLER.
+	 */
+
+.if (\tl > 0)
+	wrpr %g0, 1, %tl
+.endif
+
+	/*
+	 * Switch from the MM globals.
+	 */
+	wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate
+
+	/*
+	 * Read the Tag Access register for the higher-level handler.
+	 * This is necessary to survive nested DTLB misses.
+	 */	
+	mov VA_DMMU_TAG_ACCESS, %g2
+	ldxa [%g2] ASI_DMMU, %g2
+
+	/*
+	 * g2 will be passed as an argument to fast_data_access_protection().
+	 */
+	PREEMPTIBLE_HANDLER fast_data_access_protection
+.endm
+
+#endif /* __ASM__ */
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/trap/sun4u/regwin.h
===================================================================
--- kernel/arch/sparc64/include/trap/sun4u/regwin.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/trap/sun4u/regwin.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64interrupt
+ * @{
+ */
+#ifndef KERN_sparc64_sun4u_REGWIN_H_
+#define KERN_sparc64_sun4u_REGWIN_H_
+
+#ifdef __ASM__
+
+/*
+ * Macro used to spill userspace window to userspace window buffer.
+ * It can be either triggered from preemptible_handler doing SAVE
+ * at (TL=1) or from normal kernel code doing SAVE when OTHERWIN>0
+ * at (TL=0).
+ */
+.macro SPILL_TO_USPACE_WINDOW_BUFFER
+	stx %l0, [%g7 + L0_OFFSET]	
+	stx %l1, [%g7 + L1_OFFSET]
+	stx %l2, [%g7 + L2_OFFSET]
+	stx %l3, [%g7 + L3_OFFSET]
+	stx %l4, [%g7 + L4_OFFSET]
+	stx %l5, [%g7 + L5_OFFSET]
+	stx %l6, [%g7 + L6_OFFSET]
+	stx %l7, [%g7 + L7_OFFSET]
+	stx %i0, [%g7 + I0_OFFSET]
+	stx %i1, [%g7 + I1_OFFSET]
+	stx %i2, [%g7 + I2_OFFSET]
+	stx %i3, [%g7 + I3_OFFSET]
+	stx %i4, [%g7 + I4_OFFSET]
+	stx %i5, [%g7 + I5_OFFSET]
+	stx %i6, [%g7 + I6_OFFSET]
+	stx %i7, [%g7 + I7_OFFSET]
+	add %g7, STACK_WINDOW_SAVE_AREA_SIZE, %g7
+	saved
+	retry
+.endm
+
+#endif
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/trap/sun4v/mmu.h
===================================================================
--- kernel/arch/sparc64/include/trap/sun4v/mmu.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/trap/sun4v/mmu.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2006 Jakub Jermar
+ * Copyright (c) 2008 Pavel Rimsky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64interrupt
+ * @{
+ */
+/**
+ * @file
+ * @brief This file contains fast MMU trap handlers.
+ */
+
+#ifndef KERN_sparc64_SUN4V_MMU_TRAP_H_
+#define KERN_sparc64_SUN4V_MMU_TRAP_H_
+
+#include <arch/stack.h>
+#include <arch/regdef.h>
+#include <arch/arch.h>
+#include <arch/sun4v/arch.h>
+#include <arch/sun4v/hypercall.h>
+#include <arch/mm/sun4v/mmu.h>
+#include <arch/mm/tlb.h>
+#include <arch/mm/mmu.h>
+#include <arch/mm/tte.h>
+#include <arch/trap/regwin.h>
+
+#ifdef CONFIG_TSB
+#include <arch/mm/tsb.h>
+#endif
+
+#define TT_FAST_INSTRUCTION_ACCESS_MMU_MISS	0x64
+#define TT_FAST_DATA_ACCESS_MMU_MISS		0x68
+#define TT_FAST_DATA_ACCESS_PROTECTION		0x6c
+#define TT_CPU_MONDO				0x7c
+
+#define FAST_MMU_HANDLER_SIZE			128
+
+#ifdef __ASM__
+
+/* MMU fault status area data fault offset */
+#define FSA_DFA_OFFSET				0x48
+
+/* MMU fault status area data context */
+#define FSA_DFC_OFFSET				0x50
+
+/* offset of the target address within the TTE Data entry */
+#define TTE_DATA_TADDR_OFFSET			13
+
+.macro FAST_INSTRUCTION_ACCESS_MMU_MISS_HANDLER
+	PREEMPTIBLE_HANDLER fast_instruction_access_mmu_miss
+.endm
+
+/*
+ * Handler of the Fast Data Access MMU Miss trap. If the trap occurred in the kernel
+ * (context 0), an identity mapping (with displacement) is installed. Otherwise
+ * a higher level service routine is called.
+ */
+.macro FAST_DATA_ACCESS_MMU_MISS_HANDLER tl
+	mov SCRATCHPAD_MMU_FSA, %g1
+	ldxa [%g1] ASI_SCRATCHPAD, %g1			! g1 <= RA of MMU fault status area
+
+	/* read faulting context */
+	add %g1, FSA_DFC_OFFSET, %g2			! g2 <= RA of data fault context
+	ldxa [%g2] ASI_REAL, %g3			! read the fault context
+
+	/* read the faulting address */
+	add %g1, FSA_DFA_OFFSET, %g2			! g2 <= RA of data fault address
+	ldxa [%g2] ASI_REAL, %g1			! read the fault address
+	srlx %g1, TTE_DATA_TADDR_OFFSET, %g1		! truncate it to page boundary
+	sllx %g1, TTE_DATA_TADDR_OFFSET, %g1
+
+	/* service by higher-level routine when context != 0 */
+	brnz %g3, 0f 
+	nop
+	/* exclude page number 0 from installing the identity mapping */
+	brz %g1, 0f
+	nop
+
+	/*
+	 * Installing the identity mapping does not fit into 32 instructions, call
+	 * a separate routine. The routine performs RETRY, hence the call never
+	 * returns.
+	 */
+	ba install_identity_mapping
+	nop	
+
+	0:
+
+	/*
+	 * One of the scenarios in which this trap can occur is when the
+	 * register window spill/fill handler accesses a memory which is not
+	 * mapped. In such a case, this handler will be called from TL = 1.
+	 * We handle the situation by pretending that the MMU miss occurred
+	 * on TL = 0. Once the MMU miss trap is serviced, the instruction which
+	 * caused the spill/fill trap is restarted, the spill/fill trap occurs,
+	 * but this time its handler accesses memory which IS mapped.
+	 */
+0:
+.if (\tl > 0)
+	wrpr %g0, 1, %tl
+.endif
+
+	/*
+	 * Save the faulting virtual page and faulting context to the %g2
+	 * register. The most significant 51 bits of the %g2 register will
+	 * contain the virtual address which caused the fault truncated to the
+	 * page boundary. The least significant 13 bits of the %g2 register
+	 * will contain the number of the context in which the fault occurred.
+	 * The value of the %g2 register will be passed as a parameter to the
+	 * higher level service routine.
+	 */
+	or %g1, %g3, %g2
+
+	PREEMPTIBLE_HANDLER fast_data_access_mmu_miss
+.endm
+
+/*
+ * Handler of the Fast Data MMU Protection trap. Finds the trapping address
+ * and context and calls higher level service routine.
+ */
+.macro FAST_DATA_ACCESS_PROTECTION_HANDLER tl
+	/*
+	 * The same special case as in FAST_DATA_ACCESS_MMU_MISS_HANDLER.
+	 */
+	.if (\tl > 0)
+		wrpr %g0, 1, %tl
+	.endif
+
+	mov SCRATCHPAD_MMU_FSA, %g1
+	ldxa [%g1] ASI_SCRATCHPAD, %g1			! g1 <= RA of MMU fault status area
+
+	/* read faulting context */
+	add %g1, FSA_DFC_OFFSET, %g2			! g2 <= RA of data fault context
+	ldxa [%g2] ASI_REAL, %g3			! read the fault context
+
+	/* read the faulting address */
+	add %g1, FSA_DFA_OFFSET, %g2			! g2 <= RA of data fault address
+	ldxa [%g2] ASI_REAL, %g1			! read the fault address
+	srlx %g1, TTE_DATA_TADDR_OFFSET, %g1		! truncate it to page boundary
+	sllx %g1, TTE_DATA_TADDR_OFFSET, %g1
+
+	/* the same as for FAST_DATA_ACCESS_MMU_MISS_HANDLER */
+	or %g1, %g3, %g2
+
+	PREEMPTIBLE_HANDLER fast_data_access_protection
+.endm
+#endif /* __ASM__ */
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/include/trap/sun4v/regwin.h
===================================================================
--- kernel/arch/sparc64/include/trap/sun4v/regwin.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/include/trap/sun4v/regwin.h	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * Copyright (c) 2009 Pavel Rimsky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64interrupt
+ * @{
+ */
+#ifndef KERN_sparc64_sun4v_REGWIN_H_
+#define KERN_sparc64_sun4v_REGWIN_H_
+
+#ifdef __ASM__
+
+/*
+ * Saves the contents of the current window to the userspace window buffer.
+ * Does not modify any register window registers, but updates pointer to the
+ * top of the userspace window buffer.
+ *
+ * Parameters:
+ * 	\tmpreg1	global register to be used for scratching purposes
+ * 	\tmpreg2	global register to be used for scratching purposes
+ */
+.macro SAVE_TO_USPACE_WBUF tmpreg1, tmpreg2
+	set SCRATCHPAD_WBUF, \tmpreg2
+	ldxa [\tmpreg2] ASI_SCRATCHPAD, \tmpreg1
+	stx %l0, [\tmpreg1 + L0_OFFSET]	
+	stx %l1, [\tmpreg1 + L1_OFFSET]
+	stx %l2, [\tmpreg1 + L2_OFFSET]
+	stx %l3, [\tmpreg1 + L3_OFFSET]
+	stx %l4, [\tmpreg1 + L4_OFFSET]
+	stx %l5, [\tmpreg1 + L5_OFFSET]
+	stx %l6, [\tmpreg1 + L6_OFFSET]
+	stx %l7, [\tmpreg1 + L7_OFFSET]
+	stx %i0, [\tmpreg1 + I0_OFFSET]
+	stx %i1, [\tmpreg1 + I1_OFFSET]
+	stx %i2, [\tmpreg1 + I2_OFFSET]
+	stx %i3, [\tmpreg1 + I3_OFFSET]
+	stx %i4, [\tmpreg1 + I4_OFFSET]
+	stx %i5, [\tmpreg1 + I5_OFFSET]
+	stx %i6, [\tmpreg1 + I6_OFFSET]
+	stx %i7, [\tmpreg1 + I7_OFFSET]
+	add \tmpreg1, STACK_WINDOW_SAVE_AREA_SIZE, \tmpreg1
+	stxa \tmpreg1, [\tmpreg2] ASI_SCRATCHPAD
+.endm
+
+/*
+ * Macro used to spill userspace window to userspace window buffer.
+ * It is triggered from normal kernel code doing SAVE when
+ * OTHERWIN>0 at (TL=0).
+ */
+.macro SPILL_TO_USPACE_WINDOW_BUFFER
+	SAVE_TO_USPACE_WBUF %g7, %g4
+	saved
+	retry
+.endm
+
+#endif
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/sparc64/src/asm.S
===================================================================
--- kernel/arch/sparc64/src/asm.S	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ kernel/arch/sparc64/src/asm.S	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -29,6 +29,4 @@
 #include <arch/arch.h>
 #include <arch/stack.h>
-#include <arch/regdef.h>
-#include <arch/mm/mmu.h>
 
 .text
@@ -234,83 +232,2 @@
 	nop
 
-
-.macro WRITE_ALTERNATE_REGISTER reg, bit
-	rdpr %pstate, %g1				! save PSTATE.PEF
-	wrpr %g0, (\bit | PSTATE_PRIV_BIT), %pstate
-	mov %o0, \reg
-	wrpr %g0, PSTATE_PRIV_BIT, %pstate
-	retl
-	wrpr %g1, 0, %pstate				! restore PSTATE.PEF
-.endm
-
-.macro READ_ALTERNATE_REGISTER reg, bit
-	rdpr %pstate, %g1				! save PSTATE.PEF
-	wrpr %g0, (\bit | PSTATE_PRIV_BIT), %pstate
-	mov \reg, %o0
-	wrpr %g0, PSTATE_PRIV_BIT, %pstate
-	retl
-	wrpr %g1, 0, %pstate				! restore PSTATE.PEF
-.endm
-
-.global write_to_ag_g6
-write_to_ag_g6:
-	WRITE_ALTERNATE_REGISTER %g6, PSTATE_AG_BIT
-
-.global write_to_ag_g7
-write_to_ag_g7:
-	WRITE_ALTERNATE_REGISTER %g7, PSTATE_AG_BIT
-
-.global write_to_ig_g6
-write_to_ig_g6:
-	WRITE_ALTERNATE_REGISTER %g6, PSTATE_IG_BIT
-
-.global read_from_ag_g7
-read_from_ag_g7:
-	READ_ALTERNATE_REGISTER %g7, PSTATE_AG_BIT
-
-
-/** Switch to userspace.
- *
- * %o0	Userspace entry address.
- * %o1	Userspace stack pointer address.
- * %o2  Userspace address of uarg structure.
- */
-.global switch_to_userspace
-switch_to_userspace:
-	save %o1, -(STACK_WINDOW_SAVE_AREA_SIZE + STACK_ARG_SAVE_AREA_SIZE), %sp
-	flushw
-	wrpr %g0, 0, %cleanwin		! avoid information leak
-
-	mov %i2, %o0			! uarg
-	xor %o1, %o1, %o1		! %o1 is defined to hold pcb_ptr
-					! set it to 0
-
-	clr %i2
-	clr %i3
-	clr %i4
-	clr %i5
-	clr %i6
-
-	wrpr %g0, 1, %tl		! enforce mapping via nucleus
-
-	rdpr %cwp, %g1
-	wrpr %g1, TSTATE_IE_BIT, %tstate
-	wrpr %i0, 0, %tnpc
-	
-	/*
-	 * Set primary context according to secondary context.
-	 * Secondary context has been already installed by
-	 * higher-level functions.
-	 */
-	wr %g0, ASI_DMMU, %asi
-	ldxa [VA_SECONDARY_CONTEXT_REG] %asi, %g1
-	stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi
-	flush %i7
-
-	/*
-	 * Spills and fills will be handled by the userspace handlers.
-	 */
-	wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(1), %wstate
-	
-	done				! jump to userspace
-
Index: kernel/arch/sparc64/src/cpu/cpu.c
===================================================================
--- kernel/arch/sparc64/src/cpu/cpu.c	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ 	(revision )
@@ -1,189 +1,0 @@
-/*
- * Copyright (c) 2005 Jakub Jermar
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup sparc64
- * @{
- */
-/** @file
- */
-
-#include <arch/cpu_family.h>
-#include <cpu.h>
-#include <arch.h>
-#include <genarch/ofw/ofw_tree.h>
-#include <arch/drivers/tick.h>
-#include <print.h>
-#include <arch/cpu_node.h>
-
-/**
- * Finds out the clock frequency of the current CPU.
- *
- * @param node	node representing the current CPU in the OFW tree
- * @return 	clock frequency if "node" is the current CPU and no error
- *		occurs,	-1 if "node" is not the current CPU or on error
- */
-static int find_cpu_frequency(ofw_tree_node_t *node)
-{
-	ofw_tree_property_t *prop;
-	uint32_t mid;
-
-	/* 'upa-portid' for US, 'portid' for US-III, 'cpuid' for US-IV */
-	prop = ofw_tree_getprop(node, "upa-portid");
-	if ((!prop) || (!prop->value))
-		prop = ofw_tree_getprop(node, "portid");
-	if ((!prop) || (!prop->value))
-		prop = ofw_tree_getprop(node, "cpuid");
-	
-	if (prop && prop->value) {
-		mid = *((uint32_t *) prop->value);
-		if (mid == CPU->arch.mid) {
-			prop = ofw_tree_getprop(node, "clock-frequency");
-			if (prop && prop->value) {
-				return *((uint32_t *) prop->value);
-			}
-		}
-	}
-	
-	return -1;
-}
-
-/** Perform sparc64 specific initialization of the processor structure for the
- * current processor.
- */
-void cpu_arch_init(void)
-{
-	ofw_tree_node_t *node;
-	uint32_t clock_frequency = 0;
-	
-	CPU->arch.mid = read_mid();
-	
-	/*
-	 * Detect processor frequency.
-	 */
-	if (is_us() || is_us_iii()) { 
-		node = ofw_tree_find_child_by_device_type(cpus_parent(), "cpu");
-		while (node) {
-			int f = find_cpu_frequency(node);
-			if (f != -1) 
-				clock_frequency = (uint32_t) f;
-			node = ofw_tree_find_peer_by_device_type(node, "cpu");
-		}
-	} else if (is_us_iv()) {
-		node = ofw_tree_find_child(cpus_parent(), "cmp");
-		while (node) {
-			int f;
-			f = find_cpu_frequency(
-				ofw_tree_find_child(node, "cpu@0"));
-			if (f != -1) 
-				clock_frequency = (uint32_t) f;
-			f = find_cpu_frequency(
-				ofw_tree_find_child(node, "cpu@1"));
-			if (f != -1) 
-				clock_frequency = (uint32_t) f;
-			node = ofw_tree_find_peer_by_name(node, "cmp");
-		}
-	}
-		
-	CPU->arch.clock_frequency = clock_frequency;
-	tick_init();
-}
-
-/** Read version information from the current processor. */
-void cpu_identify(void)
-{
-	CPU->arch.ver.value = ver_read();
-}
-
-/** Print version information for a processor.
- *
- * This function is called by the bootstrap processor.
- *
- * @param m Processor structure of the CPU for which version information is to
- * 	be printed.
- */
-void cpu_print_report(cpu_t *m)
-{
-	char *manuf, *impl;
-
-	switch (m->arch.ver.manuf) {
-	case MANUF_FUJITSU:
-		manuf = "Fujitsu";
-		break;
-	case MANUF_ULTRASPARC:
-		manuf = "UltraSPARC";
-		break;
-	case MANUF_SUN:
-	    	manuf = "Sun";
-		break;
-	default:
-		manuf = "Unknown";
-		break;
-	}
-	
-	switch (CPU->arch.ver.impl) {
-	case IMPL_ULTRASPARCI:
-		impl = "UltraSPARC I";
-		break;
-	case IMPL_ULTRASPARCII:
-		impl = "UltraSPARC II";
-		break;
-	case IMPL_ULTRASPARCII_I:
-		impl = "UltraSPARC IIi";
-		break;
-	case IMPL_ULTRASPARCII_E:
-		impl = "UltraSPARC IIe";
-		break;
-	case IMPL_ULTRASPARCIII:
-		impl = "UltraSPARC III";
-		break;
-	case IMPL_ULTRASPARCIII_PLUS:
-		impl = "UltraSPARC III+";
-		break;
-	case IMPL_ULTRASPARCIII_I:
-		impl = "UltraSPARC IIIi";
-		break;
-	case IMPL_ULTRASPARCIV:
-		impl = "UltraSPARC IV";
-		break;
-	case IMPL_ULTRASPARCIV_PLUS:
-		impl = "UltraSPARC IV+";
-		break;
-	case IMPL_SPARC64V:
-		impl = "SPARC 64V";
-		break;
-	default:
-		impl = "Unknown";
-		break;
-	}
-
-	printf("cpu%d: manuf=%s, impl=%s, mask=%d (%d MHz)\n", m->id, manuf,
-		impl, m->arch.ver.mask, m->arch.clock_frequency / 1000000);
-}
-
-/** @}
- */
Index: kernel/arch/sparc64/src/cpu/sun4u/cpu.c
===================================================================
--- kernel/arch/sparc64/src/cpu/sun4u/cpu.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/cpu/sun4u/cpu.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64
+ * @{
+ */
+/** @file
+ */
+
+#include <arch/cpu_family.h>
+#include <cpu.h>
+#include <arch.h>
+#include <genarch/ofw/ofw_tree.h>
+#include <arch/drivers/tick.h>
+#include <print.h>
+#include <arch/cpu_node.h>
+
+/**
+ * Finds out the clock frequency of the current CPU.
+ *
+ * @param node	node representing the current CPU in the OFW tree
+ * @return 	clock frequency if "node" is the current CPU and no error
+ *		occurs,	-1 if "node" is not the current CPU or on error
+ */
+static int find_cpu_frequency(ofw_tree_node_t *node)
+{
+	ofw_tree_property_t *prop;
+	uint32_t mid;
+
+	/* 'upa-portid' for US, 'portid' for US-III, 'cpuid' for US-IV */
+	prop = ofw_tree_getprop(node, "upa-portid");
+	if ((!prop) || (!prop->value))
+		prop = ofw_tree_getprop(node, "portid");
+	if ((!prop) || (!prop->value))
+		prop = ofw_tree_getprop(node, "cpuid");
+	
+	if (prop && prop->value) {
+		mid = *((uint32_t *) prop->value);
+		if (mid == CPU->arch.mid) {
+			prop = ofw_tree_getprop(node, "clock-frequency");
+			if (prop && prop->value) {
+				return *((uint32_t *) prop->value);
+			}
+		}
+	}
+	
+	return -1;
+}
+
+/** Perform sparc64 specific initialization of the processor structure for the
+ * current processor.
+ */
+void cpu_arch_init(void)
+{
+	ofw_tree_node_t *node;
+	uint32_t clock_frequency = 0;
+	
+	CPU->arch.mid = read_mid();
+	
+	/*
+	 * Detect processor frequency.
+	 */
+	if (is_us() || is_us_iii()) { 
+		node = ofw_tree_find_child_by_device_type(cpus_parent(), "cpu");
+		while (node) {
+			int f = find_cpu_frequency(node);
+			if (f != -1) 
+				clock_frequency = (uint32_t) f;
+			node = ofw_tree_find_peer_by_device_type(node, "cpu");
+		}
+	} else if (is_us_iv()) {
+		node = ofw_tree_find_child(cpus_parent(), "cmp");
+		while (node) {
+			int f;
+			f = find_cpu_frequency(
+				ofw_tree_find_child(node, "cpu@0"));
+			if (f != -1) 
+				clock_frequency = (uint32_t) f;
+			f = find_cpu_frequency(
+				ofw_tree_find_child(node, "cpu@1"));
+			if (f != -1) 
+				clock_frequency = (uint32_t) f;
+			node = ofw_tree_find_peer_by_name(node, "cmp");
+		}
+	}
+		
+	CPU->arch.clock_frequency = clock_frequency;
+	tick_init();
+}
+
+/** Read version information from the current processor. */
+void cpu_identify(void)
+{
+	CPU->arch.ver.value = ver_read();
+}
+
+/** Print version information for a processor.
+ *
+ * This function is called by the bootstrap processor.
+ *
+ * @param m Processor structure of the CPU for which version information is to
+ * 	be printed.
+ */
+void cpu_print_report(cpu_t *m)
+{
+	char *manuf, *impl;
+
+	switch (m->arch.ver.manuf) {
+	case MANUF_FUJITSU:
+		manuf = "Fujitsu";
+		break;
+	case MANUF_ULTRASPARC:
+		manuf = "UltraSPARC";
+		break;
+	case MANUF_SUN:
+	    	manuf = "Sun";
+		break;
+	default:
+		manuf = "Unknown";
+		break;
+	}
+	
+	switch (CPU->arch.ver.impl) {
+	case IMPL_ULTRASPARCI:
+		impl = "UltraSPARC I";
+		break;
+	case IMPL_ULTRASPARCII:
+		impl = "UltraSPARC II";
+		break;
+	case IMPL_ULTRASPARCII_I:
+		impl = "UltraSPARC IIi";
+		break;
+	case IMPL_ULTRASPARCII_E:
+		impl = "UltraSPARC IIe";
+		break;
+	case IMPL_ULTRASPARCIII:
+		impl = "UltraSPARC III";
+		break;
+	case IMPL_ULTRASPARCIII_PLUS:
+		impl = "UltraSPARC III+";
+		break;
+	case IMPL_ULTRASPARCIII_I:
+		impl = "UltraSPARC IIIi";
+		break;
+	case IMPL_ULTRASPARCIV:
+		impl = "UltraSPARC IV";
+		break;
+	case IMPL_ULTRASPARCIV_PLUS:
+		impl = "UltraSPARC IV+";
+		break;
+	case IMPL_SPARC64V:
+		impl = "SPARC 64V";
+		break;
+	default:
+		impl = "Unknown";
+		break;
+	}
+
+	printf("cpu%d: manuf=%s, impl=%s, mask=%d (%d MHz)\n", m->id, manuf,
+		impl, m->arch.ver.mask, m->arch.clock_frequency / 1000000);
+}
+
+/** @}
+ */
Index: kernel/arch/sparc64/src/cpu/sun4v/cpu.c
===================================================================
--- kernel/arch/sparc64/src/cpu/sun4v/cpu.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/cpu/sun4v/cpu.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64
+ * @{
+ */
+/** @file
+ */
+
+#include <cpu.h>
+#include <arch.h>
+#include <genarch/ofw/ofw_tree.h>
+#include <arch/drivers/tick.h>
+#include <print.h>
+#include <arch/sun4v/md.h>
+#include <arch/sun4v/hypercall.h>
+
+//#include <arch/trap/sun4v/interrupt.h>
+
+/** Perform sparc64 specific initialization of the processor structure for the
+ * current processor.
+ */
+void cpu_arch_init(void)
+{
+	uint64_t myid;
+	__hypercall_fast_ret1(0, 0, 0, 0, 0, CPU_MYID, &myid);
+
+	//MH
+	//CPU->arch.id = myid;
+
+	md_node_t node = md_get_root();
+
+	/* walk through MD, find the current CPU node & its clock-frequency */
+	while (true) {
+		if (!md_next_node(&node, "cpu")) {
+			panic("Could not determine CPU frequency.");
+		}
+		uint64_t id = 0;
+		md_get_integer_property(node, "id", &id);
+
+		if (id == myid) {
+			uint64_t clock_frequency = 0;
+			md_get_integer_property(node, "clock-frequency",
+				&clock_frequency);
+			CPU->arch.clock_frequency = clock_frequency;
+			break;
+		}
+	}
+		
+	tick_init();
+    	//MH - uncomment later
+	//sun4v_ipi_init();
+}
+
+/**
+ * Implemented as an empty function as accessing the VER register is
+ * a hyperprivileged operation on sun4v.
+ */
+void cpu_identify(void)
+{
+}
+
+/** Print version information for a processor.
+ *
+ * This function is called by the bootstrap processor.
+ *
+ * @param m Processor structure of the CPU for which version information is to
+ * 	be printed.
+ */
+void cpu_print_report(cpu_t *m)
+{
+	printf("cpu%d: Niagara (%d MHz)\n", m->id,
+		m->arch.clock_frequency / 1000000);
+}
+
+/** @}
+ */
Index: kernel/arch/sparc64/src/drivers/kbd.c
===================================================================
--- kernel/arch/sparc64/src/drivers/kbd.c	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ kernel/arch/sparc64/src/drivers/kbd.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -39,4 +39,5 @@
 #include <console/console.h>
 #include <ddi/irq.h>
+#include <mm/page.h>
 #include <arch/mm/page.h>
 #include <arch/types.h>
Index: kernel/arch/sparc64/src/drivers/niagara.c
===================================================================
--- kernel/arch/sparc64/src/drivers/niagara.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/drivers/niagara.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2008 Pavel Rimsky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64
+ * @{
+ */
+/**
+ * @file
+ * @brief	Niagara input/output driver based on hypervisor calls.
+ */
+
+#include <arch/drivers/niagara.h>
+#include <console/chardev.h>
+#include <console/console.h>
+#include <ddi/ddi.h>
+#include <ddi/device.h>
+#include <arch/asm.h>
+#include <arch.h>
+#include <mm/slab.h>
+#include <arch/drivers/kbd.h>
+#include <arch/sun4v/hypercall.h>
+#include <sysinfo/sysinfo.h>
+#include <ipc/irq.h>
+#include <print.h>
+#include <proc/thread.h>
+#include <console/console.h>
+#include <genarch/srln/srln.h>
+
+/* polling interval in microseconds */
+#define POLL_INTERVAL  10000
+
+/* device instance */
+static niagara_instance_t *instance = NULL;
+
+static void niagara_putchar(outdev_t *, const wchar_t, bool);
+
+/** character device operations */
+static outdev_operations_t niagara_ops = {
+	.write = niagara_putchar,
+	.redraw = NULL
+};
+
+/*
+ * The driver uses hypercalls to print characters to the console. Since the
+ * hypercall cannot be performed from the userspace, we do this:
+ * The kernel "little brother" driver (which will be present no matter what the
+ * DDI architecture is - as we need the kernel part for the kconsole)
+ * defines a shared buffer. Kernel walks through the buffer (in the same thread
+ * which is used for polling the keyboard) and prints any pending characters
+ * to the console (using hypercalls). The userspace fb server maps this shared
+ * buffer to its address space; any output operation it performs uses
+ * the mapped buffer. The shared buffer definition follows.
+ */
+#define OUTPUT_BUFFER_SIZE	((PAGE_SIZE) - 2 * 8)
+static volatile struct {
+	uint64_t read_ptr;
+	uint64_t write_ptr;
+	char data[OUTPUT_BUFFER_SIZE];
+}
+	__attribute__ ((packed))
+	__attribute__ ((aligned(PAGE_SIZE)))
+	output_buffer;
+
+/**
+ * Analogous to the output_buffer, see the previous definition.
+ */
+#define INPUT_BUFFER_SIZE	((PAGE_SIZE) - 2 * 8)
+static volatile struct {
+	uint64_t write_ptr;
+	uint64_t read_ptr;
+	char data[INPUT_BUFFER_SIZE];
+}
+	__attribute__ ((packed))
+	__attribute__ ((aligned(PAGE_SIZE)))
+	input_buffer;
+
+
+/** Writes a single character to the standard output. */
+static inline void do_putchar(const char c) {
+	/* repeat until the buffer is non-full */
+	while (__hypercall_fast1(CONS_PUTCHAR, c) == EWOULDBLOCK)
+		;
+}
+
+/** Writes a single character to the standard output. */
+static void niagara_putchar(outdev_t *dev, const wchar_t ch, bool silent)
+{
+	do_putchar(ch);
+	if (ch == '\n')
+		do_putchar('\r');
+}
+
+/**
+ * Function regularly called by the keyboard polling thread. Asks the
+ * hypervisor whether there is any unread character. If so, it picks it up
+ * and sends it to the upper layers of HelenOS.
+ *
+ * Apart from that, it also checks whether the userspace output driver has
+ * pushed any characters to the output buffer. If so, it prints them.
+ */
+static void niagara_poll(niagara_instance_t *instance)
+{
+	/* print any pending characters from the shared buffer to the console */
+	while (output_buffer.read_ptr != output_buffer.write_ptr) {
+		do_putchar(output_buffer.data[output_buffer.read_ptr]);
+		output_buffer.read_ptr =
+			((output_buffer.read_ptr) + 1) % OUTPUT_BUFFER_SIZE;
+	}
+
+	uint64_t c;
+
+	/* read character from keyboard, send it to upper layers of HelenOS */
+	if (__hypercall_fast_ret1(0, 0, 0, 0, 0, CONS_GETCHAR, &c) == EOK) {
+		if (!silent) {
+			/* kconsole active, send the character to kernel */
+			indev_push_character(instance->srlnin, c);
+		} else {
+			/* kconsole inactive, send the character to uspace driver */
+			input_buffer.data[input_buffer.write_ptr] = (char) c;
+			input_buffer.write_ptr =
+				((input_buffer.write_ptr) + 1) % INPUT_BUFFER_SIZE;
+		}
+	}
+}
+
+/**
+ * Polling thread function.
+ */
+static void kniagarapoll(void *instance) {
+	while (true) {
+		niagara_poll(instance);
+		thread_usleep(POLL_INTERVAL);
+	}
+}
+
+/**
+ * Initializes the input/output subsystem so that the Niagara standard
+ * input/output is used.
+ */
+static void niagara_init(void)
+{
+	if (instance)
+		return;
+	
+	instance = malloc(sizeof(niagara_instance_t), FRAME_ATOMIC);
+	
+	if (instance) {
+		instance->thread = thread_create(kniagarapoll, instance, TASK, 0,
+			"kniagarapoll", true);
+		
+		if (!instance->thread) {
+			free(instance);
+			instance = NULL;
+			return;
+		}
+	}
+
+	instance->srlnin = NULL;
+
+	output_buffer.read_ptr = 0;
+	output_buffer.write_ptr = 0;
+	input_buffer.write_ptr = 0;
+	input_buffer.read_ptr = 0;
+
+	/*
+	 * Set sysinfos and pareas so that the userspace counterpart of the
+	 * niagara fb and kbd driver can communicate with kernel using shared
+	 * buffers.
+ 	 */
+
+	sysinfo_set_item_val("fb.kind", NULL, 5);
+
+	sysinfo_set_item_val("niagara.outbuf.address", NULL,
+		KA2PA(&output_buffer));
+	sysinfo_set_item_val("niagara.outbuf.size", NULL,
+		PAGE_SIZE);
+	sysinfo_set_item_val("niagara.outbuf.datasize", NULL,
+		OUTPUT_BUFFER_SIZE);
+
+	sysinfo_set_item_val("niagara.inbuf.address", NULL,
+		KA2PA(&input_buffer));
+	sysinfo_set_item_val("niagara.inbuf.size", NULL,
+		PAGE_SIZE);
+	sysinfo_set_item_val("niagara.inbuf.datasize", NULL,
+		INPUT_BUFFER_SIZE);
+
+	static parea_t outbuf_parea;
+	outbuf_parea.pbase = (uintptr_t) (KA2PA(&output_buffer));
+	outbuf_parea.frames = 1;
+	ddi_parea_register(&outbuf_parea);
+
+	static parea_t inbuf_parea;
+	inbuf_parea.pbase = (uintptr_t) (KA2PA(&input_buffer));
+	inbuf_parea.frames = 1;
+	ddi_parea_register(&inbuf_parea);
+
+	outdev_t *niagara_dev = malloc(sizeof(outdev_t), FRAME_ATOMIC);
+	outdev_initialize("niagara_dev", niagara_dev, &niagara_ops);
+	stdout_wire(niagara_dev);
+}
+
+/**
+ * A public function which initializes input from the Niagara console.
+ */
+niagara_instance_t *niagarain_init(void)
+{
+	niagara_init();
+
+	if (instance) {
+		srln_instance_t *srln_instance = srln_init();
+		if (srln_instance) {
+			indev_t *sink = stdin_wire();
+			indev_t *srln = srln_wire(srln_instance, sink);
+
+			// wire std. input to niagara
+			instance->srlnin = srln;
+			thread_ready(instance->thread);
+		}
+	}
+	return instance;
+}
+
+/** @}
+ */
Index: kernel/arch/sparc64/src/drivers/tick.c
===================================================================
--- kernel/arch/sparc64/src/drivers/tick.c	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ kernel/arch/sparc64/src/drivers/tick.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -54,10 +54,10 @@
 	interrupt_register(14, "tick_int", tick_interrupt);
 	compare.int_dis = false;
-	compare.tick_cmpr = CPU->arch.clock_frequency / HZ;
+	compare.tick_cmpr = tick_counter_read() +
+		CPU->arch.clock_frequency / HZ;
 	CPU->arch.next_tick_cmpr = compare.tick_cmpr;
 	tick_compare_write(compare.value);
-	tick_write(0);
 
-#if defined (US3)
+#if defined (US3) || defined (SUN4V)
 	/* disable STICK interrupts and clear any pending ones */
 	tick_compare_reg_t stick_compare;
@@ -111,10 +111,10 @@
 	 * overflow only in 146 years.
 	 */
-	drift = tick_read() - CPU->arch.next_tick_cmpr;
+	drift = tick_counter_read() - CPU->arch.next_tick_cmpr;
 	while (drift > CPU->arch.clock_frequency / HZ) {
 		drift -= CPU->arch.clock_frequency / HZ;
 		CPU->missed_clock_ticks++;
 	}
-	CPU->arch.next_tick_cmpr = tick_read() +
+	CPU->arch.next_tick_cmpr = tick_counter_read() +
 	    (CPU->arch.clock_frequency / HZ) - drift;
 	tick_compare_write(CPU->arch.next_tick_cmpr);
Index: kernel/arch/sparc64/src/mm/as.c
===================================================================
--- kernel/arch/sparc64/src/mm/as.c	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ 	(revision )
@@ -1,224 +1,0 @@
-/*
- * Copyright (c) 2006 Jakub Jermar
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup sparc64mm
- * @{
- */
-/** @file
- */
-
-#include <arch/mm/as.h>
-#include <arch/mm/tlb.h>
-#include <genarch/mm/page_ht.h>
-#include <genarch/mm/asid_fifo.h>
-#include <debug.h>
-#include <config.h>
-
-#ifdef CONFIG_TSB
-#include <arch/mm/tsb.h>
-#include <arch/memstr.h>
-#include <arch/asm.h>
-#include <mm/frame.h>
-#include <bitops.h>
-#include <macros.h>
-#endif /* CONFIG_TSB */
-
-/** Architecture dependent address space init. */
-void as_arch_init(void)
-{
-	if (config.cpu_active == 1) {
-		as_operations = &as_ht_operations;
-		asid_fifo_init();
-	}
-}
-
-int as_constructor_arch(as_t *as, int flags)
-{
-#ifdef CONFIG_TSB
-	/*
-	 * The order must be calculated with respect to the emulated
-	 * 16K page size.
-	 */
-	int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
-	    sizeof(tsb_entry_t)) >> FRAME_WIDTH);
-
-	uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);
-
-	if (!tsb)
-		return -1;
-
-	as->arch.itsb = (tsb_entry_t *) tsb;
-	as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
-	    sizeof(tsb_entry_t));
-
-	memsetb(as->arch.itsb,
-	    (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0);
-#endif
-	return 0;
-}
-
-int as_destructor_arch(as_t *as)
-{
-#ifdef CONFIG_TSB
-	/*
-	 * The count must be calculated with respect to the emualted 16K page
-	 * size.
-	 */
-	size_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
-	    sizeof(tsb_entry_t)) >> FRAME_WIDTH;
-	frame_free(KA2PA((uintptr_t) as->arch.itsb));
-	return cnt;
-#else
-	return 0;
-#endif
-}
-
-int as_create_arch(as_t *as, int flags)
-{
-#ifdef CONFIG_TSB
-	tsb_invalidate(as, 0, (size_t) -1);
-#endif
-	return 0;
-}
-
-/** Perform sparc64-specific tasks when an address space becomes active on the
- * processor.
- *
- * Install ASID and map TSBs.
- *
- * @param as Address space.
- */
-void as_install_arch(as_t *as)
-{
-	tlb_context_reg_t ctx;
-	
-	/*
-	 * Note that we don't and may not lock the address space. That's ok
-	 * since we only read members that are currently read-only.
-	 *
-	 * Moreover, the as->asid is protected by asidlock, which is being held.
-	 */
-	
-	/*
-	 * Write ASID to secondary context register. The primary context
-	 * register has to be set from TL>0 so it will be filled from the
-	 * secondary context register from the TL=1 code just before switch to
-	 * userspace.
-	 */
-	ctx.v = 0;
-	ctx.context = as->asid;
-	mmu_secondary_context_write(ctx.v);
-
-#ifdef CONFIG_TSB	
-	uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
-
-	ASSERT(as->arch.itsb && as->arch.dtsb);
-
-	uintptr_t tsb = (uintptr_t) as->arch.itsb;
-		
-	if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
-		/*
-		 * TSBs were allocated from memory not covered
-		 * by the locked 4M kernel DTLB entry. We need
-		 * to map both TSBs explicitly.
-		 */
-		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
-		dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
-	}
-		
-	/*
-	 * Setup TSB Base registers.
-	 */
-	tsb_base_reg_t tsb_base;
-		
-	tsb_base.value = 0;
-	tsb_base.size = TSB_SIZE;
-	tsb_base.split = 0;
-
-	tsb_base.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH;
-	itsb_base_write(tsb_base.value);
-	tsb_base.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH;
-	dtsb_base_write(tsb_base.value);
-	
-#if defined (US3)
-	/*
-	 * Clear the extension registers.
-	 * In HelenOS, primary and secondary context registers contain
-	 * equal values and kernel misses (context 0, ie. the nucleus context)
-	 * are excluded from the TSB miss handler, so it makes no sense
-	 * to have separate TSBs for primary, secondary and nucleus contexts.
-	 * Clearing the extension registers will ensure that the value of the
-	 * TSB Base register will be used as an address of TSB, making the code
-	 * compatible with the US port. 
-	 */
-	itsb_primary_extension_write(0);
-	itsb_nucleus_extension_write(0);
-	dtsb_primary_extension_write(0);
-	dtsb_secondary_extension_write(0);
-	dtsb_nucleus_extension_write(0);
-#endif
-#endif
-}
-
-/** Perform sparc64-specific tasks when an address space is removed from the
- * processor.
- *
- * Demap TSBs.
- *
- * @param as Address space.
- */
-void as_deinstall_arch(as_t *as)
-{
-
-	/*
-	 * Note that we don't and may not lock the address space. That's ok
-	 * since we only read members that are currently read-only.
-	 *
-	 * Moreover, the as->asid is protected by asidlock, which is being held.
-	 */
-
-#ifdef CONFIG_TSB
-	uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
-
-	ASSERT(as->arch.itsb && as->arch.dtsb);
-
-	uintptr_t tsb = (uintptr_t) as->arch.itsb;
-		
-	if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
-		/*
-		 * TSBs were allocated from memory not covered
-		 * by the locked 4M kernel DTLB entry. We need
-		 * to demap the entry installed by as_install_arch().
-		 */
-		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
-	}
-#endif
-}
-
-/** @}
- */
Index: kernel/arch/sparc64/src/mm/frame.c
===================================================================
--- kernel/arch/sparc64/src/mm/frame.c	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ 	(revision )
@@ -1,87 +1,0 @@
-/*
- * Copyright (c) 2005 Jakub Jermar
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup sparc64mm
- * @{
- */
-/** @file
- */
-
-#include <arch/mm/frame.h>
-#include <mm/frame.h>
-#include <arch/boot/boot.h>
-#include <arch/types.h>
-#include <config.h>
-#include <align.h>
-#include <macros.h>
-
-uintptr_t last_frame = NULL;
-
-/** Create memory zones according to information stored in bootinfo.
- *
- * Walk the bootinfo memory map and create frame zones according to it.
- */
-void frame_arch_init(void)
-{
-	unsigned int i;
-	pfn_t confdata;
-
-	if (config.cpu_active == 1) {
-		for (i = 0; i < bootinfo.memmap.count; i++) {
-			uintptr_t start = bootinfo.memmap.zones[i].start;
-			size_t size = bootinfo.memmap.zones[i].size;
-
-			/*
-			 * The memmap is created by HelenOS boot loader.
-			 * It already contains no holes.
-			 */
-
-			confdata = ADDR2PFN(start);
-			if (confdata == ADDR2PFN(KA2PA(PFN2ADDR(0))))
-				confdata = ADDR2PFN(KA2PA(PFN2ADDR(2)));
-			zone_create(ADDR2PFN(start),
-			    SIZE2FRAMES(ALIGN_DOWN(size, FRAME_SIZE)),
-			    confdata, 0);
-			last_frame = max(last_frame, start + ALIGN_UP(size,
-			    FRAME_SIZE));
-		}
-
-		/*
-		 * On sparc64, physical memory can start on a non-zero address.
-		 * The generic frame_init() only marks PFN 0 as not free, so we
-		 * must mark the physically first frame not free explicitly
-		 * here, no matter what is its address.
-		 */
-		frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);
-	}
-
-	end_of_identity = PA2KA(last_frame);
-}
-
-/** @}
- */
Index: kernel/arch/sparc64/src/mm/page.c
===================================================================
--- kernel/arch/sparc64/src/mm/page.c	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ kernel/arch/sparc64/src/mm/page.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -33,4 +33,5 @@
  */
 
+#include <mm/page.h>
 #include <arch/mm/page.h>
 #include <arch/mm/tlb.h>
Index: kernel/arch/sparc64/src/mm/sun4u/as.c
===================================================================
--- kernel/arch/sparc64/src/mm/sun4u/as.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/mm/sun4u/as.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2006 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64mm
+ * @{
+ */
+/** @file
+ */
+
+#include <arch/mm/as.h>
+#include <arch/mm/tlb.h>
+#include <genarch/mm/page_ht.h>
+#include <genarch/mm/asid_fifo.h>
+#include <debug.h>
+#include <config.h>
+
+#ifdef CONFIG_TSB
+#include <arch/mm/tsb.h>
+#include <arch/memstr.h>
+#include <arch/asm.h>
+#include <mm/frame.h>
+#include <bitops.h>
+#include <macros.h>
+#endif /* CONFIG_TSB */
+
+/** Architecture dependent address space init. */
+void as_arch_init(void)
+{
+	if (config.cpu_active == 1) {
+		as_operations = &as_ht_operations;
+		asid_fifo_init();
+	}
+}
+
+int as_constructor_arch(as_t *as, int flags)
+{
+#ifdef CONFIG_TSB
+	/*
+	 * The order must be calculated with respect to the emulated
+	 * 16K page size.
+	 */
+	int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
+	    sizeof(tsb_entry_t)) >> FRAME_WIDTH);
+
+	uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);
+
+	if (!tsb)
+		return -1;
+
+	as->arch.itsb = (tsb_entry_t *) tsb;
+	as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
+	    sizeof(tsb_entry_t));
+
+	memsetb(as->arch.itsb,
+	    (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0);
+#endif
+	return 0;
+}
+
+int as_destructor_arch(as_t *as)
+{
+#ifdef CONFIG_TSB
+	/*
+	 * The count must be calculated with respect to the emulated 16K page
+	 * size.
+	 */
+	size_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
+	    sizeof(tsb_entry_t)) >> FRAME_WIDTH;
+	frame_free(KA2PA((uintptr_t) as->arch.itsb));
+	return cnt;
+#else
+	return 0;
+#endif
+}
+
+int as_create_arch(as_t *as, int flags)
+{
+#ifdef CONFIG_TSB
+	tsb_invalidate(as, 0, (size_t) -1);
+#endif
+	return 0;
+}
+
+/** Perform sparc64-specific tasks when an address space becomes active on the
+ * processor.
+ *
+ * Install ASID and map TSBs.
+ *
+ * @param as Address space.
+ */
+void as_install_arch(as_t *as)
+{
+	tlb_context_reg_t ctx;
+	
+	/*
+	 * Note that we don't and may not lock the address space. That's ok
+	 * since we only read members that are currently read-only.
+	 *
+	 * Moreover, the as->asid is protected by asidlock, which is being held.
+	 */
+	
+	/*
+	 * Write ASID to secondary context register. The primary context
+	 * register has to be set from TL>0 so it will be filled from the
+	 * secondary context register from the TL=1 code just before switch to
+	 * userspace.
+	 */
+	ctx.v = 0;
+	ctx.context = as->asid;
+	mmu_secondary_context_write(ctx.v);
+
+#ifdef CONFIG_TSB	
+	uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
+
+	ASSERT(as->arch.itsb && as->arch.dtsb);
+
+	uintptr_t tsb = (uintptr_t) as->arch.itsb;
+		
+	if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
+		/*
+		 * TSBs were allocated from memory not covered
+		 * by the locked 4M kernel DTLB entry. We need
+		 * to map both TSBs explicitly.
+		 */
+		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
+		dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
+	}
+		
+	/*
+	 * Setup TSB Base registers.
+	 */
+	tsb_base_reg_t tsb_base;
+		
+	tsb_base.value = 0;
+	tsb_base.size = TSB_SIZE;
+	tsb_base.split = 0;
+
+	tsb_base.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH;
+	itsb_base_write(tsb_base.value);
+	tsb_base.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH;
+	dtsb_base_write(tsb_base.value);
+	
+#if defined (US3)
+	/*
+	 * Clear the extension registers.
+	 * In HelenOS, primary and secondary context registers contain
+	 * equal values and kernel misses (context 0, i.e. the nucleus context)
+	 * are excluded from the TSB miss handler, so it makes no sense
+	 * to have separate TSBs for primary, secondary and nucleus contexts.
+	 * Clearing the extension registers will ensure that the value of the
+	 * TSB Base register will be used as an address of TSB, making the code
+	 * compatible with the US port. 
+	 */
+	itsb_primary_extension_write(0);
+	itsb_nucleus_extension_write(0);
+	dtsb_primary_extension_write(0);
+	dtsb_secondary_extension_write(0);
+	dtsb_nucleus_extension_write(0);
+#endif
+#endif
+}
+
+/** Perform sparc64-specific tasks when an address space is removed from the
+ * processor.
+ *
+ * Demap TSBs.
+ *
+ * @param as Address space.
+ */
+void as_deinstall_arch(as_t *as)
+{
+
+	/*
+	 * Note that we don't and may not lock the address space. That's ok
+	 * since we only read members that are currently read-only.
+	 *
+	 * Moreover, the as->asid is protected by asidlock, which is being held.
+	 */
+
+#ifdef CONFIG_TSB
+	uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
+
+	ASSERT(as->arch.itsb && as->arch.dtsb);
+
+	uintptr_t tsb = (uintptr_t) as->arch.itsb;
+		
+	if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
+		/*
+		 * TSBs were allocated from memory not covered
+		 * by the locked 4M kernel DTLB entry. We need
+		 * to demap the entry installed by as_install_arch().
+		 */
+		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
+	}
+#endif
+}
+
+/** @}
+ */
Index: kernel/arch/sparc64/src/mm/sun4u/frame.c
===================================================================
--- kernel/arch/sparc64/src/mm/sun4u/frame.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/mm/sun4u/frame.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64mm
+ * @{
+ */
+/** @file
+ */
+
+#include <arch/mm/frame.h>
+#include <mm/frame.h>
+#include <arch/boot/boot.h>
+#include <arch/types.h>
+#include <config.h>
+#include <align.h>
+#include <macros.h>
+
+uintptr_t last_frame = NULL;
+
+/** Create memory zones according to information stored in bootinfo.
+ *
+ * Walk the bootinfo memory map and create frame zones according to it.
+ */
+void frame_arch_init(void)
+{
+	unsigned int i;
+	pfn_t confdata;
+
+	if (config.cpu_active == 1) {
+		for (i = 0; i < bootinfo.memmap.count; i++) {
+			uintptr_t start = bootinfo.memmap.zones[i].start;
+			size_t size = bootinfo.memmap.zones[i].size;
+
+			/*
+			 * The memmap is created by HelenOS boot loader.
+			 * It already contains no holes.
+			 */
+
+			confdata = ADDR2PFN(start);
+			if (confdata == ADDR2PFN(KA2PA(PFN2ADDR(0))))
+				confdata = ADDR2PFN(KA2PA(PFN2ADDR(2)));
+			zone_create(ADDR2PFN(start),
+			    SIZE2FRAMES(ALIGN_DOWN(size, FRAME_SIZE)),
+			    confdata, 0);
+			last_frame = max(last_frame, start + ALIGN_UP(size,
+			    FRAME_SIZE));
+		}
+
+		/*
+		 * On sparc64, physical memory can start on a non-zero address.
+		 * The generic frame_init() only marks PFN 0 as not free, so we
+		 * must mark the physically first frame not free explicitly
+		 * here, no matter what is its address.
+		 */
+		frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);
+	}
+
+	end_of_identity = PA2KA(last_frame);
+}
+
+/** @}
+ */
Index: kernel/arch/sparc64/src/mm/sun4u/tlb.c
===================================================================
--- kernel/arch/sparc64/src/mm/sun4u/tlb.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/mm/sun4u/tlb.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,608 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64mm	
+ * @{
+ */
+/** @file
+ */
+
+#include <arch/mm/tlb.h>
+#include <mm/tlb.h>
+#include <mm/as.h>
+#include <mm/asid.h>
+#include <arch/mm/frame.h>
+#include <arch/mm/page.h>
+#include <arch/mm/mmu.h>
+#include <arch/interrupt.h>
+#include <interrupt.h>
+#include <arch.h>
+#include <print.h>
+#include <arch/types.h>
+#include <config.h>
+#include <arch/trap/trap.h>
+#include <arch/trap/exception.h>
+#include <panic.h>
+#include <arch/asm.h>
+#include <genarch/mm/page_ht.h>
+
+#ifdef CONFIG_TSB
+#include <arch/mm/tsb.h>
+#endif
+
+static void dtlb_pte_copy(pte_t *, size_t, bool);
+static void itlb_pte_copy(pte_t *, size_t);
+static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
+static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
+    const char *);
+static void do_fast_data_access_protection_fault(istate_t *,
+    tlb_tag_access_reg_t, const char *);
+
+char *context_encoding[] = {
+	"Primary",
+	"Secondary",
+	"Nucleus",
+	"Reserved"
+};
+
+void tlb_arch_init(void)
+{
+	/*
+	 * Invalidate all non-locked DTLB and ITLB entries.
+	 */
+	tlb_invalidate_all();
+
+	/*
+	 * Clear both SFSRs.
+	 */
+	dtlb_sfsr_write(0);
+	itlb_sfsr_write(0);
+}
+
+/** Insert privileged mapping into DMMU TLB.
+ *
+ * @param page		Virtual page address.
+ * @param frame		Physical frame address.
+ * @param pagesize	Page size.
+ * @param locked	True for permanent mappings, false otherwise.
+ * @param cacheable	True if the mapping is cacheable, false otherwise.
+ */
+void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
+    bool locked, bool cacheable)
+{
+	tlb_tag_access_reg_t tag;
+	tlb_data_t data;
+	page_address_t pg;
+	frame_address_t fr;
+
+	pg.address = page;
+	fr.address = frame;
+
+	tag.context = ASID_KERNEL;
+	tag.vpn = pg.vpn;
+
+	dtlb_tag_access_write(tag.value);
+
+	data.value = 0;
+	data.v = true;
+	data.size = pagesize;
+	data.pfn = fr.pfn;
+	data.l = locked;
+	data.cp = cacheable;
+#ifdef CONFIG_VIRT_IDX_DCACHE
+	data.cv = cacheable;
+#endif /* CONFIG_VIRT_IDX_DCACHE */
+	data.p = true;
+	data.w = true;
+	data.g = false;
+
+	dtlb_data_in_write(data.value);
+}
+
+/** Copy PTE to TLB.
+ *
+ * @param t 		Page Table Entry to be copied.
+ * @param index		Zero if lower 8K-subpage, one if higher 8K-subpage.
+ * @param ro		If true, the entry will be created read-only, regardless
+ * 			of its w field.
+ */
+void dtlb_pte_copy(pte_t *t, size_t index, bool ro)
+{
+	tlb_tag_access_reg_t tag;
+	tlb_data_t data;
+	page_address_t pg;
+	frame_address_t fr;
+
+	pg.address = t->page + (index << MMU_PAGE_WIDTH);
+	fr.address = t->frame + (index << MMU_PAGE_WIDTH);
+
+	tag.value = 0;
+	tag.context = t->as->asid;
+	tag.vpn = pg.vpn;
+
+	dtlb_tag_access_write(tag.value);
+
+	data.value = 0;
+	data.v = true;
+	data.size = PAGESIZE_8K;
+	data.pfn = fr.pfn;
+	data.l = false;
+	data.cp = t->c;
+#ifdef CONFIG_VIRT_IDX_DCACHE
+	data.cv = t->c;
+#endif /* CONFIG_VIRT_IDX_DCACHE */
+	data.p = t->k;		/* p like privileged */
+	data.w = ro ? false : t->w;
+	data.g = t->g;
+
+	dtlb_data_in_write(data.value);
+}
+
+/** Copy PTE to ITLB.
+ *
+ * @param t		Page Table Entry to be copied.
+ * @param index		Zero if lower 8K-subpage, one if higher 8K-subpage.
+ */
+void itlb_pte_copy(pte_t *t, size_t index)
+{
+	tlb_tag_access_reg_t tag;
+	tlb_data_t data;
+	page_address_t pg;
+	frame_address_t fr;
+
+	pg.address = t->page + (index << MMU_PAGE_WIDTH);
+	fr.address = t->frame + (index << MMU_PAGE_WIDTH);
+
+	tag.value = 0;
+	tag.context = t->as->asid;
+	tag.vpn = pg.vpn;
+	
+	itlb_tag_access_write(tag.value);
+	
+	data.value = 0;
+	data.v = true;
+	data.size = PAGESIZE_8K;
+	data.pfn = fr.pfn;
+	data.l = false;
+	data.cp = t->c;
+	data.p = t->k;		/* p like privileged */
+	data.w = false;
+	data.g = t->g;
+	
+	itlb_data_in_write(data.value);
+}
+
+/** ITLB miss handler. */
+void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
+{
+	uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
+	size_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
+	pte_t *t;
+
+	page_table_lock(AS, true);
+	t = page_mapping_find(AS, page_16k);
+	if (t && PTE_EXECUTABLE(t)) {
+		/*
+		 * The mapping was found in the software page hash table.
+		 * Insert it into ITLB.
+		 */
+		t->a = true;
+		itlb_pte_copy(t, index);
+#ifdef CONFIG_TSB
+		itsb_pte_copy(t, index);
+#endif
+		page_table_unlock(AS, true);
+	} else {
+		/*
+		 * Forward the page fault to the address space page fault
+		 * handler.
+		 */		
+		page_table_unlock(AS, true);
+		if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) ==
+		    AS_PF_FAULT) {
+			do_fast_instruction_access_mmu_miss_fault(istate,
+			    __func__);
+		}
+	}
+}
+
+/** DTLB miss handler.
+ *
+ * Note that some faults (e.g. kernel faults) were already resolved by the
+ * low-level, assembly language part of the fast_data_access_mmu_miss handler.
+ *
+ * @param tag		Content of the TLB Tag Access register as it existed
+ * 			when the trap happened. This is to prevent confusion
+ * 			created by clobbered Tag Access register during a nested
+ * 			DTLB miss.
+ * @param istate	Interrupted state saved on the stack.
+ */
+void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
+{
+	uintptr_t page_8k;
+	uintptr_t page_16k;
+	size_t index;
+	pte_t *t;
+
+	page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
+	page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
+	index = tag.vpn % MMU_PAGES_PER_PAGE;
+
+	if (tag.context == ASID_KERNEL) {
+		if (!tag.vpn) {
+			/* NULL access in kernel */
+			do_fast_data_access_mmu_miss_fault(istate, tag,
+			    __func__);
+		} else if (page_8k >= end_of_identity) {
+			/*
+			 * The kernel is accessing the I/O space.
+			 * We still do identity mapping for I/O,
+			 * but without caching.
+			 */
+			dtlb_insert_mapping(page_8k, KA2PA(page_8k),
+			    PAGESIZE_8K, false, false);
+			return;
+		}
+		do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
+		    "kernel page fault.");
+	}
+
+	page_table_lock(AS, true);
+	t = page_mapping_find(AS, page_16k);
+	if (t) {
+		/*
+		 * The mapping was found in the software page hash table.
+		 * Insert it into DTLB.
+		 */
+		t->a = true;
+		dtlb_pte_copy(t, index, true);
+#ifdef CONFIG_TSB
+		dtsb_pte_copy(t, index, true);
+#endif
+		page_table_unlock(AS, true);
+	} else {
+		/*
+		 * Forward the page fault to the address space page fault
+		 * handler.
+		 */		
+		page_table_unlock(AS, true);
+		if (as_page_fault(page_16k, PF_ACCESS_READ, istate) ==
+		    AS_PF_FAULT) {
+			do_fast_data_access_mmu_miss_fault(istate, tag,
+			    __func__);
+		}
+	}
+}
+
+/** DTLB protection fault handler.
+ *
+ * @param tag		Content of the TLB Tag Access register as it existed
+ * 			when the trap happened. This is to prevent confusion
+ * 			created by clobbered Tag Access register during a nested
+ * 			DTLB miss.
+ * @param istate	Interrupted state saved on the stack.
+ */
+void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
+{
+	uintptr_t page_16k;
+	size_t index;
+	pte_t *t;
+
+	page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
+	index = tag.vpn % MMU_PAGES_PER_PAGE;	/* 16K-page emulation */
+
+	page_table_lock(AS, true);
+	t = page_mapping_find(AS, page_16k);
+	if (t && PTE_WRITABLE(t)) {
+		/*
+		 * The mapping was found in the software page hash table and is
+		 * writable. Demap the old mapping and insert an updated mapping
+		 * into DTLB.
+		 */
+		t->a = true;
+		t->d = true;
+		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
+		    page_16k + index * MMU_PAGE_SIZE);
+		dtlb_pte_copy(t, index, false);
+#ifdef CONFIG_TSB
+		dtsb_pte_copy(t, index, false);
+#endif
+		page_table_unlock(AS, true);
+	} else {
+		/*
+		 * Forward the page fault to the address space page fault
+		 * handler.
+		 */		
+		page_table_unlock(AS, true);
+		if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) ==
+		    AS_PF_FAULT) {
+			do_fast_data_access_protection_fault(istate, tag,
+			    __func__);
+		}
+	}
+}
+
+/** Print TLB entry (for debugging purposes).
+ *
+ * The diag field has been left out in order to make this function more generic
+ * (there is no diag field in US3 architecture).
+ *
+ * @param i		TLB entry number 
+ * @param t		TLB entry tag
+ * @param d		TLB entry data 
+ */
+static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
+{
+	printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
+	    "ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, "
+	    "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
+	    t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
+	    d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
+}
+
+#if defined (US)
+
+/** Print contents of both TLBs. */
+void tlb_print(void)
+{
+	int i;
+	tlb_data_t d;
+	tlb_tag_read_reg_t t;
+	
+	printf("I-TLB contents:\n");
+	for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
+		d.value = itlb_data_access_read(i);
+		t.value = itlb_tag_read_read(i);
+		print_tlb_entry(i, t, d);
+	}
+
+	printf("D-TLB contents:\n");
+	for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
+		d.value = dtlb_data_access_read(i);
+		t.value = dtlb_tag_read_read(i);
+		print_tlb_entry(i, t, d);
+	}
+}
+
+#elif defined (US3)
+
+/** Print contents of all TLBs. */
+void tlb_print(void)
+{
+	int i;
+	tlb_data_t d;
+	tlb_tag_read_reg_t t;
+	
+	printf("TLB_ISMALL contents:\n");
+	for (i = 0; i < tlb_ismall_size(); i++) {
+		d.value = dtlb_data_access_read(TLB_ISMALL, i);
+		t.value = dtlb_tag_read_read(TLB_ISMALL, i);
+		print_tlb_entry(i, t, d);
+	}
+	
+	printf("TLB_IBIG contents:\n");
+	for (i = 0; i < tlb_ibig_size(); i++) {
+		d.value = dtlb_data_access_read(TLB_IBIG, i);
+		t.value = dtlb_tag_read_read(TLB_IBIG, i);
+		print_tlb_entry(i, t, d);
+	}
+	
+	printf("TLB_DSMALL contents:\n");
+	for (i = 0; i < tlb_dsmall_size(); i++) {
+		d.value = dtlb_data_access_read(TLB_DSMALL, i);
+		t.value = dtlb_tag_read_read(TLB_DSMALL, i);
+		print_tlb_entry(i, t, d);
+	}
+	
+	printf("TLB_DBIG_1 contents:\n");
+	for (i = 0; i < tlb_dbig_size(); i++) {
+		d.value = dtlb_data_access_read(TLB_DBIG_0, i);
+		t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
+		print_tlb_entry(i, t, d);
+	}
+	
+	printf("TLB_DBIG_2 contents:\n");
+	for (i = 0; i < tlb_dbig_size(); i++) {
+		d.value = dtlb_data_access_read(TLB_DBIG_1, i);
+		t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
+		print_tlb_entry(i, t, d);
+	}
+}
+
+#endif
+
+void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
+    const char *str)
+{
+	fault_if_from_uspace(istate, "%s.", str);
+	dump_istate(istate);
+	panic("%s.", str);
+}
+
+void do_fast_data_access_mmu_miss_fault(istate_t *istate,
+    tlb_tag_access_reg_t tag, const char *str)
+{
+	uintptr_t va;
+
+	va = tag.vpn << MMU_PAGE_WIDTH;
+	if (tag.context) {
+		fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va,
+		    tag.context);
+	}
+	dump_istate(istate);
+	printf("Faulting page: %p, ASID=%d.\n", va, tag.context);
+	panic("%s.", str);
+}
+
+void do_fast_data_access_protection_fault(istate_t *istate,
+    tlb_tag_access_reg_t tag, const char *str)
+{
+	uintptr_t va;
+
+	va = tag.vpn << MMU_PAGE_WIDTH;
+
+	if (tag.context) {
+		fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va,
+		    tag.context);
+	}
+	printf("Faulting page: %p, ASID=%d\n", va, tag.context);
+	dump_istate(istate);
+	panic("%s.", str);
+}
+
+void dump_sfsr_and_sfar(void)
+{
+	tlb_sfsr_reg_t sfsr;
+	uintptr_t sfar;
+
+	sfsr.value = dtlb_sfsr_read();
+	sfar = dtlb_sfar_read();
+	
+#if defined (US)
+	printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
+	    "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
+	    sfsr.ow, sfsr.fv);
+#elif defined (US3)
+	printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
+	    "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
+	    sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
+#endif
+	    
+	printf("DTLB SFAR: address=%p\n", sfar);
+	
+	dtlb_sfsr_write(0);
+}
+
+#if defined (US)
+/** Invalidate all unlocked ITLB and DTLB entries. */
+void tlb_invalidate_all(void)
+{
+	int i;
+	
+	/*
+	 * Walk all ITLB and DTLB entries and remove all unlocked mappings.
+	 *
+	 * The kernel doesn't use global mappings so any locked global mappings
+	 * found must have been created by someone else. Their only purpose now
+	 * is to collide with proper mappings. Invalidate immediately. It should
+	 * be safe to invalidate them as late as now.
+	 */
+
+	tlb_data_t d;
+	tlb_tag_read_reg_t t;
+
+	for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
+		d.value = itlb_data_access_read(i);
+		if (!d.l || d.g) {
+			t.value = itlb_tag_read_read(i);
+			d.v = false;
+			itlb_tag_access_write(t.value);
+			itlb_data_access_write(i, d.value);
+		}
+	}
+
+	for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
+		d.value = dtlb_data_access_read(i);
+		if (!d.l || d.g) {
+			t.value = dtlb_tag_read_read(i);
+			d.v = false;
+			dtlb_tag_access_write(t.value);
+			dtlb_data_access_write(i, d.value);
+		}
+	}
+
+}
+
+#elif defined (US3)
+
+/** Invalidate all unlocked ITLB and DTLB entries. */
+void tlb_invalidate_all(void)
+{
+	itlb_demap(TLB_DEMAP_ALL, 0, 0);
+	dtlb_demap(TLB_DEMAP_ALL, 0, 0);
+}
+
+#endif
+
+/** Invalidate all ITLB and DTLB entries that belong to specified ASID
+ * (Context).
+ *
+ * @param asid Address Space ID.
+ */
+void tlb_invalidate_asid(asid_t asid)
+{
+	tlb_context_reg_t pc_save, ctx;
+	
+	/* switch to nucleus because we are mapped by the primary context */
+	nucleus_enter();
+	
+	ctx.v = pc_save.v = mmu_primary_context_read();
+	ctx.context = asid;
+	mmu_primary_context_write(ctx.v);
+	
+	itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
+	dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
+	
+	mmu_primary_context_write(pc_save.v);
+	
+	nucleus_leave();
+}
+
+/** Invalidate all ITLB and DTLB entries for specified page range in specified
+ * address space.
+ *
+ * @param asid		Address Space ID.
+ * @param page		First page which to sweep out from ITLB and DTLB.
+ * @param cnt		Number of ITLB and DTLB entries to invalidate.
+ */
+void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
+{
+	unsigned int i;
+	tlb_context_reg_t pc_save, ctx;
+	
+	/* switch to nucleus because we are mapped by the primary context */
+	nucleus_enter();
+	
+	ctx.v = pc_save.v = mmu_primary_context_read();
+	ctx.context = asid;
+	mmu_primary_context_write(ctx.v);
+	
+	for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
+		itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
+		    page + i * MMU_PAGE_SIZE);
+		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
+		    page + i * MMU_PAGE_SIZE);
+	}
+	
+	mmu_primary_context_write(pc_save.v);
+	
+	nucleus_leave();
+}
+
+/** @}
+ */
Index: kernel/arch/sparc64/src/mm/sun4u/tsb.c
===================================================================
--- kernel/arch/sparc64/src/mm/sun4u/tsb.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/mm/sun4u/tsb.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2006 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64mm	
+ * @{
+ */
+/** @file
+ */
+
+#include <arch/mm/tsb.h>
+#include <arch/mm/tlb.h>
+#include <arch/mm/page.h>
+#include <arch/barrier.h>
+#include <mm/as.h>
+#include <arch/types.h>
+#include <macros.h>
+#include <debug.h>
+
+#define TSB_INDEX_MASK	((1 << (21 + 1 + TSB_SIZE - MMU_PAGE_WIDTH)) - 1)
+
+/** Invalidate portion of TSB.
+ *
+ * We assume that the address space is already locked. Note that respective
+ * portions of both TSBs are invalidated at a time.
+ *
+ * @param as Address space.
+ * @param page First page to invalidate in TSB.
+ * @param pages Number of pages to invalidate. Value of (size_t) -1 means the
+ * 	whole TSB.
+ */
+void tsb_invalidate(as_t *as, uintptr_t page, size_t pages)
+{
+	size_t i0;
+	size_t i;
+	size_t cnt;
+	
+	ASSERT(as->arch.itsb && as->arch.dtsb);
+	
+	i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
+	ASSERT(i0 < ITSB_ENTRY_COUNT && i0 < DTSB_ENTRY_COUNT);
+
+	if (pages == (size_t) -1 || (pages * 2) > ITSB_ENTRY_COUNT)
+		cnt = ITSB_ENTRY_COUNT;
+	else
+		cnt = pages * 2;
+	
+	for (i = 0; i < cnt; i++) {
+		as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT - 1)].tag.invalid =
+		    true;
+		as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT - 1)].tag.invalid =
+		    true;
+	}
+}
+
+/** Copy software PTE to ITSB.
+ *
+ * @param t 	Software PTE.
+ * @param index	Zero if lower 8K-subpage, one if higher 8K subpage.
+ */
+void itsb_pte_copy(pte_t *t, size_t index)
+{
+	as_t *as;
+	tsb_entry_t *tsb;
+	size_t entry;
+
+	ASSERT(index <= 1);
+	
+	as = t->as;
+	entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK; 
+	ASSERT(entry < ITSB_ENTRY_COUNT);
+	tsb = &as->arch.itsb[entry];
+
+	/*
+	 * We use write barriers to make sure that the TSB load
+	 * won't use inconsistent data or that the fault will
+	 * be repeated.
+	 */
+
+	tsb->tag.invalid = true;	/* invalidate the entry
+					 * (tag target has this
+					 * set to 0) */
+
+	write_barrier();
+
+	tsb->tag.context = as->asid;
+	/* the shift is bigger than PAGE_WIDTH, do not bother with index  */
+	tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
+	tsb->data.value = 0;
+	tsb->data.size = PAGESIZE_8K;
+	tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
+	tsb->data.cp = t->c;	/* cp as cache in phys.-idxed, c as cacheable */
+	tsb->data.p = t->k;	/* p as privileged, k as kernel */
+	tsb->data.v = t->p;	/* v as valid, p as present */
+	
+	write_barrier();
+	
+	tsb->tag.invalid = false;	/* mark the entry as valid */
+}
+
+/** Copy software PTE to DTSB.
+ *
+ * @param t	Software PTE.
+ * @param index	Zero if lower 8K-subpage, one if higher 8K-subpage.
+ * @param ro	If true, the mapping is copied read-only.
+ */
+void dtsb_pte_copy(pte_t *t, size_t index, bool ro)
+{
+	as_t *as;
+	tsb_entry_t *tsb;
+	size_t entry;
+	
+	ASSERT(index <= 1);
+
+	as = t->as;
+	entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
+	ASSERT(entry < DTSB_ENTRY_COUNT);
+	tsb = &as->arch.dtsb[entry];
+
+	/*
+	 * We use write barriers to make sure that the TSB load
+	 * won't use inconsistent data or that the fault will
+	 * be repeated.
+	 */
+
+	tsb->tag.invalid = true;	/* invalidate the entry
+					 * (tag target has this
+					 * set to 0) */
+
+	write_barrier();
+
+	tsb->tag.context = as->asid;
+	/* the shift is bigger than PAGE_WIDTH, do not bother with index */
+	tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
+	tsb->data.value = 0;
+	tsb->data.size = PAGESIZE_8K;
+	tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
+	tsb->data.cp = t->c;
+#ifdef CONFIG_VIRT_IDX_DCACHE
+	tsb->data.cv = t->c;
+#endif /* CONFIG_VIRT_IDX_DCACHE */
+	tsb->data.p = t->k;		/* p as privileged */
+	tsb->data.w = ro ? false : t->w;
+	tsb->data.v = t->p;
+	
+	write_barrier();
+	
+	tsb->tag.invalid = false;	/* mark the entry as valid */
+}
+
+/** @}
+ */
+
Index: kernel/arch/sparc64/src/mm/sun4v/as.c
===================================================================
--- kernel/arch/sparc64/src/mm/sun4v/as.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/mm/sun4v/as.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2006 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64mm
+ * @{
+ */
+/** @file
+ */
+
+#include <arch/mm/as.h>
+#include <arch/mm/tlb.h>
+#include <genarch/mm/page_ht.h>
+#include <genarch/mm/asid_fifo.h>
+#include <debug.h>
+#include <config.h>
+
+#ifdef CONFIG_TSB
+#include <arch/mm/tsb.h>
+#include <arch/memstr.h>
+#include <arch/asm.h>
+#include <mm/frame.h>
+#include <bitops.h>
+#include <macros.h>
+#endif /* CONFIG_TSB */
+
+/** Architecture dependent address space init. */
+void as_arch_init(void)
+{
+	if (config.cpu_active == 1) {
+		as_operations = &as_ht_operations;
+		asid_fifo_init();
+	}
+}
+
+int as_constructor_arch(as_t *as, int flags)
+{
+#ifdef CONFIG_TSB
+	int order = fnzb32(
+		(TSB_ENTRY_COUNT * sizeof(tsb_entry_t)) >> FRAME_WIDTH);
+
+	uintptr_t tsb = (uintptr_t) frame_alloc(order, flags);
+
+	if (!tsb)
+		return -1;
+
+	as->arch.tsb_description.page_size = PAGESIZE_8K;
+	as->arch.tsb_description.associativity = 1;
+	as->arch.tsb_description.num_ttes = TSB_ENTRY_COUNT;
+	as->arch.tsb_description.pgsize_mask = 1 << PAGESIZE_8K;
+	as->arch.tsb_description.tsb_base = tsb;
+	as->arch.tsb_description.reserved = 0;
+	as->arch.tsb_description.context = 0;
+
+	memsetb((void *) PA2KA(as->arch.tsb_description.tsb_base),
+		TSB_ENTRY_COUNT * sizeof(tsb_entry_t), 0);
+#endif
+	return 0;
+}
+
+int as_destructor_arch(as_t *as)
+{
+#ifdef CONFIG_TSB
+	count_t cnt = (TSB_ENTRY_COUNT * sizeof(tsb_entry_t)) >> FRAME_WIDTH;
+	frame_free((uintptr_t) as->arch.tsb_description.tsb_base);
+	return cnt;
+#else
+	return 0;
+#endif
+}
+
+int as_create_arch(as_t *as, int flags)
+{
+#ifdef CONFIG_TSB
+	tsb_invalidate(as, 0, (size_t) -1);
+#endif
+	return 0;
+}
+
+/** Perform sparc64-specific tasks when an address space becomes active on the
+ * processor.
+ *
+ * Install ASID and map TSBs.
+ *
+ * @param as Address space.
+ */
+void as_install_arch(as_t *as)
+{
+	mmu_secondary_context_write(as->asid);
+}
+
+/** Perform sparc64-specific tasks when an address space is removed from the
+ * processor.
+ *
+ * Demap TSBs.
+ *
+ * @param as Address space.
+ */
+void as_deinstall_arch(as_t *as)
+{
+
+	/*
+	 * Note that we don't and may not lock the address space. That's ok
+	 * since we only read members that are currently read-only.
+	 *
+	 * Moreover, the as->asid is protected by asidlock, which is being held.
+	 */
+
+#ifdef CONFIG_TSB
+	uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
+
+	ASSERT(as->arch.itsb && as->arch.dtsb);
+
+	uintptr_t tsb = (uintptr_t) as->arch.itsb;
+		
+	if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
+		/*
+		 * TSBs were allocated from memory not covered
+		 * by the locked 4M kernel DTLB entry. We need
+		 * to demap the entry installed by as_install_arch().
+		 */
+		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
+	}
+#endif
+}
+
+/** @}
+ */
Index: kernel/arch/sparc64/src/mm/sun4v/frame.c
===================================================================
--- kernel/arch/sparc64/src/mm/sun4v/frame.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/mm/sun4v/frame.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64mm
+ * @{
+ */
+/** @file
+ */
+
+#include <arch/mm/frame.h>
+#include <mm/frame.h>
+#include <arch/boot/boot.h>
+#include <arch/types.h>
+#include <config.h>
+#include <align.h>
+#include <macros.h>
+
+uintptr_t last_frame = NULL;
+
+/** Create memory zones according to information stored in bootinfo.
+ *
+ * Walk the bootinfo memory map and create frame zones according to it.
+ */
+void frame_arch_init(void)
+{
+	unsigned int i;
+	pfn_t confdata;
+
+	if (config.cpu_active == 1) {
+		for (i = 0; i < bootinfo.memmap.count; i++) {
+			uintptr_t start = bootinfo.memmap.zones[i].start;
+			size_t size = bootinfo.memmap.zones[i].size;
+
+			/*
+			 * The memmap is created by HelenOS boot loader.
+			 * It already contains no holes.
+			 */
+
+			confdata = ADDR2PFN(start);
+			if (confdata == ADDR2PFN(KA2PA(PFN2ADDR(0))))
+				confdata = ADDR2PFN(KA2PA(PFN2ADDR(2)));
+			zone_create(ADDR2PFN(start),
+			    SIZE2FRAMES(ALIGN_DOWN(size, FRAME_SIZE)),
+			    confdata, 0);
+			last_frame = max(last_frame, start + ALIGN_UP(size,
+			    FRAME_SIZE));
+		}
+
+		/*
+		 * On sparc64, physical memory can start on a non-zero address.
+		 * The generic frame_init() only marks PFN 0 as not free, so we
+		 * must mark the physically first frame not free explicitly
+		 * here, no matter what its address is.
+		 */
+		frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);
+	}
+
+//MH
+//	end_of_identity = PA2KA(last_frame);
+}
+
+/** @}
+ */
Index: kernel/arch/sparc64/src/mm/sun4v/tlb.c
===================================================================
--- kernel/arch/sparc64/src/mm/sun4v/tlb.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/mm/sun4v/tlb.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * Copyright (c) 2008 Pavel Rimsky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64mm	
+ * @{
+ */
+/** @file
+ */
+
+#include <mm/tlb.h>
+#include <mm/as.h>
+#include <mm/asid.h>
+#include <arch/sun4v/hypercall.h>
+#include <arch/mm/frame.h>
+#include <arch/mm/page.h>
+#include <arch/mm/tte.h>
+#include <arch/mm/tlb.h>
+#include <arch/interrupt.h>
+#include <interrupt.h>
+#include <arch.h>
+#include <print.h>
+#include <arch/types.h>
+#include <config.h>
+#include <arch/trap/trap.h>
+#include <arch/trap/exception.h>
+#include <panic.h>
+#include <arch/asm.h>
+#include <arch/cpu.h>
+#include <arch/mm/pagesize.h>
+#include <genarch/mm/page_ht.h>
+
+#ifdef CONFIG_TSB
+#include <arch/mm/tsb.h>
+#endif
+
+static void itlb_pte_copy(pte_t *);
+static void dtlb_pte_copy(pte_t *, bool);
+static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
+static void do_fast_data_access_mmu_miss_fault(istate_t *, uint64_t,
+    const char *);
+static void do_fast_data_access_protection_fault(istate_t *,
+    uint64_t, const char *);
+
+/*
+ * The assembly language routine passes a 64-bit parameter to the Data Access
+ * MMU Miss and Data Access protection handlers, the parameter encapsulates
+ * a virtual address of the faulting page and the faulting context. The most
+ * significant 51 bits represent the VA of the faulting page and the least
+ * significant 13 vits represent the faulting context. The following macros
+ * extract the page and context out of the 64-bit parameter:
+ */
+
+/* extracts the VA of the faulting page */
+#define DMISS_ADDRESS(page_and_ctx)	(((page_and_ctx) >> 13) << 13)
+
+/* extracts the faulting context */
+#define DMISS_CONTEXT(page_and_ctx)	((page_and_ctx) & 0x1fff)
+
+/**
+ * Descriptions of fault types from the MMU Fault status area.
+ *
+ * fault_type[i] contains description of error for which the IFT or DFT
+ * field of the MMU fault status area is i.
+ */
+char *fault_types[] = {
+	"unknown",
+	"fast miss",
+	"fast protection",
+	"MMU miss",
+	"invalid RA",
+	"privileged violation",
+	"protection violation",
+	"NFO access",
+	"so page/NFO side effect",
+	"invalid VA",
+	"invalid ASI",
+	"nc atomic",
+	"privileged action",
+	"unknown",
+	"unaligned access",
+	"invalid page size"
+	};
+	
+
+/** Array of MMU fault status areas. */
+extern mmu_fault_status_area_t mmu_fsas[MAX_NUM_STRANDS];
+
+/*
+ * Invalidate all non-locked DTLB and ITLB entries.
+ */
+void tlb_arch_init(void)
+{
+	tlb_invalidate_all();
+}
+
+/** Insert privileged mapping into DMMU TLB.
+ *
+ * @param page		Virtual page address.
+ * @param frame		Physical frame address.
+ * @param pagesize	Page size.
+ * @param locked	True for permanent mappings, false otherwise.
+ * @param cacheable	True if the mapping is cacheable, false otherwise.
+ */
+void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
+    bool locked, bool cacheable)
+{
+	tte_data_t data;
+	
+	data.value = 0;
+	data.v = true;
+	data.nfo = false;
+	data.ra = frame >> FRAME_WIDTH;
+	data.ie = false;
+	data.e = false;
+	data.cp = cacheable;
+#ifdef CONFIG_VIRT_IDX_DCACHE
+	data.cv = cacheable;
+#endif
+	data.p = true;
+	data.x = false;
+	data.w = true;
+	data.size = pagesize;
+	
+	if (locked) {
+		__hypercall_fast4(
+			MMU_MAP_PERM_ADDR, page, 0, data.value, MMU_FLAG_DTLB);
+	} else {
+		__hypercall_hyperfast(
+			page, ASID_KERNEL, data.value, MMU_FLAG_DTLB, 0,
+			MMU_MAP_ADDR);
+	}
+}
+
+/** Copy PTE to TLB.
+ *
+ * @param t 		Page Table Entry to be copied.
+ * @param ro		If true, the entry will be created read-only, regardless
+ * 			of its w field.
+ */
+void dtlb_pte_copy(pte_t *t, bool ro)
+{
+	tte_data_t data;
+	
+	data.value = 0;
+	data.v = true;
+	data.nfo = false;
+	data.ra = (t->frame) >> FRAME_WIDTH;
+	data.ie = false;
+	data.e = false;
+	data.cp = t->c;
+#ifdef CONFIG_VIRT_IDX_DCACHE
+	data.cv = t->c;
+#endif
+	data.p = t->k;
+	data.x = false;
+	data.w = ro ? false : t->w;
+	data.size = PAGESIZE_8K;
+	
+	__hypercall_hyperfast(
+		t->page, t->as->asid, data.value, MMU_FLAG_DTLB, 0, MMU_MAP_ADDR);
+}
+
+/** Copy PTE to ITLB.
+ *
+ * @param t		Page Table Entry to be copied.
+ */
+void itlb_pte_copy(pte_t *t)
+{
+	tte_data_t data;
+	
+	data.value = 0;
+	data.v = true;
+	data.nfo = false;
+	data.ra = (t->frame) >> FRAME_WIDTH;
+	data.ie = false;
+	data.e = false;
+	data.cp = t->c;
+	data.cv = false;
+	data.p = t->k;
+	data.x = true;
+	data.w = false;
+	data.size = PAGESIZE_8K;
+	
+	__hypercall_hyperfast(
+		t->page, t->as->asid, data.value, MMU_FLAG_ITLB, 0, MMU_MAP_ADDR);
+}
+
+/** ITLB miss handler. */
+void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
+{
+	uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
+	pte_t *t;
+
+	page_table_lock(AS, true);
+	t = page_mapping_find(AS, va);
+
+	if (t && PTE_EXECUTABLE(t)) {
+		/*
+		 * The mapping was found in the software page hash table.
+		 * Insert it into ITLB.
+		 */
+		t->a = true;
+		itlb_pte_copy(t);
+#ifdef CONFIG_TSB
+		itsb_pte_copy(t);
+#endif
+		page_table_unlock(AS, true);
+	} else {
+		/*
+		 * Forward the page fault to the address space page fault
+		 * handler.
+		 */		
+		page_table_unlock(AS, true);
+		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
+			do_fast_instruction_access_mmu_miss_fault(istate,
+			    __func__);
+		}
+	}
+}
+
+/** DTLB miss handler.
+ *
+ * Note that some faults (e.g. kernel faults) were already resolved by the
+ * low-level, assembly language part of the fast_data_access_mmu_miss handler.
+ *
+ * @param page_and_ctx	A 64-bit value describing the fault. The most
+ * 			significant 51 bits of the value contain the virtual
+ * 			address which caused the fault truncated to the page
+ * 			boundary. The least significant 13 bits of the value
+ * 			contain the number of the context in which the fault
+ * 			occurred.
+ * @param istate	Interrupted state saved on the stack.
+ */
+void fast_data_access_mmu_miss(uint64_t page_and_ctx, istate_t *istate)
+{
+	pte_t *t;
+	uintptr_t va = DMISS_ADDRESS(page_and_ctx);
+	uint16_t ctx = DMISS_CONTEXT(page_and_ctx);
+
+	if (ctx == ASID_KERNEL) {
+		if (va == 0) {
+			/* NULL access in kernel */
+			do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
+			    __func__);
+		}
+		do_fast_data_access_mmu_miss_fault(istate, page_and_ctx, "Unexpected "
+		    "kernel page fault.");
+	}
+
+	page_table_lock(AS, true);
+	t = page_mapping_find(AS, va);
+	if (t) {
+		/*
+		 * The mapping was found in the software page hash table.
+		 * Insert it into DTLB.
+		 */
+		t->a = true;
+		dtlb_pte_copy(t, true);
+#ifdef CONFIG_TSB
+		dtsb_pte_copy(t, true);
+#endif
+		page_table_unlock(AS, true);
+	} else {
+		/*
+		 * Forward the page fault to the address space page fault
+		 * handler.
+		 */		
+		page_table_unlock(AS, true);
+		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
+			do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
+			    __func__);
+		}
+	}
+}
+
+/** DTLB protection fault handler.
+ *
+ * @param page_and_ctx	A 64-bit value describing the fault. The most
+ * 			significant 51 bits of the value contain the virtual
+ * 			address which caused the fault truncated to the page
+ * 			boundary. The least significant 13 bits of the value
+ * 			contain the number of the context in which the fault
+ * 			occurred.
+ * @param istate	Interrupted state saved on the stack.
+ */
+void fast_data_access_protection(uint64_t page_and_ctx, istate_t *istate)
+{
+	pte_t *t;
+	uintptr_t va = DMISS_ADDRESS(page_and_ctx);
+	uint16_t ctx = DMISS_CONTEXT(page_and_ctx);
+
+	page_table_lock(AS, true);
+	t = page_mapping_find(AS, va);
+	if (t && PTE_WRITABLE(t)) {
+		/*
+		 * The mapping was found in the software page hash table and is
+		 * writable. Demap the old mapping and insert an updated mapping
+		 * into DTLB.
+		 */
+		t->a = true;
+		t->d = true;
+		mmu_demap_page(va, ctx, MMU_FLAG_DTLB);
+		dtlb_pte_copy(t, false);
+#ifdef CONFIG_TSB
+		dtsb_pte_copy(t, false);
+#endif
+		page_table_unlock(AS, true);
+	} else {
+		/*
+		 * Forward the page fault to the address space page fault
+		 * handler.
+		 */		
+		page_table_unlock(AS, true);
+		if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
+			do_fast_data_access_protection_fault(istate, page_and_ctx,
+			    __func__);
+		}
+	}
+}
+
+/*
+ * On Niagara this function does not work, as supervisor software is isolated
+ * from the TLB by the hypervisor and has no chance to investigate the TLB
+ * entries.
+ */
+void tlb_print(void)
+{
+	printf("Operation not possible on Niagara.\n");
+}
+
+void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
+    const char *str)
+{
+	fault_if_from_uspace(istate, "%s.", str);
+	dump_istate(istate);
+	panic("%s.", str);
+}
+
+void do_fast_data_access_mmu_miss_fault(istate_t *istate,
+    uint64_t page_and_ctx, const char *str)
+{
+	if (DMISS_CONTEXT(page_and_ctx)) {
+		fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, DMISS_ADDRESS(page_and_ctx),
+		    DMISS_CONTEXT(page_and_ctx));
+	}
+	dump_istate(istate);
+	printf("Faulting page: %p, ASID=%d\n", DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
+	panic("%s\n", str);
+}
+
+void do_fast_data_access_protection_fault(istate_t *istate,
+    uint64_t page_and_ctx, const char *str)
+{
+	if (DMISS_CONTEXT(page_and_ctx)) {
+		fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, DMISS_ADDRESS(page_and_ctx),
+		    DMISS_CONTEXT(page_and_ctx));
+	}
+	printf("Faulting page: %p, ASID=%d\n", DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
+	dump_istate(istate);
+	panic("%s\n", str);
+}
+
+/**
+ * Describes the exact condition which caused the last DMMU fault.
+ */
+void describe_dmmu_fault(void)
+{
+	uint64_t myid;
+	__hypercall_fast_ret1(0, 0, 0, 0, 0, CPU_MYID, &myid);
+
+	ASSERT(mmu_fsas[myid].dft < 16);
+
+	printf("condition which caused the fault: %s\n",
+		fault_types[mmu_fsas[myid].dft]);
+}
+
+/** Invalidate all unlocked ITLB and DTLB entries. */
+void tlb_invalidate_all(void)
+{
+	uint64_t errno =  __hypercall_fast3(MMU_DEMAP_ALL, 0, 0,
+		MMU_FLAG_DTLB | MMU_FLAG_ITLB);
+	if (errno != EOK) {
+		panic("Error code = %d.\n", errno);
+	}
+}
+
+/** Invalidate all ITLB and DTLB entries that belong to specified ASID
+ * (Context).
+ *
+ * @param asid Address Space ID.
+ */
+void tlb_invalidate_asid(asid_t asid)
+{
+	/* switch to nucleus because we are mapped by the primary context */
+	nucleus_enter();
+
+	__hypercall_fast4(MMU_DEMAP_CTX, 0, 0, asid,
+		MMU_FLAG_ITLB | MMU_FLAG_DTLB);
+
+	nucleus_leave();
+}
+
+/** Invalidate all ITLB and DTLB entries for specified page range in specified
+ * address space.
+ *
+ * @param asid		Address Space ID.
+ * @param page		First page which to sweep out from ITLB and DTLB.
+ * @param cnt		Number of ITLB and DTLB entries to invalidate.
+ */
+void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
+{
+	unsigned int i;
+	
+	/* switch to nucleus because we are mapped by the primary context */
+	nucleus_enter();
+
+	for (i = 0; i < cnt; i++) {
+		__hypercall_fast5(MMU_DEMAP_PAGE, 0, 0, page, asid,
+			MMU_FLAG_DTLB | MMU_FLAG_ITLB);
+	}
+
+	nucleus_leave();
+}
+
+/** @}
+ */
Index: kernel/arch/sparc64/src/mm/sun4v/tsb.c
===================================================================
--- kernel/arch/sparc64/src/mm/sun4v/tsb.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/mm/sun4v/tsb.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2006 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64mm	
+ * @{
+ */
+/** @file
+ */
+
+#include <arch/mm/tsb.h>
+#include <arch/mm/tlb.h>
+#include <arch/mm/page.h>
+#include <arch/barrier.h>
+#include <mm/as.h>
+#include <arch/types.h>
+#include <macros.h>
+#include <debug.h>
+
+#define TSB_INDEX_MASK	((1 << (21 + 1 + TSB_SIZE - MMU_PAGE_WIDTH)) - 1)
+
+/** Invalidate portion of TSB.
+ *
+ * We assume that the address space is already locked. Note that respective
+ * portions of both TSBs are invalidated at a time.
+ *
+ * @param as Address space.
+ * @param page First page to invalidate in TSB.
+ * @param pages Number of pages to invalidate. Value of (size_t) -1 means the
+ * 	whole TSB.
+ */
+void tsb_invalidate(as_t *as, uintptr_t page, size_t pages)
+{
+	size_t i0;
+	size_t i;
+	size_t cnt;
+	
+	ASSERT(as->arch.itsb && as->arch.dtsb);
+	
+	i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
+	ASSERT(i0 < ITSB_ENTRY_COUNT && i0 < DTSB_ENTRY_COUNT);
+
+	if (pages == (size_t) -1 || (pages * 2) > ITSB_ENTRY_COUNT)
+		cnt = ITSB_ENTRY_COUNT;
+	else
+		cnt = pages * 2;
+	
+	for (i = 0; i < cnt; i++) {
+		as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT - 1)].tag.invalid =
+		    true;
+		as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT - 1)].tag.invalid =
+		    true;
+	}
+}
+
+/** Copy software PTE to ITSB.
+ *
+ * @param t 	Software PTE.
+ * @param index	Zero if lower 8K-subpage, one if higher 8K subpage.
+ */
+void itsb_pte_copy(pte_t *t, size_t index)
+{
+#if 0
+	as_t *as;
+	tsb_entry_t *tsb;
+	size_t entry;
+
+	ASSERT(index <= 1);
+	
+	as = t->as;
+	entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK; 
+	ASSERT(entry < ITSB_ENTRY_COUNT);
+	tsb = &as->arch.itsb[entry];
+
+	/*
+	 * We use write barriers to make sure that the TSB load
+	 * won't use inconsistent data or that the fault will
+	 * be repeated.
+	 */
+
+	tsb->tag.invalid = true;	/* invalidate the entry
+					 * (tag target has this
+					 * set to 0) */
+
+	write_barrier();
+
+	tsb->tag.context = as->asid;
+	/* the shift is bigger than PAGE_WIDTH, do not bother with index  */
+	tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
+	tsb->data.value = 0;
+	tsb->data.size = PAGESIZE_8K;
+	tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
+	tsb->data.cp = t->c;	/* cp as cache in phys.-idxed, c as cacheable */
+	tsb->data.p = t->k;	/* p as privileged, k as kernel */
+	tsb->data.v = t->p;	/* v as valid, p as present */
+	
+	write_barrier();
+	
+	tsb->tag.invalid = false;	/* mark the entry as valid */
+#endif
+}
+
+/** Copy software PTE to DTSB.
+ *
+ * @param t	Software PTE.
+ * @param index	Zero if lower 8K-subpage, one if higher 8K-subpage.
+ * @param ro	If true, the mapping is copied read-only.
+ */
+void dtsb_pte_copy(pte_t *t, size_t index, bool ro)
+{
+#if 0
+	as_t *as;
+	tsb_entry_t *tsb;
+	size_t entry;
+	
+	ASSERT(index <= 1);
+
+	as = t->as;
+	entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
+	ASSERT(entry < DTSB_ENTRY_COUNT);
+	tsb = &as->arch.dtsb[entry];
+
+	/*
+	 * We use write barriers to make sure that the TSB load
+	 * won't use inconsistent data or that the fault will
+	 * be repeated.
+	 */
+
+	tsb->tag.invalid = true;	/* invalidate the entry
+					 * (tag target has this
+					 * set to 0) */
+
+	write_barrier();
+
+	tsb->tag.context = as->asid;
+	/* the shift is bigger than PAGE_WIDTH, do not bother with index */
+	tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
+	tsb->data.value = 0;
+	tsb->data.size = PAGESIZE_8K;
+	tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
+	tsb->data.cp = t->c;
+#ifdef CONFIG_VIRT_IDX_DCACHE
+	tsb->data.cv = t->c;
+#endif /* CONFIG_VIRT_IDX_DCACHE */
+	tsb->data.p = t->k;		/* p as privileged */
+	tsb->data.w = ro ? false : t->w;
+	tsb->data.v = t->p;
+	
+	write_barrier();
+	
+	tsb->tag.invalid = false;	/* mark the entry as valid */
+#endif
+}
+
+/** @}
+ */
+
Index: kernel/arch/sparc64/src/mm/tlb.c
===================================================================
--- kernel/arch/sparc64/src/mm/tlb.c	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ 	(revision )
@@ -1,608 +1,0 @@
-/*
- * Copyright (c) 2005 Jakub Jermar
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup sparc64mm	
- * @{
- */
-/** @file
- */
-
-#include <arch/mm/tlb.h>
-#include <mm/tlb.h>
-#include <mm/as.h>
-#include <mm/asid.h>
-#include <genarch/mm/page_ht.h>
-#include <arch/mm/frame.h>
-#include <arch/mm/page.h>
-#include <arch/mm/mmu.h>
-#include <arch/interrupt.h>
-#include <interrupt.h>
-#include <arch.h>
-#include <print.h>
-#include <arch/types.h>
-#include <config.h>
-#include <arch/trap/trap.h>
-#include <arch/trap/exception.h>
-#include <panic.h>
-#include <arch/asm.h>
-
-#ifdef CONFIG_TSB
-#include <arch/mm/tsb.h>
-#endif
-
-static void dtlb_pte_copy(pte_t *, size_t, bool);
-static void itlb_pte_copy(pte_t *, size_t);
-static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
-static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
-    const char *);
-static void do_fast_data_access_protection_fault(istate_t *,
-    tlb_tag_access_reg_t, const char *);
-
-char *context_encoding[] = {
-	"Primary",
-	"Secondary",
-	"Nucleus",
-	"Reserved"
-};
-
-void tlb_arch_init(void)
-{
-	/*
-	 * Invalidate all non-locked DTLB and ITLB entries.
-	 */
-	tlb_invalidate_all();
-
-	/*
-	 * Clear both SFSRs.
-	 */
-	dtlb_sfsr_write(0);
-	itlb_sfsr_write(0);
-}
-
-/** Insert privileged mapping into DMMU TLB.
- *
- * @param page		Virtual page address.
- * @param frame		Physical frame address.
- * @param pagesize	Page size.
- * @param locked	True for permanent mappings, false otherwise.
- * @param cacheable	True if the mapping is cacheable, false otherwise.
- */
-void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
-    bool locked, bool cacheable)
-{
-	tlb_tag_access_reg_t tag;
-	tlb_data_t data;
-	page_address_t pg;
-	frame_address_t fr;
-
-	pg.address = page;
-	fr.address = frame;
-
-	tag.context = ASID_KERNEL;
-	tag.vpn = pg.vpn;
-
-	dtlb_tag_access_write(tag.value);
-
-	data.value = 0;
-	data.v = true;
-	data.size = pagesize;
-	data.pfn = fr.pfn;
-	data.l = locked;
-	data.cp = cacheable;
-#ifdef CONFIG_VIRT_IDX_DCACHE
-	data.cv = cacheable;
-#endif /* CONFIG_VIRT_IDX_DCACHE */
-	data.p = true;
-	data.w = true;
-	data.g = false;
-
-	dtlb_data_in_write(data.value);
-}
-
-/** Copy PTE to TLB.
- *
- * @param t 		Page Table Entry to be copied.
- * @param index		Zero if lower 8K-subpage, one if higher 8K-subpage.
- * @param ro		If true, the entry will be created read-only, regardless
- * 			of its w field.
- */
-void dtlb_pte_copy(pte_t *t, size_t index, bool ro)
-{
-	tlb_tag_access_reg_t tag;
-	tlb_data_t data;
-	page_address_t pg;
-	frame_address_t fr;
-
-	pg.address = t->page + (index << MMU_PAGE_WIDTH);
-	fr.address = t->frame + (index << MMU_PAGE_WIDTH);
-
-	tag.value = 0;
-	tag.context = t->as->asid;
-	tag.vpn = pg.vpn;
-
-	dtlb_tag_access_write(tag.value);
-
-	data.value = 0;
-	data.v = true;
-	data.size = PAGESIZE_8K;
-	data.pfn = fr.pfn;
-	data.l = false;
-	data.cp = t->c;
-#ifdef CONFIG_VIRT_IDX_DCACHE
-	data.cv = t->c;
-#endif /* CONFIG_VIRT_IDX_DCACHE */
-	data.p = t->k;		/* p like privileged */
-	data.w = ro ? false : t->w;
-	data.g = t->g;
-
-	dtlb_data_in_write(data.value);
-}
-
-/** Copy PTE to ITLB.
- *
- * @param t		Page Table Entry to be copied.
- * @param index		Zero if lower 8K-subpage, one if higher 8K-subpage.
- */
-void itlb_pte_copy(pte_t *t, size_t index)
-{
-	tlb_tag_access_reg_t tag;
-	tlb_data_t data;
-	page_address_t pg;
-	frame_address_t fr;
-
-	pg.address = t->page + (index << MMU_PAGE_WIDTH);
-	fr.address = t->frame + (index << MMU_PAGE_WIDTH);
-
-	tag.value = 0;
-	tag.context = t->as->asid;
-	tag.vpn = pg.vpn;
-	
-	itlb_tag_access_write(tag.value);
-	
-	data.value = 0;
-	data.v = true;
-	data.size = PAGESIZE_8K;
-	data.pfn = fr.pfn;
-	data.l = false;
-	data.cp = t->c;
-	data.p = t->k;		/* p like privileged */
-	data.w = false;
-	data.g = t->g;
-	
-	itlb_data_in_write(data.value);
-}
-
-/** ITLB miss handler. */
-void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
-{
-	uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
-	size_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
-	pte_t *t;
-
-	page_table_lock(AS, true);
-	t = page_mapping_find(AS, page_16k);
-	if (t && PTE_EXECUTABLE(t)) {
-		/*
-		 * The mapping was found in the software page hash table.
-		 * Insert it into ITLB.
-		 */
-		t->a = true;
-		itlb_pte_copy(t, index);
-#ifdef CONFIG_TSB
-		itsb_pte_copy(t, index);
-#endif
-		page_table_unlock(AS, true);
-	} else {
-		/*
-		 * Forward the page fault to the address space page fault
-		 * handler.
-		 */		
-		page_table_unlock(AS, true);
-		if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) ==
-		    AS_PF_FAULT) {
-			do_fast_instruction_access_mmu_miss_fault(istate,
-			    __func__);
-		}
-	}
-}
-
-/** DTLB miss handler.
- *
- * Note that some faults (e.g. kernel faults) were already resolved by the
- * low-level, assembly language part of the fast_data_access_mmu_miss handler.
- *
- * @param tag		Content of the TLB Tag Access register as it existed
- * 			when the trap happened. This is to prevent confusion
- * 			created by clobbered Tag Access register during a nested
- * 			DTLB miss.
- * @param istate	Interrupted state saved on the stack.
- */
-void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
-{
-	uintptr_t page_8k;
-	uintptr_t page_16k;
-	size_t index;
-	pte_t *t;
-
-	page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
-	page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
-	index = tag.vpn % MMU_PAGES_PER_PAGE;
-
-	if (tag.context == ASID_KERNEL) {
-		if (!tag.vpn) {
-			/* NULL access in kernel */
-			do_fast_data_access_mmu_miss_fault(istate, tag,
-			    __func__);
-		} else if (page_8k >= end_of_identity) {
-			/*
-			 * The kernel is accessing the I/O space.
-			 * We still do identity mapping for I/O,
-			 * but without caching.
-			 */
-			dtlb_insert_mapping(page_8k, KA2PA(page_8k),
-			    PAGESIZE_8K, false, false);
-			return;
-		}
-		do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
-		    "kernel page fault.");
-	}
-
-	page_table_lock(AS, true);
-	t = page_mapping_find(AS, page_16k);
-	if (t) {
-		/*
-		 * The mapping was found in the software page hash table.
-		 * Insert it into DTLB.
-		 */
-		t->a = true;
-		dtlb_pte_copy(t, index, true);
-#ifdef CONFIG_TSB
-		dtsb_pte_copy(t, index, true);
-#endif
-		page_table_unlock(AS, true);
-	} else {
-		/*
-		 * Forward the page fault to the address space page fault
-		 * handler.
-		 */		
-		page_table_unlock(AS, true);
-		if (as_page_fault(page_16k, PF_ACCESS_READ, istate) ==
-		    AS_PF_FAULT) {
-			do_fast_data_access_mmu_miss_fault(istate, tag,
-			    __func__);
-		}
-	}
-}
-
-/** DTLB protection fault handler.
- *
- * @param tag		Content of the TLB Tag Access register as it existed
- * 			when the trap happened. This is to prevent confusion
- * 			created by clobbered Tag Access register during a nested
- * 			DTLB miss.
- * @param istate	Interrupted state saved on the stack.
- */
-void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
-{
-	uintptr_t page_16k;
-	size_t index;
-	pte_t *t;
-
-	page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
-	index = tag.vpn % MMU_PAGES_PER_PAGE;	/* 16K-page emulation */
-
-	page_table_lock(AS, true);
-	t = page_mapping_find(AS, page_16k);
-	if (t && PTE_WRITABLE(t)) {
-		/*
-		 * The mapping was found in the software page hash table and is
-		 * writable. Demap the old mapping and insert an updated mapping
-		 * into DTLB.
-		 */
-		t->a = true;
-		t->d = true;
-		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
-		    page_16k + index * MMU_PAGE_SIZE);
-		dtlb_pte_copy(t, index, false);
-#ifdef CONFIG_TSB
-		dtsb_pte_copy(t, index, false);
-#endif
-		page_table_unlock(AS, true);
-	} else {
-		/*
-		 * Forward the page fault to the address space page fault
-		 * handler.
-		 */		
-		page_table_unlock(AS, true);
-		if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) ==
-		    AS_PF_FAULT) {
-			do_fast_data_access_protection_fault(istate, tag,
-			    __func__);
-		}
-	}
-}
-
-/** Print TLB entry (for debugging purposes).
- *
- * The diag field has been left out in order to make this function more generic
- * (there is no diag field in US3 architeture). 
- *
- * @param i		TLB entry number 
- * @param t		TLB entry tag
- * @param d		TLB entry data 
- */
-static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
-{
-	printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
-	    "ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, "
-	    "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
-	    t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
-	    d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
-}
-
-#if defined (US)
-
-/** Print contents of both TLBs. */
-void tlb_print(void)
-{
-	int i;
-	tlb_data_t d;
-	tlb_tag_read_reg_t t;
-	
-	printf("I-TLB contents:\n");
-	for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
-		d.value = itlb_data_access_read(i);
-		t.value = itlb_tag_read_read(i);
-		print_tlb_entry(i, t, d);
-	}
-
-	printf("D-TLB contents:\n");
-	for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
-		d.value = dtlb_data_access_read(i);
-		t.value = dtlb_tag_read_read(i);
-		print_tlb_entry(i, t, d);
-	}
-}
-
-#elif defined (US3)
-
-/** Print contents of all TLBs. */
-void tlb_print(void)
-{
-	int i;
-	tlb_data_t d;
-	tlb_tag_read_reg_t t;
-	
-	printf("TLB_ISMALL contents:\n");
-	for (i = 0; i < tlb_ismall_size(); i++) {
-		d.value = dtlb_data_access_read(TLB_ISMALL, i);
-		t.value = dtlb_tag_read_read(TLB_ISMALL, i);
-		print_tlb_entry(i, t, d);
-	}
-	
-	printf("TLB_IBIG contents:\n");
-	for (i = 0; i < tlb_ibig_size(); i++) {
-		d.value = dtlb_data_access_read(TLB_IBIG, i);
-		t.value = dtlb_tag_read_read(TLB_IBIG, i);
-		print_tlb_entry(i, t, d);
-	}
-	
-	printf("TLB_DSMALL contents:\n");
-	for (i = 0; i < tlb_dsmall_size(); i++) {
-		d.value = dtlb_data_access_read(TLB_DSMALL, i);
-		t.value = dtlb_tag_read_read(TLB_DSMALL, i);
-		print_tlb_entry(i, t, d);
-	}
-	
-	printf("TLB_DBIG_1 contents:\n");
-	for (i = 0; i < tlb_dbig_size(); i++) {
-		d.value = dtlb_data_access_read(TLB_DBIG_0, i);
-		t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
-		print_tlb_entry(i, t, d);
-	}
-	
-	printf("TLB_DBIG_2 contents:\n");
-	for (i = 0; i < tlb_dbig_size(); i++) {
-		d.value = dtlb_data_access_read(TLB_DBIG_1, i);
-		t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
-		print_tlb_entry(i, t, d);
-	}
-}
-
-#endif
-
-void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
-    const char *str)
-{
-	fault_if_from_uspace(istate, "%s.", str);
-	dump_istate(istate);
-	panic("%s.", str);
-}
-
-void do_fast_data_access_mmu_miss_fault(istate_t *istate,
-    tlb_tag_access_reg_t tag, const char *str)
-{
-	uintptr_t va;
-
-	va = tag.vpn << MMU_PAGE_WIDTH;
-	if (tag.context) {
-		fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va,
-		    tag.context);
-	}
-	dump_istate(istate);
-	printf("Faulting page: %p, ASID=%d.\n", va, tag.context);
-	panic("%s.", str);
-}
-
-void do_fast_data_access_protection_fault(istate_t *istate,
-    tlb_tag_access_reg_t tag, const char *str)
-{
-	uintptr_t va;
-
-	va = tag.vpn << MMU_PAGE_WIDTH;
-
-	if (tag.context) {
-		fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va,
-		    tag.context);
-	}
-	printf("Faulting page: %p, ASID=%d\n", va, tag.context);
-	dump_istate(istate);
-	panic("%s.", str);
-}
-
-void dump_sfsr_and_sfar(void)
-{
-	tlb_sfsr_reg_t sfsr;
-	uintptr_t sfar;
-
-	sfsr.value = dtlb_sfsr_read();
-	sfar = dtlb_sfar_read();
-	
-#if defined (US)
-	printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
-	    "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
-	    sfsr.ow, sfsr.fv);
-#elif defined (US3)
-	printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
-	    "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
-	    sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
-#endif
-	    
-	printf("DTLB SFAR: address=%p\n", sfar);
-	
-	dtlb_sfsr_write(0);
-}
-
-#if defined (US)
-/** Invalidate all unlocked ITLB and DTLB entries. */
-void tlb_invalidate_all(void)
-{
-	int i;
-	
-	/*
-	 * Walk all ITLB and DTLB entries and remove all unlocked mappings.
-	 *
-	 * The kernel doesn't use global mappings so any locked global mappings
-	 * found must have been created by someone else. Their only purpose now
-	 * is to collide with proper mappings. Invalidate immediately. It should
-	 * be safe to invalidate them as late as now.
-	 */
-
-	tlb_data_t d;
-	tlb_tag_read_reg_t t;
-
-	for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
-		d.value = itlb_data_access_read(i);
-		if (!d.l || d.g) {
-			t.value = itlb_tag_read_read(i);
-			d.v = false;
-			itlb_tag_access_write(t.value);
-			itlb_data_access_write(i, d.value);
-		}
-	}
-
-	for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
-		d.value = dtlb_data_access_read(i);
-		if (!d.l || d.g) {
-			t.value = dtlb_tag_read_read(i);
-			d.v = false;
-			dtlb_tag_access_write(t.value);
-			dtlb_data_access_write(i, d.value);
-		}
-	}
-
-}
-
-#elif defined (US3)
-
-/** Invalidate all unlocked ITLB and DTLB entries. */
-void tlb_invalidate_all(void)
-{
-	itlb_demap(TLB_DEMAP_ALL, 0, 0);
-	dtlb_demap(TLB_DEMAP_ALL, 0, 0);
-}
-
-#endif
-
-/** Invalidate all ITLB and DTLB entries that belong to specified ASID
- * (Context).
- *
- * @param asid Address Space ID.
- */
-void tlb_invalidate_asid(asid_t asid)
-{
-	tlb_context_reg_t pc_save, ctx;
-	
-	/* switch to nucleus because we are mapped by the primary context */
-	nucleus_enter();
-	
-	ctx.v = pc_save.v = mmu_primary_context_read();
-	ctx.context = asid;
-	mmu_primary_context_write(ctx.v);
-	
-	itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
-	dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
-	
-	mmu_primary_context_write(pc_save.v);
-	
-	nucleus_leave();
-}
-
-/** Invalidate all ITLB and DTLB entries for specified page range in specified
- * address space.
- *
- * @param asid		Address Space ID.
- * @param page		First page which to sweep out from ITLB and DTLB.
- * @param cnt		Number of ITLB and DTLB entries to invalidate.
- */
-void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
-{
-	unsigned int i;
-	tlb_context_reg_t pc_save, ctx;
-	
-	/* switch to nucleus because we are mapped by the primary context */
-	nucleus_enter();
-	
-	ctx.v = pc_save.v = mmu_primary_context_read();
-	ctx.context = asid;
-	mmu_primary_context_write(ctx.v);
-	
-	for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
-		itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
-		    page + i * MMU_PAGE_SIZE);
-		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
-		    page + i * MMU_PAGE_SIZE);
-	}
-	
-	mmu_primary_context_write(pc_save.v);
-	
-	nucleus_leave();
-}
-
-/** @}
- */
Index: kernel/arch/sparc64/src/mm/tsb.c
===================================================================
--- kernel/arch/sparc64/src/mm/tsb.c	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ 	(revision )
@@ -1,177 +1,0 @@
-/*
- * Copyright (c) 2006 Jakub Jermar
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup sparc64mm	
- * @{
- */
-/** @file
- */
-
-#include <arch/mm/tsb.h>
-#include <arch/mm/tlb.h>
-#include <arch/mm/page.h>
-#include <arch/barrier.h>
-#include <mm/as.h>
-#include <arch/types.h>
-#include <macros.h>
-#include <debug.h>
-
-#define TSB_INDEX_MASK	((1 << (21 + 1 + TSB_SIZE - MMU_PAGE_WIDTH)) - 1)
-
-/** Invalidate portion of TSB.
- *
- * We assume that the address space is already locked. Note that respective
- * portions of both TSBs are invalidated at a time.
- *
- * @param as Address space.
- * @param page First page to invalidate in TSB.
- * @param pages Number of pages to invalidate. Value of (size_t) -1 means the
- * 	whole TSB.
- */
-void tsb_invalidate(as_t *as, uintptr_t page, size_t pages)
-{
-	size_t i0;
-	size_t i;
-	size_t cnt;
-	
-	ASSERT(as->arch.itsb && as->arch.dtsb);
-	
-	i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
-	ASSERT(i0 < ITSB_ENTRY_COUNT && i0 < DTSB_ENTRY_COUNT);
-
-	if (pages == (size_t) -1 || (pages * 2) > ITSB_ENTRY_COUNT)
-		cnt = ITSB_ENTRY_COUNT;
-	else
-		cnt = pages * 2;
-	
-	for (i = 0; i < cnt; i++) {
-		as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT - 1)].tag.invalid =
-		    true;
-		as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT - 1)].tag.invalid =
-		    true;
-	}
-}
-
-/** Copy software PTE to ITSB.
- *
- * @param t 	Software PTE.
- * @param index	Zero if lower 8K-subpage, one if higher 8K subpage.
- */
-void itsb_pte_copy(pte_t *t, size_t index)
-{
-	as_t *as;
-	tsb_entry_t *tsb;
-	size_t entry;
-
-	ASSERT(index <= 1);
-	
-	as = t->as;
-	entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK; 
-	ASSERT(entry < ITSB_ENTRY_COUNT);
-	tsb = &as->arch.itsb[entry];
-
-	/*
-	 * We use write barriers to make sure that the TSB load
-	 * won't use inconsistent data or that the fault will
-	 * be repeated.
-	 */
-
-	tsb->tag.invalid = true;	/* invalidate the entry
-					 * (tag target has this
-					 * set to 0) */
-
-	write_barrier();
-
-	tsb->tag.context = as->asid;
-	/* the shift is bigger than PAGE_WIDTH, do not bother with index  */
-	tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
-	tsb->data.value = 0;
-	tsb->data.size = PAGESIZE_8K;
-	tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
-	tsb->data.cp = t->c;	/* cp as cache in phys.-idxed, c as cacheable */
-	tsb->data.p = t->k;	/* p as privileged, k as kernel */
-	tsb->data.v = t->p;	/* v as valid, p as present */
-	
-	write_barrier();
-	
-	tsb->tag.invalid = false;	/* mark the entry as valid */
-}
-
-/** Copy software PTE to DTSB.
- *
- * @param t	Software PTE.
- * @param index	Zero if lower 8K-subpage, one if higher 8K-subpage.
- * @param ro	If true, the mapping is copied read-only.
- */
-void dtsb_pte_copy(pte_t *t, size_t index, bool ro)
-{
-	as_t *as;
-	tsb_entry_t *tsb;
-	size_t entry;
-	
-	ASSERT(index <= 1);
-
-	as = t->as;
-	entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
-	ASSERT(entry < DTSB_ENTRY_COUNT);
-	tsb = &as->arch.dtsb[entry];
-
-	/*
-	 * We use write barriers to make sure that the TSB load
-	 * won't use inconsistent data or that the fault will
-	 * be repeated.
-	 */
-
-	tsb->tag.invalid = true;	/* invalidate the entry
-					 * (tag target has this
-					 * set to 0) */
-
-	write_barrier();
-
-	tsb->tag.context = as->asid;
-	/* the shift is bigger than PAGE_WIDTH, do not bother with index */
-	tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
-	tsb->data.value = 0;
-	tsb->data.size = PAGESIZE_8K;
-	tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
-	tsb->data.cp = t->c;
-#ifdef CONFIG_VIRT_IDX_DCACHE
-	tsb->data.cv = t->c;
-#endif /* CONFIG_VIRT_IDX_DCACHE */
-	tsb->data.p = t->k;		/* p as privileged */
-	tsb->data.w = ro ? false : t->w;
-	tsb->data.v = t->p;
-	
-	write_barrier();
-	
-	tsb->tag.invalid = false;	/* mark the entry as valid */
-}
-
-/** @}
- */
-
Index: kernel/arch/sparc64/src/proc/scheduler.c
===================================================================
--- kernel/arch/sparc64/src/proc/scheduler.c	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ 	(revision )
@@ -1,83 +1,0 @@
-/*
- * Copyright (c) 2006 Jakub Jermar
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup sparc64proc
- * @{
- */
-/** @file
- */
-
-#include <proc/scheduler.h>
-#include <proc/thread.h>
-#include <arch.h>
-#include <arch/asm.h>
-#include <arch/stack.h>
-
-/** Perform sparc64 specific tasks needed before the new task is run. */
-void before_task_runs_arch(void)
-{
-}
-
-/** Perform sparc64 specific steps before scheduling a thread.
- *
- * For userspace threads, initialize reserved global registers in the alternate
- * and interrupt sets.
- */
-void before_thread_runs_arch(void)
-{
-	if ((THREAD->flags & THREAD_FLAG_USPACE)) {
-		/*
-		 * Write kernel stack address to %g6 of the alternate and
-		 * interrupt global sets.
-		 *
-		 * Write pointer to the last item in the userspace window buffer
-		 * to %g7 in the alternate set. Write to the interrupt %g7 is
-		 * not necessary because:
-		 * - spill traps operate only in the alternate global set,
-		 * - preemptible trap handler switches to alternate globals
-		 *   before it explicitly uses %g7.
-		 */
-		uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE -
-		    (STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT));
-		write_to_ig_g6(sp);
-		write_to_ag_g6(sp);
-		write_to_ag_g7((uintptr_t) THREAD->arch.uspace_window_buffer);
-	}
-}
-
-/** Perform sparc64 specific steps before a thread stops running. */
-void after_thread_ran_arch(void)
-{
-	if ((THREAD->flags & THREAD_FLAG_USPACE)) {
-		/* sample the state of the userspace window buffer */	
-		THREAD->arch.uspace_window_buffer = (uint8_t *) read_from_ag_g7();
-	}
-}
-
-/** @}
- */
Index: kernel/arch/sparc64/src/proc/sun4u/scheduler.c
===================================================================
--- kernel/arch/sparc64/src/proc/sun4u/scheduler.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/proc/sun4u/scheduler.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2006 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64proc
+ * @{
+ */
+/** @file
+ */
+
+#include <proc/scheduler.h>
+#include <proc/thread.h>
+#include <arch.h>
+#include <arch/asm.h>
+#include <arch/stack.h>
+
+/** Perform sparc64 specific tasks needed before the new task is run. */
+void before_task_runs_arch(void)
+{
+}
+
+/** Perform sparc64 specific steps before scheduling a thread.
+ *
+ * For userspace threads, initialize reserved global registers in the alternate
+ * and interrupt sets.
+ */
+void before_thread_runs_arch(void)
+{
+	if ((THREAD->flags & THREAD_FLAG_USPACE)) {
+		/*
+		 * Write kernel stack address to %g6 of the alternate and
+		 * interrupt global sets.
+		 *
+		 * Write pointer to the last item in the userspace window buffer
+		 * to %g7 in the alternate set. Writing to the interrupt %g7 is
+		 * not necessary because:
+		 * - spill traps operate only in the alternate global set,
+		 * - preemptible trap handler switches to alternate globals
+		 *   before it explicitly uses %g7.
+		 */
+		uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE -
+		    (STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT));
+		write_to_ig_g6(sp);
+		write_to_ag_g6(sp);
+		write_to_ag_g7((uintptr_t) THREAD->arch.uspace_window_buffer);
+	}
+}
+
+/** Perform sparc64 specific steps before a thread stops running. */
+void after_thread_ran_arch(void)
+{
+	if ((THREAD->flags & THREAD_FLAG_USPACE)) {
+		/* sample the state of the userspace window buffer */	
+		THREAD->arch.uspace_window_buffer = (uint8_t *) read_from_ag_g7();
+	}
+}
+
+/** @}
+ */
Index: kernel/arch/sparc64/src/proc/sun4v/scheduler.c
===================================================================
--- kernel/arch/sparc64/src/proc/sun4v/scheduler.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/proc/sun4v/scheduler.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2006 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64proc
+ * @{
+ */
+/** @file
+ */
+
+#include <proc/scheduler.h>
+#include <proc/thread.h>
+#include <arch.h>
+#include <arch/asm.h>
+#include <arch/arch.h>
+#include <arch/stack.h>
+#include <arch/sun4v/cpu.h>
+#include <arch/sun4v/hypercall.h>
+
+/** Perform sparc64 specific tasks needed before the new task is run. */
+void before_task_runs_arch(void)
+{
+}
+
+/** Perform sparc64 specific steps before scheduling a thread.
+ *
+ * For userspace threads, initialize the pointers to the kernel stack and
+ * to the userspace window buffer.
+ */
+void before_thread_runs_arch(void)
+{
+	if ((THREAD->flags & THREAD_FLAG_USPACE)) {
+		uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE -
+		    (STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT));
+		asi_u64_write(ASI_SCRATCHPAD, SCRATCHPAD_KSTACK, sp);
+		asi_u64_write(ASI_SCRATCHPAD, SCRATCHPAD_WBUF,
+		    (uintptr_t) THREAD->arch.uspace_window_buffer);
+	}
+}
+
+/** Perform sparc64 specific steps before a thread stops running. */
+void after_thread_ran_arch(void)
+{
+	if ((THREAD->flags & THREAD_FLAG_USPACE)) {
+		/* sample the state of the userspace window buffer */	
+		THREAD->arch.uspace_window_buffer =
+		    (uint8_t *) asi_u64_read(ASI_SCRATCHPAD, SCRATCHPAD_WBUF);
+		
+	}
+}
+
+/** @}
+ */
Index: kernel/arch/sparc64/src/sparc64.c
===================================================================
--- kernel/arch/sparc64/src/sparc64.c	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ 	(revision )
@@ -1,175 +1,0 @@
-/*
- * Copyright (c) 2005 Jakub Jermar
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** @addtogroup sparc64
- * @{
- */
-/** @file
- */
-
-#include <arch.h>
-#include <debug.h>
-#include <config.h>
-#include <arch/trap/trap.h>
-#include <arch/console.h>
-#include <console/console.h>
-#include <arch/boot/boot.h>
-#include <arch/arch.h>
-#include <arch/asm.h>
-#include <arch/mm/page.h>
-#include <arch/stack.h>
-#include <interrupt.h>
-#include <genarch/ofw/ofw_tree.h>
-#include <userspace.h>
-#include <ddi/irq.h>
-#include <string.h>
-
-bootinfo_t bootinfo;
-
-/** Perform sparc64-specific initialization before main_bsp() is called. */
-void arch_pre_main(void)
-{
-	/* Copy init task info. */
-	init.cnt = bootinfo.taskmap.count;
-	
-	uint32_t i;
-
-	for (i = 0; i < bootinfo.taskmap.count; i++) {
-		init.tasks[i].addr = (uintptr_t) bootinfo.taskmap.tasks[i].addr;
-		init.tasks[i].size = bootinfo.taskmap.tasks[i].size;
-		str_cpy(init.tasks[i].name, CONFIG_TASK_NAME_BUFLEN,
-		    bootinfo.taskmap.tasks[i].name);
-	}
-	
-	/* Copy boot allocations info. */
-	ballocs.base = bootinfo.ballocs.base;
-	ballocs.size = bootinfo.ballocs.size;
-	
-	ofw_tree_init(bootinfo.ofw_root);
-}
-
-/** Perform sparc64 specific initialization before mm is initialized. */
-void arch_pre_mm_init(void)
-{
-	if (config.cpu_active == 1)
-		trap_init();
-}
-
-/** Perform sparc64 specific initialization afterr mm is initialized. */
-void arch_post_mm_init(void)
-{
-	if (config.cpu_active == 1) {
-		/*
-		 * We have 2^11 different interrupt vectors.
-		 * But we only create 128 buckets.
-		 */
-		irq_init(1 << 11, 128);
-	}
-}
-
-void arch_post_cpu_init(void)
-{
-}
-
-void arch_pre_smp_init(void)
-{
-}
-
-void arch_post_smp_init(void)
-{
-	standalone_sparc64_console_init();
-}
-
-/** Calibrate delay loop.
- *
- * On sparc64, we implement delay() by waiting for the TICK register to
- * reach a pre-computed value, as opposed to performing some pre-computed
- * amount of instructions of known duration. We set the delay_loop_const
- * to 1 in order to neutralize the multiplication done by delay().
- */
-void calibrate_delay_loop(void)
-{
-	CPU->delay_loop_const = 1;
-}
-
-/** Wait several microseconds.
- *
- * We assume that interrupts are already disabled.
- *
- * @param t Microseconds to wait.
- */
-void asm_delay_loop(const uint32_t usec)
-{
-	uint64_t stop = tick_read() + (uint64_t) usec * (uint64_t)
-	    CPU->arch.clock_frequency / 1000000;
-
-	while (tick_read() < stop)
-		;
-}
-
-/** Switch to userspace. */
-void userspace(uspace_arg_t *kernel_uarg)
-{
-	(void) interrupts_disable();
-	switch_to_userspace((uintptr_t) kernel_uarg->uspace_entry,
-	    ((uintptr_t) kernel_uarg->uspace_stack) + STACK_SIZE
-	    - (ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT) + STACK_BIAS),
-	    (uintptr_t) kernel_uarg->uspace_uarg);
-
-	for (;;)
-		;
-	/* not reached */
-}
-
-void arch_reboot(void)
-{
-	// TODO
-	while (1);
-}
-
-/** Construct function pointer
- *
- * @param fptr   function pointer structure
- * @param addr   function address
- * @param caller calling function address
- *
- * @return address of the function pointer
- *
- */
-void *arch_construct_function(fncptr_t *fptr, void *addr, void *caller)
-{
-	return addr;
-}
-
-void irq_initialize_arch(irq_t *irq)
-{
-	(void) irq;
-}
-
-/** @}
- */
Index: kernel/arch/sparc64/src/start.S
===================================================================
--- kernel/arch/sparc64/src/start.S	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ 	(revision )
@@ -1,417 +1,0 @@
-#
-# Copyright (c) 2005 Jakub Jermar
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# - Redistributions of source code must retain the above copyright
-#   notice, this list of conditions and the following disclaimer.
-# - Redistributions in binary form must reproduce the above copyright
-#   notice, this list of conditions and the following disclaimer in the
-#   documentation and/or other materials provided with the distribution.
-# - The name of the author may not be used to endorse or promote products
-#   derived from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-#include <arch/arch.h>
-#include <arch/cpu.h>
-#include <arch/regdef.h>
-#include <arch/boot/boot.h>
-#include <arch/stack.h>
-
-#include <arch/mm/mmu.h>
-#include <arch/mm/tlb.h>
-#include <arch/mm/tte.h>
-
-#ifdef CONFIG_SMP
-#include <arch/context_offset.h>
-#endif
-
-.register %g2, #scratch
-.register %g3, #scratch
-
-.section K_TEXT_START, "ax"
-
-#define BSP_FLAG	1
-
-/*
- * 2^PHYSMEM_ADDR_SIZE is the size of the physical address space on
- * a given processor.
- */
-#if defined (US)
-    #define PHYSMEM_ADDR_SIZE	41
-#elif defined (US3)
-    #define PHYSMEM_ADDR_SIZE	43
-#endif
-
-/*
- * Here is where the kernel is passed control from the boot loader.
- * 
- * The registers are expected to be in this state:
- * - %o0 starting address of physical memory + bootstrap processor flag
- * 	bits 63...1:	physical memory starting address / 2
- *	bit 0:		non-zero on BSP processor, zero on AP processors
- * - %o1 bootinfo structure address (BSP only)
- * - %o2 bootinfo structure size (BSP only)
- *
- * Moreover, we depend on boot having established the following environment:
- * - TLBs are on
- * - identity mapping for the kernel image
- */
-
-.global kernel_image_start
-kernel_image_start:
-	mov BSP_FLAG, %l0
-	and %o0, %l0, %l7			! l7 <= bootstrap processor?
-	andn %o0, %l0, %l6			! l6 <= start of physical memory
-
-	! Get bits (PHYSMEM_ADDR_SIZE - 1):13 of physmem_base.
-	srlx %l6, 13, %l5
-	
-	! l5 <= physmem_base[(PHYSMEM_ADDR_SIZE - 1):13]
-	sllx %l5, 13 + (63 - (PHYSMEM_ADDR_SIZE - 1)), %l5
-	srlx %l5, 63 - (PHYSMEM_ADDR_SIZE - 1), %l5	
-
-	/*
-	 * Setup basic runtime environment.
-	 */
-
-	wrpr %g0, NWINDOWS - 2, %cansave	! set maximum saveable windows
-	wrpr %g0, 0, %canrestore		! get rid of windows we will
-						! never need again
-	wrpr %g0, 0, %otherwin			! make sure the window state is
-						! consistent
-	wrpr %g0, NWINDOWS - 1, %cleanwin	! prevent needless clean_window
-						! traps for kernel
-						
-	wrpr %g0, 0, %wstate			! use default spill/fill trap
-
-	wrpr %g0, 0, %tl			! TL = 0, primary context
-						! register is used
-
-	wrpr %g0, PSTATE_PRIV_BIT, %pstate	! disable interrupts and disable
-						! 32-bit address masking
-
-	wrpr %g0, 0, %pil			! intialize %pil
-
-	/*
-	 * Switch to kernel trap table.
-	 */
-	sethi %hi(trap_table), %g1
-	wrpr %g1, %lo(trap_table), %tba
-
-	/* 
-	 * Take over the DMMU by installing locked TTE entry identically
-	 * mapping the first 4M of memory.
-	 *
-	 * In case of DMMU, no FLUSH instructions need to be issued. Because of
-	 * that, the old DTLB contents can be demapped pretty straightforwardly
-	 * and without causing any traps.
-	 */
-
-	wr %g0, ASI_DMMU, %asi
-
-#define SET_TLB_DEMAP_CMD(r1, context_id) \
-	set (TLB_DEMAP_CONTEXT << TLB_DEMAP_TYPE_SHIFT) | (context_id << \
-		TLB_DEMAP_CONTEXT_SHIFT), %r1
-	
-	! demap context 0
-	SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS)
-	stxa %g0, [%g1] ASI_DMMU_DEMAP			
-	membar #Sync
-
-#define SET_TLB_TAG(r1, context) \
-	set VMA | (context << TLB_TAG_ACCESS_CONTEXT_SHIFT), %r1
-
-	! write DTLB tag
-	SET_TLB_TAG(g1, MEM_CONTEXT_KERNEL)
-	stxa %g1, [VA_DMMU_TAG_ACCESS] %asi			
-	membar #Sync
-
-#ifdef CONFIG_VIRT_IDX_DCACHE
-#define TTE_LOW_DATA(imm) 	(TTE_CP | TTE_CV | TTE_P | LMA | (imm))
-#else /* CONFIG_VIRT_IDX_DCACHE */
-#define TTE_LOW_DATA(imm) 	(TTE_CP | TTE_P | LMA | (imm))
-#endif /* CONFIG_VIRT_IDX_DCACHE */
-
-#define SET_TLB_DATA(r1, r2, imm) \
-	set TTE_LOW_DATA(imm), %r1; \
-	or %r1, %l5, %r1; \
-	mov PAGESIZE_4M, %r2; \
-	sllx %r2, TTE_SIZE_SHIFT, %r2; \
-	or %r1, %r2, %r1; \
-	mov 1, %r2; \
-	sllx %r2, TTE_V_SHIFT, %r2; \
-	or %r1, %r2, %r1;
-	
-	! write DTLB data and install the kernel mapping
-	SET_TLB_DATA(g1, g2, TTE_L | TTE_W)	! use non-global mapping
-	stxa %g1, [%g0] ASI_DTLB_DATA_IN_REG		
-	membar #Sync
-
-	/*
-	 * Because we cannot use global mappings (because we want to have
-	 * separate 64-bit address spaces for both the kernel and the
-	 * userspace), we prepare the identity mapping also in context 1. This
-	 * step is required by the code installing the ITLB mapping.
-	 */
-	! write DTLB tag of context 1 (i.e. MEM_CONTEXT_TEMP)
-	SET_TLB_TAG(g1, MEM_CONTEXT_TEMP)
-	stxa %g1, [VA_DMMU_TAG_ACCESS] %asi			
-	membar #Sync
-
-	! write DTLB data and install the kernel mapping in context 1
-	SET_TLB_DATA(g1, g2, TTE_W)			! use non-global mapping
-	stxa %g1, [%g0] ASI_DTLB_DATA_IN_REG		
-	membar #Sync
-	
-	/*
-	 * Now is time to take over the IMMU. Unfortunatelly, it cannot be done
-	 * as easily as the DMMU, because the IMMU is mapping the code it
-	 * executes.
-	 *
-	 * [ Note that brave experiments with disabling the IMMU and using the
-	 * DMMU approach failed after a dozen of desparate days with only little
-	 * success. ]
-	 *
-	 * The approach used here is inspired from OpenBSD. First, the kernel
-	 * creates IMMU mapping for itself in context 1 (MEM_CONTEXT_TEMP) and
-	 * switches to it. Context 0 (MEM_CONTEXT_KERNEL) can be demapped
-	 * afterwards and replaced with the kernel permanent mapping. Finally,
-	 * the kernel switches back to context 0 and demaps context 1.
-	 *
-	 * Moreover, the IMMU requires use of the FLUSH instructions. But that
-	 * is OK because we always use operands with addresses already mapped by
-	 * the taken over DTLB.
-	 */
-	
-	set kernel_image_start, %g5
-	
-	! write ITLB tag of context 1
-	SET_TLB_TAG(g1, MEM_CONTEXT_TEMP)
-	mov VA_DMMU_TAG_ACCESS, %g2
-	stxa %g1, [%g2] ASI_IMMU
-	flush %g5
-
-	! write ITLB data and install the temporary mapping in context 1
-	SET_TLB_DATA(g1, g2, 0)			! use non-global mapping
-	stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG		
-	flush %g5
-	
-	! switch to context 1
-	mov MEM_CONTEXT_TEMP, %g1
-	stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi	! ASI_DMMU is correct here !!!
-	flush %g5
-	
-	! demap context 0
-	SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS)
-	stxa %g0, [%g1] ASI_IMMU_DEMAP			
-	flush %g5
-	
-	! write ITLB tag of context 0
-	SET_TLB_TAG(g1, MEM_CONTEXT_KERNEL)
-	mov VA_DMMU_TAG_ACCESS, %g2
-	stxa %g1, [%g2] ASI_IMMU
-	flush %g5
-
-	! write ITLB data and install the permanent kernel mapping in context 0
-	SET_TLB_DATA(g1, g2, TTE_L)		! use non-global mapping
-	stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG		
-	flush %g5
-
-	! enter nucleus - using context 0
-	wrpr %g0, 1, %tl
-
-	! demap context 1
-	SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_PRIMARY)
-	stxa %g0, [%g1] ASI_IMMU_DEMAP			
-	flush %g5
-	
-	! set context 0 in the primary context register
-	stxa %g0, [VA_PRIMARY_CONTEXT_REG] %asi	! ASI_DMMU is correct here !!!
-	flush %g5
-	
-	! leave nucleus - using primary context, i.e. context 0
-	wrpr %g0, 0, %tl
-
-	brz %l7, 1f				! skip if you are not the bootstrap CPU
-	nop
-
-	/*
-	 * Save physmem_base for use by the mm subsystem.
-	 * %l6 contains starting physical address
-	 */
-	sethi %hi(physmem_base), %l4
-	stx %l6, [%l4 + %lo(physmem_base)]
-
-	/*
-	 * Precompute kernel 8K TLB data template.
-	 * %l5 contains starting physical address
-	 * bits [(PHYSMEM_ADDR_SIZE - 1):13]
-	 */
-	sethi %hi(kernel_8k_tlb_data_template), %l4
-	ldx [%l4 + %lo(kernel_8k_tlb_data_template)], %l3
-	or %l3, %l5, %l3
-	stx %l3, [%l4 + %lo(kernel_8k_tlb_data_template)]
-
-	/*
-	 * Flush D-Cache.
-	 */
-	call dcache_flush
-	nop
-
-	/*
-	 * So far, we have not touched the stack.
-	 * It is a good idea to set the kernel stack to a known state now.
-	 */
-	sethi %hi(temporary_boot_stack), %sp
-	or %sp, %lo(temporary_boot_stack), %sp
-	sub %sp, STACK_BIAS, %sp
-
-	sethi %hi(bootinfo), %o0
-	call memcpy				! copy bootinfo
-	or %o0, %lo(bootinfo), %o0
-
-	call arch_pre_main
-	nop
-	
-	call main_bsp
-	nop
-
-	/* Not reached. */
-
-0:
-	ba %xcc, 0b
-	nop
-
-
-1:
-#ifdef CONFIG_SMP
-	/*
-	 * Determine the width of the MID and save its mask to %g3. The width
-	 * is
-	 * 	* 5 for US and US-IIIi,
-	 * 	* 10 for US3 except US-IIIi.
-	 */
-#if defined(US)
-	mov 0x1f, %g3
-#elif defined(US3)
-	mov 0x3ff, %g3
-	rdpr %ver, %g2
-	sllx %g2, 16, %g2
-	srlx %g2, 48, %g2
-	cmp %g2, IMPL_ULTRASPARCIII_I
-	move %xcc, 0x1f, %g3
-#endif
-
-	/*
-	 * Read MID from the processor.
-	 */
-	ldxa [%g0] ASI_ICBUS_CONFIG, %g1
-	srlx %g1, ICBUS_CONFIG_MID_SHIFT, %g1
-	and %g1, %g3, %g1
-
-	/*
-	 * Active loop for APs until the BSP picks them up. A processor cannot
-	 * leave the loop until the global variable 'waking_up_mid' equals its
-	 * MID.
-	 */
-	set waking_up_mid, %g2
-2:
-	ldx [%g2], %g3
-	cmp %g3, %g1
-	bne %xcc, 2b
-	nop
-
-	/*
-	 * Configure stack for the AP.
-	 * The AP is expected to use the stack saved
-	 * in the ctx global variable.
-	 */
-	set ctx, %g1
-	add %g1, OFFSET_SP, %g1
-	ldx [%g1], %o6
-
-	call main_ap
-	nop
-
-	/* Not reached. */
-#endif
-	
-0:
-	ba %xcc, 0b
-	nop
-
-
-.section K_DATA_START, "aw", @progbits
-
-/*
- * Create small stack to be used by the bootstrap processor. It is going to be
- * used only for a very limited period of time, but we switch to it anyway,
- * just to be sure we are properly initialized.
- */
-
-#define INITIAL_STACK_SIZE	1024
-
-.align STACK_ALIGNMENT
-	.space INITIAL_STACK_SIZE
-.align STACK_ALIGNMENT
-temporary_boot_stack:
-	.space STACK_WINDOW_SAVE_AREA_SIZE
-
-
-.data
-
-.align 8
-.global physmem_base		! copy of the physical memory base address
-physmem_base:
-	.quad 0
-
-/*
- * The fast_data_access_mmu_miss_data_hi label and the end_of_identity and
- * kernel_8k_tlb_data_template variables are meant to stay together,
- * aligned on 16B boundary.
- */
-.global fast_data_access_mmu_miss_data_hi
-.global end_of_identity 
-.global kernel_8k_tlb_data_template
-
-.align 16
-/*
- * This label is used by the fast_data_access_MMU_miss trap handler.
- */
-fast_data_access_mmu_miss_data_hi:
-/*
- * This variable is used by the fast_data_access_MMU_miss trap handler.
- * In runtime, it is modified to contain the address of the end of physical
- * memory.
- */
-end_of_identity:
-	.quad -1 
-/*
- * This variable is used by the fast_data_access_MMU_miss trap handler.
- * In runtime, it is further modified to reflect the starting address of
- * physical memory.
- */
-kernel_8k_tlb_data_template:
-#ifdef CONFIG_VIRT_IDX_DCACHE
-	.quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | \
-		 TTE_CV | TTE_P | TTE_W)
-#else /* CONFIG_VIRT_IDX_DCACHE */
-	.quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | \
-		TTE_P | TTE_W)
-#endif /* CONFIG_VIRT_IDX_DCACHE */
-
Index: kernel/arch/sparc64/src/sun4u/asm.S
===================================================================
--- kernel/arch/sparc64/src/sun4u/asm.S	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/sun4u/asm.S	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,117 @@
+#
+# Copyright (c) 2005 Jakub Jermar
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# - Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+# - Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in the
+#   documentation and/or other materials provided with the distribution.
+# - The name of the author may not be used to endorse or promote products
+#   derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+#include <arch/arch.h>
+#include <arch/stack.h>
+#include <arch/regdef.h>
+#include <arch/mm/mmu.h>
+
+.text
+
+.register       %g2, #scratch
+.register       %g3, #scratch
+
+.macro WRITE_ALTERNATE_REGISTER reg, bit
+	rdpr %pstate, %g1				! save PSTATE.PEF
+	wrpr %g0, (\bit | PSTATE_PRIV_BIT), %pstate
+	mov %o0, \reg
+	wrpr %g0, PSTATE_PRIV_BIT, %pstate
+	retl
+	wrpr %g1, 0, %pstate				! restore PSTATE.PEF
+.endm
+
+.macro READ_ALTERNATE_REGISTER reg, bit
+	rdpr %pstate, %g1				! save PSTATE.PEF
+	wrpr %g0, (\bit | PSTATE_PRIV_BIT), %pstate
+	mov \reg, %o0
+	wrpr %g0, PSTATE_PRIV_BIT, %pstate
+	retl
+	wrpr %g1, 0, %pstate				! restore PSTATE.PEF
+.endm
+
+.global write_to_ag_g6
+write_to_ag_g6:
+	WRITE_ALTERNATE_REGISTER %g6, PSTATE_AG_BIT
+
+.global write_to_ag_g7
+write_to_ag_g7:
+	WRITE_ALTERNATE_REGISTER %g7, PSTATE_AG_BIT
+
+.global write_to_ig_g6
+write_to_ig_g6:
+	WRITE_ALTERNATE_REGISTER %g6, PSTATE_IG_BIT
+
+.global read_from_ag_g7
+read_from_ag_g7:
+	READ_ALTERNATE_REGISTER %g7, PSTATE_AG_BIT
+
+/** Switch to userspace.
+ *
+ * %o0	Userspace entry address.
+ * %o1	Userspace stack pointer address.
+ * %o2  Userspace address of uarg structure.
+ */
+.global switch_to_userspace
+switch_to_userspace:
+	save %o1, -(STACK_WINDOW_SAVE_AREA_SIZE + STACK_ARG_SAVE_AREA_SIZE), %sp
+	flushw
+	wrpr %g0, 0, %cleanwin		! avoid information leak
+
+	mov %i2, %o0			! uarg
+	xor %o1, %o1, %o1		! %o1 is defined to hold pcb_ptr
+					! set it to 0
+
+	clr %i2
+	clr %i3
+	clr %i4
+	clr %i5
+	clr %i6
+
+	wrpr %g0, 1, %tl		! enforce mapping via nucleus
+
+	rdpr %cwp, %g1
+	wrpr %g1, TSTATE_IE_BIT, %tstate
+	wrpr %i0, 0, %tnpc
+	
+	/*
+	 * Set primary context according to secondary context.
+	 * Secondary context has been already installed by
+	 * higher-level functions.
+	 */
+	wr %g0, ASI_DMMU, %asi
+	ldxa [VA_SECONDARY_CONTEXT_REG] %asi, %g1
+	stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi
+	flush %i7
+
+	/*
+	 * Spills and fills will be handled by the userspace handlers.
+	 */
+	wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(1), %wstate
+	
+	done				! jump to userspace
+
Index: kernel/arch/sparc64/src/sun4u/sparc64.c
===================================================================
--- kernel/arch/sparc64/src/sun4u/sparc64.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/sun4u/sparc64.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64
+ * @{
+ */
+/** @file
+ */
+
+#include <arch.h>
+#include <debug.h>
+#include <config.h>
+#include <arch/trap/trap.h>
+#include <arch/console.h>
+#include <console/console.h>
+#include <arch/boot/boot.h>
+#include <arch/arch.h>
+#include <arch/asm.h>
+#include <arch/mm/page.h>
+#include <arch/stack.h>
+#include <interrupt.h>
+#include <genarch/ofw/ofw_tree.h>
+#include <userspace.h>
+#include <ddi/irq.h>
+#include <string.h>
+
+bootinfo_t bootinfo;
+
+/** Perform sparc64-specific initialization before main_bsp() is called. */
+void arch_pre_main(void)
+{
+	/* Copy init task info. */
+	init.cnt = bootinfo.taskmap.count;
+	
+	uint32_t i;
+
+	for (i = 0; i < bootinfo.taskmap.count; i++) {
+		init.tasks[i].addr = (uintptr_t) bootinfo.taskmap.tasks[i].addr;
+		init.tasks[i].size = bootinfo.taskmap.tasks[i].size;
+		str_cpy(init.tasks[i].name, CONFIG_TASK_NAME_BUFLEN,
+		    bootinfo.taskmap.tasks[i].name);
+	}
+	
+	/* Copy boot allocations info. */
+	ballocs.base = bootinfo.ballocs.base;
+	ballocs.size = bootinfo.ballocs.size;
+	
+	ofw_tree_init(bootinfo.ofw_root);
+}
+
+/** Perform sparc64 specific initialization before mm is initialized. */
+void arch_pre_mm_init(void)
+{
+	if (config.cpu_active == 1)
+		trap_init();
+}
+
+/** Perform sparc64 specific initialization after mm is initialized. */
+void arch_post_mm_init(void)
+{
+	if (config.cpu_active == 1) {
+		/*
+		 * We have 2^11 different interrupt vectors.
+		 * But we only create 128 buckets.
+		 */
+		irq_init(1 << 11, 128);
+	}
+}
+
+void arch_post_cpu_init(void)
+{
+}
+
+void arch_pre_smp_init(void)
+{
+}
+
+void arch_post_smp_init(void)
+{
+	standalone_sparc64_console_init();
+}
+
+/** Calibrate delay loop.
+ *
+ * On sparc64, we implement delay() by waiting for the TICK register to
+ * reach a pre-computed value, as opposed to performing some pre-computed
+ * amount of instructions of known duration. We set the delay_loop_const
+ * to 1 in order to neutralize the multiplication done by delay().
+ */
+void calibrate_delay_loop(void)
+{
+	CPU->delay_loop_const = 1;
+}
+
+/** Wait several microseconds.
+ *
+ * We assume that interrupts are already disabled.
+ *
+ * @param usec Microseconds to wait.
+ */
+void asm_delay_loop(const uint32_t usec)
+{
+	uint64_t stop = tick_read() + (uint64_t) usec * (uint64_t)
+	    CPU->arch.clock_frequency / 1000000;
+
+	while (tick_read() < stop)
+		;
+}
+
+/** Switch to userspace. */
+void userspace(uspace_arg_t *kernel_uarg)
+{
+	(void) interrupts_disable();
+	switch_to_userspace((uintptr_t) kernel_uarg->uspace_entry,
+	    ((uintptr_t) kernel_uarg->uspace_stack) + STACK_SIZE
+	    - (ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT) + STACK_BIAS),
+	    (uintptr_t) kernel_uarg->uspace_uarg);
+
+	for (;;)
+		;
+	/* not reached */
+}
+
+void arch_reboot(void)
+{
+	// TODO
+	while (1);
+}
+
+/** Construct function pointer
+ *
+ * @param fptr   function pointer structure
+ * @param addr   function address
+ * @param caller calling function address
+ *
+ * @return address of the function pointer
+ *
+ */
+void *arch_construct_function(fncptr_t *fptr, void *addr, void *caller)
+{
+	return addr;
+}
+
+void irq_initialize_arch(irq_t *irq)
+{
+	(void) irq;
+}
+
+/** @}
+ */
Index: kernel/arch/sparc64/src/sun4u/start.S
===================================================================
--- kernel/arch/sparc64/src/sun4u/start.S	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/sun4u/start.S	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,417 @@
+#
+# Copyright (c) 2005 Jakub Jermar
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# - Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+# - Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in the
+#   documentation and/or other materials provided with the distribution.
+# - The name of the author may not be used to endorse or promote products
+#   derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+#include <arch/arch.h>
+#include <arch/cpu.h>
+#include <arch/regdef.h>
+#include <arch/boot/boot.h>
+#include <arch/stack.h>
+
+#include <arch/mm/mmu.h>
+#include <arch/mm/tlb.h>
+#include <arch/mm/tte.h>
+
+#ifdef CONFIG_SMP
+#include <arch/context_offset.h>
+#endif
+
+.register %g2, #scratch
+.register %g3, #scratch
+
+.section K_TEXT_START, "ax"
+
+#define BSP_FLAG	1
+
+/*
+ * 2^PHYSMEM_ADDR_SIZE is the size of the physical address space on
+ * a given processor.
+ */
+#if defined (US)
+    #define PHYSMEM_ADDR_SIZE	41
+#elif defined (US3)
+    #define PHYSMEM_ADDR_SIZE	43
+#endif
+
+/*
+ * Here is where the kernel is passed control from the boot loader.
+ * 
+ * The registers are expected to be in this state:
+ * - %o0 starting address of physical memory + bootstrap processor flag
+ * 	bits 63...1:	physical memory starting address / 2
+ *	bit 0:		non-zero on BSP processor, zero on AP processors
+ * - %o1 bootinfo structure address (BSP only)
+ * - %o2 bootinfo structure size (BSP only)
+ *
+ * Moreover, we depend on boot having established the following environment:
+ * - TLBs are on
+ * - identity mapping for the kernel image
+ */
+
+.global kernel_image_start
+kernel_image_start:
+	mov BSP_FLAG, %l0
+	and %o0, %l0, %l7			! l7 <= bootstrap processor?
+	andn %o0, %l0, %l6			! l6 <= start of physical memory
+
+	! Get bits (PHYSMEM_ADDR_SIZE - 1):13 of physmem_base.
+	srlx %l6, 13, %l5
+	
+	! l5 <= physmem_base[(PHYSMEM_ADDR_SIZE - 1):13]
+	sllx %l5, 13 + (63 - (PHYSMEM_ADDR_SIZE - 1)), %l5
+	srlx %l5, 63 - (PHYSMEM_ADDR_SIZE - 1), %l5	
+
+	/*
+	 * Setup basic runtime environment.
+	 */
+
+	wrpr %g0, NWINDOWS - 2, %cansave	! set maximum saveable windows
+	wrpr %g0, 0, %canrestore		! get rid of windows we will
+						! never need again
+	wrpr %g0, 0, %otherwin			! make sure the window state is
+						! consistent
+	wrpr %g0, NWINDOWS - 1, %cleanwin	! prevent needless clean_window
+						! traps for kernel
+						
+	wrpr %g0, 0, %wstate			! use default spill/fill trap
+
+	wrpr %g0, 0, %tl			! TL = 0, primary context
+						! register is used
+
+	wrpr %g0, PSTATE_PRIV_BIT, %pstate	! disable interrupts and disable
+						! 32-bit address masking
+
+	wrpr %g0, 0, %pil			! initialize %pil
+
+	/*
+	 * Switch to kernel trap table.
+	 */
+	sethi %hi(trap_table), %g1
+	wrpr %g1, %lo(trap_table), %tba
+
+	/* 
+	 * Take over the DMMU by installing locked TTE entry identically
+	 * mapping the first 4M of memory.
+	 *
+	 * In case of DMMU, no FLUSH instructions need to be issued. Because of
+	 * that, the old DTLB contents can be demapped pretty straightforwardly
+	 * and without causing any traps.
+	 */
+
+	wr %g0, ASI_DMMU, %asi
+
+#define SET_TLB_DEMAP_CMD(r1, context_id) \
+	set (TLB_DEMAP_CONTEXT << TLB_DEMAP_TYPE_SHIFT) | (context_id << \
+		TLB_DEMAP_CONTEXT_SHIFT), %r1
+	
+	! demap context 0
+	SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS)
+	stxa %g0, [%g1] ASI_DMMU_DEMAP			
+	membar #Sync
+
+#define SET_TLB_TAG(r1, context) \
+	set VMA | (context << TLB_TAG_ACCESS_CONTEXT_SHIFT), %r1
+
+	! write DTLB tag
+	SET_TLB_TAG(g1, MEM_CONTEXT_KERNEL)
+	stxa %g1, [VA_DMMU_TAG_ACCESS] %asi			
+	membar #Sync
+
+#ifdef CONFIG_VIRT_IDX_DCACHE
+#define TTE_LOW_DATA(imm) 	(TTE_CP | TTE_CV | TTE_P | LMA | (imm))
+#else /* CONFIG_VIRT_IDX_DCACHE */
+#define TTE_LOW_DATA(imm) 	(TTE_CP | TTE_P | LMA | (imm))
+#endif /* CONFIG_VIRT_IDX_DCACHE */
+
+#define SET_TLB_DATA(r1, r2, imm) \
+	set TTE_LOW_DATA(imm), %r1; \
+	or %r1, %l5, %r1; \
+	mov PAGESIZE_4M, %r2; \
+	sllx %r2, TTE_SIZE_SHIFT, %r2; \
+	or %r1, %r2, %r1; \
+	mov 1, %r2; \
+	sllx %r2, TTE_V_SHIFT, %r2; \
+	or %r1, %r2, %r1;
+	
+	! write DTLB data and install the kernel mapping
+	SET_TLB_DATA(g1, g2, TTE_L | TTE_W)	! use non-global mapping
+	stxa %g1, [%g0] ASI_DTLB_DATA_IN_REG		
+	membar #Sync
+
+	/*
+	 * Because we cannot use global mappings (because we want to have
+	 * separate 64-bit address spaces for both the kernel and the
+	 * userspace), we prepare the identity mapping also in context 1. This
+	 * step is required by the code installing the ITLB mapping.
+	 */
+	! write DTLB tag of context 1 (i.e. MEM_CONTEXT_TEMP)
+	SET_TLB_TAG(g1, MEM_CONTEXT_TEMP)
+	stxa %g1, [VA_DMMU_TAG_ACCESS] %asi			
+	membar #Sync
+
+	! write DTLB data and install the kernel mapping in context 1
+	SET_TLB_DATA(g1, g2, TTE_W)			! use non-global mapping
+	stxa %g1, [%g0] ASI_DTLB_DATA_IN_REG		
+	membar #Sync
+	
+	/*
+	 * Now is time to take over the IMMU. Unfortunatelly, it cannot be done
+	 * as easily as the DMMU, because the IMMU is mapping the code it
+	 * executes.
+	 *
+	 * [ Note that brave experiments with disabling the IMMU and using the
+	 * DMMU approach failed after a dozen of desparate days with only little
+	 * success. ]
+	 *
+	 * The approach used here is inspired from OpenBSD. First, the kernel
+	 * creates IMMU mapping for itself in context 1 (MEM_CONTEXT_TEMP) and
+	 * switches to it. Context 0 (MEM_CONTEXT_KERNEL) can be demapped
+	 * afterwards and replaced with the kernel permanent mapping. Finally,
+	 * the kernel switches back to context 0 and demaps context 1.
+	 *
+	 * Moreover, the IMMU requires use of the FLUSH instructions. But that
+	 * is OK because we always use operands with addresses already mapped by
+	 * the taken over DTLB.
+	 */
+	
+	set kernel_image_start, %g5
+	
+	! write ITLB tag of context 1
+	SET_TLB_TAG(g1, MEM_CONTEXT_TEMP)
+	mov VA_DMMU_TAG_ACCESS, %g2
+	stxa %g1, [%g2] ASI_IMMU
+	flush %g5
+
+	! write ITLB data and install the temporary mapping in context 1
+	SET_TLB_DATA(g1, g2, 0)			! use non-global mapping
+	stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG		
+	flush %g5
+	
+	! switch to context 1
+	mov MEM_CONTEXT_TEMP, %g1
+	stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi	! ASI_DMMU is correct here !!!
+	flush %g5
+	
+	! demap context 0
+	SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS)
+	stxa %g0, [%g1] ASI_IMMU_DEMAP			
+	flush %g5
+	
+	! write ITLB tag of context 0
+	SET_TLB_TAG(g1, MEM_CONTEXT_KERNEL)
+	mov VA_DMMU_TAG_ACCESS, %g2
+	stxa %g1, [%g2] ASI_IMMU
+	flush %g5
+
+	! write ITLB data and install the permanent kernel mapping in context 0
+	SET_TLB_DATA(g1, g2, TTE_L)		! use non-global mapping
+	stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG		
+	flush %g5
+
+	! enter nucleus - using context 0
+	wrpr %g0, 1, %tl
+
+	! demap context 1
+	SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_PRIMARY)
+	stxa %g0, [%g1] ASI_IMMU_DEMAP			
+	flush %g5
+	
+	! set context 0 in the primary context register
+	stxa %g0, [VA_PRIMARY_CONTEXT_REG] %asi	! ASI_DMMU is correct here !!!
+	flush %g5
+	
+	! leave nucleus - using primary context, i.e. context 0
+	wrpr %g0, 0, %tl
+
+	brz %l7, 1f				! skip if you are not the bootstrap CPU
+	nop
+
+	/*
+	 * Save physmem_base for use by the mm subsystem.
+	 * %l6 contains starting physical address
+	 */
+	sethi %hi(physmem_base), %l4
+	stx %l6, [%l4 + %lo(physmem_base)]
+
+	/*
+	 * Precompute kernel 8K TLB data template.
+	 * %l5 contains starting physical address
+	 * bits [(PHYSMEM_ADDR_SIZE - 1):13]
+	 */
+	sethi %hi(kernel_8k_tlb_data_template), %l4
+	ldx [%l4 + %lo(kernel_8k_tlb_data_template)], %l3
+	or %l3, %l5, %l3
+	stx %l3, [%l4 + %lo(kernel_8k_tlb_data_template)]
+
+	/*
+	 * Flush D-Cache.
+	 */
+	call dcache_flush
+	nop
+
+	/*
+	 * So far, we have not touched the stack.
+	 * It is a good idea to set the kernel stack to a known state now.
+	 */
+	sethi %hi(temporary_boot_stack), %sp
+	or %sp, %lo(temporary_boot_stack), %sp
+	sub %sp, STACK_BIAS, %sp
+
+	sethi %hi(bootinfo), %o0
+	call memcpy				! copy bootinfo
+	or %o0, %lo(bootinfo), %o0
+
+	call arch_pre_main
+	nop
+	
+	call main_bsp
+	nop
+
+	/* Not reached. */
+
+0:
+	ba %xcc, 0b
+	nop
+
+
+1:
+#ifdef CONFIG_SMP
+	/*
+	 * Determine the width of the MID and save its mask to %g3. The width
+	 * is
+	 * 	* 5 for US and US-IIIi,
+	 * 	* 10 for US3 except US-IIIi.
+	 */
+#if defined(US)
+	mov 0x1f, %g3
+#elif defined(US3)
+	mov 0x3ff, %g3
+	rdpr %ver, %g2
+	sllx %g2, 16, %g2
+	srlx %g2, 48, %g2
+	cmp %g2, IMPL_ULTRASPARCIII_I
+	move %xcc, 0x1f, %g3
+#endif
+
+	/*
+	 * Read MID from the processor.
+	 */
+	ldxa [%g0] ASI_ICBUS_CONFIG, %g1
+	srlx %g1, ICBUS_CONFIG_MID_SHIFT, %g1
+	and %g1, %g3, %g1
+
+	/*
+	 * Active loop for APs until the BSP picks them up. A processor cannot
+	 * leave the loop until the global variable 'waking_up_mid' equals its
+	 * MID.
+	 */
+	set waking_up_mid, %g2
+2:
+	ldx [%g2], %g3
+	cmp %g3, %g1
+	bne %xcc, 2b
+	nop
+
+	/*
+	 * Configure stack for the AP.
+	 * The AP is expected to use the stack saved
+	 * in the ctx global variable.
+	 */
+	set ctx, %g1
+	add %g1, OFFSET_SP, %g1
+	ldx [%g1], %o6
+
+	call main_ap
+	nop
+
+	/* Not reached. */
+#endif
+	
+0:
+	ba %xcc, 0b
+	nop
+
+
+.section K_DATA_START, "aw", @progbits
+
+/*
+ * Create small stack to be used by the bootstrap processor. It is going to be
+ * used only for a very limited period of time, but we switch to it anyway,
+ * just to be sure we are properly initialized.
+ */
+
+#define INITIAL_STACK_SIZE	1024
+
+.align STACK_ALIGNMENT
+	.space INITIAL_STACK_SIZE
+.align STACK_ALIGNMENT
+temporary_boot_stack:
+	.space STACK_WINDOW_SAVE_AREA_SIZE
+
+
+.data
+
+.align 8
+.global physmem_base		! copy of the physical memory base address
+physmem_base:
+	.quad 0
+
+/*
+ * The fast_data_access_mmu_miss_data_hi label and the end_of_identity and
+ * kernel_8k_tlb_data_template variables are meant to stay together,
+ * aligned on 16B boundary.
+ */
+.global fast_data_access_mmu_miss_data_hi
+.global end_of_identity 
+.global kernel_8k_tlb_data_template
+
+.align 16
+/*
+ * This label is used by the fast_data_access_MMU_miss trap handler.
+ */
+fast_data_access_mmu_miss_data_hi:
+/*
+ * This variable is used by the fast_data_access_MMU_miss trap handler.
+ * In runtime, it is modified to contain the address of the end of physical
+ * memory.
+ */
+end_of_identity:
+	.quad -1 
+/*
+ * This variable is used by the fast_data_access_MMU_miss trap handler.
+ * In runtime, it is further modified to reflect the starting address of
+ * physical memory.
+ */
+kernel_8k_tlb_data_template:
+#ifdef CONFIG_VIRT_IDX_DCACHE
+	.quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | \
+		 TTE_CV | TTE_P | TTE_W)
+#else /* CONFIG_VIRT_IDX_DCACHE */
+	.quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | \
+		TTE_P | TTE_W)
+#endif /* CONFIG_VIRT_IDX_DCACHE */
+
Index: kernel/arch/sparc64/src/sun4v/asm.S
===================================================================
--- kernel/arch/sparc64/src/sun4v/asm.S	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/sun4v/asm.S	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,77 @@
+#
+# Copyright (c) 2008 Pavel Rimsky
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# - Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+# - Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in the
+#   documentation and/or other materials provided with the distribution.
+# - The name of the author may not be used to endorse or promote products
+#   derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+#include <arch/mm/mmu.h>
+#include <arch/regdef.h>
+#include <arch/stack.h>
+
+.text
+
+/** Switch to userspace.
+ *
+ * %o0	Userspace entry address.
+ * %o1	Userspace stack pointer address.
+ * %o2  Userspace address of uarg structure.
+ */
+.global switch_to_userspace
+switch_to_userspace:
+	wrpr PSTATE_PRIV_BIT, %pstate
+	save %o1, -STACK_WINDOW_SAVE_AREA_SIZE, %sp
+	flushw
+	wrpr %g0, 0, %cleanwin		! avoid information leak
+
+	mov %i2, %o0			! uarg
+	xor %o1, %o1, %o1		! %o1 is defined to hold pcb_ptr
+					! set it to 0
+	clr %i2
+	clr %i3
+	clr %i4
+	clr %i5
+	clr %i6
+	wrpr %g0, 1, %tl		! enforce mapping via nucleus
+
+	rdpr %cwp, %g1
+	wrpr %g1, TSTATE_IE_BIT, %tstate
+	wrpr %i0, 0, %tnpc
+	
+	/*
+	 * Set primary context according to secondary context.
+	 * Secondary context has been already installed by
+	 * higher-level functions.
+	 */
+	wr %g0, ASI_SECONDARY_CONTEXT_REG, %asi
+	ldxa [VA_SECONDARY_CONTEXT_REG] %asi, %g1
+	wr %g0, ASI_PRIMARY_CONTEXT_REG, %asi
+	stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi
+	flush %i7
+
+	/*
+	 * Spills and fills will be handled by the userspace handlers.
+	 */
+	wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(1), %wstate
+	done				! jump to userspace
Index: kernel/arch/sparc64/src/sun4v/md.c
===================================================================
--- kernel/arch/sparc64/src/sun4v/md.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/sun4v/md.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2009 Pavel Rimsky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64
+ * @{
+ */
+/** @file
+ */
+
+#include <debug.h>
+#include <panic.h>
+#include <func.h>
+#include <print.h>
+#include <string.h>
+#include <arch/sun4v/md.h>
+#include <arch/sun4v/hypercall.h>
+#include <arch/mm/page.h>
+
+/* maximum MD size estimate (in bytes) */
+#define MD_MAX_SIZE	(64 * 1024)
+
+/** element types (element tag values) */
+#define LIST_END	0x0	/**< End of element list */
+#define NODE		0x4e	/**< Start of node definition */
+#define NODE_END	0x45	/**< End of node definition */
+#define NOOP		0x20	/**< NOOP list element - to be ignored */
+#define PROP_ARC	0x61	/**< Node property arc'ing to another node */
+#define PROP_VAL	0x76	/**< Node property with an integer value */
+#define PROP_STR	0x73	/**< Node property with a string value */
+#define PROP_DATA	0x64	/**< Node property with a block of data */
+
+
+/** machine description header */
+typedef struct {
+	uint32_t transport_version;	/**< Transport version number */
+	uint32_t node_blk_sz;		/**< Size in bytes of node block */
+	uint32_t name_blk_sz;		/**< Size in bytes of name block */
+	uint32_t data_blk_sz;		/**< Size in bytes of data block */
+} __attribute__ ((packed)) md_header_t;
+
+/** machine description element (in the node block) */
+typedef struct {
+	uint8_t tag;			/**< Type of element */
+	uint8_t name_len;		/**< Length in bytes of element name */
+	uint16_t _reserved_field;	/**< reserved field (zeros) */
+	uint32_t name_offset;		/**< Location offset of name associated
+					     with this element relative to
+					     start of name block */
+	union {
+		/** for elements of type “PROP_STR” and of type “PROP_DATA” */
+		struct {
+			/** Length in bytes of data in data block */
+			uint32_t data_len;
+
+			/**
+			 * Location offset of data associated with this
+			 * element relative to start of data block
+			 */
+			uint32_t data_offset;
+		} y;
+
+		/**
+		 *  64 bit value for elements of tag type “NODE”, “PROP_VAL”
+		 *  or “PROP_ARC”
+		 */
+		uint64_t val;
+	} d;
+} __attribute__ ((packed)) md_element_t;
+
+/** index of the element within the node block */
+typedef unsigned int element_idx_t;
+
+/** buffer to which the machine description will be saved */
+static uint8_t mach_desc[MD_MAX_SIZE]
+	 __attribute__ ((aligned (16)));
+
+
+/** returns pointer to the element at the given index */
+static md_element_t *get_element(element_idx_t idx)
+{
+	return (md_element_t *) (
+		mach_desc + sizeof(md_header_t) + idx * sizeof(md_element_t));
+}
+
+/** returns the name of the element represented by the index */
+static const char *get_element_name(element_idx_t idx)
+{
+	md_header_t *md_header = (md_header_t *) mach_desc;
+	uintptr_t name_offset = get_element(idx)->name_offset;
+	return (char *) mach_desc + sizeof(md_header_t) +
+		md_header->node_blk_sz + name_offset;
+}
+
+/** finds the name of the node represented by "node" */
+const char *md_get_node_name(md_node_t node)
+{
+	return get_element_name(node);
+}
+
+/**
+ * Returns the value of the integer property of the given node.
+ *
+ * @param node		node whose property is to be read
+ * @param key		name of the property to look up
+ * @param result	will be set to the value of the property, if found
+ *
+ * @return true if the property was found, false otherwise
+ */
+bool md_get_integer_property(md_node_t node, const char *key,
+	uint64_t *result)
+{
+	element_idx_t idx = node;	
+
+	while (get_element(idx)->tag != NODE_END) {
+		idx++;
+		md_element_t *element = get_element(idx);
+		if (element->tag == PROP_VAL &&
+				str_cmp(key, get_element_name(idx)) == 0) {
+			*result = element->d.val;
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/**
+ * Returns the value of the string property of the given node.
+ *
+ * @param node		node whose property is to be read
+ * @param key		name of the property to look up
+ * @param result	will be set to point to the string value, if found
+ *
+ * @return true if the property was found, false otherwise
+ */
+bool md_get_string_property(md_node_t node, const char *key,
+	const char **result)
+{
+	md_header_t *md_header = (md_header_t *) mach_desc;
+	element_idx_t idx = node;
+
+	while (get_element(idx)->tag != NODE_END) {
+		idx++;
+		md_element_t *element = get_element(idx);
+		if (element->tag == PROP_DATA &&
+				str_cmp(key, get_element_name(idx)) == 0) {
+			*result = (char *) mach_desc + sizeof(md_header_t) +
+				md_header->node_blk_sz + md_header->name_blk_sz +
+				element->d.y.data_offset;
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/**
+ * Moves the child iterator to the next child (following sibling of the node
+ * the iterator currently points to).
+ *
+ * @param it	pointer to the iterator to be moved
+ *
+ * @return true if a next child exists, false otherwise
+ */
+bool md_next_child(md_child_iter_t *it)
+{
+	element_idx_t backup = *it;
+
+	while (get_element(*it)->tag != NODE_END) {
+		(*it)++;
+		md_element_t *element = get_element(*it);
+		if (element->tag == PROP_ARC &&
+				str_cmp("fwd", get_element_name(*it)) == 0) {
+			return true;
+		}
+	}
+
+	*it = backup;
+	return false;
+}
+
+/**
+ * Returns the node the iterator points to.
+ */
+md_node_t md_get_child_node(md_child_iter_t it)
+{
+	return get_element(it)->d.val;
+}
+
+/**
+ * Helper function used to split a string to a part before the first
+ * slash sign and a part after the slash sign.
+ *
+ * @param str	pointer to the string to be split; when the function finishes,
+ * 		it will	contain only the part following the first slash sign of
+ * 		the original string
+ * @param head	pointer to the string which will be set to the part before the
+ * 		first slash sign
+ */
+static bool str_parse_head(char **str, char **head)
+{
+	*head = *str;
+
+	char *cur = *str;
+	while (*cur != '\0') {
+		if (*cur == '/') {
+			*cur = '\0';
+			*str = cur + 1;
+			return true;
+		}
+		cur++;
+	}
+
+	return false;
+}
+
+/**
+ * Returns the descendant of the given node. The descendant is identified
+ * by a path where the node names are separated by a slash.
+ *
+ * Ex.: Let there be a node N with path "a/b/c/x/y/z" and let P represent the
+ * node with path "a/b/c". Then md_get_child(P, "x/y/z") will return N.
+ */
+md_node_t md_get_child(md_node_t node, char *name)
+{
+	bool more;
+
+	do {
+		char *head;
+		/* split off the path component before the first slash */
+		more = str_parse_head(&name, &head);
+		
+		/* descend into the child whose name matches this component */
+		while (md_next_child(&node)) {
+			element_idx_t child = md_get_child_node(node);
+			if (str_cmp(head, get_element_name(child)) == 0) {
+				node = child;
+				break;
+			}
+		}
+
+		/*
+		 * NOTE(review): if no child matches the component, the search
+		 * silently continues from the current node — presumably callers
+		 * only pass paths that exist; confirm at call sites.
+		 */
+	} while (more);
+
+	return node;
+}
+
+/** returns the root node of MD */
+md_node_t md_get_root(void)
+{
+	return 0;
+}
+
+/**
+ * Returns the child iterator - a token to be passed to functions iterating
+ * through all the children of a node.
+ *
+ * @param node	a node whose children the iterator will be used
+ * 		to iterate through
+ */
+md_child_iter_t md_get_child_iterator(md_node_t node)
+{
+	return node;
+}
+
+/**
+ * Moves "node" to the next node of the MD (following "node" in the node
+ * block) whose name is "name".
+ *
+ * @return true if such a node was found, false otherwise
+ */
+bool md_next_node(md_node_t *node, const char *name)
+{
+	md_element_t *element;
+	(*node)++;
+
+	do {
+		element = get_element(*node);
+
+		if (element->tag == NODE &&
+				str_cmp(name, get_element_name(*node)) == 0) {
+			return true;
+		}
+		
+		(*node)++;
+	} while (element->tag != LIST_END);
+
+	return false;
+}
+
+/**
+ * Retrieves the machine description from the hypervisor and saves it to
+ * a kernel buffer.
+ */
+void md_init(void)
+{
+	uint64_t retval = __hypercall_fast2(MACH_DESC, KA2PA(mach_desc),
+		MD_MAX_SIZE);
+
+	if (retval != EOK) {
+		/* cast: retval is 64-bit; passing it for "%d" is undefined */
+		printf("Could not retrieve machine description, error = %d.\n",
+			(int) retval);
+	}
+}
+
+/** @}
+ */
Index: kernel/arch/sparc64/src/sun4v/sparc64.c
===================================================================
--- kernel/arch/sparc64/src/sun4v/sparc64.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/sun4v/sparc64.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup sparc64
+ * @{
+ */
+/** @file
+ */
+
+#include <arch.h>
+#include <debug.h>
+#include <config.h>
+#include <arch/trap/trap.h>
+#include <arch/console.h>
+#include <arch/sun4v/md.h>
+#include <console/console.h>
+#include <arch/boot/boot.h>
+#include <arch/arch.h>
+#include <arch/asm.h>
+#include <arch/mm/page.h>
+#include <arch/stack.h>
+#include <interrupt.h>
+#include <genarch/ofw/ofw_tree.h>
+#include <userspace.h>
+#include <ddi/irq.h>
+#include <string.h>
+
+//MH
+#include <arch/drivers/niagara.h>
+
+bootinfo_t bootinfo;
+
+/** Perform sparc64-specific initialization before main_bsp() is called. */
+void arch_pre_main(void)
+{
+	/* Copy init task info. */
+	init.cnt = bootinfo.taskmap.count;
+	
+	uint32_t i;
+
+	for (i = 0; i < bootinfo.taskmap.count; i++) {
+		init.tasks[i].addr = (uintptr_t) bootinfo.taskmap.tasks[i].addr;
+		init.tasks[i].size = bootinfo.taskmap.tasks[i].size;
+		str_cpy(init.tasks[i].name, CONFIG_TASK_NAME_BUFLEN,
+		    bootinfo.taskmap.tasks[i].name);
+	}
+
+	md_init();
+}
+
+/** Perform sparc64 specific initialization before mm is initialized. */
+void arch_pre_mm_init(void)
+{
+	if (config.cpu_active == 1)
+		trap_init();
+}
+
+/** Perform sparc64 specific initialization afterr mm is initialized. */
+void arch_post_mm_init(void)
+{
+	if (config.cpu_active == 1) {
+		/*
+		 * We have 2^11 different interrupt vectors.
+		 * But we only create 128 buckets.
+		 */
+		irq_init(1 << 11, 128);
+	}
+}
+
+void arch_post_cpu_init(void)
+{
+}
+
+void arch_pre_smp_init(void)
+{
+}
+
+void arch_post_smp_init(void)
+{
+	niagarain_init();
+}
+
+/** Calibrate delay loop.
+ *
+ * On sparc64, we implement delay() by waiting for the TICK register to
+ * reach a pre-computed value, as opposed to performing some pre-computed
+ * amount of instructions of known duration. We set the delay_loop_const
+ * to 1 in order to neutralize the multiplication done by delay().
+ */
+void calibrate_delay_loop(void)
+{
+	CPU->delay_loop_const = 1;
+}
+
+/** Wait several microseconds.
+ *
+ * We assume that interrupts are already disabled.
+ *
+ * @param t Microseconds to wait.
+ */
+void asm_delay_loop(const uint32_t usec)
+{
+	uint64_t stop = tick_read() + (uint64_t) usec * (uint64_t)
+	    CPU->arch.clock_frequency / 1000000;
+
+	while (tick_read() < stop)
+		;
+}
+
+/** Switch to userspace. */
+void userspace(uspace_arg_t *kernel_uarg)
+{
+	(void) interrupts_disable();
+	switch_to_userspace((uintptr_t) kernel_uarg->uspace_entry,
+	    ((uintptr_t) kernel_uarg->uspace_stack) + STACK_SIZE
+	    - (ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT) + STACK_BIAS),
+	    (uintptr_t) kernel_uarg->uspace_uarg);
+
+	for (;;)
+		;
+	/* not reached */
+}
+
+void arch_reboot(void)
+{
+	// TODO
+	while (1);
+}
+
+/** Construct function pointer
+ *
+ * @param fptr   function pointer structure
+ * @param addr   function address
+ * @param caller calling function address
+ *
+ * @return address of the function pointer
+ *
+ */
+void *arch_construct_function(fncptr_t *fptr, void *addr, void *caller)
+{
+	return addr;
+}
+
+void irq_initialize_arch(irq_t *irq)
+{
+	(void) irq;
+}
+
+/** @}
+ */
Index: kernel/arch/sparc64/src/sun4v/start.S
===================================================================
--- kernel/arch/sparc64/src/sun4v/start.S	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/sun4v/start.S	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,345 @@
+#
+# Copyright (c) 2005 Jakub Jermar
+# Copyright (c) 2008 Pavel Rimsky
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# - Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+# - Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in the
+#   documentation and/or other materials provided with the distribution.
+# - The name of the author may not be used to endorse or promote products
+#   derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+#include <arch/arch.h>
+#include <arch/stack.h>
+#include <arch/context_offset.h>
+#include <arch/sun4v/regdef.h>
+#include <arch/sun4v/hypercall.h>
+#include <arch/sun4v/arch.h>
+#include <arch/sun4v/cpu.h>
+#include <arch/mm/pagesize.h>
+#include <arch/mm/sun4v/tte.h>
+#include <arch/mm/sun4v/mmu.h>
+#include <arch/mm/sun4v/tlb.h>
+
+.register %g2, #scratch
+.register %g3, #scratch
+
+.section K_TEXT_START, "ax"
+
+#define BSP_FLAG		1
+#define PHYSMEM_ADDR_SIZE	56
+
+/*
+ * Flags set in the TTE data entry mapping the kernel.
+ */
+#ifdef CONFIG_VIRT_IDX_DCACHE
+	#define TTE_FLAGS \
+		(1 << TTE_V_SHIFT) \
+		| (1 << TTE_EP_SHIFT) \
+		| (1 << TTE_CP_SHIFT) \
+		| (1 << TTE_CV_SHIFT) \
+		| (1 << TTE_P_SHIFT) \
+		| (1 << TTE_W_SHIFT)
+#else
+	#define TTE_FLAGS \
+		(1 << TTE_V_SHIFT) \
+		| (1 << TTE_EP_SHIFT) \
+		| (1 << TTE_CP_SHIFT) \
+		| (1 << TTE_P_SHIFT) \
+		| (1 << TTE_W_SHIFT)
+#endif
+
+
+/*
+ * Fills a register with a TTE Data item. The item will map the given virtual
+ * address to a real address which will be computed by adding the starting
+ * address of the physical memory to the virtual address.
+ *
+ * parameters:
+ * 	addr:			virtual address to be mapped
+ *	rphysmem_start:		register containing the starting address of the
+ *				physical memory
+ *	rtmp1:			a register to be used as temporary
+ *	rtmp2:			a register to be used as temporary
+ *	rd:			register where the result will be saved
+ */
+#define TTE_DATA(addr, rphysmem_start, rtmp1, rtmp2, rd) \
+	setx TTE_FLAGS | PAGESIZE_4M, rtmp1, rd; \
+	add rd, rphysmem_start, rd; \
+	setx (addr), rtmp1, rtmp2; \
+	add rd, rtmp2, rd;
+
+/*
+ * Here is where the kernel is passed control from the boot loader.
+ * 
+ * The registers are expected to be in this state:
+ * - %o0 starting address of physical memory + bootstrap processor flag
+ * 	bits 63...1:	physical memory starting address / 2
+ *	bit 0:		non-zero on BSP processor, zero on AP processors
+ * - %o1 bootinfo structure address (BSP only)
+ * - %o2 bootinfo structure size (BSP only)
+ *
+ * Moreover, we depend on boot having established the following environment:
+ * - TLBs are on
+ * - identity mapping for the kernel image
+ */
+.global kernel_image_start
+kernel_image_start:
+	mov BSP_FLAG, %l0
+	and %o0, %l0, %l7			! l7 <= bootstrap processor?
+	andn %o0, %l0, %l6			! l6 <= start of physical memory
+	or %o1, %g0, %l1
+	or %o2, %g0, %l2
+
+	! Get bits (PHYSMEM_ADDR_SIZE - 1):13 of physmem_base.
+	srlx %l6, 13, %l5
+	
+	! l5 <= physmem_base[(PHYSMEM_ADDR_SIZE - 1):13]
+	sllx %l5, 13 + (63 - (PHYSMEM_ADDR_SIZE - 1)), %l5
+	srlx %l5, 63 - (PHYSMEM_ADDR_SIZE - 1), %l5	
+
+	/*
+	 * Setup basic runtime environment.
+	 */
+	wrpr %g0, NWINDOWS - 2, %cansave	! set maximum saveable windows
+	wrpr %g0, 0, %canrestore		! get rid of windows we will
+						! never need again
+	wrpr %g0, 0, %otherwin			! make sure the window state is
+						! consistent
+	wrpr %g0, NWINDOWS - 1, %cleanwin	! prevent needless clean_window
+						! traps for kernel
+						
+	wrpr %g0, 0, %wstate			! use default spill/fill trap
+
+	wrpr %g0, 0, %tl			! TL = 0, primary context
+						! register is used
+	wrpr %g0, 0, %gl
+
+	wrpr %g0, PSTATE_PRIV_BIT, %pstate	! disable interrupts and disable
+						! 32-bit address masking
+
+	wrpr %g0, 0, %pil			! initialize %pil
+
+	/*
+	 * Switch to kernel trap table.
+	 */
+	sethi %hi(trap_table), %g1
+	wrpr %g1, %lo(trap_table), %tba
+
+	/* Explicitly switch to hypervisor API 1.1. */
+	mov 1, %o0
+   	mov 1, %o1
+   	mov 1, %o2
+   	mov 0, %o3
+   	mov 0, %o4
+   	mov 0, %o5
+   	ta 0xff
+   	nop
+
+	/*
+	 * Take over the MMU.
+	 */
+
+	! map kernel in context 1
+	set kernel_image_start, %o0				! virt. address
+	set 1, %o1						! context
+	TTE_DATA(kernel_image_start, %l5, %g2, %g3, %o2)	! TTE data
+	set MMU_FLAG_DTLB | MMU_FLAG_ITLB, %o3			! MMU flags
+	__HYPERCALL_HYPERFAST(MMU_MAP_ADDR)
+
+	! switch to context 1
+	set 1, %o0
+	set VA_PRIMARY_CONTEXT_REG, %o1
+	stxa %o0, [%o1] ASI_PRIMARY_CONTEXT_REG
+
+	! demap all in context 0
+	set 0, %o0						! reserved
+	set 0, %o1						! reserved
+	set 0, %o2						! context
+	set MMU_FLAG_DTLB | MMU_FLAG_ITLB, %o3			! MMU flags
+	__HYPERCALL_FAST(MMU_DEMAP_CTX)
+
+	! install permanent mapping for kernel in context 0
+	set kernel_image_start, %o0				! virtual address
+	set 0, %o1						! context
+	TTE_DATA(kernel_image_start, %l5, %g2, %g3, %o2)	! TTE data
+	set MMU_FLAG_DTLB | MMU_FLAG_ITLB, %o3			! MMU flags
+	__HYPERCALL_FAST(MMU_MAP_PERM_ADDR)
+
+	! switch to context 0
+	mov 0, %o0
+	set VA_PRIMARY_CONTEXT_REG, %o1
+	stxa %o0, [%o1] ASI_PRIMARY_CONTEXT_REG
+
+	! demap all in context 1 (cleanup)
+	set 0, %o0						! reserved
+	set 0, %o1						! reserved
+	set 1, %o2						! context
+	set MMU_FLAG_DTLB | MMU_FLAG_ITLB, %o3			! MMU flags
+	__HYPERCALL_FAST(MMU_DEMAP_CTX)
+
+	/*
+	 * Set CPUID.
+	 */
+	__HYPERCALL_FAST(CPU_MYID)
+	mov SCRATCHPAD_CPUID, %g1
+	stxa %o1, [%g1] ASI_SCRATCHPAD
+
+	/*
+	 * Set MMU fault status area for the current CPU.
+	 */
+	set mmu_fsas, %o0			! o0 <= addr. of fault status areas array
+	add %o0, %l6, %o0			! kernel address to real address
+	mulx %o1, MMU_FSA_SIZE, %g1		! g1 <= offset of current CPU's fault status area
+	add %g1, %o0, %o0			! o0 <= FSA of the current CPU
+	mov SCRATCHPAD_MMU_FSA, %g1
+	stxa %o0, [%g1] ASI_SCRATCHPAD		! remember MMU fault status area to speed up miss handler
+	__HYPERCALL_FAST(MMU_FAULT_AREA_CONF)
+
+	! on APs skip executing the following code
+	cmp %l7, 0
+	be 1f
+	nop
+
+	/*
+	 * Save physmem_base for use by the mm subsystem.
+	 * %l6 contains starting physical address
+	 */	
+	sethi %hi(physmem_base), %l4
+	stx %l6, [%l4 + %lo(physmem_base)]
+
+	/*
+	 * Store a template of a TTE Data entry for kernel mappings.
+	 * This template will be used from the kernel MMU miss handler.
+	 */
+	!TTE_DATA(0, %l5, %g2, %g3, %g1)
+	setx TTE_FLAGS | PAGESIZE_8K, %g2, %g1
+	add %g1, %l5, %g1
+	set kernel_8k_tlb_data_template, %g4
+	stx %g1, [%g4]
+
+	/*
+	 * So far, we have not touched the stack.
+	 * It is a good idea to set the kernel stack to a known state now.
+	 */
+	sethi %hi(temporary_boot_stack), %sp
+	or %sp, %lo(temporary_boot_stack), %sp
+	sub %sp, STACK_BIAS, %sp
+
+	or %l1, %g0, %o1
+	or %l2, %g0, %o2
+	sethi %hi(bootinfo), %o0
+	call memcpy				! copy bootinfo
+	or %o0, %lo(bootinfo), %o0
+
+	call arch_pre_main
+	nop
+	
+	call main_bsp
+	nop
+
+	/* Not reached. */
+
+0:
+	ba 0b
+	nop
+
+1:
+
+#ifdef CONFIG_SMP
+
+	/*
+	 * Enable the MMU for the AP. The MMU_ENABLE
+	 * hypervisor call resumes execution at the
+	 * address passed in %o1 (mmu_enabled below).
+	 */
+
+	mov	1, %o0			! MMU enable flag
+	set	mmu_enabled, %o1
+	mov	MMU_ENABLE, %o5	! MMU enable HV call
+	ta	0x80		! call HV
+
+	mmu_enabled:
+
+	/*
+	 * Configure stack for the AP.
+	 * The AP is expected to use the stack saved
+	 * in the ctx global variable.
+	 */
+	set ctx, %g1
+	add %g1, OFFSET_SP, %g1
+	ldx [%g1], %o6
+
+	call main_ap
+	nop
+#endif
+
+	/* Not reached. */
+0:
+	ba 0b
+	nop
+
+.align 8
+.global temp_cpu_mondo_handler
+temp_cpu_mondo_handler:
+
+	set 0x3c, %o0
+	set 0x15, %o5
+	ta 0x80
+
+	mov 0, %o0
+	setx before_ap_boots, %g1, %o1
+	setx 0x80400000, %g1, %o2
+	add %o1, %o2, %o1
+	__HYPERCALL_FAST(MMU_ENABLE)
+
+before_ap_boots:
+	setx 0x80400000, %g0, %o0
+	ba kernel_image_start
+	nop
+
+.section K_DATA_START, "aw", @progbits
+
+#define INITIAL_STACK_SIZE		1024
+
+.align STACK_ALIGNMENT
+	.space INITIAL_STACK_SIZE
+.align STACK_ALIGNMENT
+temporary_boot_stack:
+	.space STACK_WINDOW_SAVE_AREA_SIZE
+
+
+.data
+
+.align 8
+.global physmem_base		! copy of the physical memory base address
+physmem_base:
+	.quad 0
+
+.global kernel_8k_tlb_data_template
+kernel_8k_tlb_data_template:
+	.quad 0
+
+/* MMU fault status areas for all CPUs */
+.align MMU_FSA_ALIGNMENT
+.global mmu_fsas
+mmu_fsas:
+	.space (MMU_FSA_SIZE * MAX_NUM_STRANDS)
Index: kernel/arch/sparc64/src/trap/exception.c
===================================================================
--- kernel/arch/sparc64/src/trap/exception.c	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ kernel/arch/sparc64/src/trap/exception.c	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -162,5 +162,6 @@
 	fault_if_from_uspace(istate, "%s.", __func__);
 	dump_istate(istate);
-	dump_sfsr_and_sfar();
+//MH
+//	dump_sfsr_and_sfar();
 	panic("%s.", __func__);
 }
Index: kernel/arch/sparc64/src/trap/mmu.S
===================================================================
--- kernel/arch/sparc64/src/trap/mmu.S	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ 	(revision )
@@ -1,42 +1,0 @@
-#
-# Copyright (c) 2006 Jakub Jermar
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# - Redistributions of source code must retain the above copyright
-#   notice, this list of conditions and the following disclaimer.
-# - Redistributions in binary form must reproduce the above copyright
-#   notice, this list of conditions and the following disclaimer in the
-#   documentation and/or other materials provided with the distribution.
-# - The name of the author may not be used to endorse or promote products
-#   derived from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-/**
- * @file
- * @brief	MMU trap handlers that do not fit into the trap table.
- */
-
-.register %g2, #scratch
-.register %g3, #scratch
-
-.text
-
-#include <arch/trap/mmu.h>
-#include <arch/trap/trap_table.h>
-#include <arch/regdef.h>
-
Index: kernel/arch/sparc64/src/trap/sun4u/mmu.S
===================================================================
--- kernel/arch/sparc64/src/trap/sun4u/mmu.S	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/trap/sun4u/mmu.S	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,42 @@
+#
+# Copyright (c) 2006 Jakub Jermar
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# - Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+# - Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in the
+#   documentation and/or other materials provided with the distribution.
+# - The name of the author may not be used to endorse or promote products
+#   derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+/**
+ * @file
+ * @brief	MMU trap handlers that do not fit into the trap table.
+ */
+
+.register %g2, #scratch
+.register %g3, #scratch
+
+.text
+
+#include <arch/trap/mmu.h>
+#include <arch/trap/trap_table.h>
+#include <arch/regdef.h>
+
Index: kernel/arch/sparc64/src/trap/sun4u/trap_table.S
===================================================================
--- kernel/arch/sparc64/src/trap/sun4u/trap_table.S	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/trap/sun4u/trap_table.S	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,851 @@
+#
+# Copyright (c) 2005 Jakub Jermar
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# - Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+# - Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in the
+#   documentation and/or other materials provided with the distribution.
+# - The name of the author may not be used to endorse or promote products
+#   derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+/**
+ * @file
+ * @brief This file contains kernel trap table.
+ */
+
+.register %g2, #scratch
+.register %g3, #scratch
+
+.text
+
+#include <arch/trap/trap_table.h>
+#include <arch/trap/regwin.h>
+#include <arch/trap/interrupt.h>
+#include <arch/trap/exception.h>
+#include <arch/trap/syscall.h>
+#include <arch/trap/mmu.h>
+#include <arch/mm/mmu.h>
+#include <arch/mm/page.h>
+#include <arch/stack.h>
+#include <arch/regdef.h>
+
+#define TABLE_SIZE	TRAP_TABLE_SIZE
+#define ENTRY_SIZE	TRAP_TABLE_ENTRY_SIZE
+
+/*
+ * Kernel trap table.
+ */
+.align TABLE_SIZE
+.global trap_table
+trap_table:
+
+/* TT = 0x08, TL = 0, instruction_access_exception */
+.org trap_table + TT_INSTRUCTION_ACCESS_EXCEPTION*ENTRY_SIZE
+.global instruction_access_exception_tl0
+instruction_access_exception_tl0:
+	wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate
+	PREEMPTIBLE_HANDLER instruction_access_exception
+
+/* TT = 0x0a, TL = 0, instruction_access_error */
+.org trap_table + TT_INSTRUCTION_ACCESS_ERROR*ENTRY_SIZE
+.global instruction_access_error_tl0
+instruction_access_error_tl0:
+	PREEMPTIBLE_HANDLER instruction_access_error
+
+/* TT = 0x10, TL = 0, illegal_instruction */
+.org trap_table + TT_ILLEGAL_INSTRUCTION*ENTRY_SIZE
+.global illegal_instruction_tl0
+illegal_instruction_tl0:
+	PREEMPTIBLE_HANDLER illegal_instruction
+
+/* TT = 0x11, TL = 0, privileged_opcode */
+.org trap_table + TT_PRIVILEGED_OPCODE*ENTRY_SIZE
+.global privileged_opcode_tl0
+privileged_opcode_tl0:
+	PREEMPTIBLE_HANDLER privileged_opcode
+
+/* TT = 0x12, TL = 0, unimplemented_LDD */
+.org trap_table + TT_UNIMPLEMENTED_LDD*ENTRY_SIZE
+.global unimplemented_LDD_tl0
+unimplemented_LDD_tl0:
+	PREEMPTIBLE_HANDLER unimplemented_LDD
+
+/* TT = 0x13, TL = 0, unimplemented_STD */
+.org trap_table + TT_UNIMPLEMENTED_STD*ENTRY_SIZE
+.global unimplemented_STD_tl0
+unimplemented_STD_tl0:
+	PREEMPTIBLE_HANDLER unimplemented_STD
+
+/* TT = 0x20, TL = 0, fp_disabled handler */
+.org trap_table + TT_FP_DISABLED*ENTRY_SIZE
+.global fp_disabled_tl0
+fp_disabled_tl0:
+	PREEMPTIBLE_HANDLER fp_disabled
+
+/* TT = 0x21, TL = 0, fp_exception_ieee_754 handler */
+.org trap_table + TT_FP_EXCEPTION_IEEE_754*ENTRY_SIZE
+.global fp_exception_ieee_754_tl0
+fp_exception_ieee_754_tl0:
+	PREEMPTIBLE_HANDLER fp_exception_ieee_754
+
+/* TT = 0x22, TL = 0, fp_exception_other handler */
+.org trap_table + TT_FP_EXCEPTION_OTHER*ENTRY_SIZE
+.global fp_exception_other_tl0
+fp_exception_other_tl0:
+	PREEMPTIBLE_HANDLER fp_exception_other
+
+/* TT = 0x23, TL = 0, tag_overflow */
+.org trap_table + TT_TAG_OVERFLOW*ENTRY_SIZE
+.global tag_overflow_tl0
+tag_overflow_tl0:
+	PREEMPTIBLE_HANDLER tag_overflow
+
+/* TT = 0x24, TL = 0, clean_window handler */
+.org trap_table + TT_CLEAN_WINDOW*ENTRY_SIZE
+.global clean_window_tl0
+clean_window_tl0:
+	CLEAN_WINDOW_HANDLER
+
+/* TT = 0x28, TL = 0, division_by_zero */
+.org trap_table + TT_DIVISION_BY_ZERO*ENTRY_SIZE
+.global division_by_zero_tl0
+division_by_zero_tl0:
+	PREEMPTIBLE_HANDLER division_by_zero
+
+/* TT = 0x30, TL = 0, data_access_exception */
+.org trap_table + TT_DATA_ACCESS_EXCEPTION*ENTRY_SIZE
+.global data_access_exception_tl0
+data_access_exception_tl0:
+	wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate
+	PREEMPTIBLE_HANDLER data_access_exception
+
+/* TT = 0x32, TL = 0, data_access_error */
+.org trap_table + TT_DATA_ACCESS_ERROR*ENTRY_SIZE
+.global data_access_error_tl0
+data_access_error_tl0:
+	PREEMPTIBLE_HANDLER data_access_error
+
+/* TT = 0x34, TL = 0, mem_address_not_aligned */
+.org trap_table + TT_MEM_ADDRESS_NOT_ALIGNED*ENTRY_SIZE
+.global mem_address_not_aligned_tl0
+mem_address_not_aligned_tl0:
+	PREEMPTIBLE_HANDLER mem_address_not_aligned
+
+/* TT = 0x35, TL = 0, LDDF_mem_address_not_aligned */
+.org trap_table + TT_LDDF_MEM_ADDRESS_NOT_ALIGNED*ENTRY_SIZE
+.global LDDF_mem_address_not_aligned_tl0
+LDDF_mem_address_not_aligned_tl0:
+	PREEMPTIBLE_HANDLER LDDF_mem_address_not_aligned
+
+/* TT = 0x36, TL = 0, STDF_mem_address_not_aligned */
+.org trap_table + TT_STDF_MEM_ADDRESS_NOT_ALIGNED*ENTRY_SIZE
+.global STDF_mem_address_not_aligned_tl0
+STDF_mem_address_not_aligned_tl0:
+	PREEMPTIBLE_HANDLER STDF_mem_address_not_aligned
+
+/* TT = 0x37, TL = 0, privileged_action */
+.org trap_table + TT_PRIVILEGED_ACTION*ENTRY_SIZE
+.global privileged_action_tl0
+privileged_action_tl0:
+	PREEMPTIBLE_HANDLER privileged_action
+
+/* TT = 0x38, TL = 0, LDQF_mem_address_not_aligned */
+.org trap_table + TT_LDQF_MEM_ADDRESS_NOT_ALIGNED*ENTRY_SIZE
+.global LDQF_mem_address_not_aligned_tl0
+LDQF_mem_address_not_aligned_tl0:
+	PREEMPTIBLE_HANDLER LDQF_mem_address_not_aligned
+
+/* TT = 0x39, TL = 0, STQF_mem_address_not_aligned */
+.org trap_table + TT_STQF_MEM_ADDRESS_NOT_ALIGNED*ENTRY_SIZE
+.global STQF_mem_address_not_aligned_tl0
+STQF_mem_address_not_aligned_tl0:
+	PREEMPTIBLE_HANDLER STQF_mem_address_not_aligned
+
+/* TT = 0x41, TL = 0, interrupt_level_1 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_1*ENTRY_SIZE
+.global interrupt_level_1_handler_tl0
+interrupt_level_1_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 1
+
+/* TT = 0x42, TL = 0, interrupt_level_2 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_2*ENTRY_SIZE
+.global interrupt_level_2_handler_tl0
+interrupt_level_2_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 2
+
+/* TT = 0x43, TL = 0, interrupt_level_3 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_3*ENTRY_SIZE
+.global interrupt_level_3_handler_tl0
+interrupt_level_3_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 3
+
+/* TT = 0x44, TL = 0, interrupt_level_4 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_4*ENTRY_SIZE
+.global interrupt_level_4_handler_tl0
+interrupt_level_4_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 4
+
+/* TT = 0x45, TL = 0, interrupt_level_5 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_5*ENTRY_SIZE
+.global interrupt_level_5_handler_tl0
+interrupt_level_5_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 5
+
+/* TT = 0x46, TL = 0, interrupt_level_6 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_6*ENTRY_SIZE
+.global interrupt_level_6_handler_tl0
+interrupt_level_6_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 6
+
+/* TT = 0x47, TL = 0, interrupt_level_7 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_7*ENTRY_SIZE
+.global interrupt_level_7_handler_tl0
+interrupt_level_7_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 7
+
+/* TT = 0x48, TL = 0, interrupt_level_8 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_8*ENTRY_SIZE
+.global interrupt_level_8_handler_tl0
+interrupt_level_8_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 8
+
+/* TT = 0x49, TL = 0, interrupt_level_9 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_9*ENTRY_SIZE
+.global interrupt_level_9_handler_tl0
+interrupt_level_9_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 9
+
+/* TT = 0x4a, TL = 0, interrupt_level_10 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_10*ENTRY_SIZE
+.global interrupt_level_10_handler_tl0
+interrupt_level_10_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 10
+
+/* TT = 0x4b, TL = 0, interrupt_level_11 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_11*ENTRY_SIZE
+.global interrupt_level_11_handler_tl0
+interrupt_level_11_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 11
+
+/* TT = 0x4c, TL = 0, interrupt_level_12 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_12*ENTRY_SIZE
+.global interrupt_level_12_handler_tl0
+interrupt_level_12_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 12
+
+/* TT = 0x4d, TL = 0, interrupt_level_13 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_13*ENTRY_SIZE
+.global interrupt_level_13_handler_tl0
+interrupt_level_13_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 13
+
+/* TT = 0x4e, TL = 0, interrupt_level_14 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_14*ENTRY_SIZE
+.global interrupt_level_14_handler_tl0
+interrupt_level_14_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 14
+
+/* TT = 0x4f, TL = 0, interrupt_level_15 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_15*ENTRY_SIZE
+.global interrupt_level_15_handler_tl0
+interrupt_level_15_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 15
+
+/* TT = 0x60, TL = 0, interrupt_vector_trap handler */
+.org trap_table + TT_INTERRUPT_VECTOR_TRAP*ENTRY_SIZE
+.global interrupt_vector_trap_handler_tl0
+interrupt_vector_trap_handler_tl0:
+	INTERRUPT_VECTOR_TRAP_HANDLER
+
+/* TT = 0x64, TL = 0, fast_instruction_access_MMU_miss */
+.org trap_table + TT_FAST_INSTRUCTION_ACCESS_MMU_MISS*ENTRY_SIZE
+.global fast_instruction_access_mmu_miss_handler_tl0
+fast_instruction_access_mmu_miss_handler_tl0:
+	FAST_INSTRUCTION_ACCESS_MMU_MISS_HANDLER
+
+/* TT = 0x68, TL = 0, fast_data_access_MMU_miss */
+.org trap_table + TT_FAST_DATA_ACCESS_MMU_MISS*ENTRY_SIZE
+.global fast_data_access_mmu_miss_handler_tl0
+fast_data_access_mmu_miss_handler_tl0:
+	FAST_DATA_ACCESS_MMU_MISS_HANDLER 0
+
+/* TT = 0x6c, TL = 0, fast_data_access_protection */
+.org trap_table + TT_FAST_DATA_ACCESS_PROTECTION*ENTRY_SIZE
+.global fast_data_access_protection_handler_tl0
+fast_data_access_protection_handler_tl0:
+	FAST_DATA_ACCESS_PROTECTION_HANDLER 0
+
+/* TT = 0x80, TL = 0, spill_0_normal handler */
+.org trap_table + TT_SPILL_0_NORMAL*ENTRY_SIZE
+.global spill_0_normal_tl0
+spill_0_normal_tl0:
+	SPILL_NORMAL_HANDLER_KERNEL
+
+/* TT = 0x84, TL = 0, spill_1_normal handler */
+.org trap_table + TT_SPILL_1_NORMAL*ENTRY_SIZE
+.global spill_1_normal_tl0
+spill_1_normal_tl0:
+	SPILL_NORMAL_HANDLER_USERSPACE
+
+/* TT = 0x88, TL = 0, spill_2_normal handler */
+.org trap_table + TT_SPILL_2_NORMAL*ENTRY_SIZE
+.global spill_2_normal_tl0
+spill_2_normal_tl0:
+	SPILL_TO_USPACE_WINDOW_BUFFER
+
+/* TT = 0xa0, TL = 0, spill_0_other handler */
+.org trap_table + TT_SPILL_0_OTHER*ENTRY_SIZE
+.global spill_0_other_tl0
+spill_0_other_tl0:
+	SPILL_TO_USPACE_WINDOW_BUFFER
+
+/* TT = 0xc0, TL = 0, fill_0_normal handler */
+.org trap_table + TT_FILL_0_NORMAL*ENTRY_SIZE
+.global fill_0_normal_tl0
+fill_0_normal_tl0:
+	FILL_NORMAL_HANDLER_KERNEL
+
+/* TT = 0xc4, TL = 0, fill_1_normal handler */
+.org trap_table + TT_FILL_1_NORMAL*ENTRY_SIZE
+.global fill_1_normal_tl0
+fill_1_normal_tl0:
+	FILL_NORMAL_HANDLER_USERSPACE
+
+/* TT = 0x100 - 0x17f, TL = 0, trap_instruction_0 - trap_instruction_7f */
+.irp cur, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\
+    20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,\
+    39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,\
+    58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,\
+    77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,\
+    96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,\
+    112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,\
+    127
+.org trap_table + (TT_TRAP_INSTRUCTION_0+\cur)*ENTRY_SIZE
+.global trap_instruction_\cur\()_tl0
+trap_instruction_\cur\()_tl0:
+	ba %xcc, trap_instruction_handler
+	mov \cur, %g2
+.endr
+
+/*
+ * Handlers for TL>0.
+ */
+
+/* TT = 0x08, TL > 0, instruction_access_exception */
+.org trap_table + (TT_INSTRUCTION_ACCESS_EXCEPTION+512)*ENTRY_SIZE
+.global instruction_access_exception_tl1
+instruction_access_exception_tl1:
+	wrpr %g0, 1, %tl
+	wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate
+	PREEMPTIBLE_HANDLER instruction_access_exception
+
+/* TT = 0x0a, TL > 0, instruction_access_error */
+.org trap_table + (TT_INSTRUCTION_ACCESS_ERROR+512)*ENTRY_SIZE
+.global instruction_access_error_tl1
+instruction_access_error_tl1:
+	wrpr %g0, 1, %tl
+	PREEMPTIBLE_HANDLER instruction_access_error
+
+/* TT = 0x10, TL > 0, illegal_instruction */
+.org trap_table + (TT_ILLEGAL_INSTRUCTION+512)*ENTRY_SIZE
+.global illegal_instruction_tl1
+illegal_instruction_tl1:
+	wrpr %g0, 1, %tl
+	PREEMPTIBLE_HANDLER illegal_instruction
+
+/* TT = 0x24, TL > 0, clean_window handler */
+.org trap_table + (TT_CLEAN_WINDOW+512)*ENTRY_SIZE
+.global clean_window_tl1
+clean_window_tl1:
+	CLEAN_WINDOW_HANDLER
+
+/* TT = 0x28, TL > 0, division_by_zero */
+.org trap_table + (TT_DIVISION_BY_ZERO+512)*ENTRY_SIZE
+.global division_by_zero_tl1
+division_by_zero_tl1:
+	wrpr %g0, 1, %tl
+	PREEMPTIBLE_HANDLER division_by_zero
+
+/* TT = 0x30, TL > 0, data_access_exception */
+.org trap_table + (TT_DATA_ACCESS_EXCEPTION+512)*ENTRY_SIZE
+.global data_access_exception_tl1
+data_access_exception_tl1:
+	wrpr %g0, 1, %tl
+	wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate
+	PREEMPTIBLE_HANDLER data_access_exception
+
+/* TT = 0x32, TL > 0, data_access_error */
+.org trap_table + (TT_DATA_ACCESS_ERROR+512)*ENTRY_SIZE
+.global data_access_error_tl1
+data_access_error_tl1:
+	wrpr %g0, 1, %tl
+	PREEMPTIBLE_HANDLER data_access_error
+
+/* TT = 0x34, TL > 0, mem_address_not_aligned */
+.org trap_table + (TT_MEM_ADDRESS_NOT_ALIGNED+512)*ENTRY_SIZE
+.global mem_address_not_aligned_tl1
+mem_address_not_aligned_tl1:
+	wrpr %g0, 1, %tl
+	PREEMPTIBLE_HANDLER mem_address_not_aligned
+
+/* TT = 0x68, TL > 0, fast_data_access_MMU_miss */
+.org trap_table + (TT_FAST_DATA_ACCESS_MMU_MISS+512)*ENTRY_SIZE
+.global fast_data_access_mmu_miss_handler_tl1
+fast_data_access_mmu_miss_handler_tl1:
+	FAST_DATA_ACCESS_MMU_MISS_HANDLER 1
+
+/* TT = 0x6c, TL > 0, fast_data_access_protection */
+.org trap_table + (TT_FAST_DATA_ACCESS_PROTECTION+512)*ENTRY_SIZE
+.global fast_data_access_protection_handler_tl1
+fast_data_access_protection_handler_tl1:
+	FAST_DATA_ACCESS_PROTECTION_HANDLER 1
+
+/* TT = 0x80, TL > 0, spill_0_normal handler */
+.org trap_table + (TT_SPILL_0_NORMAL+512)*ENTRY_SIZE
+.global spill_0_normal_tl1
+spill_0_normal_tl1:
+	SPILL_NORMAL_HANDLER_KERNEL
+
+/* TT = 0x88, TL > 0, spill_2_normal handler */
+.org trap_table + (TT_SPILL_2_NORMAL+512)*ENTRY_SIZE
+.global spill_2_normal_tl1
+spill_2_normal_tl1:
+	SPILL_TO_USPACE_WINDOW_BUFFER
+
+/* TT = 0xa0, TL > 0, spill_0_other handler */
+.org trap_table + (TT_SPILL_0_OTHER+512)*ENTRY_SIZE
+.global spill_0_other_tl1
+spill_0_other_tl1:
+	SPILL_TO_USPACE_WINDOW_BUFFER
+
+/* TT = 0xc0, TL > 0, fill_0_normal handler */
+.org trap_table + (TT_FILL_0_NORMAL+512)*ENTRY_SIZE
+.global fill_0_normal_tl1
+fill_0_normal_tl1:
+	FILL_NORMAL_HANDLER_KERNEL
+
+.align TABLE_SIZE
+
+
+#define NOT(x)	((x) == 0)
+
+/* Preemptible trap handler for TL=1.
+ *
+ * This trap handler makes arrangements to make calling of scheduler() from
+ * within a trap context possible. It is called from several other trap
+ * handlers.
+ *
+ * This function can be entered either with interrupt globals or alternate
+ * globals. Memory management trap handlers are obliged to switch to one of
+ * those global sets prior to calling this function. Register window management
+ * functions are not allowed to modify the alternate global registers.
+ *
+ * The kernel is designed to work on trap levels 0 - 4. For instance, the
+ * following can happen:
+ * TL0: kernel thread runs (CANSAVE=0, kernel stack not in DTLB)
+ * TL1: preemptible trap handler started after a tick interrupt
+ * TL2: preemptible trap handler did SAVE
+ * TL3: spill handler touched the kernel stack  
+ * TL4: hardware or software failure
+ *
+ * Input registers:
+ *	%g1		Address of function to call if this is not a syscall.
+ * 	%g2	 	First argument for the function.
+ *	%g6		Pre-set as kernel stack base if trap from userspace.
+ *	%g7		Pre-set as address of the userspace window buffer.
+ */
+.macro PREEMPTIBLE_HANDLER_TEMPLATE is_syscall
+	/*
+	 * ASSERT(%tl == 1)
+	 */
+	rdpr %tl, %g3
+	cmp %g3, 1
+	be %xcc, 1f
+	nop
+0:	ba %xcc, 0b				! this is for debugging, if we ever get here
+	nop					! it will be easy to find
+
+1:
+.if NOT(\is_syscall)
+	rdpr %tstate, %g3
+	
+	/*
+	 * One of the ways this handler can be invoked is after a nested MMU trap from
+	 * either spill_1_normal or fill_1_normal traps. Both of these traps manipulate
+	 * the CWP register. We deal with the situation by simulating the MMU trap
+	 * on TL=1 and restart the respective SAVE or RESTORE instruction once the MMU
+	 * trap is resolved. However, because we are in the wrong window from the
+	 * perspective of the MMU trap, we need to synchronize CWP with CWP from TL=0.
+	 */ 
+	and %g3, TSTATE_CWP_MASK, %g4
+	wrpr %g4, 0, %cwp			! resynchronize CWP
+
+	andcc %g3, TSTATE_PRIV_BIT, %g0		! if this trap came from the privileged mode...
+	bnz %xcc, 0f				! ...skip setting of kernel stack and primary context
+	nop
+	
+.endif
+	/*
+	 * Normal window spills will go to the userspace window buffer.
+	 */
+	wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(2), %wstate
+
+	wrpr %g0, NWINDOWS - 1, %cleanwin	! prevent unnecessary clean_window exceptions
+
+	/*
+	 * Switch to kernel stack. The old stack is
+	 * automatically saved in the old window's %sp
+	 * and the new window's %fp.
+	 */
+	save %g6, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
+
+.if \is_syscall
+	/*
+	 * Copy arguments for the syscall to the new window.
+	 */
+	mov %i0, %o0
+	mov %i1, %o1
+	mov %i2, %o2
+	mov %i3, %o3
+	mov %i4, %o4
+	mov %i5, %o5
+.endif
+
+	/*
+	 * Mark the CANRESTORE windows as OTHER windows.
+	 */
+	rdpr %canrestore, %l0
+	wrpr %l0, %otherwin
+	wrpr %g0, %canrestore
+
+	/*
+	 * Switch to primary context 0.
+	 */
+	mov VA_PRIMARY_CONTEXT_REG, %l0
+	stxa %g0, [%l0] ASI_DMMU
+	rd %pc, %l0
+	flush %l0
+
+.if NOT(\is_syscall)
+	ba %xcc, 1f
+	nop
+0:
+	save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
+
+	/*
+	 * At this moment, we are using the kernel stack 
+	 * and have successfully allocated a register window.
+	 */
+1:
+.endif
+	/*
+	 * Other window spills will go to the userspace window buffer
+	 * and normal spills will go to the kernel stack.
+	 */
+	wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(0), %wstate
+	
+	/*
+	 * Copy arguments.
+	 */
+	mov %g1, %l0
+.if NOT(\is_syscall)
+	mov %g2, %o0
+.else
+	! store the syscall number on the stack as 7th argument
+	stx %g2, [%sp + STACK_WINDOW_SAVE_AREA_SIZE + STACK_BIAS + STACK_ARG6] 
+.endif
+
+	/*
+	 * Save TSTATE, TPC and TNPC aside.
+	 */
+	rdpr %tstate, %g1
+	rdpr %tpc, %g2
+	rdpr %tnpc, %g3
+	rd %y, %g4
+
+	stx %g1, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TSTATE]
+	stx %g2, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TPC]
+	stx %g3, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC]
+
+	/*
+	 * Save the Y register.
+	 * This register is deprecated according to SPARC V9 specification
+	 * and is only present for backward compatibility with previous
+	 * versions of the SPARC architecture.
+	 * Surprisingly, gcc makes use of this register without a notice.
+	 */
+	stx %g4, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_Y]
+	
+	wrpr %g0, 0, %tl
+	wrpr %g0, PSTATE_PRIV_BIT | PSTATE_PEF_BIT, %pstate
+	SAVE_GLOBALS
+	
+.if NOT(\is_syscall)
+	/*
+	 * Call the higher-level handler and pass istate as second parameter.
+	 */
+	call %l0
+	add %sp, PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC, %o1
+.else
+	/*
+	 * Call the higher-level syscall handler and enable interrupts.
+	 */
+	call syscall_handler
+	wrpr %g0, PSTATE_PRIV_BIT | PSTATE_PEF_BIT | PSTATE_IE_BIT, %pstate
+	mov %o0, %i0				! copy the value returned by the syscall
+.endif
+
+	RESTORE_GLOBALS
+	rdpr %pstate, %l1			! we must preserve the PEF bit
+	wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate
+	wrpr %g0, 1, %tl
+	
+	/*
+	 * Read TSTATE, TPC and TNPC from saved copy.
+	 */
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TSTATE], %g1
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TPC], %g2
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC], %g3
+
+	/*
+	 * Copy PSTATE.PEF to the in-register copy of TSTATE.
+	 */
+	and %l1, PSTATE_PEF_BIT, %l1
+	sllx %l1, TSTATE_PSTATE_SHIFT, %l1
+	sethi %hi(TSTATE_PEF_BIT), %g4
+	andn %g1, %g4, %g1
+	or %g1, %l1, %g1
+
+	/*
+	 * Restore TSTATE, TPC and TNPC from saved copies.
+	 */
+	wrpr %g1, 0, %tstate
+	wrpr %g2, 0, %tpc
+	wrpr %g3, 0, %tnpc
+
+	/*
+	 * Restore Y.
+	 */
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_Y], %g4
+	wr %g4, %y
+
+	/*
+	 * If OTHERWIN is zero, then all the userspace windows have been
+	 * spilled to kernel memory (i.e. register window buffer). Moreover,
+	 * if the scheduler was called in the meantime, all valid windows
+	 * belonging to other threads were spilled by context_save().
+	 * If OTHERWIN is non-zero, then some userspace windows are still
+	 * valid. Others might have been spilled. However, the CWP pointer
+	 * needs no fixing because the scheduler had not been called.
+	 */
+	rdpr %otherwin, %l0
+	brnz %l0, 0f
+	nop
+
+	/*
+	 * OTHERWIN == 0
+	 */
+
+	/*
+	 * If TSTATE.CWP + 1 == CWP, then we still do not have to fix CWP.
+	 */
+	and %g1, TSTATE_CWP_MASK, %l0
+	inc %l0
+	and %l0, NWINDOWS - 1, %l0	! %l0 mod NWINDOWS
+	rdpr %cwp, %l1
+	cmp %l0, %l1
+	bz %xcc, 0f			! CWP is ok
+	nop
+
+	/*
+	 * Fix CWP.
+	 * In order to recapitulate, the input registers in the current
+	 * window are the output registers of the window to which we want
+	 * to restore. Because the fill trap fills only input and local
+	 * registers of a window, we need to preserve those output
+	 * registers manually.
+	 */
+	mov %sp, %g2
+	stx %i0, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I0]
+	stx %i1, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I1]
+	stx %i2, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I2]
+	stx %i3, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I3]
+	stx %i4, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I4]
+	stx %i5, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I5]
+	stx %i6, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I6]
+	stx %i7, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I7]
+	wrpr %l0, 0, %cwp
+	mov %g2, %sp
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I0], %i0
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I1], %i1
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I2], %i2
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I3], %i3
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I4], %i4
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I5], %i5
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I6], %i6
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I7], %i7
+
+	/*
+	 * OTHERWIN != 0 or fall-through from the OTHERWIN == 0 case.
+	 * The CWP has already been restored to the value it had after the SAVE
+	 * at the beginning of this function.
+	 */
+0:
+.if NOT(\is_syscall)
+	rdpr %tstate, %g1
+	andcc %g1, TSTATE_PRIV_BIT, %g0		! if we are not returning to userspace...,
+	bnz %xcc, 1f				! ...skip restoring userspace windows
+	nop
+.endif
+
+	/*
+	 * Spills and fills will be processed by the {spill,fill}_1_normal
+	 * handlers.
+	 */
+	wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(1), %wstate
+
+	/*
+	 * Set primary context according to secondary context.
+	 */
+	wr %g0, ASI_DMMU, %asi
+	ldxa [VA_SECONDARY_CONTEXT_REG] %asi, %g1
+	stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi
+	rd %pc, %g1
+	flush %g1
+	
+	rdpr %cwp, %g1
+	rdpr %otherwin, %g2
+
+	/*
+	 * Skip all OTHERWIN windows and descend to the first window
+	 * in the userspace window buffer.
+	 */
+	sub %g1, %g2, %g3
+	dec %g3
+	and %g3, NWINDOWS - 1, %g3
+	wrpr %g3, 0, %cwp
+
+	/*
+	 * CWP is now in the window last saved in the userspace window buffer.
+	 * Fill all windows stored in the buffer.
+	 */
+	clr %g4
+0:	andcc %g7, UWB_ALIGNMENT - 1, %g0	! alignment check
+	bz %xcc, 0f				! %g7 is UWB_ALIGNMENT-aligned, no more windows to refill
+	nop
+
+	add %g7, -STACK_WINDOW_SAVE_AREA_SIZE, %g7
+	ldx [%g7 + L0_OFFSET], %l0
+	ldx [%g7 + L1_OFFSET], %l1
+	ldx [%g7 + L2_OFFSET], %l2
+	ldx [%g7 + L3_OFFSET], %l3
+	ldx [%g7 + L4_OFFSET], %l4
+	ldx [%g7 + L5_OFFSET], %l5
+	ldx [%g7 + L6_OFFSET], %l6
+	ldx [%g7 + L7_OFFSET], %l7
+	ldx [%g7 + I0_OFFSET], %i0
+	ldx [%g7 + I1_OFFSET], %i1
+	ldx [%g7 + I2_OFFSET], %i2
+	ldx [%g7 + I3_OFFSET], %i3
+	ldx [%g7 + I4_OFFSET], %i4
+	ldx [%g7 + I5_OFFSET], %i5
+	ldx [%g7 + I6_OFFSET], %i6
+	ldx [%g7 + I7_OFFSET], %i7
+
+	dec %g3
+	and %g3, NWINDOWS - 1, %g3
+	wrpr %g3, 0, %cwp			! switch to the preceding window
+
+	ba %xcc, 0b
+	inc %g4
+
+0:
+	/*
+	 * Switch back to the proper current window and adjust
+	 * OTHERWIN, CANRESTORE, CANSAVE and CLEANWIN.
+	 */
+	wrpr %g1, 0, %cwp
+	add %g4, %g2, %g2
+	cmp %g2, NWINDOWS - 2
+	bg %xcc, 2f				! fix the CANRESTORE=NWINDOWS-1 anomaly
+	mov NWINDOWS - 2, %g1			! use delay slot for both cases
+	sub %g1, %g2, %g1
+	
+	wrpr %g0, 0, %otherwin
+	wrpr %g1, 0, %cansave			! NWINDOWS - 2 - CANRESTORE
+	wrpr %g2, 0, %canrestore		! OTHERWIN + windows in the buffer
+	wrpr %g2, 0, %cleanwin			! avoid information leak
+
+1:
+	restore
+
+.if \is_syscall
+	done
+.else
+	retry
+.endif
+
+	/*
+	 * We got here in order to avoid inconsistency of the window state registers.
+	 * If the:
+	 *
+	 * 	save %g6, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
+	 *
+	 * instruction trapped and spilled a register window into the userspace
+	 * window buffer, we have just restored NWINDOWS - 1 register windows.
+	 * However, CANRESTORE can be only NWINDOWS - 2 at most.
+	 *
+	 * The solution is to manually switch to (CWP - 1) mod NWINDOWS
+	 * and set the window state registers so that:
+	 *
+	 * 	CANRESTORE 	= NWINDOWS - 2
+	 *	CLEANWIN	= NWINDOWS - 2
+	 *	CANSAVE 	= 0
+	 *	OTHERWIN	= 0
+	 *
+	 * The RESTORE instruction is therefore to be skipped.
+	 */
+2:
+	wrpr %g0, 0, %otherwin
+	wrpr %g0, 0, %cansave
+	wrpr %g1, 0, %canrestore
+	wrpr %g1, 0, %cleanwin
+
+	rdpr %cwp, %g1
+	dec %g1
+	and %g1, NWINDOWS - 1, %g1
+	wrpr %g1, 0, %cwp			! CWP--
+	
+.if \is_syscall
+	done
+.else
+	retry
+.endif
+
+.endm
+
+.global preemptible_handler
+preemptible_handler:
+	PREEMPTIBLE_HANDLER_TEMPLATE 0
+
+.global trap_instruction_handler
+trap_instruction_handler:
+	PREEMPTIBLE_HANDLER_TEMPLATE 1
Index: kernel/arch/sparc64/src/trap/sun4v/mmu.S
===================================================================
--- kernel/arch/sparc64/src/trap/sun4v/mmu.S	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/trap/sun4v/mmu.S	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,74 @@
+#
+# Copyright (c) 2006 Jakub Jermar
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# - Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+# - Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in the
+#   documentation and/or other materials provided with the distribution.
+# - The name of the author may not be used to endorse or promote products
+#   derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+/**
+ * @file
+ * @brief	MMU trap handlers that do not fit into the trap table.
+ */
+
+.register %g2, #scratch
+.register %g3, #scratch
+
+.text
+
+#include <arch/trap/sun4v/mmu.h>
+#include <arch/trap/trap_table.h>
+#include <arch/sun4v/regdef.h>
+
+/*
+ * Install mapping for the kernel. The mapping obeys this formula:
+ * 	virtual address = real address + start of physical memory
+ *
+ * The routine expects the following values of registers:
+ *	 %g1	virtual address that has caused the miss
+ */
+.global install_identity_mapping
+install_identity_mapping:
+
+	/* output registers mustn't be clobbered during the hypercall, SAVE is too risky */
+	mov %o0, %g3
+	mov %o1, %g4
+	mov %o2, %g5
+	mov %o3, %g6
+
+	/* install mapping for kernel */
+	mov %g1, %o0
+	set 0, %o1					! set context
+	setx kernel_8k_tlb_data_template, %g1, %g2	! g2 <= template of TTE Data
+	ldx [%g2], %g2					! read the TTE Data template
+	add %g2, %o0, %o2				! template + VA = TTE Data entry
+	set MMU_FLAG_DTLB, %o3				! map in DTLB only
+	ta MMU_MAP_ADDR
+
+	/* restore output registers */
+	mov %g6, %o3
+	mov %g5, %o2
+	mov %g4, %o1
+	mov %g3, %o0
+
+	retry
Index: kernel/arch/sparc64/src/trap/sun4v/trap_table.S
===================================================================
--- kernel/arch/sparc64/src/trap/sun4v/trap_table.S	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
+++ kernel/arch/sparc64/src/trap/sun4v/trap_table.S	(revision e3a3a61956c64d760a4a1bdd93b21d0838823601)
@@ -0,0 +1,1134 @@
+#
+# Copyright (c) 2005 Jakub Jermar
+# Copyright (c) 2008 Pavel Rimsky
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# - Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+# - Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in the
+#   documentation and/or other materials provided with the distribution.
+# - The name of the author may not be used to endorse or promote products
+#   derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+/**
+ * @file
+ * @brief This file contains kernel trap table.
+ */
+
+.register %g2, #scratch
+.register %g3, #scratch
+
+.text
+
+#include <arch/trap/trap_table.h>
+#include <arch/trap/regwin.h>
+#include <arch/trap/interrupt.h>
+#include <arch/trap/exception.h>
+#include <arch/trap/syscall.h>
+#include <arch/trap/sun4v/mmu.h>
+#include <arch/mm/sun4v/mmu.h>
+#include <arch/mm/page.h>
+#include <arch/stack.h>
+#include <arch/sun4v/regdef.h>
+
+#define TABLE_SIZE	TRAP_TABLE_SIZE
+#define ENTRY_SIZE	TRAP_TABLE_ENTRY_SIZE
+
+/*
+ * Kernel trap table.
+ */
+.align TABLE_SIZE
+.global trap_table
+trap_table:
+
+/* TT = 0x08, TL = 0, instruction_access_exception */
+.org trap_table + TT_INSTRUCTION_ACCESS_EXCEPTION*ENTRY_SIZE
+.global instruction_access_exception_tl0
+instruction_access_exception_tl0:
+	/*wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate
+	PREEMPTIBLE_HANDLER instruction_access_exception*/
+
+/* TT = 0x0a, TL = 0, instruction_access_error */
+.org trap_table + TT_INSTRUCTION_ACCESS_ERROR*ENTRY_SIZE
+.global instruction_access_error_tl0
+instruction_access_error_tl0:
+	PREEMPTIBLE_HANDLER instruction_access_error
+
+/* TT = 0x10, TL = 0, illegal_instruction */
+.org trap_table + TT_ILLEGAL_INSTRUCTION*ENTRY_SIZE
+.global illegal_instruction_tl0
+illegal_instruction_tl0:
+	PREEMPTIBLE_HANDLER illegal_instruction
+
+/* TT = 0x11, TL = 0, privileged_opcode */
+.org trap_table + TT_PRIVILEGED_OPCODE*ENTRY_SIZE
+.global privileged_opcode_tl0
+privileged_opcode_tl0:
+	PREEMPTIBLE_HANDLER privileged_opcode
+
+/* TT = 0x12, TL = 0, unimplemented_LDD */
+.org trap_table + TT_UNIMPLEMENTED_LDD*ENTRY_SIZE
+.global unimplemented_LDD_tl0
+unimplemented_LDD_tl0:
+	PREEMPTIBLE_HANDLER unimplemented_LDD
+
+/* TT = 0x13, TL = 0, unimplemented_STD */
+.org trap_table + TT_UNIMPLEMENTED_STD*ENTRY_SIZE
+.global unimplemented_STD_tl0
+unimplemented_STD_tl0:
+	PREEMPTIBLE_HANDLER unimplemented_STD
+
+/* TT = 0x20, TL = 0, fp_disabled handler */
+.org trap_table + TT_FP_DISABLED*ENTRY_SIZE
+.global fp_disabled_tl0
+fp_disabled_tl0:
+	PREEMPTIBLE_HANDLER fp_disabled
+
+/* TT = 0x21, TL = 0, fp_exception_ieee_754 handler */
+.org trap_table + TT_FP_EXCEPTION_IEEE_754*ENTRY_SIZE
+.global fp_exception_ieee_754_tl0
+fp_exception_ieee_754_tl0:
+	PREEMPTIBLE_HANDLER fp_exception_ieee_754
+
+/* TT = 0x22, TL = 0, fp_exception_other handler */
+.org trap_table + TT_FP_EXCEPTION_OTHER*ENTRY_SIZE
+.global fp_exception_other_tl0
+fp_exception_other_tl0:
+	PREEMPTIBLE_HANDLER fp_exception_other
+
+/* TT = 0x23, TL = 0, tag_overflow */
+.org trap_table + TT_TAG_OVERFLOW*ENTRY_SIZE
+.global tag_overflow_tl0
+tag_overflow_tl0:
+	PREEMPTIBLE_HANDLER tag_overflow
+
+/* TT = 0x24, TL = 0, clean_window handler */
+.org trap_table + TT_CLEAN_WINDOW*ENTRY_SIZE
+.global clean_window_tl0
+clean_window_tl0:
+	CLEAN_WINDOW_HANDLER
+
+/* TT = 0x28, TL = 0, division_by_zero */
+.org trap_table + TT_DIVISION_BY_ZERO*ENTRY_SIZE
+.global division_by_zero_tl0
+division_by_zero_tl0:
+	PREEMPTIBLE_HANDLER division_by_zero
+
+/* TT = 0x30, TL = 0, data_access_exception */
+.org trap_table + TT_DATA_ACCESS_EXCEPTION*ENTRY_SIZE
+.global data_access_exception_tl0
+data_access_exception_tl0:
+	wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate
+	PREEMPTIBLE_HANDLER data_access_exception
+
+/* TT = 0x32, TL = 0, data_access_error */
+.org trap_table + TT_DATA_ACCESS_ERROR*ENTRY_SIZE
+.global data_access_error_tl0
+data_access_error_tl0:
+	PREEMPTIBLE_HANDLER data_access_error
+
+/* TT = 0x34, TL = 0, mem_address_not_aligned */
+.org trap_table + TT_MEM_ADDRESS_NOT_ALIGNED*ENTRY_SIZE
+.global mem_address_not_aligned_tl0
+mem_address_not_aligned_tl0:
+	PREEMPTIBLE_HANDLER mem_address_not_aligned
+
+/* TT = 0x35, TL = 0, LDDF_mem_address_not_aligned */
+.org trap_table + TT_LDDF_MEM_ADDRESS_NOT_ALIGNED*ENTRY_SIZE
+.global LDDF_mem_address_not_aligned_tl0
+LDDF_mem_address_not_aligned_tl0:
+	PREEMPTIBLE_HANDLER LDDF_mem_address_not_aligned
+
+/* TT = 0x36, TL = 0, STDF_mem_address_not_aligned */
+.org trap_table + TT_STDF_MEM_ADDRESS_NOT_ALIGNED*ENTRY_SIZE
+.global STDF_mem_address_not_aligned_tl0
+STDF_mem_address_not_aligned_tl0:
+	PREEMPTIBLE_HANDLER STDF_mem_address_not_aligned
+
+/* TT = 0x37, TL = 0, privileged_action */
+.org trap_table + TT_PRIVILEGED_ACTION*ENTRY_SIZE
+.global privileged_action_tl0
+privileged_action_tl0:
+	PREEMPTIBLE_HANDLER privileged_action
+
+/* TT = 0x38, TL = 0, LDQF_mem_address_not_aligned */
+.org trap_table + TT_LDQF_MEM_ADDRESS_NOT_ALIGNED*ENTRY_SIZE
+.global LDQF_mem_address_not_aligned_tl0
+LDQF_mem_address_not_aligned_tl0:
+	PREEMPTIBLE_HANDLER LDQF_mem_address_not_aligned
+
+/* TT = 0x39, TL = 0, STQF_mem_address_not_aligned */
+.org trap_table + TT_STQF_MEM_ADDRESS_NOT_ALIGNED*ENTRY_SIZE
+.global STQF_mem_address_not_aligned_tl0
+STQF_mem_address_not_aligned_tl0:
+	PREEMPTIBLE_HANDLER STQF_mem_address_not_aligned
+
+/* TT = 0x41, TL = 0, interrupt_level_1 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_1*ENTRY_SIZE
+.global interrupt_level_1_handler_tl0
+interrupt_level_1_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 1
+
+/* TT = 0x42, TL = 0, interrupt_level_2 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_2*ENTRY_SIZE
+.global interrupt_level_2_handler_tl0
+interrupt_level_2_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 2
+
+/* TT = 0x43, TL = 0, interrupt_level_3 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_3*ENTRY_SIZE
+.global interrupt_level_3_handler_tl0
+interrupt_level_3_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 3
+
+/* TT = 0x44, TL = 0, interrupt_level_4 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_4*ENTRY_SIZE
+.global interrupt_level_4_handler_tl0
+interrupt_level_4_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 4
+
+/* TT = 0x45, TL = 0, interrupt_level_5 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_5*ENTRY_SIZE
+.global interrupt_level_5_handler_tl0
+interrupt_level_5_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 5
+
+/* TT = 0x46, TL = 0, interrupt_level_6 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_6*ENTRY_SIZE
+.global interrupt_level_6_handler_tl0
+interrupt_level_6_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 6
+
+/* TT = 0x47, TL = 0, interrupt_level_7 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_7*ENTRY_SIZE
+.global interrupt_level_7_handler_tl0
+interrupt_level_7_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 7
+
+/* TT = 0x48, TL = 0, interrupt_level_8 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_8*ENTRY_SIZE
+.global interrupt_level_8_handler_tl0
+interrupt_level_8_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 8
+
+/* TT = 0x49, TL = 0, interrupt_level_9 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_9*ENTRY_SIZE
+.global interrupt_level_9_handler_tl0
+interrupt_level_9_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 9
+
+/* TT = 0x4a, TL = 0, interrupt_level_10 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_10*ENTRY_SIZE
+.global interrupt_level_10_handler_tl0
+interrupt_level_10_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 10
+
+/* TT = 0x4b, TL = 0, interrupt_level_11 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_11*ENTRY_SIZE
+.global interrupt_level_11_handler_tl0
+interrupt_level_11_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 11
+
+/* TT = 0x4c, TL = 0, interrupt_level_12 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_12*ENTRY_SIZE
+.global interrupt_level_12_handler_tl0
+interrupt_level_12_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 12
+
+/* TT = 0x4d, TL = 0, interrupt_level_13 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_13*ENTRY_SIZE
+.global interrupt_level_13_handler_tl0
+interrupt_level_13_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 13
+
+/* TT = 0x4e, TL = 0, interrupt_level_14 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_14*ENTRY_SIZE
+.global interrupt_level_14_handler_tl0
+interrupt_level_14_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 14
+
+/* TT = 0x4f, TL = 0, interrupt_level_15 handler */
+.org trap_table + TT_INTERRUPT_LEVEL_15*ENTRY_SIZE
+.global interrupt_level_15_handler_tl0
+interrupt_level_15_handler_tl0:
+	INTERRUPT_LEVEL_N_HANDLER 15
+
+/* TT = 0x60, TL = 0, interrupt_vector_trap handler */
+.org trap_table + TT_INTERRUPT_VECTOR_TRAP*ENTRY_SIZE
+.global interrupt_vector_trap_handler_tl0
+interrupt_vector_trap_handler_tl0:
+	INTERRUPT_VECTOR_TRAP_HANDLER
+
+/* TT = 0x64, TL = 0, fast_instruction_access_MMU_miss */
+.org trap_table + TT_FAST_INSTRUCTION_ACCESS_MMU_MISS*ENTRY_SIZE
+.global fast_instruction_access_mmu_miss_handler_tl0
+fast_instruction_access_mmu_miss_handler_tl0:
+	FAST_INSTRUCTION_ACCESS_MMU_MISS_HANDLER
+
+/* TT = 0x68, TL = 0, fast_data_access_MMU_miss */
+.org trap_table + TT_FAST_DATA_ACCESS_MMU_MISS*ENTRY_SIZE
+.global fast_data_access_mmu_miss_handler_tl0
+fast_data_access_mmu_miss_handler_tl0:
+	FAST_DATA_ACCESS_MMU_MISS_HANDLER 0
+
+/* TT = 0x6c, TL = 0, fast_data_access_protection */
+.org trap_table + TT_FAST_DATA_ACCESS_PROTECTION*ENTRY_SIZE
+.global fast_data_access_protection_handler_tl0
+fast_data_access_protection_handler_tl0:
+	FAST_DATA_ACCESS_PROTECTION_HANDLER 0
+
+/* TT = 0x80, TL = 0, spill_0_normal handler */
+.org trap_table + TT_SPILL_0_NORMAL*ENTRY_SIZE
+.global spill_0_normal_tl0
+spill_0_normal_tl0:
+	SPILL_NORMAL_HANDLER_KERNEL
+
+/* TT = 0x84, TL = 0, spill_1_normal handler */
+.org trap_table + TT_SPILL_1_NORMAL*ENTRY_SIZE
+.global spill_1_normal_tl0
+spill_1_normal_tl0:
+	SPILL_NORMAL_HANDLER_USERSPACE
+
+/* TT = 0x88, TL = 0, spill_2_normal handler */
+.org trap_table + TT_SPILL_2_NORMAL*ENTRY_SIZE
+.global spill_2_normal_tl0
+spill_2_normal_tl0:
+	SPILL_TO_USPACE_WINDOW_BUFFER
+
+/* TT = 0xa0, TL = 0, spill_0_other handler */
+.org trap_table + TT_SPILL_0_OTHER*ENTRY_SIZE
+.global spill_0_other_tl0
+spill_0_other_tl0:
+	SPILL_TO_USPACE_WINDOW_BUFFER
+
+/* TT = 0xc0, TL = 0, fill_0_normal handler */
+.org trap_table + TT_FILL_0_NORMAL*ENTRY_SIZE
+.global fill_0_normal_tl0
+fill_0_normal_tl0:
+	FILL_NORMAL_HANDLER_KERNEL
+
+/* TT = 0xc4, TL = 0, fill_1_normal handler */
+.org trap_table + TT_FILL_1_NORMAL*ENTRY_SIZE
+.global fill_1_normal_tl0
+fill_1_normal_tl0:
+	FILL_NORMAL_HANDLER_USERSPACE
+
+/* TT = 0x100 - 0x17f, TL = 0, trap_instruction_0 - trap_instruction_7f */
+.irp cur, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\
+    20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,\
+    39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,\
+    58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,\
+    77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,\
+    96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,\
+    112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,\
+    127
+.org trap_table + (TT_TRAP_INSTRUCTION_0+\cur)*ENTRY_SIZE
+.global trap_instruction_\cur\()_tl0
+trap_instruction_\cur\()_tl0:
+	ba trap_instruction_handler
+	mov \cur, %g2
+.endr
+
+/*
+ * Handlers for TL>0.
+ */
+
+/* TT = 0x08, TL > 0, instruction_access_exception */
+.org trap_table + (TT_INSTRUCTION_ACCESS_EXCEPTION+512)*ENTRY_SIZE
+.global instruction_access_exception_tl1
+instruction_access_exception_tl1:
+	wrpr %g0, 1, %tl
+	wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate
+	PREEMPTIBLE_HANDLER instruction_access_exception
+
+/* TT = 0x0a, TL > 0, instruction_access_error */
+.org trap_table + (TT_INSTRUCTION_ACCESS_ERROR+512)*ENTRY_SIZE
+.global instruction_access_error_tl1
+instruction_access_error_tl1:
+	wrpr %g0, 1, %tl
+	PREEMPTIBLE_HANDLER instruction_access_error
+
+/* TT = 0x10, TL > 0, illegal_instruction */
+.org trap_table + (TT_ILLEGAL_INSTRUCTION+512)*ENTRY_SIZE
+.global illegal_instruction_tl1
+illegal_instruction_tl1:
+	wrpr %g0, 1, %tl
+	PREEMPTIBLE_HANDLER illegal_instruction
+
+/* TT = 0x24, TL > 0, clean_window handler */
+.org trap_table + (TT_CLEAN_WINDOW+512)*ENTRY_SIZE
+.global clean_window_tl1
+clean_window_tl1:
+	CLEAN_WINDOW_HANDLER
+
+/* TT = 0x28, TL > 0, division_by_zero */
+.org trap_table + (TT_DIVISION_BY_ZERO+512)*ENTRY_SIZE
+.global division_by_zero_tl1
+division_by_zero_tl1:
+	wrpr %g0, 1, %tl
+	PREEMPTIBLE_HANDLER division_by_zero
+
+/* TT = 0x30, TL > 0, data_access_exception */
+.org trap_table + (TT_DATA_ACCESS_EXCEPTION+512)*ENTRY_SIZE
+.global data_access_exception_tl1
+data_access_exception_tl1:
+	wrpr %g0, 1, %tl
+	wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate
+	PREEMPTIBLE_HANDLER data_access_exception
+
+/* TT = 0x32, TL > 0, data_access_error */
+.org trap_table + (TT_DATA_ACCESS_ERROR+512)*ENTRY_SIZE
+.global data_access_error_tl1
+data_access_error_tl1:
+	wrpr %g0, 1, %tl
+	PREEMPTIBLE_HANDLER data_access_error
+
+/* TT = 0x34, TL > 0, mem_address_not_aligned */
+.org trap_table + (TT_MEM_ADDRESS_NOT_ALIGNED+512)*ENTRY_SIZE
+.global mem_address_not_aligned_tl1
+mem_address_not_aligned_tl1:
+	wrpr %g0, 1, %tl
+	PREEMPTIBLE_HANDLER mem_address_not_aligned
+
+/* TT = 0x68, TL > 0, fast_data_access_MMU_miss */
+.org trap_table + (TT_FAST_DATA_ACCESS_MMU_MISS+512)*ENTRY_SIZE
+.global fast_data_access_mmu_miss_handler_tl1
+fast_data_access_mmu_miss_handler_tl1:
+	FAST_DATA_ACCESS_MMU_MISS_HANDLER 1
+
+/* TT = 0x6c, TL > 0, fast_data_access_protection */
+.org trap_table + (TT_FAST_DATA_ACCESS_PROTECTION+512)*ENTRY_SIZE
+.global fast_data_access_protection_handler_tl1
+fast_data_access_protection_handler_tl1:
+	FAST_DATA_ACCESS_PROTECTION_HANDLER 1
+
+/* TT = 0x80, TL > 0, spill_0_normal handler */
+.org trap_table + (TT_SPILL_0_NORMAL+512)*ENTRY_SIZE
+.global spill_0_normal_tl1
+spill_0_normal_tl1:
+	SPILL_NORMAL_HANDLER_KERNEL
+
+/* TT = 0x88, TL > 0, spill_2_normal handler */
+.org trap_table + (TT_SPILL_2_NORMAL+512)*ENTRY_SIZE
+.global spill_2_normal_tl1
+spill_2_normal_tl1:
+	SPILL_TO_USPACE_WINDOW_BUFFER
+
+/* TT = 0xa0, TL > 0, spill_0_other handler */
+.org trap_table + (TT_SPILL_0_OTHER+512)*ENTRY_SIZE
+.global spill_0_other_tl1
+spill_0_other_tl1:
+	SPILL_TO_USPACE_WINDOW_BUFFER
+
+/* TT = 0xc0, TL > 0, fill_0_normal handler */
+.org trap_table + (TT_FILL_0_NORMAL+512)*ENTRY_SIZE
+.global fill_0_normal_tl1
+fill_0_normal_tl1:
+	FILL_NORMAL_HANDLER_KERNEL
+
+.align TABLE_SIZE
+
+
+/*
+ * Spills the window at CWP + 2 to the kernel stack. This macro is to be
+ * used before doing SAVE when the spill trap is undesirable.
+ * 
+ * Parameters:
+ * 	tmpreg1		global register to be used for scratching purposes
+ * 	tmpreg2		global register to be used for scratching purposes
+ */
+.macro INLINE_SPILL tmpreg1, tmpreg2
+	! CWP := CWP + 2
+	rdpr %cwp, \tmpreg2
+	add \tmpreg2, 2, \tmpreg1
+	and \tmpreg1, NWINDOWS - 1, \tmpreg1		! modulo NWINDOWS
+	wrpr \tmpreg1, %cwp
+	
+	! spill to kernel stack
+	stx %l0, [%sp + STACK_BIAS + L0_OFFSET]	
+	stx %l1, [%sp + STACK_BIAS + L1_OFFSET]
+	stx %l2, [%sp + STACK_BIAS + L2_OFFSET]
+	stx %l3, [%sp + STACK_BIAS + L3_OFFSET]
+	stx %l4, [%sp + STACK_BIAS + L4_OFFSET]
+	stx %l5, [%sp + STACK_BIAS + L5_OFFSET]
+	stx %l6, [%sp + STACK_BIAS + L6_OFFSET]
+	stx %l7, [%sp + STACK_BIAS + L7_OFFSET]
+	stx %i0, [%sp + STACK_BIAS + I0_OFFSET]
+	stx %i1, [%sp + STACK_BIAS + I1_OFFSET]
+	stx %i2, [%sp + STACK_BIAS + I2_OFFSET]
+	stx %i3, [%sp + STACK_BIAS + I3_OFFSET]
+	stx %i4, [%sp + STACK_BIAS + I4_OFFSET]
+	stx %i5, [%sp + STACK_BIAS + I5_OFFSET]
+	stx %i6, [%sp + STACK_BIAS + I6_OFFSET]
+	stx %i7, [%sp + STACK_BIAS + I7_OFFSET]
+
+	! CWP := CWP - 2
+	wrpr \tmpreg2, %cwp
+
+	saved
+.endm
+
+/*
+ * Fill the window at CWP - 1 from the kernel stack. This macro is to be
+ * used before doing RESTORE when the fill trap is undesirable.
+ *
+ * Parameters:
+ * 	tmpreg1		global register to be used for scratching purposes
+ * 	tmpreg2		global register to be used for scratching purposes
+ */
+.macro INLINE_FILL tmpreg1, tmpreg2
+	! CWP := CWP - 1
+	rdpr %cwp, \tmpreg2
+	add \tmpreg2, NWINDOWS - 1, \tmpreg1
+	and \tmpreg1, NWINDOWS - 1, \tmpreg1
+	wrpr \tmpreg1, %cwp
+
+	! fill the window's locals and inputs from the kernel stack
+	ldx [%sp + STACK_BIAS + L0_OFFSET], %l0
+	ldx [%sp + STACK_BIAS + L1_OFFSET], %l1
+	ldx [%sp + STACK_BIAS + L2_OFFSET], %l2
+	ldx [%sp + STACK_BIAS + L3_OFFSET], %l3
+	ldx [%sp + STACK_BIAS + L4_OFFSET], %l4
+	ldx [%sp + STACK_BIAS + L5_OFFSET], %l5
+	ldx [%sp + STACK_BIAS + L6_OFFSET], %l6
+	ldx [%sp + STACK_BIAS + L7_OFFSET], %l7
+	ldx [%sp + STACK_BIAS + I0_OFFSET], %i0
+	ldx [%sp + STACK_BIAS + I1_OFFSET], %i1
+	ldx [%sp + STACK_BIAS + I2_OFFSET], %i2
+	ldx [%sp + STACK_BIAS + I3_OFFSET], %i3
+	ldx [%sp + STACK_BIAS + I4_OFFSET], %i4
+	ldx [%sp + STACK_BIAS + I5_OFFSET], %i5
+	ldx [%sp + STACK_BIAS + I6_OFFSET], %i6
+	ldx [%sp + STACK_BIAS + I7_OFFSET], %i7
+
+	! CWP := CWP + 1
+	wrpr \tmpreg2, %cwp
+
+	restored					! mark the window as filled (CANRESTORE++)
+.endm
+
+#define NOT(x)	((x) == 0)
+
+/*
+ * Perform all the actions of the preemptible trap handler which are common
+ * for trapping from kernel and trapping from userspace, including call of the
+ * higher level service routine.
+ *
+ * Important note:
+ * 	This macro must be inserted between the "2:" and "4:" labels. The
+ *	inserting code must be aware of the usage of all the registers
+ *	contained in this macro.
+ */
+.macro MIDDLE_PART is_syscall
+	/* copy higher level routine's address (%g1) and its argument (%g2) */
+	mov %g1, %l0
+.if NOT(\is_syscall)
+	mov %g2, %o0
+.else
+	! store the syscall number on the stack as 7th argument
+	stx %g2, [%sp + STACK_WINDOW_SAVE_AREA_SIZE + STACK_BIAS + STACK_ARG6] 
+.endif
+
+	/*
+	 * Save TSTATE, TPC and TNPC aside.
+	 */
+	rdpr %tstate, %g1
+	rdpr %tpc, %g2
+	rdpr %tnpc, %g3
+
+	stx %g1, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TSTATE]
+	stx %g2, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TPC]
+	stx %g3, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC]
+
+	/*
+	 * Save the Y register.
+	 * This register is deprecated according to SPARC V9 specification
+	 * and is only present for backward compatibility with previous
+	 * versions of the SPARC architecture.
+	 * Surprisingly, gcc makes use of this register without a notice.
+	 */
+	rd %y, %g4
+	stx %g4, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_Y]
+
+	/* switch to TL = 0, explicitly enable FPU */
+	wrpr %g0, 0, %tl
+	wrpr %g0, 0, %gl
+	wrpr %g0, PSTATE_PRIV_BIT | PSTATE_PEF_BIT, %pstate
+
+	/* g1 -> l1, ..., g7 -> l7 */
+	SAVE_GLOBALS
+
+.if NOT(\is_syscall)
+	/* call higher-level service routine, pass address of istate (saved TNPC..) as its 2nd parameter */
+	call %l0
+	add %sp, PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC, %o1
+.else
+	/* Call the higher-level syscall handler. */
+	!wrpr %g0, PSTATE_PRIV_BIT | PSTATE_PEF_BIT | PSTATE_IE_BIT, %pstate
+	call syscall_handler
+	nop
+	/* copy the value returned by the syscall */
+	mov %o0, %i0
+.endif
+
+	/* l1 -> g1, ..., l7 -> g7 */
+	RESTORE_GLOBALS
+
+	/* we must preserve the PEF bit */
+	rdpr %pstate, %l1
+
+	/* TL := 1, GL := 1 */
+	wrpr %g0, PSTATE_PRIV_BIT, %pstate
+	wrpr %g0, 1, %tl
+	wrpr %g0, 1, %gl
+
+	/* Read TSTATE, TPC and TNPC from saved copy. */
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TSTATE], %g1
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TPC], %g2
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC], %g3
+
+	/* Copy PSTATE.PEF to the in-register copy of TSTATE. */
+	and %l1, PSTATE_PEF_BIT, %l1
+	sllx %l1, TSTATE_PSTATE_SHIFT, %l1
+	sethi %hi(TSTATE_PEF_BIT), %g4		! reset the PEF bit to 0 ...
+	andn %g1, %g4, %g1
+	or %g1, %l1, %g1			! ... "or" it with saved PEF
+
+	/* Restore TSTATE, TPC and TNPC from saved copies. */
+	wrpr %g1, 0, %tstate
+	wrpr %g2, 0, %tpc
+	wrpr %g3, 0, %tnpc
+
+	/* Restore Y. */
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_Y], %g4
+	wr %g4, %y
+	
+	/* If TSTATE.CWP + 1 == CWP, then we do not have to fix CWP. */
+	and %g1, TSTATE_CWP_MASK, %l0
+	inc %l0
+	and %l0, NWINDOWS - 1, %l0	! %l0 mod NWINDOWS
+	rdpr %cwp, %l1
+	cmp %l0, %l1
+	bz 4f				! CWP is ok
+	nop
+
+3:
+	/*
+	 * Fix CWP.
+	 * In order to recapitulate, the input registers in the current
+	 * window are the output registers of the window to which we want
+	 * to restore. Because the fill trap fills only input and local
+	 * registers of a window, we need to preserve those output
+	 * registers manually.
+	 */
+	mov %sp, %g2
+	stx %i0, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I0]
+	stx %i1, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I1]
+	stx %i2, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I2]
+	stx %i3, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I3]
+	stx %i4, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I4]
+	stx %i5, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I5]
+	stx %i6, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I6]
+	stx %i7, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I7]
+	wrpr %l0, 0, %cwp
+	mov %g2, %sp
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I0], %i0
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I1], %i1
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I2], %i2
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I3], %i3
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I4], %i4
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I5], %i5
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I6], %i6
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I7], %i7
+.endm
+
+
+#if 0
+/*
+ * Preemptible trap handler for handling traps from kernel.
+ * NOTE: dead code (#if 0) — kept for reference; superseded by the
+ * MIDDLE_PART-based PREEMPTIBLE_HANDLER_KERNEL below.
+ */
+.macro PREEMPTIBLE_HANDLER_KERNEL
+
+	/*
+	 * ASSERT(%tl == 1)
+	 */
+	rdpr %tl, %g3
+	cmp %g3, 1
+	be 1f
+	nop
+0:	ba 0b					! this is for debugging, if we ever get here
+	nop					! it will be easy to find
+
+	/* prevent unnecessary CLEANWIN exceptions */
+	wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(0), %wstate
+1:
+	/*
+	 * Prevent SAVE instruction from causing a spill exception. If the
+	 * CANSAVE register is zero, explicitly spill register window
+	 * at CWP + 2.
+	 */
+
+	rdpr %cansave, %g3
+	brnz %g3, 2f
+	nop
+	INLINE_SPILL %g3, %g4
+
+2:
+	/* ask for new register window */
+	save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
+
+	/* copy higher level routine's address and its argument */
+	mov %g1, %l0
+	mov %g2, %o0
+
+	/*
+	 * Save TSTATE, TPC and TNPC aside.
+	 */
+	rdpr %tstate, %g1
+	rdpr %tpc, %g2
+	rdpr %tnpc, %g3
+
+	stx %g1, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TSTATE]
+	stx %g2, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TPC]
+	stx %g3, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC]
+
+	/*
+	 * Save the Y register.
+	 * This register is deprecated according to SPARC V9 specification
+	 * and is only present for backward compatibility with previous
+	 * versions of the SPARC architecture.
+	 * Surprisingly, gcc makes use of this register without a notice.
+	 */
+	rd %y, %g4
+	stx %g4, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_Y]
+
+	/* switch to TL = 0, explicitly enable FPU */
+	wrpr %g0, 0, %tl
+	wrpr %g0, 0, %gl
+	wrpr %g0, PSTATE_PRIV_BIT | PSTATE_PEF_BIT, %pstate
+
+	/* g1 -> l1, ..., g7 -> l7 */
+	SAVE_GLOBALS
+
+	/* call higher-level service routine, pass istate as its 2nd parameter */
+	call %l0
+	add %sp, PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC, %o1
+
+	/* l1 -> g1, ..., l7 -> g7 */
+	RESTORE_GLOBALS
+
+	/* we must preserve the PEF bit */
+	rdpr %pstate, %l1
+
+	/* TL := 1, GL := 1 */
+	wrpr %g0, PSTATE_PRIV_BIT, %pstate
+	wrpr %g0, 1, %tl
+	wrpr %g0, 1, %gl
+
+	/* Read TSTATE, TPC and TNPC from saved copy. */
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TSTATE], %g1
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TPC], %g2
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC], %g3
+
+	/* Copy PSTATE.PEF to the in-register copy of TSTATE. */
+	and %l1, PSTATE_PEF_BIT, %l1
+	sllx %l1, TSTATE_PSTATE_SHIFT, %l1
+	sethi %hi(TSTATE_PEF_BIT), %g4		! reset the PEF bit to 0 ...
+	andn %g1, %g4, %g1
+	or %g1, %l1, %g1			! ... "or" it with saved PEF
+
+	/* Restore TSTATE, TPC and TNPC from saved copies. */
+	wrpr %g1, 0, %tstate
+	wrpr %g2, 0, %tpc
+	wrpr %g3, 0, %tnpc
+
+	/* Restore Y. */
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_Y], %g4
+	wr %g4, %y
+	
+	/* If TSTATE.CWP + 1 == CWP, then we do not have to fix CWP. */
+	and %g1, TSTATE_CWP_MASK, %l0
+	inc %l0
+	and %l0, NWINDOWS - 1, %l0	! %l0 mod NWINDOWS
+	rdpr %cwp, %l1
+	cmp %l0, %l1
+	bz 4f				! CWP is ok
+	nop
+
+3:
+	/*
+	 * Fix CWP.
+	 * In order to recapitulate, the input registers in the current
+	 * window are the output registers of the window to which we want
+	 * to restore. Because the fill trap fills only input and local
+	 * registers of a window, we need to preserve those output
+	 * registers manually.
+	 */
+	mov %sp, %g2
+	stx %i0, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I0]
+	stx %i1, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I1]
+	stx %i2, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I2]
+	stx %i3, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I3]
+	stx %i4, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I4]
+	stx %i5, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I5]
+	stx %i6, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I6]
+	stx %i7, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I7]
+	wrpr %l0, 0, %cwp
+	mov %g2, %sp
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I0], %i0
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I1], %i1
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I2], %i2
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I3], %i3
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I4], %i4
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I5], %i5
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I6], %i6
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I7], %i7
+
+4:
+	/*
+	 * Prevent RESTORE instruction from causing a fill exception. If the
+	 * CANRESTORE register is zero, explicitly fill register window
+	 * at CWP - 1.
+	 */
+	rdpr %canrestore, %g1
+	brnz %g1, 5f
+	nop
+	INLINE_FILL %g3, %g4
+
+5:
+	restore
+
+	retry
+.endm
+
+#endif
+
+/*
+ * Preemptible trap handler for handling traps from kernel.
+ * Expects the higher-level routine's address in %g1 and its argument in %g2
+ * (consumed by MIDDLE_PART).
+ */
+.macro PREEMPTIBLE_HANDLER_KERNEL
+
+	/*
+	 * ASSERT(%tl == 1)
+	 */
+	rdpr %tl, %g3
+	cmp %g3, 1
+	be 1f
+	nop
+0:	ba 0b					! deliberate hang for debugging: if we ever get here,
+	nop					! the infinite loop will be easy to find
+
+1:
+	/* prevent unnecessary CLEANWIN exceptions */
+	wrpr %g0, NWINDOWS - 1, %cleanwin
+
+	/*
+	 * Prevent SAVE instruction from causing a spill exception. If the
+	 * CANSAVE register is zero, explicitly spill register window
+	 * at CWP + 2.
+	 */
+
+	rdpr %cansave, %g3
+	brnz %g3, 2f
+	nop
+	INLINE_SPILL %g3, %g4
+
+2:
+	/* ask for new register window */
+	save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
+
+	MIDDLE_PART 0
+
+4:
+	/*
+	 * Prevent RESTORE instruction from causing a fill exception. If the
+	 * CANRESTORE register is zero, explicitly fill register window
+	 * at CWP - 1.
+	 */
+	rdpr %canrestore, %g1
+	brnz %g1, 5f
+	nop
+	INLINE_FILL %g3, %g4
+
+5:
+	restore
+	retry
+.endm
+
+
+
+/*
+ * Spills the window at CWP + 2 to the userspace window buffer. This macro
+ * is to be used before doing SAVE when the spill trap is undesirable.
+ *
+ * Parameters:
+ * 	tmpreg1		global register to be used for scratching purposes
+ * 	tmpreg2		global register to be used for scratching purposes
+ * 	tmpreg3		global register to be used for scratching purposes
+ */
+.macro INLINE_SPILL_TO_WBUF tmpreg1, tmpreg2, tmpreg3
+	! CWP := CWP + 2
+	rdpr %cwp, \tmpreg2
+	add \tmpreg2, 2, \tmpreg1
+	and \tmpreg1, NWINDOWS - 1, \tmpreg1		! modulo NWINDOWS
+	wrpr \tmpreg1, %cwp
+	
+	! spill to userspace window buffer
+	SAVE_TO_USPACE_WBUF \tmpreg3, \tmpreg1
+
+	! CWP := CWP - 2
+	wrpr \tmpreg2, %cwp
+
+	saved						! mark the window as spilled
+.endm
+
+/*
+ * Preemptible handler for handling traps from userspace.
+ */
+.macro PREEMPTIBLE_HANDLER_USPACE is_syscall
+	/*
+	 * One of the ways this handler can be invoked is after a nested MMU trap from
+	 * either spill_1_normal or fill_1_normal traps. Both of these traps manipulate
+	 * the CWP register. We deal with the situation by simulating the MMU trap
+	 * on TL=1 and restart the respective SAVE or RESTORE instruction once the MMU
+	 * trap is resolved. However, because we are in the wrong window from the
+	 * perspective of the MMU trap, we need to synchronize CWP with CWP from TL=0.
+	 */
+.if NOT(\is_syscall)
+	rdpr %tstate, %g3
+	and %g3, TSTATE_CWP_MASK, %g4
+	wrpr %g4, 0, %cwp			! resynchronize CWP
+.endif
+
+	/* prevent unnecessary CLEANWIN exceptions */
+	wrpr %g0, NWINDOWS - 1, %cleanwin
+
+	/*
+	 * Prevent SAVE instruction from causing a spill exception. If the
+	 * CANSAVE register is zero, explicitly spill register window
+	 * at CWP + 2.
+	 */
+	rdpr %cansave, %g3
+	brnz %g3, 2f
+	nop
+	INLINE_SPILL_TO_WBUF %g3, %g4, %g7
+
+2:
+	/* switch to the kernel stack saved in the scratchpad register */
+	set SCRATCHPAD_KSTACK, %g4
+	ldxa [%g4] ASI_SCRATCHPAD, %g6
+	save %g6, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
+
+.if \is_syscall
+	/* Copy arguments for the syscall to the new window. */
+	mov %i0, %o0
+	mov %i1, %o1
+	mov %i2, %o2
+	mov %i3, %o3
+	mov %i4, %o4
+	mov %i5, %o5
+.endif
+
+	/* switch to kernel (context 0) primary context and flush */
+	mov VA_PRIMARY_CONTEXT_REG, %l0 
+	stxa %g0, [%l0] ASI_PRIMARY_CONTEXT_REG
+	rd %pc, %l0
+	flush %l0
+
+	/* Mark the CANRESTORE windows as OTHER windows. */
+	rdpr %canrestore, %l0
+	wrpr %l0, %otherwin
+	wrpr %g0, %canrestore
+
+	/*
+	 * Other window spills will go to the userspace window buffer
+	 * and normal spills will go to the kernel stack.
+	 */
+	wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(0), %wstate
+
+	MIDDLE_PART \is_syscall
+
+4:
+	/*
+	 * Spills and fills will be processed by the {spill,fill}_1_normal
+	 * handlers.
+	 */
+	wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(1), %wstate
+
+	/*
+	 * Set primary context according to secondary context.
+	 */
+	wr %g0, ASI_SECONDARY_CONTEXT_REG, %asi
+	ldxa [VA_SECONDARY_CONTEXT_REG] %asi, %g1
+	wr %g0, ASI_PRIMARY_CONTEXT_REG, %asi
+	stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi
+	rd %pc, %g1
+	flush %g1
+
+	/* Restoring userspace windows: */
+
+	/* Save address of the userspace window buffer to the %g7 register. */
+	set SCRATCHPAD_WBUF, %g5
+	ldxa [%g5] ASI_SCRATCHPAD, %g7
+
+	rdpr %cwp, %g1
+	rdpr %otherwin, %g2
+
+	/*
+	 * Skip all OTHERWIN windows and descend to the first window
+	 * in the userspace window buffer.
+	 */
+	sub %g1, %g2, %g3
+	dec %g3
+	and %g3, NWINDOWS - 1, %g3
+	wrpr %g3, 0, %cwp
+
+	/*
+	 * CWP is now in the window last saved in the userspace window buffer.
+	 * Fill all windows stored in the buffer. %g4 counts the refilled windows.
+	 */
+	clr %g4
+5:	andcc %g7, UWB_ALIGNMENT - 1, %g0	! alignment check
+	bz 6f					! %g7 is UWB_ALIGNMENT-aligned, no more windows to refill
+	nop
+
+	add %g7, -STACK_WINDOW_SAVE_AREA_SIZE, %g7
+	ldx [%g7 + L0_OFFSET], %l0
+	ldx [%g7 + L1_OFFSET], %l1
+	ldx [%g7 + L2_OFFSET], %l2
+	ldx [%g7 + L3_OFFSET], %l3
+	ldx [%g7 + L4_OFFSET], %l4
+	ldx [%g7 + L5_OFFSET], %l5
+	ldx [%g7 + L6_OFFSET], %l6
+	ldx [%g7 + L7_OFFSET], %l7
+	ldx [%g7 + I0_OFFSET], %i0
+	ldx [%g7 + I1_OFFSET], %i1
+	ldx [%g7 + I2_OFFSET], %i2
+	ldx [%g7 + I3_OFFSET], %i3
+	ldx [%g7 + I4_OFFSET], %i4
+	ldx [%g7 + I5_OFFSET], %i5
+	ldx [%g7 + I6_OFFSET], %i6
+	ldx [%g7 + I7_OFFSET], %i7
+
+	dec %g3
+	and %g3, NWINDOWS - 1, %g3
+	wrpr %g3, 0, %cwp			! switch to the preceding window
+
+	ba 5b
+	inc %g4
+
+6:
+	/* Save changes of the address of the userspace window buffer. */
+	stxa %g7, [%g5] ASI_SCRATCHPAD
+
+	/*
+	 * Switch back to the proper current window and adjust
+	 * OTHERWIN, CANRESTORE, CANSAVE and CLEANWIN.
+	 */
+	wrpr %g1, 0, %cwp
+	add %g4, %g2, %g2
+	cmp %g2, NWINDOWS - 2
+	bg 8f					! fix the CANRESTORE=NWINDOWS-1 anomaly
+	mov NWINDOWS - 2, %g1			! use delay slot for both cases
+	sub %g1, %g2, %g1
+	
+	wrpr %g0, 0, %otherwin
+	wrpr %g1, 0, %cansave			! NWINDOWS - 2 - CANRESTORE
+	wrpr %g2, 0, %canrestore		! OTHERWIN + windows in the buffer
+	wrpr %g2, 0, %cleanwin			! avoid information leak
+
+7:
+	restore
+
+.if \is_syscall
+	done
+.else
+	retry
+.endif
+
+8:
+	/*
+	 * We got here in order to avoid inconsistency of the window state registers.
+	 * If the:
+	 *
+	 * 	save %g6, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
+	 *
+	 * instruction trapped and spilled a register window into the userspace
+	 * window buffer, we have just restored NWINDOWS - 1 register windows.
+	 * However, CANRESTORE can be only NWINDOWS - 2 at most.
+	 *
+	 * The solution is to manually switch to (CWP - 1) mod NWINDOWS
+	 * and set the window state registers so that:
+	 *
+	 * 	CANRESTORE 	= NWINDOWS - 2
+	 *	CLEANWIN	= NWINDOWS - 2
+	 *	CANSAVE 	= 0
+	 *	OTHERWIN	= 0
+	 *
+	 * The RESTORE instruction is therefore to be skipped.
+	 */
+	wrpr %g0, 0, %otherwin
+	wrpr %g0, 0, %cansave
+	wrpr %g1, 0, %canrestore
+	wrpr %g1, 0, %cleanwin
+
+	rdpr %cwp, %g1
+	dec %g1
+	and %g1, NWINDOWS - 1, %g1
+	wrpr %g1, 0, %cwp			! CWP--
+
+.if \is_syscall
+	done
+.else
+	retry
+.endif
+
+.endm
+
+
+
+/*
+ * Preemptible trap handler for TL=1.
+ * This trap handler makes arrangements to make calling of scheduler() from
+ * within a trap context possible. It is called from several other trap
+ * handlers. Dispatches on TSTATE.PRIV to the kernel or userspace variant.
+ */
+.macro PREEMPTIBLE_HANDLER_TEMPLATE is_syscall
+	rdpr %tstate, %g3
+	and %g3, TSTATE_PRIV_BIT, %g3
+	brz %g3, 100f			! trapping from userspace
+	nop
+
+	PREEMPTIBLE_HANDLER_KERNEL
+	ba 101f
+	nop
+
+	100:
+	PREEMPTIBLE_HANDLER_USPACE \is_syscall
+
+	101:
+.endm
+
+.global preemptible_handler
+preemptible_handler:
+	PREEMPTIBLE_HANDLER_TEMPLATE 0
+
+.global trap_instruction_handler
+trap_instruction_handler:
+	PREEMPTIBLE_HANDLER_TEMPLATE 1
+
Index: kernel/arch/sparc64/src/trap/trap_table.S
===================================================================
--- kernel/arch/sparc64/src/trap/trap_table.S	(revision b1d3c36aca6850dfcd12418b4a6f60aa37a83487)
+++ 	(revision )
@@ -1,851 +1,0 @@
-#
-# Copyright (c) 2005 Jakub Jermar
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# - Redistributions of source code must retain the above copyright
-#   notice, this list of conditions and the following disclaimer.
-# - Redistributions in binary form must reproduce the above copyright
-#   notice, this list of conditions and the following disclaimer in the
-#   documentation and/or other materials provided with the distribution.
-# - The name of the author may not be used to endorse or promote products
-#   derived from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-/**
- * @file
- * @brief This file contains kernel trap table.
- */
-
-.register %g2, #scratch
-.register %g3, #scratch
-
-.text
-
-#include <arch/trap/trap_table.h>
-#include <arch/trap/regwin.h>
-#include <arch/trap/interrupt.h>
-#include <arch/trap/exception.h>
-#include <arch/trap/syscall.h>
-#include <arch/trap/mmu.h>
-#include <arch/mm/mmu.h>
-#include <arch/mm/page.h>
-#include <arch/stack.h>
-#include <arch/regdef.h>
-
-#define TABLE_SIZE	TRAP_TABLE_SIZE
-#define ENTRY_SIZE	TRAP_TABLE_ENTRY_SIZE
-
-/*
- * Kernel trap table.
- */
-.align TABLE_SIZE
-.global trap_table
-trap_table:
-
-/* TT = 0x08, TL = 0, instruction_access_exception */
-.org trap_table + TT_INSTRUCTION_ACCESS_EXCEPTION*ENTRY_SIZE
-.global instruction_access_exception_tl0
-instruction_access_exception_tl0:
-	wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate
-	PREEMPTIBLE_HANDLER instruction_access_exception
-
-/* TT = 0x0a, TL = 0, instruction_access_error */
-.org trap_table + TT_INSTRUCTION_ACCESS_ERROR*ENTRY_SIZE
-.global instruction_access_error_tl0
-instruction_access_error_tl0:
-	PREEMPTIBLE_HANDLER instruction_access_error
-
-/* TT = 0x10, TL = 0, illegal_instruction */
-.org trap_table + TT_ILLEGAL_INSTRUCTION*ENTRY_SIZE
-.global illegal_instruction_tl0
-illegal_instruction_tl0:
-	PREEMPTIBLE_HANDLER illegal_instruction
-
-/* TT = 0x11, TL = 0, privileged_opcode */
-.org trap_table + TT_PRIVILEGED_OPCODE*ENTRY_SIZE
-.global privileged_opcode_tl0
-privileged_opcode_tl0:
-	PREEMPTIBLE_HANDLER privileged_opcode
-
-/* TT = 0x12, TL = 0, unimplemented_LDD */
-.org trap_table + TT_UNIMPLEMENTED_LDD*ENTRY_SIZE
-.global unimplemented_LDD_tl0
-unimplemented_LDD_tl0:
-	PREEMPTIBLE_HANDLER unimplemented_LDD
-
-/* TT = 0x13, TL = 0, unimplemented_STD */
-.org trap_table + TT_UNIMPLEMENTED_STD*ENTRY_SIZE
-.global unimplemented_STD_tl0
-unimplemented_STD_tl0:
-	PREEMPTIBLE_HANDLER unimplemented_STD
-
-/* TT = 0x20, TL = 0, fb_disabled handler */
-.org trap_table + TT_FP_DISABLED*ENTRY_SIZE
-.global fb_disabled_tl0
-fp_disabled_tl0:
-	PREEMPTIBLE_HANDLER fp_disabled
-
-/* TT = 0x21, TL = 0, fb_exception_ieee_754 handler */
-.org trap_table + TT_FP_EXCEPTION_IEEE_754*ENTRY_SIZE
-.global fb_exception_ieee_754_tl0
-fp_exception_ieee_754_tl0:
-	PREEMPTIBLE_HANDLER fp_exception_ieee_754
-
-/* TT = 0x22, TL = 0, fb_exception_other handler */
-.org trap_table + TT_FP_EXCEPTION_OTHER*ENTRY_SIZE
-.global fb_exception_other_tl0
-fp_exception_other_tl0:
-	PREEMPTIBLE_HANDLER fp_exception_other
-
-/* TT = 0x23, TL = 0, tag_overflow */
-.org trap_table + TT_TAG_OVERFLOW*ENTRY_SIZE
-.global tag_overflow_tl0
-tag_overflow_tl0:
-	PREEMPTIBLE_HANDLER tag_overflow
-
-/* TT = 0x24, TL = 0, clean_window handler */
-.org trap_table + TT_CLEAN_WINDOW*ENTRY_SIZE
-.global clean_window_tl0
-clean_window_tl0:
-	CLEAN_WINDOW_HANDLER
-
-/* TT = 0x28, TL = 0, division_by_zero */
-.org trap_table + TT_DIVISION_BY_ZERO*ENTRY_SIZE
-.global division_by_zero_tl0
-division_by_zero_tl0:
-	PREEMPTIBLE_HANDLER division_by_zero
-
-/* TT = 0x30, TL = 0, data_access_exception */
-.org trap_table + TT_DATA_ACCESS_EXCEPTION*ENTRY_SIZE
-.global data_access_exception_tl0
-data_access_exception_tl0:
-	wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate
-	PREEMPTIBLE_HANDLER data_access_exception
-
-/* TT = 0x32, TL = 0, data_access_error */
-.org trap_table + TT_DATA_ACCESS_ERROR*ENTRY_SIZE
-.global data_access_error_tl0
-data_access_error_tl0:
-	PREEMPTIBLE_HANDLER data_access_error
-
-/* TT = 0x34, TL = 0, mem_address_not_aligned */
-.org trap_table + TT_MEM_ADDRESS_NOT_ALIGNED*ENTRY_SIZE
-.global mem_address_not_aligned_tl0
-mem_address_not_aligned_tl0:
-	PREEMPTIBLE_HANDLER mem_address_not_aligned
-
-/* TT = 0x35, TL = 0, LDDF_mem_address_not_aligned */
-.org trap_table + TT_LDDF_MEM_ADDRESS_NOT_ALIGNED*ENTRY_SIZE
-.global LDDF_mem_address_not_aligned_tl0
-LDDF_mem_address_not_aligned_tl0:
-	PREEMPTIBLE_HANDLER LDDF_mem_address_not_aligned
-
-/* TT = 0x36, TL = 0, STDF_mem_address_not_aligned */
-.org trap_table + TT_STDF_MEM_ADDRESS_NOT_ALIGNED*ENTRY_SIZE
-.global STDF_mem_address_not_aligned_tl0
-STDF_mem_address_not_aligned_tl0:
-	PREEMPTIBLE_HANDLER STDF_mem_address_not_aligned
-
-/* TT = 0x37, TL = 0, privileged_action */
-.org trap_table + TT_PRIVILEGED_ACTION*ENTRY_SIZE
-.global privileged_action_tl0
-privileged_action_tl0:
-	PREEMPTIBLE_HANDLER privileged_action
-
-/* TT = 0x38, TL = 0, LDQF_mem_address_not_aligned */
-.org trap_table + TT_LDQF_MEM_ADDRESS_NOT_ALIGNED*ENTRY_SIZE
-.global LDQF_mem_address_not_aligned_tl0
-LDQF_mem_address_not_aligned_tl0:
-	PREEMPTIBLE_HANDLER LDQF_mem_address_not_aligned
-
-/* TT = 0x39, TL = 0, STQF_mem_address_not_aligned */
-.org trap_table + TT_STQF_MEM_ADDRESS_NOT_ALIGNED*ENTRY_SIZE
-.global STQF_mem_address_not_aligned_tl0
-STQF_mem_address_not_aligned_tl0:
-	PREEMPTIBLE_HANDLER STQF_mem_address_not_aligned
-
-/* TT = 0x41, TL = 0, interrupt_level_1 handler */
-.org trap_table + TT_INTERRUPT_LEVEL_1*ENTRY_SIZE
-.global interrupt_level_1_handler_tl0
-interrupt_level_1_handler_tl0:
-	INTERRUPT_LEVEL_N_HANDLER 1
-
-/* TT = 0x42, TL = 0, interrupt_level_2 handler */
-.org trap_table + TT_INTERRUPT_LEVEL_2*ENTRY_SIZE
-.global interrupt_level_2_handler_tl0
-interrupt_level_2_handler_tl0:
-	INTERRUPT_LEVEL_N_HANDLER 2
-
-/* TT = 0x43, TL = 0, interrupt_level_3 handler */
-.org trap_table + TT_INTERRUPT_LEVEL_3*ENTRY_SIZE
-.global interrupt_level_3_handler_tl0
-interrupt_level_3_handler_tl0:
-	INTERRUPT_LEVEL_N_HANDLER 3
-
-/* TT = 0x44, TL = 0, interrupt_level_4 handler */
-.org trap_table + TT_INTERRUPT_LEVEL_4*ENTRY_SIZE
-.global interrupt_level_4_handler_tl0
-interrupt_level_4_handler_tl0:
-	INTERRUPT_LEVEL_N_HANDLER 4
-
-/* TT = 0x45, TL = 0, interrupt_level_5 handler */
-.org trap_table + TT_INTERRUPT_LEVEL_5*ENTRY_SIZE
-.global interrupt_level_5_handler_tl0
-interrupt_level_5_handler_tl0:
-	INTERRUPT_LEVEL_N_HANDLER 5
-
-/* TT = 0x46, TL = 0, interrupt_level_6 handler */
-.org trap_table + TT_INTERRUPT_LEVEL_6*ENTRY_SIZE
-.global interrupt_level_6_handler_tl0
-interrupt_level_6_handler_tl0:
-	INTERRUPT_LEVEL_N_HANDLER 6
-
-/* TT = 0x47, TL = 0, interrupt_level_7 handler */
-.org trap_table + TT_INTERRUPT_LEVEL_7*ENTRY_SIZE
-.global interrupt_level_7_handler_tl0
-interrupt_level_7_handler_tl0:
-	INTERRUPT_LEVEL_N_HANDLER 7
-
-/* TT = 0x48, TL = 0, interrupt_level_8 handler */
-.org trap_table + TT_INTERRUPT_LEVEL_8*ENTRY_SIZE
-.global interrupt_level_8_handler_tl0
-interrupt_level_8_handler_tl0:
-	INTERRUPT_LEVEL_N_HANDLER 8
-
-/* TT = 0x49, TL = 0, interrupt_level_9 handler */
-.org trap_table + TT_INTERRUPT_LEVEL_9*ENTRY_SIZE
-.global interrupt_level_9_handler_tl0
-interrupt_level_9_handler_tl0:
-	INTERRUPT_LEVEL_N_HANDLER 9
-
-/* TT = 0x4a, TL = 0, interrupt_level_10 handler */
-.org trap_table + TT_INTERRUPT_LEVEL_10*ENTRY_SIZE
-.global interrupt_level_10_handler_tl0
-interrupt_level_10_handler_tl0:
-	INTERRUPT_LEVEL_N_HANDLER 10
-
-/* TT = 0x4b, TL = 0, interrupt_level_11 handler */
-.org trap_table + TT_INTERRUPT_LEVEL_11*ENTRY_SIZE
-.global interrupt_level_11_handler_tl0
-interrupt_level_11_handler_tl0:
-	INTERRUPT_LEVEL_N_HANDLER 11
-
-/* TT = 0x4c, TL = 0, interrupt_level_12 handler */
-.org trap_table + TT_INTERRUPT_LEVEL_12*ENTRY_SIZE
-.global interrupt_level_12_handler_tl0
-interrupt_level_12_handler_tl0:
-	INTERRUPT_LEVEL_N_HANDLER 12
-
-/* TT = 0x4d, TL = 0, interrupt_level_13 handler */
-.org trap_table + TT_INTERRUPT_LEVEL_13*ENTRY_SIZE
-.global interrupt_level_13_handler_tl0
-interrupt_level_13_handler_tl0:
-	INTERRUPT_LEVEL_N_HANDLER 13
-
-/* TT = 0x4e, TL = 0, interrupt_level_14 handler */
-.org trap_table + TT_INTERRUPT_LEVEL_14*ENTRY_SIZE
-.global interrupt_level_14_handler_tl0
-interrupt_level_14_handler_tl0:
-	INTERRUPT_LEVEL_N_HANDLER 14
-
-/* TT = 0x4f, TL = 0, interrupt_level_15 handler */
-.org trap_table + TT_INTERRUPT_LEVEL_15*ENTRY_SIZE
-.global interrupt_level_15_handler_tl0
-interrupt_level_15_handler_tl0:
-	INTERRUPT_LEVEL_N_HANDLER 15
-
-/* TT = 0x60, TL = 0, interrupt_vector_trap handler */
-.org trap_table + TT_INTERRUPT_VECTOR_TRAP*ENTRY_SIZE
-.global interrupt_vector_trap_handler_tl0
-interrupt_vector_trap_handler_tl0:
-	INTERRUPT_VECTOR_TRAP_HANDLER
-
-/* TT = 0x64, TL = 0, fast_instruction_access_MMU_miss */
-.org trap_table + TT_FAST_INSTRUCTION_ACCESS_MMU_MISS*ENTRY_SIZE
-.global fast_instruction_access_mmu_miss_handler_tl0
-fast_instruction_access_mmu_miss_handler_tl0:
-	FAST_INSTRUCTION_ACCESS_MMU_MISS_HANDLER
-
-/* TT = 0x68, TL = 0, fast_data_access_MMU_miss */
-.org trap_table + TT_FAST_DATA_ACCESS_MMU_MISS*ENTRY_SIZE
-.global fast_data_access_mmu_miss_handler_tl0
-fast_data_access_mmu_miss_handler_tl0:
-	FAST_DATA_ACCESS_MMU_MISS_HANDLER 0
-
-/* TT = 0x6c, TL = 0, fast_data_access_protection */
-.org trap_table + TT_FAST_DATA_ACCESS_PROTECTION*ENTRY_SIZE
-.global fast_data_access_protection_handler_tl0
-fast_data_access_protection_handler_tl0:
-	FAST_DATA_ACCESS_PROTECTION_HANDLER 0
-
-/* TT = 0x80, TL = 0, spill_0_normal handler */
-.org trap_table + TT_SPILL_0_NORMAL*ENTRY_SIZE
-.global spill_0_normal_tl0
-spill_0_normal_tl0:
-	SPILL_NORMAL_HANDLER_KERNEL
-
-/* TT = 0x84, TL = 0, spill_1_normal handler */
-.org trap_table + TT_SPILL_1_NORMAL*ENTRY_SIZE
-.global spill_1_normal_tl0
-spill_1_normal_tl0:
-	SPILL_NORMAL_HANDLER_USERSPACE
-
-/* TT = 0x88, TL = 0, spill_2_normal handler */
-.org trap_table + TT_SPILL_2_NORMAL*ENTRY_SIZE
-.global spill_2_normal_tl0
-spill_2_normal_tl0:
-	SPILL_TO_USPACE_WINDOW_BUFFER
-
-/* TT = 0xa0, TL = 0, spill_0_other handler */
-.org trap_table + TT_SPILL_0_OTHER*ENTRY_SIZE
-.global spill_0_other_tl0
-spill_0_other_tl0:
-	SPILL_TO_USPACE_WINDOW_BUFFER
-
-/* TT = 0xc0, TL = 0, fill_0_normal handler */
-.org trap_table + TT_FILL_0_NORMAL*ENTRY_SIZE
-.global fill_0_normal_tl0
-fill_0_normal_tl0:
-	FILL_NORMAL_HANDLER_KERNEL
-
-/* TT = 0xc4, TL = 0, fill_1_normal handler */
-.org trap_table + TT_FILL_1_NORMAL*ENTRY_SIZE
-.global fill_1_normal_tl0
-fill_1_normal_tl0:
-	FILL_NORMAL_HANDLER_USERSPACE
-
-/* TT = 0x100 - 0x17f, TL = 0, trap_instruction_0 - trap_instruction_7f */
-.irp cur, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\
-    20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,\
-    39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,\
-    58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,\
-    77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,\
-    96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,\
-    112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,\
-    127
-.org trap_table + (TT_TRAP_INSTRUCTION_0+\cur)*ENTRY_SIZE
-.global trap_instruction_\cur\()_tl0
-trap_instruction_\cur\()_tl0:
-	ba %xcc, trap_instruction_handler
-	mov \cur, %g2
-.endr
-
-/*
- * Handlers for TL>0.
- */
-
-/* TT = 0x08, TL > 0, instruction_access_exception */
-.org trap_table + (TT_INSTRUCTION_ACCESS_EXCEPTION+512)*ENTRY_SIZE
-.global instruction_access_exception_tl1
-instruction_access_exception_tl1:
-	wrpr %g0, 1, %tl
-	wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate
-	PREEMPTIBLE_HANDLER instruction_access_exception
-
-/* TT = 0x0a, TL > 0, instruction_access_error */
-.org trap_table + (TT_INSTRUCTION_ACCESS_ERROR+512)*ENTRY_SIZE
-.global instruction_access_error_tl1
-instruction_access_error_tl1:
-	wrpr %g0, 1, %tl
-	PREEMPTIBLE_HANDLER instruction_access_error
-
-/* TT = 0x10, TL > 0, illegal_instruction */
-.org trap_table + (TT_ILLEGAL_INSTRUCTION+512)*ENTRY_SIZE
-.global illegal_instruction_tl1
-illegal_instruction_tl1:
-	wrpr %g0, 1, %tl
-	PREEMPTIBLE_HANDLER illegal_instruction
-
-/* TT = 0x24, TL > 0, clean_window handler */
-.org trap_table + (TT_CLEAN_WINDOW+512)*ENTRY_SIZE
-.global clean_window_tl1
-clean_window_tl1:
-	CLEAN_WINDOW_HANDLER
-
-/* TT = 0x28, TL > 0, division_by_zero */
-.org trap_table + (TT_DIVISION_BY_ZERO+512)*ENTRY_SIZE
-.global division_by_zero_tl1
-division_by_zero_tl1:
-	wrpr %g0, 1, %tl
-	PREEMPTIBLE_HANDLER division_by_zero
-
-/* TT = 0x30, TL > 0, data_access_exception */
-.org trap_table + (TT_DATA_ACCESS_EXCEPTION+512)*ENTRY_SIZE
-.global data_access_exception_tl1
-data_access_exception_tl1:
-	wrpr %g0, 1, %tl
-	wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate
-	PREEMPTIBLE_HANDLER data_access_exception
-
-/* TT = 0x32, TL > 0, data_access_error */
-.org trap_table + (TT_DATA_ACCESS_ERROR+512)*ENTRY_SIZE
-.global data_access_error_tl1
-data_access_error_tl1:
-	wrpr %g0, 1, %tl
-	PREEMPTIBLE_HANDLER data_access_error
-
-/* TT = 0x34, TL > 0, mem_address_not_aligned */
-.org trap_table + (TT_MEM_ADDRESS_NOT_ALIGNED+512)*ENTRY_SIZE
-.global mem_address_not_aligned_tl1
-mem_address_not_aligned_tl1:
-	wrpr %g0, 1, %tl
-	PREEMPTIBLE_HANDLER mem_address_not_aligned
-
-/* TT = 0x68, TL > 0, fast_data_access_MMU_miss */
-.org trap_table + (TT_FAST_DATA_ACCESS_MMU_MISS+512)*ENTRY_SIZE
-.global fast_data_access_mmu_miss_handler_tl1
-fast_data_access_mmu_miss_handler_tl1:
-	FAST_DATA_ACCESS_MMU_MISS_HANDLER 1
-
-/* TT = 0x6c, TL > 0, fast_data_access_protection */
-.org trap_table + (TT_FAST_DATA_ACCESS_PROTECTION+512)*ENTRY_SIZE
-.global fast_data_access_protection_handler_tl1
-fast_data_access_protection_handler_tl1:
-	FAST_DATA_ACCESS_PROTECTION_HANDLER 1
-
-/* TT = 0x80, TL > 0, spill_0_normal handler */
-.org trap_table + (TT_SPILL_0_NORMAL+512)*ENTRY_SIZE
-.global spill_0_normal_tl1
-spill_0_normal_tl1:
-	SPILL_NORMAL_HANDLER_KERNEL
-
-/* TT = 0x88, TL > 0, spill_2_normal handler */
-.org trap_table + (TT_SPILL_2_NORMAL+512)*ENTRY_SIZE
-.global spill_2_normal_tl1
-spill_2_normal_tl1:
-	SPILL_TO_USPACE_WINDOW_BUFFER
-
-/* TT = 0xa0, TL > 0, spill_0_other handler */
-.org trap_table + (TT_SPILL_0_OTHER+512)*ENTRY_SIZE
-.global spill_0_other_tl1
-spill_0_other_tl1:
-	SPILL_TO_USPACE_WINDOW_BUFFER
-
-/* TT = 0xc0, TL > 0, fill_0_normal handler */
-.org trap_table + (TT_FILL_0_NORMAL+512)*ENTRY_SIZE
-.global fill_0_normal_tl1
-fill_0_normal_tl1:
-	FILL_NORMAL_HANDLER_KERNEL
-
-.align TABLE_SIZE
-
-
-#define NOT(x)	((x) == 0)
-
-/* Preemptible trap handler for TL=1.
- *
- * This trap handler makes arrangements to make calling of scheduler() from
- * within a trap context possible. It is called from several other trap
- * handlers.
- *
- * This function can be entered either with interrupt globals or alternate
- * globals. Memory management trap handlers are obliged to switch to one of
- * those global sets prior to calling this function. Register window management
- * functions are not allowed to modify the alternate global registers.
- *
- * The kernel is designed to work on trap levels 0 - 4. For instance, the
- * following can happen:
- * TL0: kernel thread runs (CANSAVE=0, kernel stack not in DTLB)
- * TL1: preemptible trap handler started after a tick interrupt
- * TL2: preemptible trap handler did SAVE
- * TL3: spill handler touched the kernel stack  
- * TL4: hardware or software failure
- *
- * Input registers:
- *	%g1		Address of function to call if this is not a syscall.
- * 	%g2	 	First argument for the function.
- *	%g6		Pre-set as kernel stack base if trap from userspace.
- *	%g7		Pre-set as address of the userspace window buffer.
- */
-.macro PREEMPTIBLE_HANDLER_TEMPLATE is_syscall
-	/*
-	 * ASSERT(%tl == 1)
-	 */
-	rdpr %tl, %g3
-	cmp %g3, 1
-	be %xcc, 1f
-	nop
-0:	ba %xcc, 0b				! this is for debugging, if we ever get here
-	nop					! it will be easy to find
-
-1:
-.if NOT(\is_syscall)
-	rdpr %tstate, %g3
-	
-	/*
-	 * One of the ways this handler can be invoked is after a nested MMU trap from
-	 * either spill_1_normal or fill_1_normal traps. Both of these traps manipulate
-	 * the CWP register. We deal with the situation by simulating the MMU trap
-	 * on TL=1 and restart the respective SAVE or RESTORE instruction once the MMU
-	 * trap is resolved. However, because we are in the wrong window from the
-	 * perspective of the MMU trap, we need to synchronize CWP with CWP from TL=0.
-	 */ 
-	and %g3, TSTATE_CWP_MASK, %g4
-	wrpr %g4, 0, %cwp			! resynchronize CWP
-
-	andcc %g3, TSTATE_PRIV_BIT, %g0		! if this trap came from the privileged mode...
-	bnz %xcc, 0f				! ...skip setting of kernel stack and primary context
-	nop
-	
-.endif
-	/*
-	 * Normal window spills will go to the userspace window buffer.
-	 */
-	wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(2), %wstate
-
-	wrpr %g0, NWINDOWS - 1, %cleanwin	! prevent unnecessary clean_window exceptions
-
-	/*
-	 * Switch to kernel stack. The old stack is
-	 * automatically saved in the old window's %sp
-	 * and the new window's %fp.
-	 */
-	save %g6, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
-
-.if \is_syscall
-	/*
-	 * Copy arguments for the syscall to the new window.
-	 */
-	mov %i0, %o0
-	mov %i1, %o1
-	mov %i2, %o2
-	mov %i3, %o3
-	mov %i4, %o4
-	mov %i5, %o5
-.endif
-
-	/*
-	 * Mark the CANRESTORE windows as OTHER windows.
-	 */
-	rdpr %canrestore, %l0
-	wrpr %l0, %otherwin
-	wrpr %g0, %canrestore
-
-	/*
-	 * Switch to primary context 0.
-	 */
-	mov VA_PRIMARY_CONTEXT_REG, %l0
-	stxa %g0, [%l0] ASI_DMMU
-	rd %pc, %l0
-	flush %l0
-
-.if NOT(\is_syscall)
-	ba %xcc, 1f
-	nop
-0:
-	save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
-
-	/*
-	 * At this moment, we are using the kernel stack 
-	 * and have successfully allocated a register window.
-	 */
-1:
-.endif
-	/*
-	 * Other window spills will go to the userspace window buffer
-	 * and normal spills will go to the kernel stack.
-	 */
-	wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(0), %wstate
-	
-	/*
-	 * Copy arguments.
-	 */
-	mov %g1, %l0
-.if NOT(\is_syscall)
-	mov %g2, %o0
-.else
-	! store the syscall number on the stack as 7th argument
-	stx %g2, [%sp + STACK_WINDOW_SAVE_AREA_SIZE + STACK_BIAS + STACK_ARG6] 
-.endif
-
-	/*
-	 * Save TSTATE, TPC and TNPC aside.
-	 */
-	rdpr %tstate, %g1
-	rdpr %tpc, %g2
-	rdpr %tnpc, %g3
-	rd %y, %g4
-
-	stx %g1, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TSTATE]
-	stx %g2, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TPC]
-	stx %g3, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC]
-
-	/*
-	 * Save the Y register.
-	 * This register is deprecated according to SPARC V9 specification
-	 * and is only present for backward compatibility with previous
-	 * versions of the SPARC architecture.
-	 * Surprisingly, gcc makes use of this register without a notice.
-	 */
-	stx %g4, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_Y]
-	
-	wrpr %g0, 0, %tl
-	wrpr %g0, PSTATE_PRIV_BIT | PSTATE_PEF_BIT, %pstate
-	SAVE_GLOBALS
-	
-.if NOT(\is_syscall)
-	/*
-	 * Call the higher-level handler and pass istate as second parameter.
-	 */
-	call %l0
-	add %sp, PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC, %o1
-.else
-	/*
-	 * Call the higher-level syscall handler and enable interrupts.
-	 */
-	call syscall_handler
-	wrpr %g0, PSTATE_PRIV_BIT | PSTATE_PEF_BIT | PSTATE_IE_BIT, %pstate
-	mov %o0, %i0				! copy the value returned by the syscall
-.endif
-
-	RESTORE_GLOBALS
-	rdpr %pstate, %l1			! we must preserve the PEF bit
-	wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate
-	wrpr %g0, 1, %tl
-	
-	/*
-	 * Read TSTATE, TPC and TNPC from saved copy.
-	 */
-	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TSTATE], %g1
-	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TPC], %g2
-	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC], %g3
-
-	/*
-	 * Copy PSTATE.PEF to the in-register copy of TSTATE.
-	 */
-	and %l1, PSTATE_PEF_BIT, %l1
-	sllx %l1, TSTATE_PSTATE_SHIFT, %l1
-	sethi %hi(TSTATE_PEF_BIT), %g4
-	andn %g1, %g4, %g1
-	or %g1, %l1, %g1
-
-	/*
-	 * Restore TSTATE, TPC and TNPC from saved copies.
-	 */
-	wrpr %g1, 0, %tstate
-	wrpr %g2, 0, %tpc
-	wrpr %g3, 0, %tnpc
-
-	/*
-	 * Restore Y.
-	 */
-	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_Y], %g4
-	wr %g4, %y
-
-	/*
-	 * If OTHERWIN is zero, then all the userspace windows have been
-	 * spilled to kernel memory (i.e. register window buffer). Moreover,
-	 * if the scheduler was called in the meantime, all valid windows
-	 * belonging to other threads were spilled by context_save().
-	 * If OTHERWIN is non-zero, then some userspace windows are still
-	 * valid. Others might have been spilled. However, the CWP pointer
-	 * needs no fixing because the scheduler had not been called.
-	 */
-	rdpr %otherwin, %l0
-	brnz %l0, 0f
-	nop
-
-	/*
-	 * OTHERWIN == 0
-	 */
-
-	/*
-	 * If TSTATE.CWP + 1 == CWP, then we still do not have to fix CWP.
-	 */
-	and %g1, TSTATE_CWP_MASK, %l0
-	inc %l0
-	and %l0, NWINDOWS - 1, %l0	! %l0 mod NWINDOWS
-	rdpr %cwp, %l1
-	cmp %l0, %l1
-	bz %xcc, 0f			! CWP is ok
-	nop
-
-	/*
-	 * Fix CWP.
-	 * In order to recapitulate, the input registers in the current
-	 * window are the output registers of the window to which we want
-	 * to restore. Because the fill trap fills only input and local
-	 * registers of a window, we need to preserve those output
-	 * registers manually.
-	 */
-	mov %sp, %g2
-	stx %i0, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I0]
-	stx %i1, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I1]
-	stx %i2, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I2]
-	stx %i3, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I3]
-	stx %i4, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I4]
-	stx %i5, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I5]
-	stx %i6, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I6]
-	stx %i7, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I7]
-	wrpr %l0, 0, %cwp
-	mov %g2, %sp
-	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I0], %i0
-	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I1], %i1
-	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I2], %i2
-	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I3], %i3
-	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I4], %i4
-	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I5], %i5
-	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I6], %i6
-	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I7], %i7
-
-	/*
-	 * OTHERWIN != 0 or fall-through from the OTHERWIN == 0 case.
-	 * The CWP has already been restored to the value it had after the SAVE
-	 * at the beginning of this function.
-	 */
-0:
-.if NOT(\is_syscall)
-	rdpr %tstate, %g1
-	andcc %g1, TSTATE_PRIV_BIT, %g0		! if we are not returning to userspace...,
-	bnz %xcc, 1f				! ...skip restoring userspace windows
-	nop
-.endif
-
-	/*
-	 * Spills and fills will be processed by the {spill,fill}_1_normal
-	 * handlers.
-	 */
-	wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(1), %wstate
-
-	/*
-	 * Set primary context according to secondary context.
-	 */
-	wr %g0, ASI_DMMU, %asi
-	ldxa [VA_SECONDARY_CONTEXT_REG] %asi, %g1
-	stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi
-	rd %pc, %g1
-	flush %g1
-	
-	rdpr %cwp, %g1
-	rdpr %otherwin, %g2
-
-	/*
-	 * Skip all OTHERWIN windows and descend to the first window
-	 * in the userspace window buffer.
-	 */
-	sub %g1, %g2, %g3
-	dec %g3
-	and %g3, NWINDOWS - 1, %g3
-	wrpr %g3, 0, %cwp
-
-	/*
-	 * CWP is now in the window last saved in the userspace window buffer.
-	 * Fill all windows stored in the buffer.
-	 */
-	clr %g4
-0:	andcc %g7, UWB_ALIGNMENT - 1, %g0	! alignment check
-	bz %xcc, 0f				! %g7 is UWB_ALIGNMENT-aligned, no more windows to refill
-	nop
-
-	add %g7, -STACK_WINDOW_SAVE_AREA_SIZE, %g7
-	ldx [%g7 + L0_OFFSET], %l0
-	ldx [%g7 + L1_OFFSET], %l1
-	ldx [%g7 + L2_OFFSET], %l2
-	ldx [%g7 + L3_OFFSET], %l3
-	ldx [%g7 + L4_OFFSET], %l4
-	ldx [%g7 + L5_OFFSET], %l5
-	ldx [%g7 + L6_OFFSET], %l6
-	ldx [%g7 + L7_OFFSET], %l7
-	ldx [%g7 + I0_OFFSET], %i0
-	ldx [%g7 + I1_OFFSET], %i1
-	ldx [%g7 + I2_OFFSET], %i2
-	ldx [%g7 + I3_OFFSET], %i3
-	ldx [%g7 + I4_OFFSET], %i4
-	ldx [%g7 + I5_OFFSET], %i5
-	ldx [%g7 + I6_OFFSET], %i6
-	ldx [%g7 + I7_OFFSET], %i7
-
-	dec %g3
-	and %g3, NWINDOWS - 1, %g3
-	wrpr %g3, 0, %cwp			! switch to the preceeding window
-
-	ba %xcc, 0b
-	inc %g4
-
-0:
-	/*
-	 * Switch back to the proper current window and adjust
-	 * OTHERWIN, CANRESTORE, CANSAVE and CLEANWIN.
-	 */
-	wrpr %g1, 0, %cwp
-	add %g4, %g2, %g2
-	cmp %g2, NWINDOWS - 2
-	bg %xcc, 2f				! fix the CANRESTORE=NWINDOWS-1 anomaly
-	mov NWINDOWS - 2, %g1			! use dealy slot for both cases
-	sub %g1, %g2, %g1
-	
-	wrpr %g0, 0, %otherwin
-	wrpr %g1, 0, %cansave			! NWINDOWS - 2 - CANRESTORE
-	wrpr %g2, 0, %canrestore		! OTHERWIN + windows in the buffer
-	wrpr %g2, 0, %cleanwin			! avoid information leak
-
-1:
-	restore
-
-.if \is_syscall
-	done
-.else
-	retry
-.endif
-
-	/*
-	 * We got here in order to avoid inconsistency of the window state registers.
-	 * If the:
-	 *
-	 * 	save %g6, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
-	 *
-	 * instruction trapped and spilled a register window into the userspace
-	 * window buffer, we have just restored NWINDOWS - 1 register windows.
-	 * However, CANRESTORE can be only NWINDOW - 2 at most.
-	 *
-	 * The solution is to manually switch to (CWP - 1) mod NWINDOWS
-	 * and set the window state registers so that:
-	 *
-	 * 	CANRESTORE 	= NWINDOWS - 2
-	 *	CLEANWIN	= NWINDOWS - 2
-	 *	CANSAVE 	= 0
-	 *	OTHERWIN	= 0
-	 *
-	 * The RESTORE instruction is therfore to be skipped.
-	 */
-2:
-	wrpr %g0, 0, %otherwin
-	wrpr %g0, 0, %cansave
-	wrpr %g1, 0, %canrestore
-	wrpr %g1, 0, %cleanwin
-
-	rdpr %cwp, %g1
-	dec %g1
-	and %g1, NWINDOWS - 1, %g1
-	wrpr %g1, 0, %cwp			! CWP--
-	
-.if \is_syscall
-	done
-.else
-	retry
-.endif
-
-.endm
-
-.global preemptible_handler
-preemptible_handler:
-	PREEMPTIBLE_HANDLER_TEMPLATE 0
-
-.global trap_instruction_handler
-trap_instruction_handler:
-	PREEMPTIBLE_HANDLER_TEMPLATE 1
