Index: kernel/arch/mips64/src/asm.S
===================================================================
--- kernel/arch/mips64/src/asm.S	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
+++ kernel/arch/mips64/src/asm.S	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
@@ -0,0 +1,301 @@
+/*
+ * Copyright (c) 2003 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch/asm/regname.h>
+
+.text
+
+.macro cp0_read reg
+	dmfc0 $2, \reg			/* $v0 = 64-bit read of CP0 register \reg */
+	j $31				/* return ($31 = ra); GAS assembles register j as jr */
+	nop				/* branch delay slot */
+.endm
+
+.macro cp0_write reg
+	dmtc0 $4, \reg			/* CP0 register \reg = $a0 (64-bit write) */
+	j $31				/* return ($31 = ra); GAS assembles register j as jr */
+	nop				/* branch delay slot */
+.endm
+
+.set noat
+.set noreorder
+.set nomacro
+
+.global asm_delay_loop
+asm_delay_loop:			/* stub: returns immediately, performs no delay */
+	j $31
+	nop				/* branch delay slot */
+
+.global cpu_halt
+cpu_halt:				/* busy-spin forever; no wait instruction used */
+	j cpu_halt
+	nop				/* branch delay slot */
+
+.global memcpy_from_uspace
+.global memcpy_to_uspace
+.global memcpy_from_uspace_failover_address
+.global memcpy_to_uspace_failover_address
+memcpy_from_uspace:
+memcpy_to_uspace:	/* copy $a2 bytes from src $a1 to dst $a0; returns dst, or 0 via failover on fault */
+	move $t2, $a0  /* save dst */
+	
+	addiu $v0, $a1, 3	/* NOTE(review): 32-bit addiu/addu/srl on 64-bit addresses -- confirm intended for mips64 */
+	li $v1, -4  /* 0xfffffffffffffffc */
+	and $v0, $v0, $v1	/* $v0 = src rounded up to a 4-byte boundary */
+	beq $a1, $v0, 3f	/* src word-aligned -> try word copy */
+	move $t0, $a0		/* (delay slot) $t0 = working dst pointer */
+	
+	0:			/* byte-by-byte copy */
+		beq $a2, $zero, 2f	/* zero length: done */
+		move $a3, $zero		/* (delay slot) $a3 = byte index */
+	
+	1:
+		addu $v0, $a1, $a3
+		lbu $a0, 0($v0)		/* load src byte */
+		addu $v1, $t0, $a3
+		addiu $a3, $a3, 1
+		bne $a3, $a2, 1b
+		sb $a0, 0($v1)		/* (delay slot) store dst byte */
+	
+	2:
+		jr $ra			/* success: return saved dst */
+		move $v0, $t2		/* (delay slot) */
+	
+	3:
+		addiu $v0, $a0, 3
+		and $v0, $v0, $v1
+		bne $a0, $v0, 0b	/* dst not word-aligned -> fall back to byte copy */
+		srl $t1, $a2, 2		/* (delay slot) $t1 = whole-word count */
+		
+		beq $t1, $zero, 5f	/* fewer than 4 bytes: tail copy only */
+		move $a3, $zero		/* (delay slot) */
+		
+		move $a3, $zero		/* $a3 = word index */
+		move $a0, $zero		/* $a0 = byte offset */
+	
+	4:				/* word-by-word copy */
+		addu $v0, $a1, $a0
+		lw $v1, 0($v0)
+		addiu $a3, $a3, 1
+		addu $v0, $t0, $a0
+		sw $v1, 0($v0)
+		bne $a3, $t1, 4b
+		addiu $a0, $a0, 4	/* (delay slot) advance byte offset */
+	
+	5:
+		andi $a2, $a2, 0x3	/* $a2 = remaining tail bytes */
+		beq $a2, $zero, 2b	/* nothing left: return */
+		nop
+		
+		sll $v0, $a3, 2		/* bytes already copied by the word loop */
+		addu $t1, $v0, $t0	/* $t1 = dst tail pointer */
+		move $a3, $zero
+		addu $t0, $v0, $a1	/* $t0 = src tail pointer */
+	
+	6:				/* tail byte copy */
+		addu $v0, $t0, $a3
+		lbu $a0, 0($v0)
+		addu $v1, $t1, $a3
+		addiu $a3, $a3, 1
+		bne $a3, $a2, 6b
+		sb $a0, 0($v1)		/* (delay slot) */
+		
+		jr $ra			/* success: return saved dst */
+		move $v0, $t2		/* (delay slot) */
+
+memcpy_from_uspace_failover_address:
+memcpy_to_uspace_failover_address:	/* exception code presumably redirects here on a faulting access -- TODO confirm */
+	jr $ra
+	move $v0, $zero			/* (delay slot) return 0 to signal failure */
+
+.macro fpu_gp_save reg ctx
+	mfc1 $t0, $\reg			/* read 32 bits of FP general register \reg */
+	sw $t0, \reg * 4(\ctx)		/* store at ctx + reg*4 */
+.endm
+
+.macro fpu_gp_restore reg ctx
+	lw $t0, \reg * 4(\ctx)		/* load saved value from ctx + reg*4 */
+	mtc1 $t0, $\reg			/* write FP general register \reg */
+.endm
+
+.macro fpu_ct_save reg ctx
+	cfc1 $t0, $\reg			/* FIX: was hardcoded "$1", saving control reg 1 for every \reg; mirror fpu_ct_restore */
+	sw $t0, (\reg + 32) * 4(\ctx)	/* control regs stored after the 32 GP slots */
+.endm
+
+.macro fpu_ct_restore reg ctx
+	lw $t0, (\reg + 32) * 4(\ctx)	/* control regs live after the 32 GP slots */
+	ctc1 $t0, $\reg			/* write FPU control register \reg */
+.endm
+
+.global fpu_context_save
+fpu_context_save:	/* save all FPU general + control registers into the context at $a0 */
+#ifdef CONFIG_FPU
+	fpu_gp_save 0, $a0
+	fpu_gp_save 1, $a0
+	fpu_gp_save 2, $a0
+	fpu_gp_save 3, $a0
+	fpu_gp_save 4, $a0
+	fpu_gp_save 5, $a0
+	fpu_gp_save 6, $a0
+	fpu_gp_save 7, $a0
+	fpu_gp_save 8, $a0
+	fpu_gp_save 9, $a0
+	fpu_gp_save 10, $a0
+	fpu_gp_save 11, $a0
+	fpu_gp_save 12, $a0
+	fpu_gp_save 13, $a0
+	fpu_gp_save 14, $a0
+	fpu_gp_save 15, $a0
+	fpu_gp_save 16, $a0
+	fpu_gp_save 17, $a0
+	fpu_gp_save 18, $a0
+	fpu_gp_save 19, $a0
+	fpu_gp_save 20, $a0
+	fpu_gp_save 21, $a0
+	fpu_gp_save 22, $a0
+	fpu_gp_save 23, $a0
+	fpu_gp_save 24, $a0
+	fpu_gp_save 25, $a0
+	fpu_gp_save 26, $a0
+	fpu_gp_save 27, $a0
+	fpu_gp_save 28, $a0
+	fpu_gp_save 29, $a0
+	fpu_gp_save 30, $a0
+	fpu_gp_save 31, $a0
+	
+	fpu_ct_save 1, $a0
+	fpu_ct_save 2, $a0
+	fpu_ct_save 3, $a0
+	fpu_ct_save 4, $a0
+	fpu_ct_save 5, $a0
+	fpu_ct_save 6, $a0
+	fpu_ct_save 7, $a0
+	fpu_ct_save 8, $a0
+	fpu_ct_save 9, $a0
+	fpu_ct_save 10, $a0
+	fpu_ct_save 11, $a0
+	fpu_ct_save 12, $a0
+	fpu_ct_save 13, $a0
+	fpu_ct_save 14, $a0
+	fpu_ct_save 15, $a0
+	fpu_ct_save 16, $a0
+	fpu_ct_save 17, $a0
+	fpu_ct_save 18, $a0
+	fpu_ct_save 19, $a0
+	fpu_ct_save 20, $a0
+	fpu_ct_save 21, $a0
+	fpu_ct_save 22, $a0
+	fpu_ct_save 23, $a0
+	fpu_ct_save 24, $a0
+	fpu_ct_save 25, $a0
+	fpu_ct_save 26, $a0
+	fpu_ct_save 27, $a0
+	fpu_ct_save 28, $a0
+	fpu_ct_save 29, $a0
+	fpu_ct_save 30, $a0
+	fpu_ct_save 31, $a0
+#endif
+	j $ra		/* NOTE(review): rest of file uses "jr $ra"; GAS accepts both spellings */
+	nop		/* branch delay slot */
+
+.global fpu_context_restore
+fpu_context_restore:	/* restore all FPU general + control registers from the context at $a0 */
+#ifdef CONFIG_FPU
+	fpu_gp_restore 0, $a0
+	fpu_gp_restore 1, $a0
+	fpu_gp_restore 2, $a0
+	fpu_gp_restore 3, $a0
+	fpu_gp_restore 4, $a0
+	fpu_gp_restore 5, $a0
+	fpu_gp_restore 6, $a0
+	fpu_gp_restore 7, $a0
+	fpu_gp_restore 8, $a0
+	fpu_gp_restore 9, $a0
+	fpu_gp_restore 10, $a0
+	fpu_gp_restore 11, $a0
+	fpu_gp_restore 12, $a0
+	fpu_gp_restore 13, $a0
+	fpu_gp_restore 14, $a0
+	fpu_gp_restore 15, $a0
+	fpu_gp_restore 16, $a0
+	fpu_gp_restore 17, $a0
+	fpu_gp_restore 18, $a0
+	fpu_gp_restore 19, $a0
+	fpu_gp_restore 20, $a0
+	fpu_gp_restore 21, $a0
+	fpu_gp_restore 22, $a0
+	fpu_gp_restore 23, $a0
+	fpu_gp_restore 24, $a0
+	fpu_gp_restore 25, $a0
+	fpu_gp_restore 26, $a0
+	fpu_gp_restore 27, $a0
+	fpu_gp_restore 28, $a0
+	fpu_gp_restore 29, $a0
+	fpu_gp_restore 30, $a0
+	fpu_gp_restore 31, $a0
+	
+	fpu_ct_restore 1, $a0
+	fpu_ct_restore 2, $a0
+	fpu_ct_restore 3, $a0
+	fpu_ct_restore 4, $a0
+	fpu_ct_restore 5, $a0
+	fpu_ct_restore 6, $a0
+	fpu_ct_restore 7, $a0
+	fpu_ct_restore 8, $a0
+	fpu_ct_restore 9, $a0
+	fpu_ct_restore 10, $a0
+	fpu_ct_restore 11, $a0
+	fpu_ct_restore 12, $a0
+	fpu_ct_restore 13, $a0
+	fpu_ct_restore 14, $a0
+	fpu_ct_restore 15, $a0
+	fpu_ct_restore 16, $a0
+	fpu_ct_restore 17, $a0
+	fpu_ct_restore 18, $a0
+	fpu_ct_restore 19, $a0
+	fpu_ct_restore 20, $a0
+	fpu_ct_restore 21, $a0
+	fpu_ct_restore 22, $a0
+	fpu_ct_restore 23, $a0
+	fpu_ct_restore 24, $a0
+	fpu_ct_restore 25, $a0
+	fpu_ct_restore 26, $a0
+	fpu_ct_restore 27, $a0
+	fpu_ct_restore 28, $a0
+	fpu_ct_restore 29, $a0
+	fpu_ct_restore 30, $a0
+	fpu_ct_restore 31, $a0
+#endif
+	j $ra		/* NOTE(review): rest of file uses "jr $ra"; GAS accepts both spellings */
+	nop		/* branch delay slot */
+
+.global early_putchar
+early_putchar:			/* stub: early console output is not implemented */
+	j $ra
+	nop			/* branch delay slot */
Index: kernel/arch/mips64/src/cache.c
===================================================================
--- kernel/arch/mips64/src/cache.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
+++ kernel/arch/mips64/src/cache.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
@@ -0,0 +1,1 @@
+../../mips32/src/cache.c
Index: kernel/arch/mips64/src/context.S
===================================================================
--- kernel/arch/mips64/src/context.S	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
+++ kernel/arch/mips64/src/context.S	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
@@ -0,0 +1,1 @@
+../../mips32/src/context.S
Index: kernel/arch/mips64/src/cpu
===================================================================
--- kernel/arch/mips64/src/cpu	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
+++ kernel/arch/mips64/src/cpu	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
@@ -0,0 +1,1 @@
+../../mips32/src/cpu
Index: kernel/arch/mips64/src/ddi
===================================================================
--- kernel/arch/mips64/src/ddi	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
+++ kernel/arch/mips64/src/ddi	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
@@ -0,0 +1,1 @@
+../../mips32/src/ddi
Index: kernel/arch/mips64/src/debug
===================================================================
--- kernel/arch/mips64/src/debug	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
+++ kernel/arch/mips64/src/debug	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
@@ -0,0 +1,1 @@
+../../mips32/src/debug
Index: kernel/arch/mips64/src/debugger.c
===================================================================
--- kernel/arch/mips64/src/debugger.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
+++ kernel/arch/mips64/src/debugger.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
@@ -0,0 +1,1 @@
+../../mips32/src/debugger.c
Index: kernel/arch/mips64/src/exception.c
===================================================================
--- kernel/arch/mips64/src/exception.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
+++ kernel/arch/mips64/src/exception.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2003-2004 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup mips64
+ * @{
+ */
+/** @file
+ */
+
+#include <arch/exception.h>
+#include <arch/interrupt.h>
+#include <arch/mm/tlb.h>
+#include <panic.h>
+#include <arch/cp0.h>
+#include <typedefs.h>
+#include <arch.h>
+#include <debug.h>
+#include <proc/thread.h>
+#include <print.h>
+#include <interrupt.h>
+#include <func.h>
+#include <ddi/irq.h>
+#include <arch/debugger.h>
+#include <symtab.h>
+
+static const char *exctable[] = {	/* human-readable names indexed by exception code (0 - 31) */
+	"Interrupt",				/* 0 */
+	"TLB Modified",				/* 1 */
+	"TLB Invalid",				/* 2 */
+	"TLB Invalid Store",			/* 3 */
+	"Address Error - load/instr. fetch",	/* 4 */
+	"Address Error - store",		/* 5 */
+	"Bus Error - fetch instruction",	/* 6 */
+	"Bus Error - data reference",		/* 7 */
+	"Syscall",				/* 8 */
+	"BreakPoint",				/* 9 */
+	"Reserved Instruction",			/* 10 */
+	"Coprocessor Unusable",			/* 11 */
+	"Arithmetic Overflow",			/* 12 */
+	"Trap",					/* 13 */
+	"Virtual Coherency - instruction",	/* 14 */
+	"Floating Point",			/* 15 */
+	NULL, NULL, NULL, NULL, NULL, NULL, NULL,	/* 16 - 22 reserved */
+	"WatchHi/WatchLo",  /* 23 */
+	NULL, NULL, NULL, NULL, NULL, NULL, NULL,	/* 24 - 30 reserved */
+	"Virtual Coherency - data",		/* 31 */
+};
+
+void istate_decode(istate_t *istate)	/* dump all saved registers of an interrupted context as 64-bit hex */
+{
+	printf("epc=%#018" PRIx64 "\tsta=%#018" PRIx64 "\t"
+	    "lo =%#018" PRIx64 "\thi =%#018" PRIx64 "\n",
+	    istate->epc, istate->status, istate->lo, istate->hi);
+	
+	printf("a0 =%#018" PRIx64 "\ta1 =%#018" PRIx64 "\t"
+	    "a2 =%#018" PRIx64 "\ta3 =%#018" PRIx64 "\n",
+	    istate->a0, istate->a1, istate->a2, istate->a3);
+	
+	printf("t0 =%#018" PRIx64 "\tt1 =%#018" PRIx64 "\t"
+	    "t2 =%#018" PRIx64 "\tt3 =%#018" PRIx64 "\n",
+	    istate->t0, istate->t1, istate->t2, istate->t3);
+	
+	printf("t4 =%#018" PRIx64 "\tt5 =%#018" PRIx64 "\t"
+	    "t6 =%#018" PRIx64 "\tt7 =%#018" PRIx64 "\n",
+	    istate->t4, istate->t5, istate->t6, istate->t7);
+	
+	printf("t8 =%#018" PRIx64 "\tt9 =%#018" PRIx64 "\t"
+	    "v0 =%#018" PRIx64 "\tv1 =%#018" PRIx64 "\n",
+	    istate->t8, istate->t9, istate->v0, istate->v1);
+	
+	printf("s0 =%#018" PRIx64 "\ts1 =%#018" PRIx64 "\t"
+	    "s2 =%#018" PRIx64 "\ts3 =%#018" PRIx64 "\n",
+	    istate->s0, istate->s1, istate->s2, istate->s3);
+	
+	printf("s4 =%#018" PRIx64 "\ts5 =%#018" PRIx64 "\t"
+	    "s6 =%#018" PRIx64 "\ts7 =%#018" PRIx64 "\n",
+	    istate->s4, istate->s5, istate->s6, istate->s7);
+	
+	printf("s8 =%#018" PRIx64 "\tat =%#018" PRIx64 "\t"
+	    "kt0=%#018" PRIx64 "\tkt1=%#018" PRIx64 "\n",
+	    istate->s8, istate->at, istate->kt0, istate->kt1);
+	
+	printf("sp =%#018" PRIx64 "\tra =%#018" PRIx64 "\t"
+	    "gp =%#018" PRIx64 "\n",
+	    istate->sp, istate->ra, istate->gp);
+}
+
+static void unhandled_exception(unsigned int n, istate_t *istate)
+{	/* FIX: exctable has NULL holes (codes 16-22, 24-30) and only 32 entries; never feed NULL/OOB to %s */
+	fault_if_from_uspace(istate, "Unhandled exception %s.", (n < sizeof(exctable) / sizeof(exctable[0])) && exctable[n] ? exctable[n] : "Reserved");
+	panic_badtrap(istate, n, "Unhandled exception %s.", (n < sizeof(exctable) / sizeof(exctable[0])) && exctable[n] ? exctable[n] : "Reserved");
+}
+
+static void reserved_instr_exception(unsigned int n, istate_t *istate)
+{
+	if (*((uint32_t *) istate->epc) == 0x7c03e83b) {	/* opcode of "rdhwr $v1, $29" -- userlocal/TLS read, presumably emulated here; TODO confirm */
+		ASSERT(THREAD);
+		istate->epc += 4;			/* skip the emulated instruction */
+		istate->v1 = istate->kt1;		/* kt1 apparently holds the userlocal value -- verify against entry code */
+	} else
+		unhandled_exception(n, istate);
+}
+
+static void breakpoint_exception(unsigned int n, istate_t *istate)
+{
+#ifdef CONFIG_DEBUG
+	debugger_bpoint(istate);
+#else
+	/* Advance EPC past the BREAK instruction so it is not
+	   re-executed after returning from the exception handler
+	   (see page 138 of the R4000 manual). */
+	istate->epc += 4;
+#endif
+}
+
+static void tlbmod_exception(unsigned int n, istate_t *istate)
+{
+	tlb_modified(istate);	/* delegate: write hit a clean/read-only mapping */
+}
+
+static void tlbinv_exception(unsigned int n, istate_t *istate)
+{
+	tlb_invalid(istate);	/* delegate: invalid TLB entry on load or store */
+}
+
+#ifdef CONFIG_FPU_LAZY
+static void cpuns_exception(unsigned int n, istate_t *istate)
+{
+	if (cp0_cause_coperr(cp0_cause_read()) == fpu_cop_id)	/* is the unusable coprocessor the FPU? */
+		scheduler_fpu_lazy_request();	/* lazily hand the FPU to the current thread */
+	else {
+		fault_if_from_uspace(istate,
+		    "Unhandled Coprocessor Unusable Exception.");
+		panic_badtrap(istate, n,
+		    "Unhandled Coprocessor Unusable Exception.");
+	}
+}
+#endif
+
+static void interrupt_exception(unsigned int n, istate_t *istate)
+{
+	/* Decode interrupt number and process the interrupt */
+	uint32_t cause = (cp0_cause_read() >> 8) & 0xff;	/* IP0-IP7 pending bits, cause[15:8] */
+	
+	unsigned int i;
+	for (i = 0; i < 8; i++) {	/* dispatch every pending line, lowest first */
+		if (cause & (1 << i)) {
+			irq_t *irq = irq_dispatch_and_lock(i);
+			if (irq) {
+				/*
+				 * The IRQ handler was found.
+				 */
+				irq->handler(irq);
+				irq_spinlock_unlock(&irq->lock, false);
+			} else {
+				/*
+				 * Spurious interrupt.
+				 */
+#ifdef CONFIG_DEBUG
+				printf("cpu%u: spurious interrupt (inum=%u)\n",
+				    CPU->id, i);
+#endif
+			}
+		}
+	}
+}
+
+/** Handle syscall userspace call */
+static void syscall_exception(unsigned int n, istate_t *istate)
+{
+	fault_if_from_uspace(istate, "Syscall is handled through shortcut.");	/* reaching this handler means the fast path was bypassed */
+}
+
+void exception_init(void)
+{
+	unsigned int i;
+	
+	/* Clear exception table */
+	for (i = 0; i < IVT_ITEMS; i++)
+		exc_register(i, "undef", false,
+		    (iroutine_t) unhandled_exception);
+	
+	exc_register(EXC_Bp, "bkpoint", true,
+	    (iroutine_t) breakpoint_exception);
+	exc_register(EXC_RI, "resinstr", true,
+	    (iroutine_t) reserved_instr_exception);
+	exc_register(EXC_Mod, "tlb_mod", true,
+	    (iroutine_t) tlbmod_exception);
+	exc_register(EXC_TLBL, "tlbinvl", true,
+	    (iroutine_t) tlbinv_exception);
+	exc_register(EXC_TLBS, "tlbinvs", true,	/* FIX: was duplicate "tlbinvl" (copy-paste) */
+	    (iroutine_t) tlbinv_exception);
+	exc_register(EXC_Int, "interrupt", true,
+	    (iroutine_t) interrupt_exception);
+	
+#ifdef CONFIG_FPU_LAZY
+	exc_register(EXC_CpU, "cpunus", true,
+	    (iroutine_t) cpuns_exception);
+#endif
+	
+	exc_register(EXC_Sys, "syscall", true,
+	    (iroutine_t) syscall_exception);
+}
+
+/** @}
+ */
Index: kernel/arch/mips64/src/fpu_context.c
===================================================================
--- kernel/arch/mips64/src/fpu_context.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
+++ kernel/arch/mips64/src/fpu_context.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
@@ -0,0 +1,1 @@
+../../mips32/src/fpu_context.c
Index: kernel/arch/mips64/src/interrupt.c
===================================================================
--- kernel/arch/mips64/src/interrupt.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
+++ kernel/arch/mips64/src/interrupt.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2003-2004 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup mips64
+ * @{
+ */
+/** @file
+ */
+
+#include <interrupt.h>
+#include <arch/interrupt.h>
+#include <typedefs.h>
+#include <arch.h>
+#include <arch/cp0.h>
+#include <arch/smp/dorder.h>
+#include <time/clock.h>
+#include <ipc/sysipc.h>
+#include <ddi/device.h>
+
+#define IRQ_COUNT   8	/* eight CP0 interrupt lines (IP0-IP7) */
+#define TIMER_IRQ   7	/* CP0 count/compare timer line */
+#define DORDER_IRQ  5	/* msim dorder (IPI) line */
+
+function virtual_timer_fnc = NULL;	/* optional hook invoked on every timer tick */
+static irq_t timer_irq;
+static irq_t dorder_irq;
+
+// TODO: This is SMP unsafe!!!
+
+uint32_t count_hi = 0;			/* upper half of the virtual 64-bit tick counter */
+static unsigned long nextcount;		/* compare value programmed for the next tick */
+static unsigned long lastcount;		/* count observed at the previous tick */
+
+/** Disable interrupts.
+ *
+ * @return Old interrupt priority level.
+ */
+ipl_t interrupts_disable(void)
+{
+	ipl_t prev = (ipl_t) cp0_status_read();			/* remember the full status word */
+	cp0_status_write(prev & ~cp0_status_ie_enabled_bit);	/* clear the IE bit */
+	return prev;
+}
+
+/** Enable interrupts.
+ *
+ * @return Old interrupt priority level.
+ */
+ipl_t interrupts_enable(void)
+{
+	ipl_t prev = (ipl_t) cp0_status_read();			/* remember the full status word */
+	cp0_status_write(prev | cp0_status_ie_enabled_bit);	/* set the IE bit */
+	return prev;
+}
+
+/** Restore interrupt priority level.
+ *
+ * @param ipl Saved interrupt priority level.
+ */
+void interrupts_restore(ipl_t ipl)
+{
+	cp0_status_write(cp0_status_read() | (ipl & cp0_status_ie_enabled_bit));	/* ORs IE back in: can only re-enable, never disable */
+}
+
+/** Read interrupt priority level.
+ *
+ * @return Current interrupt priority level.
+ */
+ipl_t interrupts_read(void)
+{
+	return cp0_status_read();	/* the whole status word serves as the priority level */
+}
+
+/** Check interrupts state.
+ *
+ * @return True if interrupts are disabled.
+ *
+ */
+bool interrupts_disabled(void)
+{
+	return !(cp0_status_read() & cp0_status_ie_enabled_bit);	/* IE clear means interrupts are off */
+}
+
+/** Start hardware clock
+ *
+ */
+static void timer_start(void)
+{
+	lastcount = cp0_count_read();
+	nextcount = cp0_compare_value + cp0_count_read();	/* first tick one full period from now */
+	cp0_compare_write(nextcount);
+}
+
+static irq_ownership_t timer_claim(irq_t *irq)
+{
+	return IRQ_ACCEPT;	/* always ours: the timer line has a single source */
+}
+
+static void timer_irq_handler(irq_t *irq)
+{
+	if (cp0_count_read() < lastcount)	/* 32-bit count wrapped since the last tick */
+		/* Count overflow detected */
+		count_hi++;
+	
+	lastcount = cp0_count_read();
+	
+	unsigned long drift = cp0_count_read() - nextcount;	/* how late this tick fired */
+	while (drift > cp0_compare_value) {
+		drift -= cp0_compare_value;
+		CPU->missed_clock_ticks++;	/* account each fully missed period */
+	}
+	
+	nextcount = cp0_count_read() + cp0_compare_value - drift;	/* keep ticks phase-aligned */
+	cp0_compare_write(nextcount);
+	
+	/*
+	 * We are holding a lock which prevents preemption.
+	 * Release the lock, call clock() and reacquire the lock again.
+	 */
+	irq_spinlock_unlock(&irq->lock, false);
+	clock();
+	irq_spinlock_lock(&irq->lock, false);
+	
+	if (virtual_timer_fnc != NULL)
+		virtual_timer_fnc();	/* optional per-tick hook */
+}
+
+static irq_ownership_t dorder_claim(irq_t *irq)
+{
+	return IRQ_ACCEPT;	/* always ours: the dorder line has a single source */
+}
+
+static void dorder_irq_handler(irq_t *irq)
+{
+	dorder_ipi_ack(1 << dorder_cpuid());	/* acknowledge the IPI addressed to this CPU */
+}
+
+/* Initialize basic tables for exception dispatching */
+void interrupt_init(void)
+{
+	irq_init(IRQ_COUNT, IRQ_COUNT);
+	
+	irq_initialize(&timer_irq);
+	timer_irq.devno = device_assign_devno();
+	timer_irq.inr = TIMER_IRQ;
+	timer_irq.claim = timer_claim;
+	timer_irq.handler = timer_irq_handler;
+	irq_register(&timer_irq);
+	
+	timer_start();
+	cp0_unmask_int(TIMER_IRQ);	/* unmask only after the first compare value is programmed */
+	
+	irq_initialize(&dorder_irq);
+	dorder_irq.devno = device_assign_devno();
+	dorder_irq.inr = DORDER_IRQ;
+	dorder_irq.claim = dorder_claim;
+	dorder_irq.handler = dorder_irq_handler;
+	irq_register(&dorder_irq);
+	
+	cp0_unmask_int(DORDER_IRQ);
+}
+
+/** @}
+ */
Index: kernel/arch/mips64/src/mips64.c
===================================================================
--- kernel/arch/mips64/src/mips64.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
+++ kernel/arch/mips64/src/mips64.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2003-2004 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup mips64
+ * @{
+ */
+/** @file
+ */
+
+#include <arch.h>
+#include <arch/cp0.h>
+#include <arch/exception.h>
+#include <arch/debug.h>
+#include <mm/as.h>
+#include <userspace.h>
+#include <memstr.h>
+#include <proc/thread.h>
+#include <proc/uarg.h>
+#include <print.h>
+#include <console/console.h>
+#include <syscall/syscall.h>
+#include <sysinfo/sysinfo.h>
+#include <arch/interrupt.h>
+#include <interrupt.h>
+#include <console/chardev.h>
+#include <arch/barrier.h>
+#include <arch/debugger.h>
+#include <genarch/fb/fb.h>
+#include <genarch/fb/visuals.h>
+#include <genarch/drivers/dsrln/dsrlnin.h>
+#include <genarch/drivers/dsrln/dsrlnout.h>
+#include <genarch/srln/srln.h>
+#include <macros.h>
+#include <config.h>
+#include <str.h>
+#include <arch/drivers/msim.h>
+#include <arch/asm/regname.h>
+
+/* Size of the code jumping to the exception handler code
+ * - J+NOP
+ */
+#define EXCEPTION_JUMP_SIZE  8
+
+#define TLB_EXC    ((char *) 0xffffffff80000000)
+#define NORM_EXC   ((char *) 0xffffffff80000180)
+#define CACHE_EXC  ((char *) 0xffffffff80000100)
+
+
+/* For unknown reasons, the linker places this variable 64K away
+ * when it is not in the .text section. Investigate.
+ */
+
+/* Stack pointer saved when entering user mode */
+uintptr_t supervisor_sp __attribute__ ((section (".text")));	/* deliberately placed in .text -- see comment above */
+
+size_t cpu_count = 0;	/* CPUs counted from the bootinfo cpumap */
+
+/** Performs mips64-specific initialization before main_bsp() is called. */
+void arch_pre_main(void *entry __attribute__((unused)), bootinfo_t *bootinfo)
+{
+	init.cnt = min3(bootinfo->cnt, TASKMAP_MAX_RECORDS, CONFIG_INIT_TASKS);
+	
+	size_t i;
+	for (i = 0; i < init.cnt; i++) {	/* copy boot task records into kernel config */
+		init.tasks[i].addr = (uintptr_t) bootinfo->tasks[i].addr;
+		init.tasks[i].size = bootinfo->tasks[i].size;
+		str_cpy(init.tasks[i].name, CONFIG_TASK_NAME_BUFLEN,
+		    bootinfo->tasks[i].name);
+	}
+	
+	for (i = 0; i < CPUMAP_MAX_RECORDS; i++) {	/* count CPUs flagged in the boot cpumap */
+		if ((bootinfo->cpumap & (1UL << i)) != 0)	/* FIX: 1UL -- plain "1 << i" is a 32-bit shift, UB for i > 31 */
+			cpu_count++;
+	}
+}
+
+void arch_pre_mm_init(void)
+{
+	/* It is not assumed by default */
+	interrupts_disable();
+	
+	/* Initialize dispatch table */
+	exception_init();
+
+	/* Copy the exception vectors to the right places */
+	memcpy(TLB_EXC, (char *) tlb_refill_entry, EXCEPTION_JUMP_SIZE);
+	smc_coherence_block(TLB_EXC, EXCEPTION_JUMP_SIZE);	/* keep I-cache coherent with the patched vector */
+	memcpy(NORM_EXC, (char *) exception_entry, EXCEPTION_JUMP_SIZE);
+	smc_coherence_block(NORM_EXC, EXCEPTION_JUMP_SIZE);
+	memcpy(CACHE_EXC, (char *) cache_error_entry, EXCEPTION_JUMP_SIZE);
+	smc_coherence_block(CACHE_EXC, EXCEPTION_JUMP_SIZE);
+	
+	/*
+	 * Switch to BEV normal level so that exception vectors point to the
+	 * kernel. Clear the error level.
+	 */
+	cp0_status_write(cp0_status_read() &
+	    ~(cp0_status_bev_bootstrap_bit | cp0_status_erl_error_bit));
+	
+	/*
+	 * Mask all interrupts
+	 */
+	cp0_mask_all_int();
+	
+	debugger_init();
+}
+
+void arch_post_mm_init(void)
+{
+	interrupt_init();
+	
+#ifdef CONFIG_MIPS_PRN
+	outdev_t *dsrlndev = dsrlnout_init((ioport8_t *) MSIM_KBD_ADDRESS);	/* NOTE(review): output device at the KBD address -- confirm msim shares the port */
+	if (dsrlndev)
+		stdout_wire(dsrlndev);
+#endif
+}
+
+void arch_post_cpu_init(void)	/* nothing to do on this port */
+{
+}
+
+void arch_pre_smp_init(void)	/* nothing to do on this port */
+{
+}
+
+void arch_post_smp_init(void)
+{
+	static const char *platform;	/* static: sysinfo keeps a pointer to this data */
+	
+	/* Set platform name. */
+#ifdef MACHINE_msim
+	platform = "msim";
+#endif
+	sysinfo_set_item_data("platform", NULL, (void *) platform,
+	    str_size(platform));
+	
+#ifdef CONFIG_MIPS_KBD
+	/*
+	 * Initialize the msim/GXemul keyboard port, then initialize
+	 * the serial line module and connect it to the keyboard.
+	 * Finally, enable keyboard interrupts.
+	 */
+	dsrlnin_instance_t *dsrlnin_instance
+	    = dsrlnin_init((dsrlnin_t *) MSIM_KBD_ADDRESS, MSIM_KBD_IRQ);
+	if (dsrlnin_instance) {
+		srln_instance_t *srln_instance = srln_init();
+		if (srln_instance) {
+			indev_t *sink = stdin_wire();
+			indev_t *srln = srln_wire(srln_instance, sink);
+			dsrlnin_wire(dsrlnin_instance, srln);
+			cp0_unmask_int(MSIM_KBD_IRQ);
+		}
+	}
+	
+	/*
+	 * This is the necessary evil until the userspace driver is entirely
+	 * self-sufficient.
+	 */
+	sysinfo_set_item_val("kbd", NULL, true);
+	sysinfo_set_item_val("kbd.inr", NULL, MSIM_KBD_IRQ);
+	sysinfo_set_item_val("kbd.address.virtual", NULL, MSIM_KBD_ADDRESS);
+#endif
+}
+
+void calibrate_delay_loop(void)	/* no-op: asm_delay_loop is a stub on this port */
+{
+}
+
+void userspace(uspace_arg_t *kernel_uarg)
+{
+	/* EXL = 1, UM = 1, IE = 1 */
+	cp0_status_write(cp0_status_read() | (cp0_status_exl_exception_bit |
+	    cp0_status_um_bit | cp0_status_ie_enabled_bit));
+	cp0_epc_write((uintptr_t) kernel_uarg->uspace_entry);	/* eret target = userspace entry point */
+	userspace_asm(((uintptr_t) kernel_uarg->uspace_stack + STACK_SIZE),
+	    (uintptr_t) kernel_uarg->uspace_uarg,
+	    (uintptr_t) kernel_uarg->uspace_entry);	/* presumably never returns -- TODO confirm */
+	
+	while (1);	/* unreachable safety net */
+}
+
+/** Perform mips64 specific tasks needed before the new task is run. */
+void before_task_runs_arch(void)	/* nothing to do on this port */
+{
+}
+
+/** Perform mips64 specific tasks needed before the new thread is scheduled. */
+void before_thread_runs_arch(void)
+{
+	supervisor_sp =
+	    (uintptr_t) &THREAD->kstack[STACK_SIZE - SP_DELTA];	/* kernel stack top used on entry from user mode */
+}
+
+void after_thread_ran_arch(void)	/* nothing to do on this port */
+{
+}
+
+/** Set the thread-local-storage pointer.
+ *
+ * The TLS pointer is currently kept in K1; it could be
+ * stored separately in the future.
+ */
+sysarg_t sys_tls_set(sysarg_t addr)
+{
+	return 0;	/* no-op: addr is ignored here (see comment above) */
+}
+
+void arch_reboot(void)
+{
+	___halt();	/* presumably stops the simulated machine; a real reboot is not implemented */
+	while (1);	/* never returns */
+}
+
+/** Construct function pointer
+ *
+ * @param fptr   function pointer structure
+ * @param addr   function address
+ * @param caller calling function address
+ *
+ * @return address of the function pointer
+ *
+ */
+void *arch_construct_function(fncptr_t *fptr, void *addr, void *caller)
+{
+	return addr;	/* on mips64 a function pointer is simply the code address */
+}
+
+void irq_initialize_arch(irq_t *irq)
+{
+	(void) irq;	/* no arch-specific IRQ state on this port */
+}
+
+/** @}
+ */
Index: kernel/arch/mips64/src/mm/as.c
===================================================================
--- kernel/arch/mips64/src/mm/as.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
+++ kernel/arch/mips64/src/mm/as.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup mips64mm
+ * @{
+ */
+/** @file
+ */
+
+#include <arch/mm/as.h>
+#include <genarch/mm/as_ht.h>
+#include <genarch/mm/page_ht.h>
+#include <genarch/mm/asid_fifo.h>
+#include <arch/mm/tlb.h>
+#include <mm/tlb.h>
+#include <mm/as.h>
+#include <arch/cp0.h>
+
+/** Architecture dependent address space init.
+ *
+ * Select hash-table based page translation and FIFO ASID allocation.
+ */
+void as_arch_init(void)
+{
+	as_operations = &as_ht_operations;
+	asid_fifo_init();
+}
+
+/** Install address space.
+ *
+ * Install ASID.
+ *
+ * @param as Address space structure.
+ *
+ */
+void as_install_arch(as_t *as)
+{
+	/*
+	 * Install ASID: keep the remaining EntryHi fields intact
+	 * and replace only the ASID of the new address space.
+	 */
+	entry_hi_t hi;
+	hi.value = cp0_entry_hi_read();
+	hi.asid = as->asid;
+	cp0_entry_hi_write(hi.value);
+}
+
+/** @}
+ */
Index: kernel/arch/mips64/src/mm/frame.c
===================================================================
--- kernel/arch/mips64/src/mm/frame.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
+++ kernel/arch/mips64/src/mm/frame.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2005 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup mips64mm
+ * @{
+ */
+/** @file
+ */
+
+#include <macros.h>
+#include <arch/mm/frame.h>
+#include <arch/mm/tlb.h>
+#include <interrupt.h>
+#include <mm/frame.h>
+#include <mm/asid.h>
+#include <config.h>
+#include <arch/drivers/msim.h>
+#include <print.h>
+
+#define ZERO_PAGE_MASK    TLB_PAGE_MASK_256K
+#define ZERO_FRAMES       2048
+#define ZERO_PAGE_WIDTH   18  /* 256K */
+#define ZERO_PAGE_SIZE    (1 << ZERO_PAGE_WIDTH)
+#define ZERO_PAGE_ASID    ASID_INVALID
+#define ZERO_PAGE_TLBI    0
+#define ZERO_PAGE_ADDR    0
+#define ZERO_PAGE_OFFSET  (ZERO_PAGE_SIZE / sizeof(uint32_t) - 1)
+#define ZERO_PAGE_VALUE   (((volatile uint32_t *) ZERO_PAGE_ADDR)[ZERO_PAGE_OFFSET])
+
+#define ZERO_PAGE_VALUE_KSEG1(frame) \
+	(((volatile uint32_t *) (0xa0000000 + (frame << ZERO_PAGE_WIDTH)))[ZERO_PAGE_OFFSET])
+
+#define MAX_REGIONS  32
+
+typedef struct {
+	pfn_t start;
+	pfn_t count;
+} phys_region_t;
+
+static size_t phys_regions_count = 0;
+static phys_region_t phys_regions[MAX_REGIONS];
+
+/** Check whether frame is available
+ *
+ * Returns true if given frame is generally available for use.
+ * Returns false if given frame is used for physical memory
+ * mapped devices and cannot be used.
+ *
+ * @param frame Frame number in ZERO_PAGE_SIZE (256 KB) units.
+ *
+ * @return True if the frame may be used as ordinary memory.
+ *
+ */
+static bool frame_available(pfn_t frame)
+{
+#ifdef MACHINE_msim
+	/* MSIM device (dprinter) */
+	if (frame == (KA2PA(MSIM_VIDEORAM) >> ZERO_PAGE_WIDTH))
+		return false;
+	
+	/* MSIM device (dkeyboard) */
+	if (frame == (KA2PA(MSIM_KBD_ADDRESS) >> ZERO_PAGE_WIDTH))
+		return false;
+#endif
+	
+	return true;
+}
+
+/** Check whether frame is safe to write
+ *
+ * Returns true if given frame is safe for read/write test.
+ * Returns false if given frame should not be touched
+ * (it holds kernel code/data, kernel stack or init task images).
+ *
+ * @param frame Frame number in ZERO_PAGE_SIZE (256 KB) units.
+ *
+ * @return True if the frame can be probed destructively.
+ *
+ */
+static bool frame_safe(pfn_t frame)
+{
+	/* Kernel structures */
+	if ((frame << ZERO_PAGE_WIDTH) < KA2PA(config.base))
+		return false;
+	
+	/* Kernel */
+	if (overlaps(frame << ZERO_PAGE_WIDTH, ZERO_PAGE_SIZE,
+	    KA2PA(config.base), config.kernel_size))
+		return false;
+	
+	/* Kernel stack */
+	if (overlaps(frame << ZERO_PAGE_WIDTH, ZERO_PAGE_SIZE,
+	    KA2PA(config.stack_base), config.stack_size))
+		return false;
+	
+	/* Init tasks */
+	bool safe = true;
+	size_t i;
+	for (i = 0; i < init.cnt; i++)
+		if (overlaps(frame << ZERO_PAGE_WIDTH, ZERO_PAGE_SIZE,
+		    KA2PA(init.tasks[i].addr), init.tasks[i].size)) {
+			safe = false;
+			break;
+		}
+	
+	return safe;
+}
+
+/** Create a zone from a contiguous run of available frames.
+ *
+ * @param start_frame First frame of the run (ZERO_PAGE_SIZE units).
+ * @param end_frame   Frame one past the last frame of the run.
+ *
+ */
+static void frame_add_region(pfn_t start_frame, pfn_t end_frame)
+{
+	if (end_frame > start_frame) {
+		/* Convert 256 KB probe frames to native frames (ADDR2PFN units) */
+		pfn_t first = ADDR2PFN(start_frame << ZERO_PAGE_WIDTH);
+		pfn_t count = ADDR2PFN((end_frame - start_frame) << ZERO_PAGE_WIDTH);
+		
+		/* Interrupt vector frame is blacklisted */
+		pfn_t conf_frame;
+		if (first == 0)
+			conf_frame = 1;
+		else
+			conf_frame = first;
+		
+		zone_create(first, count, conf_frame, 0);
+		
+		/* Remember the region so physmem_print() can list it. */
+		if (phys_regions_count < MAX_REGIONS) {
+			phys_regions[phys_regions_count].start = first;
+			phys_regions[phys_regions_count].count = count;
+			phys_regions_count++;
+		}
+	}
+}
+
+/** Create memory zones
+ *
+ * Walk through available 256 KB chunks of physical
+ * memory and create zones.
+ *
+ * Note: It is assumed that the TLB is not yet being
+ * used in any way, thus there is no interference.
+ *
+ */
+void frame_arch_init(void)
+{
+	ipl_t ipl = interrupts_disable();
+	
+	/* Clear and initialize TLB */
+	cp0_pagemask_write(ZERO_PAGE_MASK);
+	cp0_entry_lo0_write(0);
+	cp0_entry_lo1_write(0);
+	cp0_entry_hi_write(0);
+	
+	for (size_t i = 0; i < TLB_ENTRY_COUNT; i++) {
+		cp0_index_write(i);
+		tlbwi();
+	}
+	
+	pfn_t start_frame = 0;
+	pfn_t frame;
+	bool avail = true;
+	
+	/* Walk through all 256 KB (ZERO_PAGE_SIZE) frames */
+	for (frame = 0; frame < ZERO_FRAMES; frame++) {
+		if (!frame_available(frame))
+			avail = false;
+		else {
+			if (frame_safe(frame)) {
+				/*
+				 * Map the probed frame at virtual address 0
+				 * (ZERO_PAGE_ADDR) via TLB entry 0 and do a
+				 * destructive read/write test on its last word.
+				 *
+				 * NOTE(review): the address passed below is
+				 * pre-shifted by (ZERO_PAGE_WIDTH - 12) before
+				 * tlb_prepare_entry_lo() applies ADDR2PFN();
+				 * verify the units against FRAME_WIDTH.
+				 */
+				entry_lo_t lo0;
+				entry_lo_t lo1;
+				entry_hi_t hi;
+				tlb_prepare_entry_lo(&lo0, false, true, true, false, frame << (ZERO_PAGE_WIDTH - 12));
+				tlb_prepare_entry_lo(&lo1, false, false, false, false, 0);
+				tlb_prepare_entry_hi(&hi, ZERO_PAGE_ASID, ZERO_PAGE_ADDR);
+				
+				cp0_pagemask_write(ZERO_PAGE_MASK);
+				cp0_entry_lo0_write(lo0.value);
+				cp0_entry_lo1_write(lo1.value);
+				cp0_entry_hi_write(hi.value);
+				cp0_index_write(ZERO_PAGE_TLBI);
+				tlbwi();
+				
+				/* Frame must read back both 0 and a test pattern. */
+				ZERO_PAGE_VALUE = 0;
+				if (ZERO_PAGE_VALUE != 0)
+					avail = false;
+				else {
+					ZERO_PAGE_VALUE = 0xdeadbeef;
+					if (ZERO_PAGE_VALUE != 0xdeadbeef)
+						avail = false;
+				}
+			}
+		}
+		
+		/* End the current region just before a bad/unavailable frame. */
+		if (!avail) {
+			frame_add_region(start_frame, frame);
+			start_frame = frame + 1;
+			avail = true;
+		}
+	}
+	
+	frame_add_region(start_frame, frame);
+	
+	/* Blacklist interrupt vector frame */
+	frame_mark_unavailable(0, 1);
+	
+	/* Cleanup */
+	cp0_pagemask_write(ZERO_PAGE_MASK);
+	cp0_entry_lo0_write(0);
+	cp0_entry_lo1_write(0);
+	cp0_entry_hi_write(0);
+	cp0_index_write(ZERO_PAGE_TLBI);
+	tlbwi();
+	
+	interrupts_restore(ipl);
+}
+
+/** Print detected physical memory regions (base and size in bytes). */
+void physmem_print(void)
+{
+	printf("[base            ] [size            ]\n");
+	
+	for (size_t i = 0; i < phys_regions_count; i++) {
+		printf("%#018lx %18lu\n", PFN2ADDR(phys_regions[i].start),
+		    PFN2ADDR(phys_regions[i].count));
+	}
+}
+
+/** @}
+ */
Index: kernel/arch/mips64/src/mm/page.c
===================================================================
--- kernel/arch/mips64/src/mm/page.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
+++ kernel/arch/mips64/src/mm/page.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2003-2004 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup mips64mm
+ * @{
+ */
+/** @file
+ */
+
+#include <arch/mm/page.h>
+#include <genarch/mm/page_ht.h>
+#include <mm/page.h>
+#include <mm/frame.h>
+
+void page_arch_init(void)
+{
+	page_mapping_operations = &ht_mapping_operations;
+}
+
+/** Map device into kernel space
+ * - on mips, all devices are already mapped into kernel space,
+ *   translate the physical address to uncached area
+ */
+uintptr_t hw_map(uintptr_t physaddr, size_t size)
+{
+	return physaddr + 0xffffffffa0000000;
+}
+
+/** @}
+ */
Index: kernel/arch/mips64/src/mm/tlb.c
===================================================================
--- kernel/arch/mips64/src/mm/tlb.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
+++ kernel/arch/mips64/src/mm/tlb.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
@@ -0,0 +1,573 @@
+/*
+ * Copyright (c) 2003-2004 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup mips64mm
+ * @{
+ */
+/** @file
+ */
+
+#include <arch/mm/tlb.h>
+#include <mm/asid.h>
+#include <mm/tlb.h>
+#include <mm/page.h>
+#include <mm/as.h>
+#include <arch/cp0.h>
+#include <panic.h>
+#include <arch.h>
+#include <synch/mutex.h>
+#include <print.h>
+#include <debug.h>
+#include <align.h>
+#include <interrupt.h>
+#include <symtab.h>
+
+/** Initialize TLB.
+ *
+ * Invalidate all entries and mark wired entries.
+ *
+ */
+void tlb_arch_init(void)
+{
+	cp0_pagemask_write(TLB_PAGE_MASK_16K);
+	cp0_entry_hi_write(0);
+	cp0_entry_lo0_write(0);
+	cp0_entry_lo1_write(0);
+	
+	/* Clear and initialize TLB. */
+	
+	for (unsigned int i = 0; i < TLB_ENTRY_COUNT; i++) {
+		cp0_index_write(i);
+		tlbwi();
+	}
+	
+	/*
+	 * The kernel is going to make use of some wired
+	 * entries (e.g. mapping kernel stacks in kseg3).
+	 */
+	cp0_wired_write(TLB_WIRED);
+}
+
+/** Try to find PTE for faulting address.
+ *
+ * @param badvaddr Faulting virtual address.
+ * @param access   Access mode that caused the fault.
+ * @param istate   Pointer to interrupted state.
+ * @param pfrc     Pointer to variable where as_page_fault()
+ *                 return code will be stored; written only
+ *                 when NULL is returned via the page-fault path.
+ *
+ * @return PTE on success, NULL otherwise.
+ *
+ */
+static pte_t *find_mapping_and_check(uintptr_t badvaddr, int access,
+    istate_t *istate, int *pfrc)
+{
+	entry_hi_t hi;
+	hi.value = cp0_entry_hi_read();
+	
+	/*
+	 * Handler cannot succeed if the ASIDs don't match.
+	 */
+	if (hi.asid != AS->asid) {
+		printf("EntryHi.asid=%d, AS->asid=%d\n", hi.asid, AS->asid);
+		return NULL;
+	}
+	
+	/*
+	 * Check if the mapping exists in page tables.
+	 */
+	pte_t *pte = page_mapping_find(AS, badvaddr, true);
+	if ((pte) && (pte->p) && ((pte->w) || (access != PF_ACCESS_WRITE))) {
+		/*
+		 * Mapping found in page tables.
+		 * Immediately succeed.
+		 */
+		return pte;
+	} else {
+		int rc;
+		
+		/*
+		 * Mapping not found in page tables.
+		 * Resort to higher-level page fault handler.
+		 */
+		switch (rc = as_page_fault(badvaddr, access, istate)) {
+		case AS_PF_OK:
+			/*
+			 * The higher-level page fault handler succeeded,
+			 * The mapping ought to be in place.
+			 */
+			pte = page_mapping_find(AS, badvaddr, true);
+			ASSERT(pte);
+			ASSERT(pte->p);
+			ASSERT((pte->w) || (access != PF_ACCESS_WRITE));
+			return pte;
+		case AS_PF_DEFER:
+			*pfrc = AS_PF_DEFER;
+			return NULL;
+		case AS_PF_FAULT:
+			*pfrc = AS_PF_FAULT;
+			return NULL;
+		default:
+			panic("Unexpected return code (%d).", rc);
+		}
+	}
+}
+
+/** Prepare an EntryLo register value.
+ *
+ * @param lo   Storage for the prepared value.
+ * @param g    Global bit (mapping valid for all ASIDs).
+ * @param v    Valid bit.
+ * @param d    Dirty bit (write-enable).
+ * @param c    True for cacheable mapping, false for uncached.
+ * @param addr Physical address of the mapped frame.
+ *
+ */
+void tlb_prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d,
+    bool c, uintptr_t addr)
+{
+	lo->value = 0;
+	lo->g = g;
+	lo->v = v;
+	lo->d = d;
+	lo->c = c ? PAGE_CACHEABLE_EXC_WRITE : PAGE_UNCACHED;
+	lo->pfn = ADDR2PFN(addr);
+}
+
+/** Prepare an EntryHi register value.
+ *
+ * Each TLB entry maps a pair of adjacent pages, hence the
+ * alignment of the virtual address to 2 * PAGE_SIZE (VPN2).
+ *
+ * @param hi   Storage for the prepared value.
+ * @param asid Address space identifier.
+ * @param addr Virtual address covered by the entry.
+ *
+ */
+void tlb_prepare_entry_hi(entry_hi_t *hi, asid_t asid, uintptr_t addr)
+{
+	hi->value = ALIGN_DOWN(addr, PAGE_SIZE * 2);
+	hi->asid = asid;
+}
+
+/** Fatal-path handler for an unresolvable TLB Refill Exception. */
+static void tlb_refill_fail(istate_t *istate)
+{
+	uintptr_t va = cp0_badvaddr_read();
+	
+	fault_if_from_uspace(istate, "TLB Refill Exception on %p.",
+	    (void *) va);
+	panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, "TLB Refill Exception.");
+}
+
+/** Fatal-path handler for an unresolvable TLB Invalid Exception. */
+static void tlb_invalid_fail(istate_t *istate)
+{
+	uintptr_t va = cp0_badvaddr_read();
+	
+	fault_if_from_uspace(istate, "TLB Invalid Exception on %p.",
+	    (void *) va);
+	panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, "TLB Invalid Exception.");
+}
+
+/** Fatal-path handler for an unresolvable TLB Modified Exception. */
+static void tlb_modified_fail(istate_t *istate)
+{
+	uintptr_t va = cp0_badvaddr_read();
+	
+	fault_if_from_uspace(istate, "TLB Modified Exception on %p.",
+	    (void *) va);
+	panic_memtrap(istate, PF_ACCESS_WRITE, va, "TLB Modified Exception.");
+}
+
+/** Process TLB Refill Exception.
+ *
+ * Look the faulting address up in the page tables (possibly
+ * invoking the higher-level page fault handler) and insert a
+ * new entry into the TLB via tlbwr (random slot).
+ *
+ * @param istate Interrupted register context.
+ *
+ */
+void tlb_refill(istate_t *istate)
+{
+	uintptr_t badvaddr = cp0_badvaddr_read();
+	
+	mutex_lock(&AS->lock);
+	asid_t asid = AS->asid;
+	mutex_unlock(&AS->lock);
+	
+	int pfrc;
+	pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ,
+	    istate, &pfrc);
+	if (!pte) {
+		switch (pfrc) {
+		case AS_PF_FAULT:
+			goto fail;
+			break;
+		case AS_PF_DEFER:
+			/*
+			 * The page fault came during copy_from_uspace()
+			 * or copy_to_uspace().
+			 */
+			return;
+		default:
+			panic("Unexpected pfrc (%d).", pfrc);
+		}
+	}
+	
+	/*
+	 * Record access to PTE.
+	 */
+	pte->a = 1;
+	
+	entry_lo_t lo;
+	entry_hi_t hi;
+	
+	tlb_prepare_entry_hi(&hi, asid, badvaddr);
+	tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->c,
+	    pte->frame);
+	
+	/*
+	 * New entry is to be inserted into TLB
+	 */
+	cp0_entry_hi_write(hi.value);
+	
+	/*
+	 * A TLB entry maps a pair of pages: even page goes to
+	 * EntryLo0, odd page to EntryLo1; the other half is invalid.
+	 */
+	if ((badvaddr / PAGE_SIZE) % 2 == 0) {
+		cp0_entry_lo0_write(lo.value);
+		cp0_entry_lo1_write(0);
+	} else {
+		cp0_entry_lo0_write(0);
+		cp0_entry_lo1_write(lo.value);
+	}
+	
+	cp0_pagemask_write(TLB_PAGE_MASK_16K);
+	tlbwr();
+	
+	return;
+	
+fail:
+	tlb_refill_fail(istate);
+}
+
+/** Process TLB Invalid Exception.
+ *
+ * The entry exists in the TLB but its valid bit is clear.
+ * Re-validate the mapping from the page tables and update the
+ * existing TLB slot in place via tlbwi.
+ *
+ * @param istate Interrupted register context.
+ *
+ */
+void tlb_invalid(istate_t *istate)
+{
+	uintptr_t badvaddr = cp0_badvaddr_read();
+	
+	/*
+	 * Locate the faulting entry in TLB.
+	 */
+	entry_hi_t hi;
+	hi.value = cp0_entry_hi_read();
+	
+	tlb_prepare_entry_hi(&hi, hi.asid, badvaddr);
+	cp0_entry_hi_write(hi.value);
+	tlbp();
+	
+	tlb_index_t index;
+	index.value = cp0_index_read();
+	
+	/*
+	 * Fail if the entry is not in TLB.
+	 */
+	if (index.p) {
+		printf("TLB entry not found.\n");
+		goto fail;
+	}
+	
+	int pfrc;
+	pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ,
+	    istate, &pfrc);
+	if (!pte) {
+		switch (pfrc) {
+		case AS_PF_FAULT:
+			goto fail;
+			break;
+		case AS_PF_DEFER:
+			/*
+			 * The page fault came during copy_from_uspace()
+			 * or copy_to_uspace().
+			 */
+			return;
+		default:
+			panic("Unexpected pfrc (%d).", pfrc);
+		}
+	}
+	
+	/*
+	 * Read the faulting TLB entry.
+	 */
+	tlbr();
+	
+	/*
+	 * Record access to PTE.
+	 */
+	pte->a = 1;
+	
+	entry_lo_t lo;
+	tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->c,
+	    pte->frame);
+	
+	/*
+	 * The entry is to be updated in TLB.
+	 * Even page belongs to EntryLo0, odd page to EntryLo1.
+	 */
+	if ((badvaddr / PAGE_SIZE) % 2 == 0)
+		cp0_entry_lo0_write(lo.value);
+	else
+		cp0_entry_lo1_write(lo.value);
+	
+	cp0_pagemask_write(TLB_PAGE_MASK_16K);
+	tlbwi();
+	
+	return;
+	
+fail:
+	tlb_invalid_fail(istate);
+}
+
+/** Process TLB Modified Exception.
+ *
+ * A write hit a TLB entry whose dirty bit is clear. If the page
+ * is writable, mark the PTE dirty and update the TLB slot in
+ * place via tlbwi.
+ *
+ * @param istate Interrupted register context.
+ *
+ */
+void tlb_modified(istate_t *istate)
+{
+	uintptr_t badvaddr = cp0_badvaddr_read();
+	
+	/*
+	 * Locate the faulting entry in TLB.
+	 */
+	entry_hi_t hi;
+	hi.value = cp0_entry_hi_read();
+	
+	tlb_prepare_entry_hi(&hi, hi.asid, badvaddr);
+	cp0_entry_hi_write(hi.value);
+	tlbp();
+	
+	tlb_index_t index;
+	index.value = cp0_index_read();
+	
+	/*
+	 * Fail if the entry is not in TLB.
+	 */
+	if (index.p) {
+		printf("TLB entry not found.\n");
+		goto fail;
+	}
+	
+	int pfrc;
+	pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_WRITE,
+	    istate, &pfrc);
+	if (!pte) {
+		switch (pfrc) {
+		case AS_PF_FAULT:
+			goto fail;
+			break;
+		case AS_PF_DEFER:
+			/*
+			 * The page fault came during copy_from_uspace()
+			 * or copy_to_uspace().
+			 */
+			return;
+		default:
+			panic("Unexpected pfrc (%d).", pfrc);
+		}
+	}
+	
+	/*
+	 * Read the faulting TLB entry.
+	 */
+	tlbr();
+	
+	/*
+	 * Record access and write to PTE.
+	 */
+	pte->a = 1;
+	pte->d = 1;
+	
+	/* pte->w (writability) gates the hardware dirty bit here. */
+	entry_lo_t lo;
+	tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->w, pte->c,
+	    pte->frame);
+	
+	/*
+	 * The entry is to be updated in TLB.
+	 * Even page belongs to EntryLo0, odd page to EntryLo1.
+	 */
+	if ((badvaddr / PAGE_SIZE) % 2 == 0)
+		cp0_entry_lo0_write(lo.value);
+	else
+		cp0_entry_lo1_write(lo.value);
+	
+	cp0_pagemask_write(TLB_PAGE_MASK_16K);
+	tlbwi();
+	
+	return;
+	
+fail:
+	tlb_modified_fail(istate);
+}
+
+/** Print contents of TLB.
+ *
+ * Reads every TLB entry via tlbr; EntryHi is saved and
+ * restored because tlbr clobbers it.
+ */
+void tlb_print(void)
+{
+	entry_hi_t hi_save;
+	hi_save.value = cp0_entry_hi_read();
+	
+	printf("[nr] [asid] [vpn2] [mask] [gvdc] [pfn ]\n");
+	
+	for (unsigned int i = 0; i < TLB_ENTRY_COUNT; i++) {
+		cp0_index_write(i);
+		tlbr();
+		
+		page_mask_t mask;
+		mask.value = cp0_pagemask_read();
+		
+		entry_hi_t hi;
+		hi.value = cp0_entry_hi_read();
+		
+		entry_lo_t lo0;
+		lo0.value = cp0_entry_lo0_read();
+		
+		entry_lo_t lo1;
+		lo1.value = cp0_entry_lo1_read();
+		
+		printf("%-4u %-6u %#6x %#6x  %1u%1u%1u%1u  %#6x\n",
+		    i, hi.asid, hi.vpn2, mask.mask,
+		    lo0.g, lo0.v, lo0.d, lo0.c, lo0.pfn);
+		printf("                           %1u%1u%1u%1u  %#6x\n",
+		    lo1.g, lo1.v, lo1.d, lo1.c, lo1.pfn);
+	}
+	
+	cp0_entry_hi_write(hi_save.value);
+}
+
+/** Invalidate all not wired TLB entries.
+ *
+ * Entries below TLB_WIRED are skipped; the others have
+ * their valid bits cleared in both EntryLo halves.
+ */
+void tlb_invalidate_all(void)
+{
+	entry_hi_t hi_save;
+	hi_save.value = cp0_entry_hi_read();
+	ipl_t ipl = interrupts_disable();
+	
+	for (unsigned int i = TLB_WIRED; i < TLB_ENTRY_COUNT; i++) {
+		cp0_index_write(i);
+		tlbr();
+		
+		entry_lo_t lo0;
+		lo0.value = cp0_entry_lo0_read();
+		
+		entry_lo_t lo1;
+		lo1.value = cp0_entry_lo1_read();
+		
+		lo0.v = 0;
+		lo1.v = 0;
+		
+		cp0_entry_lo0_write(lo0.value);
+		cp0_entry_lo1_write(lo1.value);
+		
+		tlbwi();
+	}
+	
+	interrupts_restore(ipl);
+	cp0_entry_hi_write(hi_save.value);
+}
+
+/** Invalidate all TLB entries belonging to specified address space.
+ *
+ * Walks every TLB entry and clears the valid bits of those
+ * whose EntryHi ASID matches.
+ *
+ * @param asid Address space identifier.
+ *
+ */
+void tlb_invalidate_asid(asid_t asid)
+{
+	ASSERT(asid != ASID_INVALID);
+	
+	entry_hi_t hi_save;
+	hi_save.value = cp0_entry_hi_read();
+	ipl_t ipl = interrupts_disable();
+	
+	for (unsigned int i = 0; i < TLB_ENTRY_COUNT; i++) {
+		cp0_index_write(i);
+		tlbr();
+		
+		entry_hi_t hi;
+		hi.value = cp0_entry_hi_read();
+		
+		if (hi.asid == asid) {
+			entry_lo_t lo0;
+			lo0.value = cp0_entry_lo0_read();
+			
+			entry_lo_t lo1;
+			lo1.value = cp0_entry_lo1_read();
+			
+			lo0.v = 0;
+			lo1.v = 0;
+			
+			cp0_entry_lo0_write(lo0.value);
+			cp0_entry_lo1_write(lo1.value);
+			
+			tlbwi();
+		}
+	}
+	
+	interrupts_restore(ipl);
+	cp0_entry_hi_write(hi_save.value);
+}
+
+/** Invalidate TLB entries for specified page range belonging to specified
+ * address space.
+ *
+ * The range is probed in steps of two pages because one TLB
+ * entry covers an even/odd page pair (VPN2).
+ *
+ * @param asid Address space identifier.
+ * @param page First page whose TLB entry is to be invalidated.
+ * @param cnt  Number of entries to invalidate.
+ *
+ */
+void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
+{
+	if (asid == ASID_INVALID)
+		return;
+	
+	entry_hi_t hi_save;
+	hi_save.value = cp0_entry_hi_read();
+	ipl_t ipl = interrupts_disable();
+	
+	/* cnt + 1 rounds odd page counts up to cover the last pair. */
+	for (unsigned int i = 0; i < cnt + 1; i += 2) {
+		entry_hi_t hi;
+		hi.value = 0;
+		tlb_prepare_entry_hi(&hi, asid, page + i * PAGE_SIZE);
+		cp0_entry_hi_write(hi.value);
+		
+		tlbp();
+		
+		tlb_index_t index;
+		index.value = cp0_index_read();
+		
+		if (!index.p) {
+			/*
+			 * Entry was found, index register contains valid
+			 * index.
+			 */
+			tlbr();
+			
+			entry_lo_t lo0;
+			lo0.value = cp0_entry_lo0_read();
+			
+			entry_lo_t lo1;
+			lo1.value = cp0_entry_lo1_read();
+			
+			lo0.v = 0;
+			lo1.v = 0;
+			
+			cp0_entry_lo0_write(lo0.value);
+			cp0_entry_lo1_write(lo1.value);
+			
+			tlbwi();
+		}
+	}
+	
+	interrupts_restore(ipl);
+	cp0_entry_hi_write(hi_save.value);
+}
+
+/** @}
+ */
Index: kernel/arch/mips64/src/smp/dorder.c
===================================================================
--- kernel/arch/mips64/src/smp/dorder.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
+++ kernel/arch/mips64/src/smp/dorder.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2007 Martin Decky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup mips64
+ * @{
+ */
+/** @file
+ */
+
+#include <typedefs.h>
+#include <smp/ipi.h>
+#include <arch/smp/dorder.h>
+
+#define MSIM_DORDER_ADDRESS  0xffffffffb0000100
+
+#ifdef CONFIG_SMP
+
+/** Broadcast an IPI via the MSIM dorder device.
+ *
+ * Writes the CPU mask 0x7fffffff to the dorder register; the
+ * @a ipi number itself is not conveyed by this mechanism.
+ */
+void ipi_broadcast_arch(int ipi)
+{
+	*((volatile uint32_t *) MSIM_DORDER_ADDRESS) = 0x7fffffff;
+}
+
+#endif
+
+/** Read the current CPU identifier from the MSIM dorder device. */
+uint32_t dorder_cpuid(void)
+{
+	return *((volatile uint32_t *) MSIM_DORDER_ADDRESS);
+}
+
+/** Acknowledge pending IPIs by writing @a mask to the dorder ack register. */
+void dorder_ipi_ack(uint32_t mask)
+{
+	*((volatile uint32_t *) (MSIM_DORDER_ADDRESS + 4)) = mask;
+}
+
+/** @}
+ */
Index: kernel/arch/mips64/src/smp/smp.c
===================================================================
--- kernel/arch/mips64/src/smp/smp.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
+++ kernel/arch/mips64/src/smp/smp.c	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2009 Martin Decky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup mips64
+ * @{
+ */
+/** @file
+ */
+
+#include <config.h>
+#include <smp/smp.h>
+#include <arch/arch.h>
+
+#ifdef CONFIG_SMP
+
+/** Initialize SMP: publish the detected CPU count in the global config. */
+void smp_init(void)
+{
+	config.cpu_count = cpu_count;
+}
+
+/** Kernel main for application processors (no-op on mips64). */
+void kmp(void *arg __attribute__((unused)))
+{
+}
+
+#endif /* CONFIG_SMP */
+
+/** @}
+ */
Index: kernel/arch/mips64/src/start.S
===================================================================
--- kernel/arch/mips64/src/start.S	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
+++ kernel/arch/mips64/src/start.S	(revision 2429e4a21037762ab4104bb2ea7d71156b2fbf0d)
@@ -0,0 +1,385 @@
+#
+# Copyright (c) 2003-2004 Jakub Jermar
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# - Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+# - Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in the
+#   documentation and/or other materials provided with the distribution.
+# - The name of the author may not be used to endorse or promote products
+#   derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+#include <arch/asm/regname.h>
+#include <arch/mm/page.h>
+#include <arch/asm/boot.h>
+#include <arch/context_offset.h>
+#include <arch/stack.h>
+
+.text
+
+.set noat
+.set noreorder
+.set nomacro
+
+.global kernel_image_start
+.global tlb_refill_entry
+.global cache_error_entry
+.global exception_entry
+.global userspace_asm
+
+/*
+ * Which status bits are thread-local:
+ * KSU(UM), EXL, ERL, IE
+ */
+#define REG_SAVE_MASK 0x1f  /* Status bits 0..4: IE, EXL, ERL, KSU[1:0] */
+
+#define ISTATE_OFFSET_A0         0
+#define ISTATE_OFFSET_A1         8
+#define ISTATE_OFFSET_A2         16
+#define ISTATE_OFFSET_A3         24
+#define ISTATE_OFFSET_T0         32
+#define ISTATE_OFFSET_T1         40
+#define ISTATE_OFFSET_V0         48
+#define ISTATE_OFFSET_V1         56
+#define ISTATE_OFFSET_AT         64
+#define ISTATE_OFFSET_T2         72
+#define ISTATE_OFFSET_T3         80
+#define ISTATE_OFFSET_T4         88
+#define ISTATE_OFFSET_T5         96
+#define ISTATE_OFFSET_T6         104
+#define ISTATE_OFFSET_T7         112
+#define ISTATE_OFFSET_S0         120
+#define ISTATE_OFFSET_S1         128
+#define ISTATE_OFFSET_S2         136
+#define ISTATE_OFFSET_S3         144
+#define ISTATE_OFFSET_S4         152
+#define ISTATE_OFFSET_S5         160
+#define ISTATE_OFFSET_S6         168
+#define ISTATE_OFFSET_S7         176
+#define ISTATE_OFFSET_T8         184
+#define ISTATE_OFFSET_T9         192
+#define ISTATE_OFFSET_KT0        200
+#define ISTATE_OFFSET_KT1        208
+#define ISTATE_OFFSET_GP         216
+#define ISTATE_OFFSET_SP         224
+#define ISTATE_OFFSET_S8         232
+#define ISTATE_OFFSET_RA         240
+#define ISTATE_OFFSET_LO         248  /* 4-byte slot: only the low 32 bits of LO fit here */
+#define ISTATE_OFFSET_HI         252  /* 4-byte slot: only the low 32 bits of HI fit here */
+#define ISTATE_OFFSET_STATUS     256
+#define ISTATE_OFFSET_EPC        264
+#define ISTATE_OFFSET_ALIGNMENT  272  /* padding slot keeping the frame size a multiple of 16 */
+
+#define ISTATE_SOFT_SIZE         280  /* total size of the software-defined istate frame */
+
+/*
+ * The fake ABI prologue is never executed and may not be part of the
+ * procedure's body. Instead, it should be immediately preceding the procedure's
+ * body. Its only purpose is to trick the stack trace walker into thinking that
+ * the exception is more or less just a normal function call.
+ */
+.macro FAKE_ABI_PROLOGUE
+	sub $sp, ISTATE_SOFT_SIZE       /* pretend an istate-sized frame was opened */
+	sd $ra, ISTATE_OFFSET_EPC($sp)  /* pretend the EPC slot is the return address */
+.endm
+
+/*
+ * Save registers to space defined by \r
+ * We will change status: Disable ERL, EXL, UM, IE
+ * These changes will be automatically reversed in REGISTER_LOAD
+ * %sp is NOT saved as part of these registers
+ */
+.macro REGISTERS_STORE_AND_EXC_RESET r
+	sd $at, ISTATE_OFFSET_AT(\r)  /* save $at first; it is reused as scratch below */
+	sd $v0, ISTATE_OFFSET_V0(\r)
+	sd $v1, ISTATE_OFFSET_V1(\r)
+	sd $a0, ISTATE_OFFSET_A0(\r)
+	sd $a1, ISTATE_OFFSET_A1(\r)
+	sd $a2, ISTATE_OFFSET_A2(\r)
+	sd $a3, ISTATE_OFFSET_A3(\r)
+	sd $t0, ISTATE_OFFSET_T0(\r)
+	sd $t1, ISTATE_OFFSET_T1(\r)
+	sd $t2, ISTATE_OFFSET_T2(\r)
+	sd $t3, ISTATE_OFFSET_T3(\r)
+	sd $t4, ISTATE_OFFSET_T4(\r)
+	sd $t5, ISTATE_OFFSET_T5(\r)
+	sd $t6, ISTATE_OFFSET_T6(\r)
+	sd $t7, ISTATE_OFFSET_T7(\r)
+	sd $t8, ISTATE_OFFSET_T8(\r)
+	sd $t9, ISTATE_OFFSET_T9(\r)
+	sd $s0, ISTATE_OFFSET_S0(\r)
+	sd $s1, ISTATE_OFFSET_S1(\r)
+	sd $s2, ISTATE_OFFSET_S2(\r)
+	sd $s3, ISTATE_OFFSET_S3(\r)
+	sd $s4, ISTATE_OFFSET_S4(\r)
+	sd $s5, ISTATE_OFFSET_S5(\r)
+	sd $s6, ISTATE_OFFSET_S6(\r)
+	sd $s7, ISTATE_OFFSET_S7(\r)
+	sd $s8, ISTATE_OFFSET_S8(\r)
+	
+	mflo $at
+	sw $at, ISTATE_OFFSET_LO(\r)  /* NOTE(review): 32-bit store — only the low half of LO survives; confirm intended on MIPS64 */
+	mfhi $at
+	sw $at, ISTATE_OFFSET_HI(\r)  /* NOTE(review): same 32-bit truncation for HI; confirm */
+	
+	sd $gp, ISTATE_OFFSET_GP(\r)
+	sd $ra, ISTATE_OFFSET_RA(\r)
+	sd $k0, ISTATE_OFFSET_KT0(\r)
+	sd $k1, ISTATE_OFFSET_KT1(\r)
+	
+	dmfc0 $t0, $status
+	dmfc0 $t1, $epc
+	
+	/* save only KSU, EXL, ERL, IE */
+	and $t2, $t0, REG_SAVE_MASK
+	
+	/* clear KSU, EXL, ERL, IE */
+	li $t3, ~(REG_SAVE_MASK)
+	and $t0, $t0, $t3
+	
+	sd $t2, ISTATE_OFFSET_STATUS(\r)
+	sd $t1, ISTATE_OFFSET_EPC(\r)
+	dmtc0 $t0, $status  /* kernel mode, exception bits cleared, interrupts disabled */
+.endm
+
+.macro REGISTERS_LOAD r
+	/*
+	 * Restore only the per-thread bits KSU(UM), EXL, ERL, IE from the
+	 * saved status; the rest is controlled by the OS and not bound to task.
+	 */
+	dmfc0 $t0, $status
+	ld $t1, ISTATE_OFFSET_STATUS(\r)
+	
+	/* mask UM, EXL, ERL, IE */
+	li $t2, ~REG_SAVE_MASK
+	and $t0, $t0, $t2
+	
+	/* copy UM, EXL, ERL, IE from saved status */
+	or $t0, $t0, $t1
+	dmtc0 $t0, $status
+	
+	ld $v0, ISTATE_OFFSET_V0(\r)
+	ld $v1, ISTATE_OFFSET_V1(\r)
+	ld $a0, ISTATE_OFFSET_A0(\r)
+	ld $a1, ISTATE_OFFSET_A1(\r)
+	ld $a2, ISTATE_OFFSET_A2(\r)
+	ld $a3, ISTATE_OFFSET_A3(\r)
+	ld $t0, ISTATE_OFFSET_T0(\r)
+	ld $t1, ISTATE_OFFSET_T1(\r)
+	ld $t2, ISTATE_OFFSET_T2(\r)
+	ld $t3, ISTATE_OFFSET_T3(\r)
+	ld $t4, ISTATE_OFFSET_T4(\r)
+	ld $t5, ISTATE_OFFSET_T5(\r)
+	ld $t6, ISTATE_OFFSET_T6(\r)
+	ld $t7, ISTATE_OFFSET_T7(\r)
+	ld $t8, ISTATE_OFFSET_T8(\r)
+	ld $t9, ISTATE_OFFSET_T9(\r)
+	
+	ld $gp, ISTATE_OFFSET_GP(\r)
+	ld $ra, ISTATE_OFFSET_RA(\r)
+	ld $k1, ISTATE_OFFSET_KT1(\r)  /* NOTE(review): $k0 is deliberately not reloaded — presumably kernel scratch; confirm */
+	
+	lw $at, ISTATE_OFFSET_LO(\r)  /* only the low 32 bits were saved (4-byte slot) */
+	mtlo $at
+	lw $at, ISTATE_OFFSET_HI(\r)  /* ditto for HI */
+	mthi $at
+	
+	ld $at, ISTATE_OFFSET_EPC(\r)
+	dmtc0 $at, $epc
+	
+	ld $at, ISTATE_OFFSET_AT(\r)  /* restore $at last — it served as scratch above */
+	ld $sp, ISTATE_OFFSET_SP(\r)  /* finally switch back to the saved stack */
+.endm
+
+/*
+ * Move kernel stack pointer address to register $k0.
+ * If we are in user mode, load the appropriate stack address.
+ */
<br>
+.macro KERNEL_STACK_TO_K0
+	/* if we are in user mode */
+	dmfc0 $k0, $status
+	andi $k0, 0x10  /* isolate the Status KSU/UM bit */
+	
+	beq $k0, $0, 1f
+	move $k0, $sp  /* delay slot: executes on both paths; kernel mode keeps current $sp */
+	
+	/* move $k0 pointer to kernel stack */
+	dla $k0, supervisor_sp
+	
+	/* move $k0 (supervisor_sp) */
+	lw $k0, ($k0)  /* NOTE(review): 32-bit load of a stack pointer — ld expected on a 64-bit kernel; confirm */
+	
+	1:
+.endm
+
+.org 0x0
+kernel_image_start:
+	/* load temporary stack */
+	lui $sp, %hi(end_stack)  /* 32-bit (sign-extended) address build — assumes the stack lives in a 32-bit segment */
+	ori $sp, $sp, %lo(end_stack)
+	
+	/* not sure about this, but might be needed for PIC code */
+	lui $gp, 0x8000
+	
+	/* $a1 contains physical address of bootinfo_t */
+	jal arch_pre_main
+	nop  /* branch delay slot */
+	
+	j main_bsp  /* plain jump (not jal): no return expected */
+	nop  /* branch delay slot */
+
+.space TEMP_STACK_SIZE
+end_stack:
+
+tlb_refill_entry:
+	/* vector stub: tail-jump to the real TLB refill handler */
+	j tlb_refill_handler
+	nop  /* branch delay slot */
+
+cache_error_entry:
+	/* vector stub: tail-jump to the real cache error handler */
+	j cache_error_handler
+	nop  /* branch delay slot */
+
+exception_entry:
+	/* vector stub: tail-jump to the general exception handler */
+	j exception_handler
+	nop  /* branch delay slot */
+
+	FAKE_ABI_PROLOGUE
+exception_handler:
+	KERNEL_STACK_TO_K0
+	
+	sub $k0, ISTATE_SOFT_SIZE  /* open an istate frame on the kernel stack */
+	sw $sp, ISTATE_OFFSET_SP($k0)  /* NOTE(review): 32-bit store of $sp — sd expected on a 64-bit kernel; confirm */
+	move $sp, $k0
+	
+	mfc0 $k0, $cause
+	
+	sra $k0, $k0, 0x2    /* cp0_exc_cause() part 1 */
+	andi $k0, $k0, 0x1f  /* cp0_exc_cause() part 2 */
+	sub $k0, 8           /* 8 = SYSCALL */
+	
+	beqz $k0, syscall_shortcut
+	add $k0, 8           /* delay slot, runs on both paths: revert $k0 back to correct exc number */
+	
+	REGISTERS_STORE_AND_EXC_RESET $sp
+	
+	move $a1, $sp
+	jal exc_dispatch     /* exc_dispatch(excno, register_space) */
+	move $a0, $k0        /* delay slot: 1st argument = exception number */
+	
+	REGISTERS_LOAD $sp
+	/* the $sp is automatically restored to former value */
+	eret
+
+/** Syscall entry
+ *
+ * Registers:
+ *
+ * @param $v0 Syscall number.
+ * @param $a0 1st argument.
+ * @param $a1 2nd argument.
+ * @param $a2 3rd argument.
+ * @param $a3 4th argument.
+ * @param $t0 5th argument.
+ * @param $t1 6th argument.
+ *
+ * @return The return value will be stored in $v0.
+ *
+ */
+syscall_shortcut:
+	mfc0 $t3, $epc     /* NOTE(review): 32-bit read of EPC — dmfc0 expected on a 64-bit kernel; confirm */
+	mfc0 $t2, $status
+	sw $t3, ISTATE_OFFSET_EPC($sp)  /* save EPC (NOTE(review): 32-bit store into an 8-byte slot; confirm) */
+	sw $k1, ISTATE_OFFSET_KT1($sp)  /* save $k1 not saved on context switch (NOTE(review): sw vs sd; confirm) */
+	
+	and $t4, $t2, REG_SAVE_MASK  /* save only KSU, EXL, ERL, IE */
+	li $t5, ~(0x1f)
+	and $t2, $t2, $t5  /* clear KSU, EXL, ERL */
+	ori $t2, $t2, 0x1  /* set IE */
+	
+	sw $t4, ISTATE_OFFSET_STATUS($sp)
+	mtc0 $t2, $status
+	
+	/*
+	 * Call the higher level system call handler.
+	 *
+	 */
+	sw $t0, ISTATE_OFFSET_T0($sp)  /* save the 5th argument on the stack */
+	sw $t1, ISTATE_OFFSET_T1($sp)  /* save the 6th argument on the stack */
+	jal syscall_handler
+	sw $v0, ISTATE_OFFSET_V0($sp)  /* delay slot: save the syscall number on the stack */
+	
+	/* restore status */
+	mfc0 $t2, $status
+	lw $t3, ISTATE_OFFSET_STATUS($sp)
+	
+	/*
+	 * Change back to EXL = 1 (from last exception), otherwise
+	 * an interrupt could rewrite the CP0 - EPC.
+	 *
+	 */
+	li $t4, ~REG_SAVE_MASK  /* mask UM, EXL, ERL, IE */
+	and $t2, $t2, $t4
+	or $t2, $t2, $t3  /* copy saved UM, EXL, ERL, IE */
+	mtc0 $t2, $status
+	
+	/* restore epc + 4 */
+	lw $t2, ISTATE_OFFSET_EPC($sp)
+	lw $k1, ISTATE_OFFSET_KT1($sp)
+	addi $t2, $t2, 4  /* skip the syscall instruction on return */
+	mtc0 $t2, $epc    /* NOTE(review): 32-bit write to EPC — dmtc0 expected on 64-bit; confirm */
+	
+	lw $sp, ISTATE_OFFSET_SP($sp)  /* restore $sp (NOTE(review): lw vs ld; confirm) */
+	eret
+
+	FAKE_ABI_PROLOGUE
+tlb_refill_handler:
+	KERNEL_STACK_TO_K0
+	sub $k0, ISTATE_SOFT_SIZE  /* open an istate frame on the kernel stack */
+	REGISTERS_STORE_AND_EXC_RESET $k0
+	sw $sp, ISTATE_OFFSET_SP($k0)  /* NOTE(review): 32-bit store of $sp — sd expected on 64-bit; confirm */
+	move $sp, $k0
+	
+	jal tlb_refill
+	move $a0, $sp  /* delay slot: pass the istate pointer as 1st argument */
+	
+	REGISTERS_LOAD $sp
+	eret
+
+	FAKE_ABI_PROLOGUE
+cache_error_handler:
+	KERNEL_STACK_TO_K0
+	sub $k0, ISTATE_SOFT_SIZE  /* open an istate frame on the kernel stack */
+	REGISTERS_STORE_AND_EXC_RESET $k0
+	sw $sp, ISTATE_OFFSET_SP($k0)  /* NOTE(review): 32-bit store of $sp — sd expected on 64-bit; confirm */
+	move $sp, $k0
+	
+	jal cache_error
+	move $a0, $sp  /* delay slot: pass the istate pointer as 1st argument */
+	
+	REGISTERS_LOAD $sp
+	eret
+
+userspace_asm:
+	move $sp, $a0      /* switch to the userspace stack supplied by the caller */
+	move $v0, $a1      /* presumably the uspace argument/entry value — confirm against the C caller */
+	move $t9, $a2      /* set up correct entry into PIC code */
+	xor $a0, $a0, $a0  /* $a0 is defined to hold pcb_ptr */
+	                   /* set it to 0 */
+	eret               /* return through EPC (assumes Status/EPC were preset by the caller — TODO confirm) */
