Index: kernel/arch/abs32le/include/arch/mm/as.h
===================================================================
--- kernel/arch/abs32le/include/arch/mm/as.h	(revision 3daba42eca17fefb7371eb495dcaf63d5a95ed9e)
+++ kernel/arch/abs32le/include/arch/mm/as.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -37,4 +37,5 @@
 
 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH  0
+#define KERNEL_SEPARATE_PTL0_ARCH           0
 
 #define KERNEL_ADDRESS_SPACE_START_ARCH  UINT32_C(0x80000000)
Index: kernel/arch/amd64/include/arch/mm/as.h
===================================================================
--- kernel/arch/amd64/include/arch/mm/as.h	(revision 3daba42eca17fefb7371eb495dcaf63d5a95ed9e)
+++ kernel/arch/amd64/include/arch/mm/as.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -40,4 +40,5 @@
 
 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH  0
+#define KERNEL_SEPARATE_PTL0_ARCH           0
 
 #define KERNEL_ADDRESS_SPACE_START_ARCH  UINT64_C(0xffff800000000000)
Index: kernel/arch/arm32/include/arch/mm/as.h
===================================================================
--- kernel/arch/arm32/include/arch/mm/as.h	(revision 3daba42eca17fefb7371eb495dcaf63d5a95ed9e)
+++ kernel/arch/arm32/include/arch/mm/as.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -38,4 +38,5 @@
 
 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH  0
+#define KERNEL_SEPARATE_PTL0_ARCH           0
 
 #define KERNEL_ADDRESS_SPACE_START_ARCH  UINT32_C(0x80000000)
Index: kernel/arch/arm64/Makefile.inc
===================================================================
--- kernel/arch/arm64/Makefile.inc	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/Makefile.inc	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,68 @@
+#
+# Copyright (c) 2015 Petr Pavlu
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# - Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+# - Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in the
+#   documentation and/or other materials provided with the distribution.
+# - The name of the author may not be used to endorse or promote products
+#   derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+BFD_NAME = elf64-littleaarch64
+BFD_ARCH = aarch64
+BFD = binary
+
+COMMON_CFLAGS += -fno-omit-frame-pointer -march=armv8-a+nofp+nosimd \
+	-mgeneral-regs-only
+
+BITS = 64
+ENDIANESS = LE
+
+ARCH_SOURCES = \
+	arch/$(KARCH)/src/arm64.c \
+	arch/$(KARCH)/src/asm.S \
+	arch/$(KARCH)/src/context.S \
+	arch/$(KARCH)/src/cpu/cpu.c \
+	arch/$(KARCH)/src/debug/stacktrace.c \
+	arch/$(KARCH)/src/debug/stacktrace_asm.S \
+	arch/$(KARCH)/src/exception.c \
+	arch/$(KARCH)/src/fpu.S \
+	arch/$(KARCH)/src/fpu_context.c \
+	arch/$(KARCH)/src/interrupt.c \
+	arch/$(KARCH)/src/machine_func.c \
+	arch/$(KARCH)/src/mm/as.c \
+	arch/$(KARCH)/src/mm/frame.c \
+	arch/$(KARCH)/src/mm/km.c \
+	arch/$(KARCH)/src/mm/page.c \
+	arch/$(KARCH)/src/mm/tlb.c \
+	arch/$(KARCH)/src/smc.c \
+	arch/$(KARCH)/src/smp/ipi.c \
+	arch/$(KARCH)/src/smp/smp.c \
+	arch/$(KARCH)/src/start.S
+
+ifeq ($(MACHINE),virt)
+	ARCH_SOURCES += arch/$(KARCH)/src/mach/virt/virt.c
+endif
+
+ARCH_AUTOCHECK_HEADERS = \
+	arch/$(KARCH)/include/arch/context_struct.h \
+	arch/$(KARCH)/include/arch/fpu_context_struct.h \
+	arch/$(KARCH)/include/arch/istate_struct.h
Index: kernel/arch/arm64/_link.ld.in
===================================================================
--- kernel/arch/arm64/_link.ld.in	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/_link.ld.in	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,60 @@
+/*
+ * ARM64 linker script
+ *
+ *  kernel text
+ *  kernel data
+ *
+ */
+
+#include <arch/boot/boot.h>
+#include <arch/mm/km.h>
+
+#define LOAD_ADDRESS_V  (KM_ARM64_IDENTITY_START + BOOT_OFFSET)
+#define LOAD_ADDRESS_P  BOOT_OFFSET
+
+OUTPUT_ARCH(aarch64)
+ENTRY(kernel_image_start)
+
+SECTIONS {
+	kernel_load_address = LOAD_ADDRESS_V;
+
+	.image (LOAD_ADDRESS_V + SIZEOF_HEADERS) : AT (LOAD_ADDRESS_P + SIZEOF_HEADERS) {
+		. = ALIGN(16);
+		ktext_start = .;
+		*(K_TEXT_START)
+		*(.text .text.*);
+		ktext_end = .;
+
+		kdata_start = .;
+		*(K_DATA_START)
+		*(.data);                       /* initialized data */
+		*(.bss);                        /* uninitialized static variables */
+		*(COMMON);                      /* global variables */
+
+		*(.rodata*);
+		. = ALIGN(8);
+		symbol_table = .;
+		*(symtab.*);
+
+		kdata_end = .;
+	}
+
+#ifdef CONFIG_LINE_DEBUG
+	.comment 0 : { *(.comment); }
+	.debug_abbrev 0 : { *(.debug_abbrev); }
+	.debug_aranges 0 : { *(.debug_aranges); }
+	.debug_frame 0 : { *(.debug_frame); }
+	.debug_info 0 : { *(.debug_info); }
+	.debug_line 0 : { *(.debug_line); }
+	.debug_loc 0 : { *(.debug_loc); }
+	.debug_macinfo 0 : { *(.debug_macinfo); }
+	.debug_pubnames 0 : { *(.debug_pubnames); }
+	.debug_pubtypes 0 : { *(.debug_pubtypes); }
+	.debug_ranges 0 : { *(.debug_ranges); }
+	.debug_str 0 : { *(.debug_str); }
+#endif
+
+	/DISCARD/ : {
+		*(*);
+	}
+}
Index: kernel/arch/arm64/include/arch/arch.h
===================================================================
--- kernel/arch/arm64/include/arch/arch.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/arch.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ * @brief Various ARM64-specific macros.
+ */
+
+#ifndef KERN_arm64_ARCH_H_
+#define KERN_arm64_ARCH_H_
+
+#include <arch/boot/boot.h>
+
+extern void arm64_pre_main(void *entry, bootinfo_t *bootinfo);
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/asm.h
===================================================================
--- kernel/arch/arm64/include/arch/asm.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/asm.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ * @brief Declarations of functions implemented in assembly.
+ */
+
+#ifndef KERN_arm64_ASM_H_
+#define KERN_arm64_ASM_H_
+
+#include <config.h>
+#include <trace.h>
+
+extern char exc_vector;
+
+/*
+ * Note: Function asm_delay_loop() is defined in arm64.c but declared here
+ * because the generic kernel code expects it in arch/asm.h.
+ */
+extern void asm_delay_loop(uint32_t usec);
+
+/** CPU-specific way to put the CPU to sleep. */
+_NO_TRACE static inline void cpu_sleep(void)
+{
+	asm volatile ("wfe");
+}
+
+/** Return base address of current stack.
+ *
+ * Return the base address of the current stack.
+ * The stack is assumed to be STACK_SIZE bytes long.
+ * The stack must be aligned to a STACK_SIZE boundary (SP is masked with ~(STACK_SIZE - 1)).
+ */
+_NO_TRACE static inline uintptr_t get_stack_base(void)
+{
+	uintptr_t v;
+
+	asm volatile (
+	    "mov %[v], sp\n"
+	    "and %[v], %[v], %[size]\n"
+	    : [v] "=&r" (v)
+	    : [size] "r" (~((uint64_t) STACK_SIZE - 1))
+	);
+
+	return v;
+}
+
+/** Halts CPU. */
+_NO_TRACE static inline __attribute__((noreturn)) void cpu_halt(void)
+{
+	while (true)
+		;
+}
+
+/** Output byte to port.
+ *
+ * @param port Port to write to.
+ * @param val  Value to write.
+ */
+_NO_TRACE static inline void pio_write_8(ioport8_t *port, uint8_t val)
+{
+	*port = val;
+}
+
+/** Output half-word to port.
+ *
+ * @param port Port to write to.
+ * @param val  Value to write.
+ */
+_NO_TRACE static inline void pio_write_16(ioport16_t *port, uint16_t val)
+{
+	*port = val;
+}
+
+/** Output word to port.
+ *
+ * @param port Port to write to.
+ * @param val  Value to write.
+ */
+_NO_TRACE static inline void pio_write_32(ioport32_t *port, uint32_t val)
+{
+	*port = val;
+}
+
+/** Get byte from port.
+ *
+ * @param port Port to read from.
+ * @return Value read.
+ */
+_NO_TRACE static inline uint8_t pio_read_8(const ioport8_t *port)
+{
+	return *port;
+}
+
+/** Get word from port.
+ *
+ * @param port Port to read from.
+ * @return Value read.
+ */
+_NO_TRACE static inline uint16_t pio_read_16(const ioport16_t *port)
+{
+	return *port;
+}
+
+/** Get double word from port.
+ *
+ * @param port Port to read from.
+ * @return Value read.
+ */
+_NO_TRACE static inline uint32_t pio_read_32(const ioport32_t *port)
+{
+	return *port;
+}
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/barrier.h
===================================================================
--- kernel/arch/arm64/include/arch/barrier.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/barrier.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ * @brief Memory barriers.
+ */
+
+#ifndef KERN_arm64_BARRIER_H_
+#define KERN_arm64_BARRIER_H_
+
+#include <stddef.h>
+
+#define COHERENCE_INVAL_MIN  4
+
+/** Ensure visibility of instruction updates for a multiprocessor.
+ *
+ * @param addr Address of the first instruction.
+ * @param len  Size of the instruction block (in bytes).
+ */
+static inline void ensure_visibility(void *addr, size_t len)
+{
+	size_t i;
+
+	/*
+	 * Clean to Point of Unification to make the new instructions visible to
+	 * the instruction cache.
+	 */
+	for (i = 0; i < len; i += COHERENCE_INVAL_MIN)
+		asm volatile (
+		    "dc cvau, %[addr]\n"
+		    : : [addr] "r" ((char *) addr + i)
+		);
+
+	/* Ensure completion on all PEs. */
+	asm volatile ("dsb ish" ::: "memory");
+
+	/* Ensure instruction cache/branch predictor discards stale data. */
+	for (i = 0; i < len; i += COHERENCE_INVAL_MIN)
+		asm volatile (
+		    "ic ivau, %[addr]\n"
+		    : : [addr] "r" ((char *) addr + i)
+		);
+
+	/* Ensure completion on all PEs. */
+	asm volatile ("dsb ish" ::: "memory");
+
+	/* Synchronize context on this PE. */
+	asm volatile ("isb");
+}
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/boot/boot.h
===================================================================
--- kernel/arch/arm64/include/arch/boot/boot.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/boot/boot.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ * @brief Shared interface between the bootcode and the kernel.
+ */
+
+#ifndef KERN_arm64_BOOT_H_
+#define KERN_arm64_BOOT_H_
+
+#define BOOT_OFFSET  0x80000
+
+#ifndef __ASSEMBLER__
+#include <stddef.h>
+
+#define BOOTINFO_TASK_NAME_BUFLEN  32
+#define TASKMAP_MAX_RECORDS        32
+#define MEMMAP_MAX_RECORDS        128
+
+/** Task structure. */
+typedef struct {
+	/** Address where the task was placed. */
+	void *addr;
+	/** Size of the task's binary. */
+	size_t size;
+	/** Task name. */
+	char name[BOOTINFO_TASK_NAME_BUFLEN];
+} utask_t;
+
+/** Task map structure. */
+typedef struct {
+	/** Number of boot tasks. */
+	size_t cnt;
+	/** Boot task data. */
+	utask_t tasks[TASKMAP_MAX_RECORDS];
+} taskmap_t;
+
+/** Memory zone types. */
+typedef enum {
+	/** Unusuable memory. */
+	MEMTYPE_UNUSABLE,
+	/** Usable memory. */
+	MEMTYPE_AVAILABLE,
+	/** Memory that can be used after ACPI is enabled. */
+	MEMTYPE_ACPI_RECLAIM
+} memtype_t;
+
+/** Memory area. */
+typedef struct {
+	/** Type of the memory. */
+	memtype_t type;
+	/** Address of the area. */
+	void *start;
+	/** Size of the area. */
+	size_t size;
+} memzone_t;
+
+/** System memory map. */
+typedef struct {
+	/** Number of memory zones. */
+	size_t cnt;
+	/** Memory zones. */
+	memzone_t zones[MEMMAP_MAX_RECORDS];
+} memmap_t;
+
+/** Bootinfo structure. */
+typedef struct {
+	/** Task map. */
+	taskmap_t taskmap;
+	/** Memory map. */
+	memmap_t memmap;
+} bootinfo_t;
+
+#endif /* __ASSEMBLER__ */
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/context.h
===================================================================
--- kernel/arch/arm64/include/arch/context.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/context.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ * @brief Thread context.
+ */
+
+#ifndef KERN_arm64_CONTEXT_H_
+#define KERN_arm64_CONTEXT_H_
+
+#include <align.h>
+#include <arch/context_struct.h>
+#include <arch/stack.h>
+
+/* Put one item onto the stack to support get_stack_base() and align it up. */
+#define SP_DELTA  (0 + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT))
+
+#define context_set(c, _pc, stack, size) \
+	do { \
+		(c)->pc = (uint64_t) (_pc); \
+		(c)->sp = ((uint64_t) (stack)) + (size) - SP_DELTA; \
+		/* Set frame pointer too. */ \
+		(c)->x29 = 0; \
+	} while (0)
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/context_struct.h
===================================================================
--- kernel/arch/arm64/include/arch/context_struct.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/context_struct.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERN_ARCH_CONTEXT_STRUCT_H_
+#define KERN_ARCH_CONTEXT_STRUCT_H_
+
+#define CONTEXT_OFFSET_SP   0x00
+#define CONTEXT_OFFSET_PC   0x08
+#define CONTEXT_OFFSET_X19  0x10
+#define CONTEXT_OFFSET_X20  0x18
+#define CONTEXT_OFFSET_X21  0x20
+#define CONTEXT_OFFSET_X22  0x28
+#define CONTEXT_OFFSET_X23  0x30
+#define CONTEXT_OFFSET_X24  0x38
+#define CONTEXT_OFFSET_X25  0x40
+#define CONTEXT_OFFSET_X26  0x48
+#define CONTEXT_OFFSET_X27  0x50
+#define CONTEXT_OFFSET_X28  0x58
+#define CONTEXT_OFFSET_X29  0x60
+#define CONTEXT_OFFSET_IPL  0x68
+#define CONTEXT_SIZE        0x70
+
+#ifndef __ASSEMBLER__
+
+#include <typedefs.h>
+
+/*
+ * Thread context containing registers that must be preserved across function
+ * calls.
+ */
+typedef struct context {
+	uint64_t sp;
+	uint64_t pc;
+	uint64_t x19;
+	uint64_t x20;
+	uint64_t x21;
+	uint64_t x22;
+	uint64_t x23;
+	uint64_t x24;
+	uint64_t x25;
+	uint64_t x26;
+	uint64_t x27;
+	uint64_t x28;
+	uint64_t x29;
+	ipl_t ipl;
+} context_t;
+
+#endif
+#endif
Index: kernel/arch/arm64/include/arch/cpu.h
===================================================================
--- kernel/arch/arm64/include/arch/cpu.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/cpu.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ * @brief CPU identification.
+ */
+
+#ifndef KERN_arm64_CPU_H_
+#define KERN_arm64_CPU_H_
+
+/** Struct representing ARM CPU identification. */
+typedef struct {
+	/** Implementer (vendor) number. */
+	uint32_t implementer;
+
+	/** Variant number. */
+	uint32_t variant;
+
+	/** Primary part number. */
+	uint32_t partnum;
+
+	/** Revision number. */
+	uint32_t revision;
+} cpu_arch_t;
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/cycle.h
===================================================================
--- kernel/arch/arm64/include/arch/cycle.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/cycle.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ * @brief Information about a current cycle.
+ */
+
+#ifndef KERN_arm64_CYCLE_H_
+#define KERN_arm64_CYCLE_H_
+
+#include <arch/asm.h>
+#include <trace.h>
+
+/** Get a current cycle.
+ *
+ * No instruction exists on ARM64 to get the actual CPU cycle. The function
+ * instead returns the value of the virtual counter.
+ */
+_NO_TRACE static inline uint64_t get_cycle(void)
+{
+	return CNTVCT_EL0_read();
+}
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/exception.h
===================================================================
--- kernel/arch/arm64/include/arch/exception.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/exception.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2016 Petr Pavlu
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ * @brief Exception declarations.
+ */
+
+#ifndef KERN_arm64_EXCEPTION_H_
+#define KERN_arm64_EXCEPTION_H_
+
+/* Exception numbers. */
+
+/** Current Exception level with SP_EL0, Synchronous. */
+#define EXC_CURRENT_EL_SP_SEL0_SYNCH    0
+/** Current Exception level with SP_EL0, IRQ or vIRQ. */
+#define EXC_CURRENT_EL_SP_SEL0_IRQ      1
+/** Current Exception level with SP_EL0, FIQ or vFIQ. */
+#define EXC_CURRENT_EL_SP_SEL0_FIQ      2
+/** Current Exception level with SP_EL0, SError or vSError. */
+#define EXC_CURRENT_EL_SP_SEL0_SERROR   3
+/** Current Exception level with SP_ELx, x > 0, Synchronous. */
+#define EXC_CURRENT_EL_SP_SELX_SYNCH    4
+/** Current Exception level with SP_ELx, x > 0, IRQ or vIRQ. */
+#define EXC_CURRENT_EL_SP_SELX_IRQ      5
+/** Current Exception level with SP_ELx, x > 0, FIQ or vFIQ. */
+#define EXC_CURRENT_EL_SP_SELX_FIQ      6
+/** Current Exception level with SP_ELx, x > 0, SError or vSError. */
+#define EXC_CURRENT_EL_SP_SELX_SERROR   7
+/** Lower Exception level, where the implemented level immediately lower than
+ * the target level is using AArch64, Synchronous.
+ */
+#define EXC_LOWER_EL_AARCH64_SYNCH      8
+/** Lower Exception level, where the implemented level immediately lower than
+ * the target level is using AArch64, IRQ or vIRQ.
+ */
+#define EXC_LOWER_EL_AARCH64_IRQ        9
+/** Lower Exception level, where the implemented level immediately lower than
+ * the target level is using AArch64, FIQ or vFIQ.
+ */
+#define EXC_LOWER_EL_AARCH64_FIQ       10
+/** Lower Exception level, where the implemented level immediately lower than
+ * the target level is using AArch64, SError or vSError.
+ */
+#define EXC_LOWER_EL_AARCH64_SERROR    11
+/** Lower Exception level, where the implemented level immediately lower than
+ * the target level is using AArch32, Synchronous.
+ */
+#define EXC_LOWER_EL_AARCH32_SYNCH     12
+/** Lower Exception level, where the implemented level immediately lower than
+ * the target level is using AArch32, IRQ or vIRQ.
+ */
+#define EXC_LOWER_EL_AARCH32_IRQ       13
+/** Lower Exception level, where the implemented level immediately lower than
+ * the target level is using AArch32, FIQ or vFIQ.
+ */
+#define EXC_LOWER_EL_AARCH32_FIQ       14
+/** Lower Exception level, where the implemented level immediately lower than
+ * the target level is using AArch32, SError or vSError.
+ */
+#define EXC_LOWER_EL_AARCH32_SERROR    15
+
+#ifndef __ASSEMBLER__
+extern void exception_init(void);
+#endif /* __ASSEMBLER__ */
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/faddr.h
===================================================================
--- kernel/arch/arm64/include/arch/faddr.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/faddr.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ * @brief Function address conversion.
+ */
+
+#ifndef KERN_arm64_FADDR_H_
+#define KERN_arm64_FADDR_H_
+
+#include <typedefs.h>
+
+/** Calculate absolute address of function referenced by fptr pointer.
+ *
+ * @param fptr Function pointer.
+ */
+#define FADDR(fptr)  ((uintptr_t) (fptr))
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/fpu_context.h
===================================================================
--- kernel/arch/arm64/include/arch/fpu_context.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/fpu_context.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ * @brief FPU context.
+ */
+
+#ifndef KERN_arm64_FPU_CONTEXT_H_
+#define KERN_arm64_FPU_CONTEXT_H_
+
+#include <arch/fpu_context_struct.h>
+
+#define FPU_CONTEXT_ALIGN  16
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/fpu_context_struct.h
===================================================================
--- kernel/arch/arm64/include/arch/fpu_context_struct.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/fpu_context_struct.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERN_ARCH_FPU_CONTEXT_STRUCT_H_
+#define KERN_ARCH_FPU_CONTEXT_STRUCT_H_
+
+#define FPU_CONTEXT_OFFSET_VREGS  0x000
+#define FPU_CONTEXT_OFFSET_FPCR   0x200
+#define FPU_CONTEXT_OFFSET_FPSR   0x204
+#define FPU_CONTEXT_SIZE          0x210
+
+#ifndef __ASSEMBLER__
+
+#include <typedefs.h>
+#include <_bits/int128_t.h>
+
+/** ARM64 FPU context. */
+typedef struct fpu_context {
+	uint128_t vregs[32];
+	uint32_t fpcr;
+	uint32_t fpsr;
+} fpu_context_t;
+
+#endif
+#endif
Index: kernel/arch/arm64/include/arch/interrupt.h
===================================================================
--- kernel/arch/arm64/include/arch/interrupt.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/interrupt.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64_interrupt
+ * @{
+ */
+/** @file
+ * @brief Declarations of interrupt controlling routines.
+ */
+
+#ifndef KERN_arm64_INTERRUPT_H_
+#define KERN_arm64_INTERRUPT_H_
+
+#include <arch/istate.h>
+#include <stdbool.h>
+#include <typedefs.h>
+
+/** Size of exception table. */
+#define IVT_ITEMS  16
+
+/** Index of the first item in exception table. */
+#define IVT_FIRST  0
+
+/* REVISIT */
+/* This needs to be defined for inter-architecture API portability. */
+#define VECTOR_TLB_SHOOTDOWN_IPI  0
+
+extern ipl_t interrupts_disable(void);
+extern ipl_t interrupts_enable(void);
+extern void interrupts_restore(ipl_t ipl);
+extern ipl_t interrupts_read(void);
+extern bool interrupts_disabled(void);
+extern void interrupt_init(void);
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/istate.h
===================================================================
--- kernel/arch/arm64/include/arch/istate.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/istate.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64_interrupt
+ * @{
+ */
+
+#ifndef KERN_arm64_ISTATE_H_
+#define KERN_arm64_ISTATE_H_
+
+#include <trace.h>
+
+#ifdef KERNEL
+
+#include <arch/istate_struct.h>
+#include <arch/regutils.h>
+
+#else /* KERNEL */
+
+#include <libarch/istate_struct.h>
+#include <libarch/regutils.h>
+
+#endif /* KERNEL */
+
+/** Set Program Counter member of given istate structure.
+ *
+ * @param istate  istate structure
+ * @param retaddr new value of istate's PC member
+ */
+_NO_TRACE static inline void istate_set_retaddr(istate_t *istate,
+    uintptr_t retaddr)
+{
+	istate->pc = retaddr;
+}
+
+/** Return true if exception happened while in userspace. */
+_NO_TRACE static inline int istate_from_uspace(istate_t *istate)
+{
+	return (istate->spsr & SPSR_MODE_MASK) >> SPSR_MODE_SHIFT ==
+	    SPSR_MODE_ARM64_EL0T;
+}
+
+/** Return Program Counter member of given istate structure. */
+_NO_TRACE static inline uintptr_t istate_get_pc(istate_t *istate)
+{
+	return istate->pc;
+}
+
+/** Return Frame Pointer member of given istate structure. */
+_NO_TRACE static inline uintptr_t istate_get_fp(istate_t *istate)
+{
+	return istate->x29;
+}
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/istate_struct.h
===================================================================
--- kernel/arch/arm64/include/arch/istate_struct.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/istate_struct.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERN_ARCH_ISTATE_STRUCT_H_
+#define KERN_ARCH_ISTATE_STRUCT_H_
+
+#define ISTATE_OFFSET_SPSR   0x00
+#define ISTATE_OFFSET_SP     0x08
+#define ISTATE_OFFSET_PC     0x10
+#define ISTATE_OFFSET_TPIDR  0x18
+#define ISTATE_OFFSET_X0     0x20
+#define ISTATE_OFFSET_X1     0x28
+#define ISTATE_OFFSET_X2     0x30
+#define ISTATE_OFFSET_X3     0x38
+#define ISTATE_OFFSET_X4     0x40
+#define ISTATE_OFFSET_X5     0x48
+#define ISTATE_OFFSET_X6     0x50
+#define ISTATE_OFFSET_X7     0x58
+#define ISTATE_OFFSET_X8     0x60
+#define ISTATE_OFFSET_X9     0x68
+#define ISTATE_OFFSET_X10    0x70
+#define ISTATE_OFFSET_X11    0x78
+#define ISTATE_OFFSET_X12    0x80
+#define ISTATE_OFFSET_X13    0x88
+#define ISTATE_OFFSET_X14    0x90
+#define ISTATE_OFFSET_X15    0x98
+#define ISTATE_OFFSET_X16    0xa0
+#define ISTATE_OFFSET_X17    0xa8
+#define ISTATE_OFFSET_X18    0xb0
+#define ISTATE_OFFSET_X19    0xb8
+#define ISTATE_OFFSET_X20    0xc0
+#define ISTATE_OFFSET_X21    0xc8
+#define ISTATE_OFFSET_X22    0xd0
+#define ISTATE_OFFSET_X23    0xd8
+#define ISTATE_OFFSET_X24    0xe0
+#define ISTATE_OFFSET_X25    0xe8
+#define ISTATE_OFFSET_X26    0xf0
+#define ISTATE_OFFSET_X27    0xf8
+#define ISTATE_OFFSET_X28    0x100
+#define ISTATE_OFFSET_X29    0x108
+#define ISTATE_OFFSET_X30    0x110
+#define ISTATE_SIZE          0x118
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+typedef struct istate {
+	uint64_t spsr;
+	uint64_t sp;
+	uint64_t pc;
+	uint64_t tpidr;
+	uint64_t x0;
+	uint64_t x1;
+	uint64_t x2;
+	uint64_t x3;
+	uint64_t x4;
+	uint64_t x5;
+	uint64_t x6;
+	uint64_t x7;
+	uint64_t x8;
+	uint64_t x9;
+	uint64_t x10;
+	uint64_t x11;
+	uint64_t x12;
+	uint64_t x13;
+	uint64_t x14;
+	uint64_t x15;
+	uint64_t x16;
+	uint64_t x17;
+	uint64_t x18;
+	uint64_t x19;
+	uint64_t x20;
+	uint64_t x21;
+	uint64_t x22;
+	uint64_t x23;
+	uint64_t x24;
+	uint64_t x25;
+	uint64_t x26;
+	uint64_t x27;
+	uint64_t x28;
+	/* Frame Pointer. */
+	uint64_t x29;
+	/* Link Register. */
+	uint64_t x30;
+} istate_t;
+
+#endif
+#endif
Index: kernel/arch/arm64/include/arch/mach/virt/virt.h
===================================================================
--- kernel/arch/arm64/include/arch/mach/virt/virt.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/mach/virt/virt.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64_virt
+ * @brief QEMU virt platform.
+ * @ingroup kernel_arm64
+ * @{
+ */
+/** @file
+ * @brief QEMU virt platform driver.
+ */
+
+#ifndef KERN_arm64_virt_H_
+#define KERN_arm64_virt_H_
+
+#include <arch/machine_func.h>
+
+extern struct arm_machine_ops virt_machine_ops;
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/machine_func.h
===================================================================
--- kernel/arch/arm64/include/arch/machine_func.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/machine_func.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2016 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ * @brief Declarations of machine specific functions.
+ *
+ * These functions make it possible to support multiple kinds of ARM platforms.
+ */
+
+#ifndef KERN_arm64_MACHINE_FUNC_H_
+#define KERN_arm64_MACHINE_FUNC_H_
+
+#include <arch/istate.h>
+#include <typedefs.h>
+
+struct arm_machine_ops {
+	void (*machine_init)(void);
+	void (*machine_irq_exception)(unsigned int, istate_t *);
+	void (*machine_output_init)(void);
+	void (*machine_input_init)(void);
+	inr_t (*machine_enable_vtimer_irq)(void);
+	size_t (*machine_get_irq_count)(void);
+	const char *(*machine_get_platform_name)(void);
+};
+
+extern void machine_ops_init(void);
+extern void machine_init(void);
+void machine_irq_exception(unsigned int, istate_t *);
+extern void machine_output_init(void);
+extern void machine_input_init(void);
+extern inr_t machine_enable_vtimer_irq(void);
+extern size_t machine_get_irq_count(void);
+extern const char *machine_get_platform_name(void);
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/mm/as.h
===================================================================
--- kernel/arch/arm64/include/arch/mm/as.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/mm/as.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64_mm
+ * @{
+ */
+/** @file
+ * @brief Address space manipulating functions declarations.
+ */
+
+#ifndef KERN_arm64_AS_H_
+#define KERN_arm64_AS_H_
+
+#define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH  0
+#define KERNEL_SEPARATE_PTL0_ARCH           1
+
+#define KERNEL_ADDRESS_SPACE_START_ARCH  UINT64_C(0xffff000000000000)
+#define KERNEL_ADDRESS_SPACE_END_ARCH    UINT64_C(0xffffffffffffffff)
+#define USER_ADDRESS_SPACE_START_ARCH    UINT64_C(0x0000000000000000)
+#define USER_ADDRESS_SPACE_END_ARCH      UINT64_C(0x0000ffffffffffff)
+
+typedef struct {
+} as_arch_t;
+
+#include <genarch/mm/as_pt.h>
+
+#define as_constructor_arch(as, flags)  ((void)as, (void)flags, EOK)
+#define as_destructor_arch(as)          ((void)as, 0)
+#define as_create_arch(as, flags)       ((void)as, (void)flags, EOK)
+#define as_deinstall_arch(as)
+#define as_invalidate_translation_cache(as, page, cnt)
+
+extern void as_arch_init(void);
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/mm/asid.h
===================================================================
--- kernel/arch/arm64/include/arch/mm/asid.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/mm/asid.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64_mm
+ * @{
+ */
+/** @file
+ * @brief ASIDs related declarations.
+ */
+
+#ifndef KERN_arm64_ASID_H_
+#define KERN_arm64_ASID_H_
+
+#include <stdint.h>
+
+/*
+ * The ASID size in VMSAv8-64 is an implementation-defined choice of 8 or 16
+ * bits. The actual size can be obtained by reading ID_AA64MMFR0_EL1.ASIDBits
+ * but for simplicity, HelenOS currently defaults to 8 bits.
+ */
+typedef uint8_t asid_t;
+
+#define ASID_MAX_ARCH  255
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/mm/frame.h
===================================================================
--- kernel/arch/arm64/include/arch/mm/frame.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/mm/frame.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64_mm
+ * @{
+ */
+/** @file
+ * @brief Frame related declarations.
+ */
+
+#ifndef KERN_arm64_FRAME_H_
+#define KERN_arm64_FRAME_H_
+
+#include <arch/boot/boot.h>
+
+#define FRAME_WIDTH  12  /* 4KB frames */
+#define FRAME_SIZE   (1 << FRAME_WIDTH)
+
+#define FRAME_LOWPRIO  0
+
+#ifndef __ASSEMBLER__
+
+extern void frame_low_arch_init(void);
+extern void frame_high_arch_init(void);
+extern void physmem_print(void);
+
+extern memmap_t memmap;
+
+#endif /* __ASSEMBLER__ */
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/mm/km.h
===================================================================
--- kernel/arch/arm64/include/arch/mm/km.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/mm/km.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64_mm
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_arm64_KM_H_
+#define KERN_arm64_KM_H_
+
+#ifndef __ASSEMBLER__
+
+#include <stdbool.h>
+#include <typedefs.h>
+
+#define KM_ARM64_IDENTITY_START  UINT64_C(0xffffffff80000000)
+#define KM_ARM64_IDENTITY_SIZE   UINT64_C(0x0000000080000000)
+
+#define KM_ARM64_NON_IDENTITY_START  UINT64_C(0xffff000000000000)
+#define KM_ARM64_NON_IDENTITY_SIZE   UINT64_C(0x0000ffff80000000)
+
+extern void km_identity_arch_init(void);
+extern void km_non_identity_arch_init(void);
+extern bool km_is_non_identity_arch(uintptr_t);
+
+#else /* __ASSEMBLER__ */
+
+#define KM_ARM64_IDENTITY_START  0xffffffff80000000
+#define KM_ARM64_IDENTITY_SIZE   0x0000000080000000
+
+#define KM_ARM64_NON_IDENTITY_START  0xffff000000000000
+#define KM_ARM64_NON_IDENTITY_SIZE   0x0000ffff80000000
+
+#endif /* __ASSEMBLER__ */
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/mm/page.h
===================================================================
--- kernel/arch/arm64/include/arch/mm/page.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/mm/page.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64_mm
+ * @{
+ */
+/** @file
+ * @brief Paging related declarations.
+ */
+
+#ifndef KERN_arm64_PAGE_H_
+#define KERN_arm64_PAGE_H_
+
+#include <arch/mm/frame.h>
+#include <mm/mm.h>
+#include <trace.h>
+
+#ifndef __ASSEMBLER__
+#include <typedefs.h>
+#endif /* __ASSEMBLER__ */
+
+#define PAGE_WIDTH  FRAME_WIDTH
+#define PAGE_SIZE   FRAME_SIZE
+
+#ifndef __ASSEMBLER__
+
+extern uintptr_t physmem_base;
+
+#define KA2PA(x) \
+	(((uintptr_t) (x)) - UINT64_C(0xffffffff80000000) + physmem_base)
+#define PA2KA(x) \
+	(((uintptr_t) (x)) + UINT64_C(0xffffffff80000000) - physmem_base)
+
+#endif /* __ASSEMBLER__ */
+
+/** Log2 size of each translation table entry. */
+#define PTL_ENTRY_SIZE_SHIFT  3
+
+/* Number of entries in each level. */
+#define PTL0_ENTRIES_ARCH  512
+#define PTL1_ENTRIES_ARCH  512
+#define PTL2_ENTRIES_ARCH  512
+#define PTL3_ENTRIES_ARCH  512
+
+/* Page table sizes for each level. */
+#define PTL0_FRAMES_ARCH  1
+#define PTL1_FRAMES_ARCH  1
+#define PTL2_FRAMES_ARCH  1
+#define PTL3_FRAMES_ARCH  1
+
+/* Starting bit of virtual address portion translated in each level. */
+#define PTL0_VA_SHIFT  39
+#define PTL1_VA_SHIFT  30
+#define PTL2_VA_SHIFT  21
+#define PTL3_VA_SHIFT  12
+
+/* Size mask of virtual address portion translated in each level. */
+#define PTL0_VA_MASK  0x1ff
+#define PTL1_VA_MASK  0x1ff
+#define PTL2_VA_MASK  0x1ff
+#define PTL3_VA_MASK  0x1ff
+
+/* Macros calculating indices into page tables for each level. */
+#define PTL0_INDEX_ARCH(vaddr)  (((vaddr) >> PTL0_VA_SHIFT) & PTL0_VA_MASK)
+#define PTL1_INDEX_ARCH(vaddr)  (((vaddr) >> PTL1_VA_SHIFT) & PTL1_VA_MASK)
+#define PTL2_INDEX_ARCH(vaddr)  (((vaddr) >> PTL2_VA_SHIFT) & PTL2_VA_MASK)
+#define PTL3_INDEX_ARCH(vaddr)  (((vaddr) >> PTL3_VA_SHIFT) & PTL3_VA_MASK)
+
+/* Get PTE address accessors for each level. */
+#define GET_PTL1_ADDRESS_ARCH(ptl0, i) \
+	((pte_t *) (((uintptr_t) ((pte_t *) (ptl0))[(i)].output_address) << 12))
+#define GET_PTL2_ADDRESS_ARCH(ptl1, i) \
+	((pte_t *) (((uintptr_t) ((pte_t *) (ptl1))[(i)].output_address) << 12))
+#define GET_PTL3_ADDRESS_ARCH(ptl2, i) \
+	((pte_t *) (((uintptr_t) ((pte_t *) (ptl2))[(i)].output_address) << 12))
+#define GET_FRAME_ADDRESS_ARCH(ptl3, i) \
+	(((uintptr_t) ((pte_t *) (ptl3))[(i)].output_address) << 12)
+
+/*
+ * Set PTE address accessors for each level. Setting of the level 0 table is
+ * ignored because it must be done only by calling as_install_arch() which also
+ * changes ASID.
+ */
+#define SET_PTL0_ADDRESS_ARCH(ptl0)
+#define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) \
+	(((pte_t *) (ptl0))[(i)].output_address = (a) >> 12)
+#define SET_PTL2_ADDRESS_ARCH(ptl1, i, a) \
+	(((pte_t *) (ptl1))[(i)].output_address = (a) >> 12)
+#define SET_PTL3_ADDRESS_ARCH(ptl2, i, a) \
+	(((pte_t *) (ptl2))[(i)].output_address = (a) >> 12)
+#define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) \
+	(((pte_t *) (ptl3))[(i)].output_address = (a) >> 12)
+
+/* Get PTE flags accessors for each level. */
+#define GET_PTL1_FLAGS_ARCH(ptl0, i) \
+	get_pt_level012_flags((pte_t *) (ptl0), (size_t) (i))
+#define GET_PTL2_FLAGS_ARCH(ptl1, i) \
+	get_pt_level012_flags((pte_t *) (ptl1), (size_t) (i))
+#define GET_PTL3_FLAGS_ARCH(ptl2, i) \
+	get_pt_level012_flags((pte_t *) (ptl2), (size_t) (i))
+#define GET_FRAME_FLAGS_ARCH(ptl3, i) \
+	get_pt_level3_flags((pte_t *) (ptl3), (size_t) (i))
+
+/* Set PTE flags accessors for each level. */
+#define SET_PTL1_FLAGS_ARCH(ptl0, i, x) \
+	set_pt_level012_flags((pte_t *) (ptl0), (size_t) (i), (x))
+#define SET_PTL2_FLAGS_ARCH(ptl1, i, x) \
+	set_pt_level012_flags((pte_t *) (ptl1), (size_t) (i), (x))
+#define SET_PTL3_FLAGS_ARCH(ptl2, i, x) \
+	set_pt_level012_flags((pte_t *) (ptl2), (size_t) (i), (x))
+#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) \
+	set_pt_level3_flags((pte_t *) (ptl3), (size_t) (i), (x))
+
+/* Set PTE present bit accessors for each level. */
+#define SET_PTL1_PRESENT_ARCH(ptl0, i) \
+	set_pt_present((pte_t *) (ptl0), (size_t) (i))
+#define SET_PTL2_PRESENT_ARCH(ptl1, i) \
+	set_pt_present((pte_t *) (ptl1), (size_t) (i))
+#define SET_PTL3_PRESENT_ARCH(ptl2, i) \
+	set_pt_present((pte_t *) (ptl2), (size_t) (i))
+#define SET_FRAME_PRESENT_ARCH(ptl3, i) \
+	set_pt_present((pte_t *) (ptl3), (size_t) (i))
+
+/* Macros for querying the last-level PTE entries. */
+#define PTE_VALID_ARCH(pte) \
+	(((pte_t *) (pte))->valid != 0)
+#define PTE_PRESENT_ARCH(pte) \
+	(((pte_t *) (pte))->valid != 0)
+#define PTE_GET_FRAME_ARCH(pte) \
+	(((uintptr_t) ((pte_t *) (pte))->output_address) << FRAME_WIDTH)
+#define PTE_WRITABLE_ARCH(pte) \
+	get_pt_writable((pte_t *) (pte))
+#define PTE_EXECUTABLE_ARCH(pte) \
+	get_pt_executable((pte_t *) (pte))
+
+/* Level 3 access permissions. */
+
+/** Data access permission. User mode: no access, privileged mode: read/write.
+ */
+#define PTE_AP_USER_NO_KERNEL_FULL  0
+
+/** Data access permission. User mode: read/write, privileged mode: read/write.
+ */
+#define PTE_AP_USER_FULL_KERNEL_FULL  1
+
+/** Data access permission. User mode: no access, privileged mode: read only. */
+#define PTE_AP_USER_NO_KERNEL_LIMITED  2
+
+/** Data access permission. User mode: read only, privileged mode: read only. */
+#define PTE_AP_USER_LIMITED_KERNEL_LIMITED  3
+
+/*
+ * Memory types. MAIR_EL1 index 0 is unused, which assures that if a page
+ * table entry is non-null then it is valid (PTE_VALID_ARCH() returns true).
+ */
+
+/** Write-Back Cacheable Normal memory, Inner shareable, Read-write cache
+ * allocation. Defined in MAIR_EL1 index 1.
+ */
+#define MAIR_EL1_NORMAL_MEMORY_ATTR  0xff
+#define MAIR_EL1_NORMAL_MEMORY_INDEX  1
+
+/** Device-nGnRE memory (Device non-Gathering, non-Reordering, Early Write
+ * Acknowledgement). Equivalent to the Device memory type in earlier versions of
+ * the architecture. Defined in MAIR_EL1 index 2.
+ */
+#define MAIR_EL1_DEVICE_MEMORY_ATTR  0x04
+#define MAIR_EL1_DEVICE_MEMORY_INDEX  2
+
+/** Bit width of one memory attribute field in MAIR_EL1. */
+#define MAIR_EL1_ATTR_SHIFT  8
+
+/* Level 0, 1, 2 descriptor types. */
+
+/** Block descriptor (valid in level 0, 1, 2 page translation tables). */
+#define PTE_L012_TYPE_BLOCK  0
+
+/** Next-table descriptor (valid in level 0, 1, 2 page translation tables). */
+#define PTE_L012_TYPE_TABLE  1
+
+/* Level 3 descriptor types. */
+
+/** Page descriptor (valid in level 3 page translation tables). */
+#define PTE_L3_TYPE_PAGE  1
+
+/** HelenOS descriptor type. Table for level 0, 1, 2 page translation tables,
+ * page for level 3 tables. Block descriptors are not used by HelenOS during
+ * normal processing.
+ */
+#define PTE_L0123_TYPE_HELENOS  1
+
+/* Page table entry access macros. */
+
+/** Shift to access the next-level table address in a page table entry. */
+#define PTE_NEXT_LEVEL_ADDRESS_SHIFT  12
+
+/** Shift to access the resulting address in a page table entry. */
+#define PTE_OUTPUT_ADDRESS_SHIFT  12
+
+/** Shift to access the access bit in a page table entry. */
+#define PTE_ACCESS_SHIFT  10
+
+/** Shift to access the attr_index field in a page table entry. */
+#define PTE_ATTR_INDEX_SHIFT  2
+
+/** Shift to access the type bit in a page table entry. */
+#define PTE_TYPE_SHIFT  1
+
+/** Shift to access the present bit in a page table entry. */
+#define PTE_PRESENT_SHIFT  0
+
+/** The present bit in a page table entry. */
+#define PTE_PRESENT_FLAG  (1 << PTE_PRESENT_SHIFT)
+
+#ifndef __ASSEMBLER__
+
+#include <arch/interrupt.h>
+
+/** Page Table Entry.
+ *
+ * HelenOS model:
+ * * Level 0, 1, 2 translation tables hold next-level table descriptors. Block
+ *   descriptors are not used during normal processing.
+ * * Level 3 tables store 4kB page descriptors.
+ */
+typedef struct {
+	/* Common bits. */
+	/** Flag indicating entry contains valid data and can be used for page
+	 * translation.
+	 *
+	 * Note: The flag is called `valid' in the official ARM terminology
+	 * but it has the `present' (valid+active) sense in HelenOS.
+	 */
+	unsigned valid : 1;
+	unsigned type : 1;  /**< PTE_L012_TYPE_* or PTE_L3_TYPE_PAGE. */
+
+	/* Lower block and page attributes. */
+	unsigned attr_index : 3;  /**< Memory attribute index into MAIR_EL1. */
+	unsigned non_secure : 1;
+	unsigned access_permission : 2;  /**< One of the PTE_AP_* encodings. */
+	unsigned shareability : 2;
+	unsigned access : 1;  /**< Access flag (AF). */
+	unsigned not_global : 1;  /**< nG; set for ASID-tagged mappings. */
+
+	/* Common output address. */
+	uint64_t output_address : 36;  /**< Physical address bits [47:12]. */
+
+	unsigned : 4;
+
+	/* Upper block and page attributes. */
+	unsigned contiguous : 1;
+	unsigned privileged_execute_never : 1;  /**< PXN. */
+	unsigned unprivileged_execute_never : 1;  /**< UXN. */
+
+	unsigned : 4;
+
+	/* Next-level table attributes. */
+	unsigned privileged_execute_never_table : 1;
+	unsigned unprivileged_execute_never_table : 1;
+	unsigned access_permission_table : 2;
+	unsigned non_secure_table : 1;
+} __attribute__((packed)) pte_t;
+
+/** Returns level 0, 1, 2 page table entry flags.
+ * Non-leaf entries always report cacheable/user/read/write/exec as set.
+ * @param pt Level 0, 1, 2 page table.
+ * @param i  Index of the entry to return.
+ */
+_NO_TRACE static inline unsigned int get_pt_level012_flags(pte_t *pt, size_t i)
+{
+	pte_t *p = &pt[i];
+
+	return (1 << PAGE_CACHEABLE_SHIFT) |
+	    (!p->valid << PAGE_PRESENT_SHIFT) | (1 << PAGE_USER_SHIFT) | /* set bit = not present */
+	    (1 << PAGE_READ_SHIFT) | (1 << PAGE_WRITE_SHIFT) |
+	    (1 << PAGE_EXEC_SHIFT);
+}
+
+/** Returns level 3 page table entry flags.
+ * Reconstructed from attr_index, access permissions and execute-never bits.
+ * @param pt Level 3 page table.
+ * @param i  Index of the entry to return.
+ */
+_NO_TRACE static inline unsigned int get_pt_level3_flags(pte_t *pt, size_t i)
+{
+	pte_t *p = &pt[i];
+
+	int cacheable = (p->attr_index == MAIR_EL1_NORMAL_MEMORY_INDEX); /* device memory otherwise */
+	int user = (p->access_permission == PTE_AP_USER_FULL_KERNEL_FULL ||
+	    p->access_permission == PTE_AP_USER_LIMITED_KERNEL_LIMITED);
+	int write = (p->access_permission == PTE_AP_USER_FULL_KERNEL_FULL ||
+	    p->access_permission == PTE_AP_USER_NO_KERNEL_FULL);
+	int exec = ((user && !p->unprivileged_execute_never) ||
+	    (!user && !p->privileged_execute_never));
+
+	return (cacheable << PAGE_CACHEABLE_SHIFT) |
+	    (!p->valid << PAGE_PRESENT_SHIFT) | (user << PAGE_USER_SHIFT) | /* set bit = not present */
+	    (1 << PAGE_READ_SHIFT) | (write << PAGE_WRITE_SHIFT) |
+	    (exec << PAGE_EXEC_SHIFT) | (!p->not_global << PAGE_GLOBAL_SHIFT);
+}
+
+/** Sets flags of level 0, 1, 2 page table entry.
+ * Present polarity is inverted: PAGE_PRESENT is zero, PAGE_NOT_PRESENT a flag.
+ * @param pt    Level 0, 1, 2 page table.
+ * @param i     Index of the entry to be changed.
+ * @param flags New flags.
+ */
+_NO_TRACE static inline void set_pt_level012_flags(pte_t *pt, size_t i,
+    int flags)
+{
+	pte_t *p = &pt[i];
+
+	/* PAGE_PRESENT is the zero value, so test PAGE_NOT_PRESENT instead. */
+	p->valid = (flags & PAGE_NOT_PRESENT) == 0;
+	p->type = PTE_L012_TYPE_TABLE;
+}
+
+/** Sets flags of level 3 page table entry.
+ * Present polarity is inverted: PAGE_PRESENT is zero, PAGE_NOT_PRESENT a flag.
+ * @param pt    Level 3 page table.
+ * @param i     Index of the entry to be changed.
+ * @param flags New flags.
+ */
+_NO_TRACE static inline void set_pt_level3_flags(pte_t *pt, size_t i,
+    int flags)
+{
+	pte_t *p = &pt[i];
+
+	if (flags & PAGE_CACHEABLE)
+		p->attr_index = MAIR_EL1_NORMAL_MEMORY_INDEX;
+	else
+		p->attr_index = MAIR_EL1_DEVICE_MEMORY_INDEX;
+	/* PAGE_PRESENT is the zero value, so test PAGE_NOT_PRESENT instead. */
+	p->valid = (flags & PAGE_NOT_PRESENT) == 0;
+	p->type = PTE_L3_TYPE_PAGE;
+
+	/* Translate page permissions to access permissions. */
+	if (flags & PAGE_USER) {
+		if (flags & PAGE_WRITE)
+			p->access_permission = PTE_AP_USER_FULL_KERNEL_FULL;
+		else
+			p->access_permission =
+			    PTE_AP_USER_LIMITED_KERNEL_LIMITED;
+	} else {
+		if (flags & PAGE_WRITE)
+			p->access_permission = PTE_AP_USER_NO_KERNEL_FULL;
+		else
+			p->access_permission = PTE_AP_USER_NO_KERNEL_LIMITED;
+	}
+	p->access = 1;  /* mark as accessed (AF) */
+	p->unprivileged_execute_never = p->privileged_execute_never =
+	    (flags & PAGE_EXEC) == 0;
+
+	p->not_global = (flags & PAGE_GLOBAL) == 0;
+}
+
+/** Sets the present flag of page table entry.
+ * Only the valid bit is set; all other fields are left untouched.
+ * @param pt Level 0, 1, 2, 3 page table.
+ * @param i  Index of the entry to be changed.
+ */
+_NO_TRACE static inline void set_pt_present(pte_t *pt, size_t i)
+{
+	pte_t *p = &pt[i];
+
+	p->valid = 1;  /* entry becomes present/valid */
+}
+
+/** Gets the executable flag of page table entry.
+ * Kernel-only mappings are governed by PXN, user-accessible ones by UXN.
+ * @param pte Page table entry.
+ */
+_NO_TRACE static inline bool get_pt_executable(pte_t *pte)
+{
+	if (pte->access_permission == PTE_AP_USER_NO_KERNEL_FULL ||
+	    pte->access_permission == PTE_AP_USER_NO_KERNEL_LIMITED)
+		return !pte->privileged_execute_never;
+	else
+		return !pte->unprivileged_execute_never;
+}
+
+/** Gets the writable flag of page table entry.
+ * True for the two access permission encodings that allow write access.
+ * @param pte Page table entry.
+ */
+_NO_TRACE static inline bool get_pt_writable(pte_t *pte)
+{
+	return pte->access_permission == PTE_AP_USER_FULL_KERNEL_FULL ||
+	    pte->access_permission == PTE_AP_USER_NO_KERNEL_FULL;
+}
+
+extern void page_arch_init(void);
+
+#endif /* __ASSEMBLER__ */
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/mm/tlb.h
===================================================================
--- kernel/arch/arm64/include/arch/mm/tlb.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/mm/tlb.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64_mm
+ * @{
+ */
+/** @file
+ * @brief TLB related declarations.
+ */
+
+#ifndef KERN_arm64_TLB_H_
+#define KERN_arm64_TLB_H_
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/proc/task.h
===================================================================
--- kernel/arch/arm64/include/arch/proc/task.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/proc/task.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64_proc
+ * @{
+ */
+/** @file
+ * @brief Task related declarations.
+ */
+
+#ifndef KERN_arm64_TASK_H_
+#define KERN_arm64_TASK_H_
+
+typedef struct {
+} task_arch_t;
+
+#define task_create_arch(t)
+#define task_destroy_arch(t)
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/proc/thread.h
===================================================================
--- kernel/arch/arm64/include/arch/proc/thread.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/proc/thread.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64_proc
+ * @{
+ */
+/** @file
+ * @brief Thread related declarations.
+ */
+
+#ifndef KERN_arm64_THREAD_H_
+#define KERN_arm64_THREAD_H_
+
+typedef struct {
+} thread_arch_t;
+
+#define thr_constructor_arch(t)
+#define thr_destructor_arch(t)
+#define thread_create_arch(t, flags) (EOK)
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/regutils.h
===================================================================
--- kernel/arch/arm64/include/arch/regutils.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/regutils.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ * @brief Utilities for convenient manipulation with ARM registers.
+ */
+
+#ifndef KERN_arm64_REGUTILS_H_
+#define KERN_arm64_REGUTILS_H_
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+#define SPECIAL_REG_GEN_READ(name) \
+	static inline uint64_t name##_read(void) \
+	{ \
+		uint64_t res; \
+		asm volatile ( \
+		    "mrs %[res], " #name \
+		    : [res] "=r" (res) \
+		); \
+		return res; \
+	}
+
+#define SPECIAL_REG_GEN_WRITE(name) \
+	static inline void name##_write(uint64_t regn) \
+	{ \
+		asm volatile ( \
+		    "msr " #name ", %[regn]\n" \
+		    "isb\n" \
+		    :: [regn] "r" (regn) \
+		); \
+	}
+
+#define UWORD64(c)  UINT64_C(c)
+
+#else /* __ASSEMBLER__ */
+
+#define SPECIAL_REG_GEN_READ(name)
+#define SPECIAL_REG_GEN_WRITE(name)
+#define UWORD64(c)  c
+
+#endif /* __ASSEMBLER__*/
+
+/* CNTFRQ_EL0 */
+SPECIAL_REG_GEN_READ(CNTFRQ_EL0);
+
+/* CNTVCT_EL0 */
+SPECIAL_REG_GEN_READ(CNTVCT_EL0);
+
+/* CNTV_CTL_EL0 */
+SPECIAL_REG_GEN_READ(CNTV_CTL_EL0);
+SPECIAL_REG_GEN_WRITE(CNTV_CTL_EL0);
+#define CNTV_CTL_ENABLE_SHIFT  0
+#define CNTV_CTL_ENABLE_FLAG  (UWORD64(1) << CNTV_CTL_ENABLE_SHIFT)
+#define CNTV_CTL_IMASK_SHIFT  1
+#define CNTV_CTL_IMASK_FLAG  (UWORD64(1) << CNTV_CTL_IMASK_SHIFT)
+
+/* CNTV_CVAL_EL0 */
+SPECIAL_REG_GEN_READ(CNTV_CVAL_EL0);
+SPECIAL_REG_GEN_WRITE(CNTV_CVAL_EL0);
+
+/* CPACR_EL1 */
+SPECIAL_REG_GEN_READ(CPACR_EL1);
+SPECIAL_REG_GEN_WRITE(CPACR_EL1);
+#define CPACR_FPEN_SHIFT  20
+#define CPACR_FPEN_MASK  (UWORD64(0x3) << CPACR_FPEN_SHIFT)
+#define CPACR_FPEN_TRAP_ALL  0x0
+#define CPACR_FPEN_TRAP_NONE  0x3
+
+/* CurrentEL */
+SPECIAL_REG_GEN_READ(CurrentEL);
+#define CURRENT_EL_EL0  0x0
+#define CURRENT_EL_EL1  0x4
+#define CURRENT_EL_EL2  0x8
+#define CURRENT_EL_EL3  0xc
+
+/* DAIF */
+SPECIAL_REG_GEN_READ(DAIF);
+SPECIAL_REG_GEN_WRITE(DAIF);
+#define DAIF_IRQ_SHIFT  7
+#define DAIF_IRQ_FLAG  (UWORD64(1) << DAIF_IRQ_SHIFT)
+
+/* ELR */
+SPECIAL_REG_GEN_WRITE(ELR_EL1);
+
+/* ESR */
+SPECIAL_REG_GEN_READ(ESR_EL1);
+#define ESR_EC_SHIFT  26
+#define ESR_EC_MASK  (UWORD64(0x3f) << ESR_EC_SHIFT)
+
+/** Exception from access to Advanced SIMD or floating-point functionality. */
+#define ESR_EC_FP  0x07
+/** Exception from SVC instruction execution. */
+#define ESR_EC_SVC  0x15
+
+/** Instruction abort from Lower Exception level. */
+#define ESR_EC_IA_LOWER_EL  0x20
+/** Data abort from Lower Exception level. */
+#define ESR_EC_DA_LOWER_EL  0x24
+/** Data abort from Current Exception level. */
+#define ESR_EC_DA_CURRENT_EL  0x25
+
+/** Instruction/data Fault Status Code. */
+#define ESR_IDFSC_SHIFT  0
+#define ESR_IDFSC_MASK  (UWORD64(0x3f) << ESR_IDFSC_SHIFT)
+
+/** Instruction/data abort, translation fault, zeroth level. */
+#define ESR_IDA_IDFSC_TF0  0x4
+/** Instruction/data abort, translation fault, first level. */
+#define ESR_IDA_IDFSC_TF1  0x5
+/** Instruction/data abort, translation fault, second level. */
+#define ESR_IDA_IDFSC_TF2  0x6
+/** Instruction/data abort, translation fault, third level. */
+#define ESR_IDA_IDFSC_TF3  0x7
+
+/** Data abort, Write not Read. */
+#define ESR_DA_WNR_SHIFT  6
+#define ESR_DA_WNR_FLAG  (UWORD64(1) << ESR_DA_WNR_SHIFT)
+
+/* FAR */
+SPECIAL_REG_GEN_READ(FAR_EL1);
+
+/* MIDR_EL1 */
+SPECIAL_REG_GEN_READ(MIDR_EL1);
+#define MIDR_REVISION_SHIFT  0
+#define MIDR_REVISION_MASK  (UWORD64(0xf) << MIDR_REVISION_SHIFT)
+#define MIDR_PARTNUM_SHIFT  4
+#define MIDR_PARTNUM_MASK  (UWORD64(0xfff) << MIDR_PARTNUM_SHIFT)
+#define MIDR_VARIANT_SHIFT  20
+#define MIDR_VARIANT_MASK  (UWORD64(0xf) << MIDR_VARIANT_SHIFT)
+#define MIDR_IMPLEMENTER_SHIFT  24
+#define MIDR_IMPLEMENTER_MASK  (UWORD64(0xff) << MIDR_IMPLEMENTER_SHIFT)
+
+/* SCTLR */
+#define SCTLR_M_SHIFT  0
+#define SCTLR_M_FLAG  (UWORD64(1) << SCTLR_M_SHIFT)
+
+/* SP */
+SPECIAL_REG_GEN_WRITE(SP_EL0);
+
+/* SPSR */
+SPECIAL_REG_GEN_READ(SPSR_EL1);
+SPECIAL_REG_GEN_WRITE(SPSR_EL1);
+#define SPSR_MODE_SHIFT  0
+#define SPSR_MODE_MASK  (UWORD64(0x1f) << SPSR_MODE_SHIFT)
+#define SPSR_MODE_ARM64_EL0T  0x00  /* ARM64, Exception Level 0, SP_EL0 */
+
+/* TPIDR */
+SPECIAL_REG_GEN_WRITE(TPIDR_EL0);
+
+/* TTBR */
+SPECIAL_REG_GEN_WRITE(TTBR0_EL1);
+SPECIAL_REG_GEN_WRITE(TTBR1_EL1);
+#define TTBR0_ASID_SHIFT  48
+
+/* VBAR */
+SPECIAL_REG_GEN_WRITE(VBAR_EL1);
+
+/* TLBI VAE1IS and TLBI ASIDE1IS parameter. */
+#define TLBI_ASID_SHIFT  48
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/stack.h
===================================================================
--- kernel/arch/arm64/include/arch/stack.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/stack.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ * @brief Stack constants.
+ */
+
+#ifndef KERN_arm64_STACK_H_
+#define KERN_arm64_STACK_H_
+
+#include <config.h>
+
+#define MEM_STACK_SIZE  STACK_SIZE
+
+/** Size of a stack item. */
+#define STACK_ITEM_SIZE	 8
+
+/** Required stack alignment. */
+#define STACK_ALIGNMENT	 16
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/types.h
===================================================================
--- kernel/arch/arm64/include/arch/types.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/include/arch/types.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ */
+
+#ifndef KERN_arm64_TYPES_H_
+#define KERN_arm64_TYPES_H_
+
+#include <_bits/all.h>
+
+typedef struct {
+} fncptr_t;
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/src/arm64.c
===================================================================
--- kernel/arch/arm64/src/arm64.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/src/arm64.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ * @brief ARM64 architecture specific functions.
+ */
+
+#include <abi/errno.h>
+#include <arch.h>
+#include <arch/arch.h>
+#include <arch/asm.h>
+#include <arch/exception.h>
+#include <arch/machine_func.h>
+#include <interrupt.h>
+#include <proc/scheduler.h>
+#include <syscall/syscall.h>
+#include <sysinfo/sysinfo.h>
+#include <userspace.h>
+
+static void arm64_post_mm_init(void);
+static void arm64_post_smp_init(void);
+
+arch_ops_t arm64_ops = {
+	.post_mm_init = arm64_post_mm_init,
+	.post_smp_init = arm64_post_smp_init,
+};
+
+arch_ops_t *arch_ops = &arm64_ops;
+
+/** Perform ARM64 specific initialization before main_bsp() is called. */
+void arm64_pre_main(void *entry __attribute__((unused)), bootinfo_t *bootinfo)
+{
+	/* Copy init task info. */
+	init.cnt = min3(bootinfo->taskmap.cnt, TASKMAP_MAX_RECORDS,
+	    CONFIG_INIT_TASKS);
+
+	size_t i;
+	for (i = 0; i < init.cnt; i++) {
+		init.tasks[i].paddr =
+		    (uintptr_t) bootinfo->taskmap.tasks[i].addr;
+		init.tasks[i].size = bootinfo->taskmap.tasks[i].size;
+		str_cpy(init.tasks[i].name, CONFIG_TASK_NAME_BUFLEN,
+		    bootinfo->taskmap.tasks[i].name);
+	}
+
+	/* Copy physical memory map. */
+	memmap.cnt = min(bootinfo->memmap.cnt, MEMMAP_MAX_RECORDS);
+	for (i = 0; i < memmap.cnt; i++) {
+		memmap.zones[i].type = bootinfo->memmap.zones[i].type;
+		memmap.zones[i].start = bootinfo->memmap.zones[i].start;
+		memmap.zones[i].size = bootinfo->memmap.zones[i].size;
+	}
+
+	/* Initialize machine_ops pointer. */
+	machine_ops_init();
+}
+
+/** Perform ARM64 specific tasks needed after the memory management is
+ * initialized.
+ */
+void arm64_post_mm_init(void)
+{
+	if (config.cpu_active != 1)
+		return;
+
+	/* Do machine-specific initialization. */
+	machine_init();
+
+	/* Initialize exception dispatch table. */
+	exception_init();
+	interrupt_init();
+
+	/* Merge all memory zones to 1 big zone. */
+	zone_merge_all();
+
+	/* Initialize output device. */
+	machine_output_init();
+}
+
+/** Perform ARM64 specific tasks needed after the multiprocessing is
+ * initialized.
+ */
+void arm64_post_smp_init(void)
+{
+	/* Set platform name. */
+	const char *platform = machine_get_platform_name();
+
+	sysinfo_set_item_data("platform", NULL, (void *) platform,
+	    str_size(platform));
+
+	/* Initialize input device. */
+	machine_input_init();
+}
+
+/** Calibrate delay loop.
+ *
+ * On ARM64, we implement delay() by waiting for the CNTVCT_EL0 register to
+ * reach a pre-computed value, as opposed to performing some pre-computed amount
+ * of instructions of known duration. We set the delay_loop_const to 1 in order
+ * to neutralize the multiplication done by delay().
+ */
+void calibrate_delay_loop(void)
+{
+	CPU->delay_loop_const = 1;
+}
+
+/** Wait several microseconds.
+ *
+ * @param usec Microseconds to wait.
+ */
+void asm_delay_loop(uint32_t usec)
+{
+	uint64_t stop = CNTVCT_EL0_read() + usec * CNTFRQ_EL0_read() / 1000000;
+
+	while (CNTVCT_EL0_read() < stop)
+		;
+}
+
+/** Change processor mode.
+ *
+ * @param kernel_uarg Userspace settings (entry point, stack, ...).
+ */
+void userspace(uspace_arg_t *kernel_uarg)
+{
+	/* Prepare return to EL0. */
+	SPSR_EL1_write((SPSR_EL1_read() & ~SPSR_MODE_MASK) |
+	    SPSR_MODE_ARM64_EL0T);
+
+	/* Set program entry. */
+	ELR_EL1_write((uint64_t) kernel_uarg->uspace_entry);
+
+	/* Set user stack. */
+	SP_EL0_write(((uint64_t) kernel_uarg->uspace_stack +
+	    kernel_uarg->uspace_stack_size));
+
+	/* Clear Thread ID register. */
+	TPIDR_EL0_write(0);
+
+	asm volatile (
+	    /*
+	     * Clear all general-purpose registers, except x0 that holds an
+	     * argument for the user space.
+	     */
+	    "mov x0, %[uspace_uarg]\n"
+	    "mov x1, #0\n"
+	    "mov x2, #0\n"
+	    "mov x3, #0\n"
+	    "mov x4, #0\n"
+	    "mov x5, #0\n"
+	    "mov x6, #0\n"
+	    "mov x7, #0\n"
+	    "mov x8, #0\n"
+	    "mov x9, #0\n"
+	    "mov x10, #0\n"
+	    "mov x11, #0\n"
+	    "mov x12, #0\n"
+	    "mov x13, #0\n"
+	    "mov x14, #0\n"
+	    "mov x15, #0\n"
+	    "mov x16, #0\n"
+	    "mov x17, #0\n"
+	    "mov x18, #0\n"
+	    "mov x19, #0\n"
+	    "mov x20, #0\n"
+	    "mov x21, #0\n"
+	    "mov x22, #0\n"
+	    "mov x23, #0\n"
+	    "mov x24, #0\n"
+	    "mov x25, #0\n"
+	    "mov x26, #0\n"
+	    "mov x27, #0\n"
+	    "mov x28, #0\n"
+	    "mov x29, #0\n"
+	    "mov x30, #0\n"
+	    "eret\n"
+	    :: [uspace_uarg] "r" (kernel_uarg->uspace_uarg)
+	);
+
+	unreachable();
+}
+
+/** Perform ARM64 specific tasks needed before the new task is run. */
+void before_task_runs_arch(void)
+{
+}
+
+/** Perform ARM64 specific tasks needed before the new thread is scheduled.
+ */
+void before_thread_runs_arch(void)
+{
+}
+
+/** Perform ARM64 specific tasks before a thread stops running. */
+void after_thread_ran_arch(void)
+{
+}
+
+/** Reboot the system. */
+void arch_reboot(void)
+{
+	/* Not implemented. */
+	while (true)
+		;
+}
+
+/** Construct function pointer.
+ *
+ * @param fptr   Function pointer structure.
+ * @param addr   Function address.
+ * @param caller Calling function address.
+ *
+ * @return Address of the function pointer.
+ */
+void *arch_construct_function(fncptr_t *fptr, void *addr, void *caller)
+{
+	return addr;
+}
+
+/** Perform ARM64 specific tasks to initialize IRQ processing. */
+void irq_initialize_arch(irq_t *irq __attribute__((unused)))
+{
+}
+
+/** @}
+ */
Index: kernel/arch/arm64/src/asm.S
===================================================================
--- kernel/arch/arm64/src/asm.S	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/src/asm.S	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <abi/asmtool.h>
+#include <arch/exception.h>
+#include <arch/istate_struct.h>
+
+.text
+
+FUNCTION_BEGIN(memcpy_from_uspace)
+FUNCTION_BEGIN(memcpy_to_uspace)
+	/* Simple (un-optimized) memcpy(). */
+	cbz x2, 2f
+	mov x3, x0
+1:
+	ldrb w4, [x1], #1
+	strb w4, [x3], #1
+	subs x2, x2, #1
+	b.ne 1b
+
+2:
+	ret
+FUNCTION_END(memcpy_from_uspace)
+FUNCTION_END(memcpy_to_uspace)
+
+FUNCTION_BEGIN(memcpy_from_uspace_failover_address)
+FUNCTION_BEGIN(memcpy_to_uspace_failover_address)
+	mov x0, #0
+	ret
+FUNCTION_END(memcpy_from_uspace_failover_address)
+FUNCTION_END(memcpy_to_uspace_failover_address)
+
+FUNCTION_BEGIN(early_putwchar)
+	ret
+FUNCTION_END(early_putwchar)
+
+/* Static checks for the istate_t save/load. */
+#if ISTATE_OFFSET_X0 + 8 != ISTATE_OFFSET_X1
+#error x0 and x1 are not successive in istate_t
+#endif
+#if ISTATE_OFFSET_X2 + 8 != ISTATE_OFFSET_X3
+#error x2 and x3 are not successive in istate_t
+#endif
+#if ISTATE_OFFSET_X4 + 8 != ISTATE_OFFSET_X5
+#error x4 and x5 are not successive in istate_t
+#endif
+#if ISTATE_OFFSET_X6 + 8 != ISTATE_OFFSET_X7
+#error x6 and x7 are not successive in istate_t
+#endif
+#if ISTATE_OFFSET_X8 + 8 != ISTATE_OFFSET_X9
+#error x8 and x9 are not successive in istate_t
+#endif
+#if ISTATE_OFFSET_X10 + 8 != ISTATE_OFFSET_X11
+#error x10 and x11 are not successive in istate_t
+#endif
+#if ISTATE_OFFSET_X12 + 8 != ISTATE_OFFSET_X13
+#error x12 and x13 are not successive in istate_t
+#endif
+#if ISTATE_OFFSET_X14 + 8 != ISTATE_OFFSET_X15
+#error x14 and x15 are not successive in istate_t
+#endif
+#if ISTATE_OFFSET_X16 + 8 != ISTATE_OFFSET_X17
+#error x16 and x17 are not successive in istate_t
+#endif
+#if ISTATE_OFFSET_X18 + 8 != ISTATE_OFFSET_X19
+#error x18 and x19 are not successive in istate_t
+#endif
+#if ISTATE_OFFSET_X20 + 8 != ISTATE_OFFSET_X21
+#error x20 and x21 are not successive in istate_t
+#endif
+#if ISTATE_OFFSET_X22 + 8 != ISTATE_OFFSET_X23
+#error x22 and x23 are not successive in istate_t
+#endif
+#if ISTATE_OFFSET_X24 + 8 != ISTATE_OFFSET_X25
+#error x24 and x25 are not successive in istate_t
+#endif
+#if ISTATE_OFFSET_X26 + 8 != ISTATE_OFFSET_X27
+#error x26 and x27 are not successive in istate_t
+#endif
+#if ISTATE_OFFSET_X28 + 8 != ISTATE_OFFSET_X29
+#error x28 and x29 are not successive in istate_t
+#endif
+#if ISTATE_OFFSET_SPSR + 8 != ISTATE_OFFSET_SP
+#error spsr and sp are not successive in istate_t
+#endif
+#if ISTATE_OFFSET_PC + 8 != ISTATE_OFFSET_TPIDR
+#error pc and tpidr are not successive in istate_t
+#endif
+
+/* Exception vector. */
+.macro handler i
+handler_\i:
+	/*
+	 * Initial code for each handler, at maximum 128 bytes (32
+	 * instructions).
+	 */
+
+	/* Save current state. */
+	sub sp, sp, #ISTATE_SIZE                /* 0x00 */
+	stp x0, x1, [sp, #ISTATE_OFFSET_X0]     /* 0x04 */
+	stp x2, x3, [sp, #ISTATE_OFFSET_X2]     /* 0x08 */
+	stp x4, x5, [sp, #ISTATE_OFFSET_X4]     /* 0x0c */
+	stp x6, x7, [sp, #ISTATE_OFFSET_X6]     /* 0x10 */
+	stp x8, x9, [sp, #ISTATE_OFFSET_X8]     /* 0x14 */
+	stp x10, x11, [sp, #ISTATE_OFFSET_X10]  /* 0x18 */
+	stp x12, x13, [sp, #ISTATE_OFFSET_X12]  /* 0x1c */
+	stp x14, x15, [sp, #ISTATE_OFFSET_X14]  /* 0x20 */
+	stp x16, x17, [sp, #ISTATE_OFFSET_X16]  /* 0x24 */
+	stp x18, x19, [sp, #ISTATE_OFFSET_X18]  /* 0x28 */
+	stp x20, x21, [sp, #ISTATE_OFFSET_X20]  /* 0x2c */
+	stp x22, x23, [sp, #ISTATE_OFFSET_X22]  /* 0x30 */
+	stp x24, x25, [sp, #ISTATE_OFFSET_X24]  /* 0x34 */
+	stp x26, x27, [sp, #ISTATE_OFFSET_X26]  /* 0x38 */
+	stp x28, x29, [sp, #ISTATE_OFFSET_X28]  /* 0x3c */
+	str x30, [sp, #ISTATE_OFFSET_X30]       /* 0x40 */
+
+	mrs x0, spsr_el1                        /* 0x44 */
+	mrs x1, sp_el0                          /* 0x48 */
+	stp x0, x1, [sp, #ISTATE_OFFSET_SPSR]   /* 0x4c */
+
+	mrs x0, elr_el1                         /* 0x50 */
+	mrs x1, tpidr_el0                       /* 0x54 */
+	stp x0, x1, [sp, #ISTATE_OFFSET_PC]     /* 0x58 */
+
+	mov x0, #\i                             /* 0x5c */
+	mov x1, sp                              /* 0x60 */
+	bl exc_dispatch                         /* 0x64 */
+
+	/* Restore previous state. */
+	ldp x0, x1, [sp, #ISTATE_OFFSET_SPSR]   /* 0x68 */
+	msr spsr_el1, x0                        /* 0x6c */
+	msr sp_el0, x1                          /* 0x70 */
+
+	ldp x0, x1, [sp, #ISTATE_OFFSET_PC]     /* 0x74 */
+	msr elr_el1, x0                         /* 0x78 */
+	b exc_restore_end                       /* 0x7c */
+.endm
+
+exc_restore_end:
+	/* Restore remaining registers and return from the exception handler. */
+	msr tpidr_el0, x1
+	ldp x0, x1, [sp, #ISTATE_OFFSET_X0]
+	ldp x2, x3, [sp, #ISTATE_OFFSET_X2]
+	ldp x4, x5, [sp, #ISTATE_OFFSET_X4]
+	ldp x6, x7, [sp, #ISTATE_OFFSET_X6]
+	ldp x8, x9, [sp, #ISTATE_OFFSET_X8]
+	ldp x10, x11, [sp, #ISTATE_OFFSET_X10]
+	ldp x12, x13, [sp, #ISTATE_OFFSET_X12]
+	ldp x14, x15, [sp, #ISTATE_OFFSET_X14]
+	ldp x16, x17, [sp, #ISTATE_OFFSET_X16]
+	ldp x18, x19, [sp, #ISTATE_OFFSET_X18]
+	ldp x20, x21, [sp, #ISTATE_OFFSET_X20]
+	ldp x22, x23, [sp, #ISTATE_OFFSET_X22]
+	ldp x24, x25, [sp, #ISTATE_OFFSET_X24]
+	ldp x26, x27, [sp, #ISTATE_OFFSET_X26]
+	ldp x28, x29, [sp, #ISTATE_OFFSET_X28]
+	ldr x30, [sp, #ISTATE_OFFSET_X30]
+	add sp, sp, #ISTATE_SIZE
+	eret
+
+.align 11
+SYMBOL(exc_vector)
+.org exc_vector + 0x000
+	handler EXC_CURRENT_EL_SP_SEL0_SYNCH
+.org exc_vector + 0x080
+	handler EXC_CURRENT_EL_SP_SEL0_IRQ
+.org exc_vector + 0x100
+	handler EXC_CURRENT_EL_SP_SEL0_FIQ
+.org exc_vector + 0x180
+	handler EXC_CURRENT_EL_SP_SEL0_SERROR
+.org exc_vector + 0x200
+	handler EXC_CURRENT_EL_SP_SELX_SYNCH
+.org exc_vector + 0x280
+	handler EXC_CURRENT_EL_SP_SELX_IRQ
+.org exc_vector + 0x300
+	handler EXC_CURRENT_EL_SP_SELX_FIQ
+.org exc_vector + 0x380
+	handler EXC_CURRENT_EL_SP_SELX_SERROR
+.org exc_vector + 0x400
+	handler EXC_LOWER_EL_AARCH64_SYNCH
+.org exc_vector + 0x480
+	handler EXC_LOWER_EL_AARCH64_IRQ
+.org exc_vector + 0x500
+	handler EXC_LOWER_EL_AARCH64_FIQ
+.org exc_vector + 0x580
+	handler EXC_LOWER_EL_AARCH64_SERROR
+.org exc_vector + 0x600
+	handler EXC_LOWER_EL_AARCH32_SYNCH
+.org exc_vector + 0x680
+	handler EXC_LOWER_EL_AARCH32_IRQ
+.org exc_vector + 0x700
+	handler EXC_LOWER_EL_AARCH32_FIQ
+.org exc_vector + 0x780
+	handler EXC_LOWER_EL_AARCH32_SERROR
+.org exc_vector + 0x800
Index: kernel/arch/arm64/src/context.S
===================================================================
--- kernel/arch/arm64/src/context.S	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/src/context.S	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <abi/asmtool.h>
+#include <arch/context_struct.h>
+
+.text
+
+/* Static checks for the context_t save/load. */
+#if CONTEXT_OFFSET_SP + 8 != CONTEXT_OFFSET_PC
+#error sp and pc are not successive in context_t
+#endif
+#if CONTEXT_OFFSET_X19 + 8 != CONTEXT_OFFSET_X20
+#error x19 and x20 are not successive in context_t
+#endif
+#if CONTEXT_OFFSET_X21 + 8 != CONTEXT_OFFSET_X22
+#error x21 and x22 are not successive in context_t
+#endif
+#if CONTEXT_OFFSET_X23 + 8 != CONTEXT_OFFSET_X24
+#error x23 and x24 are not successive in context_t
+#endif
+#if CONTEXT_OFFSET_X25 + 8 != CONTEXT_OFFSET_X26
+#error x25 and x26 are not successive in context_t
+#endif
+#if CONTEXT_OFFSET_X27 + 8 != CONTEXT_OFFSET_X28
+#error x27 and x28 are not successive in context_t
+#endif
+
+FUNCTION_BEGIN(context_save_arch)
+	/* Save callee-saved registers into context_t pointed by x0. */
+	mov x1, sp
+	stp x1, x30, [x0, #CONTEXT_OFFSET_SP]
+	stp x19, x20, [x0, #CONTEXT_OFFSET_X19]
+	stp x21, x22, [x0, #CONTEXT_OFFSET_X21]
+	stp x23, x24, [x0, #CONTEXT_OFFSET_X23]
+	stp x25, x26, [x0, #CONTEXT_OFFSET_X25]
+	stp x27, x28, [x0, #CONTEXT_OFFSET_X27]
+	str x29, [x0, #CONTEXT_OFFSET_X29]
+
+	/* Return 1. */
+	mov x0, #1
+	ret
+FUNCTION_END(context_save_arch)
+
+FUNCTION_BEGIN(context_restore_arch)
+	/* Restore callee-saved registers from context_t pointed by x0. */
+	ldp x1, x30, [x0, #CONTEXT_OFFSET_SP]
+	mov sp, x1
+	ldp x19, x20, [x0, #CONTEXT_OFFSET_X19]
+	ldp x21, x22, [x0, #CONTEXT_OFFSET_X21]
+	ldp x23, x24, [x0, #CONTEXT_OFFSET_X23]
+	ldp x25, x26, [x0, #CONTEXT_OFFSET_X25]
+	ldp x27, x28, [x0, #CONTEXT_OFFSET_X27]
+	ldr x29, [x0, #CONTEXT_OFFSET_X29]
+
+	/* Return 0. */
+	mov x0, #0
+	ret
+FUNCTION_END(context_restore_arch)
Index: kernel/arch/arm64/src/cpu/cpu.c
===================================================================
--- kernel/arch/arm64/src/cpu/cpu.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/src/cpu/cpu.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ * @brief CPU identification.
+ */
+
+#include <arch/regutils.h>
+#include <cpu.h>
+#include <typedefs.h>
+
+/** Decode implementer (vendor) name. */
+static const char *implementer(uint32_t id)
+{
+	switch (id) {
+	case 0x41:
+		return "ARM Limited";
+	case 0x42:
+		return "Broadcom Corporation";
+	case 0x43:
+		return "Cavium Inc.";
+	case 0x44:
+		return "Digital Equipment Corporation";
+	case 0x49:
+		return "Infineon Technologies AG";
+	case 0x4d:
+		return "Motorola or Freescale Semiconductor Inc.";
+	case 0x4e:
+		return "NVIDIA Corporation";
+	case 0x50:
+		return "Applied Micro Circuits Corporation";
+	case 0x51:
+		return "Qualcomm Inc.";
+	case 0x56:
+		return "Marvell International Ltd.";
+	case 0x69:
+		return "Intel Corporation";
+	}
+	return "Unknown implementer";
+}
+
+/** Perform ARM64-specific tasks needed for CPU initialization. */
+void cpu_arch_init(void)
+{
+}
+
+/** Retrieve processor identification and store it to #CPU.arch */
+void cpu_identify(void)
+{
+	uint64_t midr = MIDR_EL1_read();
+
+	CPU->arch.implementer =
+	    (midr & MIDR_IMPLEMENTER_MASK) >> MIDR_IMPLEMENTER_SHIFT;
+	CPU->arch.variant = (midr & MIDR_VARIANT_MASK) >> MIDR_VARIANT_SHIFT;
+	CPU->arch.partnum = (midr & MIDR_PARTNUM_MASK) >> MIDR_PARTNUM_SHIFT;
+	CPU->arch.revision = (midr & MIDR_REVISION_MASK) >> MIDR_REVISION_SHIFT;
+}
+
+/** Print CPU identification. */
+void cpu_print_report(cpu_t *m)
+{
+	printf("cpu%d: vendor=%s, variant=%" PRIx32 ", part number=%" PRIx32
+	    ", revision=%" PRIx32 "\n",
+	    m->id, implementer(m->arch.implementer), m->arch.variant,
+	    m->arch.partnum, m->arch.revision);
+}
+
+/** @}
+ */
Index: kernel/arch/arm64/src/debug/stacktrace.c
===================================================================
--- kernel/arch/arm64/src/debug/stacktrace.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/src/debug/stacktrace.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ */
+
+#include <stacktrace.h>
+#include <syscall/copy.h>
+#include <typedefs.h>
+
+#define FRAME_OFFSET_FP_PREV  0
+#define FRAME_OFFSET_RA       1
+
+bool kernel_stack_trace_context_validate(stack_trace_context_t *ctx)
+{
+	return ctx->fp != 0;
+}
+
+bool kernel_frame_pointer_prev(stack_trace_context_t *ctx, uintptr_t *prev)
+{
+	uint64_t *stack = (void *) ctx->fp;
+
+	*prev = stack[FRAME_OFFSET_FP_PREV];
+	return true;
+}
+
+bool kernel_return_address_get(stack_trace_context_t *ctx, uintptr_t *ra)
+{
+	uint64_t *stack = (void *) ctx->fp;
+
+	*ra = stack[FRAME_OFFSET_RA];
+	return true;
+}
+
+bool uspace_stack_trace_context_validate(stack_trace_context_t *ctx)
+{
+	return ctx->fp != 0;
+}
+
+bool uspace_frame_pointer_prev(stack_trace_context_t *ctx, uintptr_t *prev)
+{
+	return !copy_from_uspace((void *) prev,
+	    (uint64_t *) ctx->fp + FRAME_OFFSET_FP_PREV, sizeof(*prev));
+}
+
+bool uspace_return_address_get(stack_trace_context_t *ctx, uintptr_t *ra)
+{
+	return !copy_from_uspace((void *) ra,
+	    (uint64_t *) ctx->fp + FRAME_OFFSET_RA, sizeof(*ra));
+}
+
+/** @}
+ */
Index: kernel/arch/arm64/src/debug/stacktrace_asm.S
===================================================================
--- kernel/arch/arm64/src/debug/stacktrace_asm.S	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/src/debug/stacktrace_asm.S	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <abi/asmtool.h>
+
+.text
+
+FUNCTION_BEGIN(frame_pointer_get)
+	mov x0, x29
+	ret
+FUNCTION_END(frame_pointer_get)
+
+FUNCTION_BEGIN(program_counter_get)
+	mov x0, x30
+	ret
+FUNCTION_END(program_counter_get)
Index: kernel/arch/arm64/src/exception.c
===================================================================
--- kernel/arch/arm64/src/exception.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/src/exception.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ * @brief Exception handlers and exception initialization routines.
+ */
+
+#include <arch/asm.h>
+#include <arch/exception.h>
+#include <arch/machine_func.h>
+#include <arch/regutils.h>
+#include <interrupt.h>
+#include <mm/as.h>
+#include <stdio.h>
+#include <syscall/syscall.h>
+
+/** Fatal handler: synchronous exception from current EL using SP_SEL0. */
+static void current_el_sp_sel0_synch_exception(unsigned int exc_no,
+    istate_t *istate)
+{
+	panic_badtrap(istate, exc_no, "Unhandled exception from Current EL, "
+	    "SP_SEL0, Synch, ESR_EL1=%0#10" PRIx32 ", FAR_EL1=%0#18" PRIx64 ".",
+	    (uint32_t) ESR_EL1_read(), FAR_EL1_read());
+}
+
+/** Fatal handler: IRQ from current EL using SP_SEL0. */
+static void current_el_sp_sel0_irq_exception(unsigned int exc_no,
+    istate_t *istate)
+{
+	panic_badtrap(istate, exc_no, "Unhandled exception from Current EL, "
+	    "SP_SEL0, IRQ, ESR_EL1=%0#10" PRIx32 ", FAR_EL1=%0#18" PRIx64 ".",
+	    (uint32_t) ESR_EL1_read(), FAR_EL1_read());
+}
+
+/** Fatal handler: FIQ from current EL using SP_SEL0. */
+static void current_el_sp_sel0_fiq_exception(unsigned int exc_no,
+    istate_t *istate)
+{
+	panic_badtrap(istate, exc_no, "Unhandled exception from Current EL, "
+	    "SP_SEL0, FIQ, ESR_EL1=%0#10" PRIx32 ", FAR_EL1=%0#18" PRIx64 ".",
+	    (uint32_t) ESR_EL1_read(), FAR_EL1_read());
+}
+
+/** Fatal handler: SError from current EL using SP_SEL0. */
+static void current_el_sp_sel0_serror_exception(unsigned int exc_no,
+    istate_t *istate)
+{
+	panic_badtrap(istate, exc_no, "Unhandled exception from Current EL, "
+	    "SP_SEL0, SError, ESR_EL1=%0#10" PRIx32 ", FAR_EL1=%0#18" PRIx64
+	    ".", (uint32_t) ESR_EL1_read(), FAR_EL1_read());
+}
+
+/** Handle a synchronous exception from the current EL using SP_SELx.
+ *
+ * Kernel-mode data aborts that are translation faults are forwarded to the
+ * generic page fault handler; every other exception class is fatal.
+ */
+static void current_el_sp_selx_synch_exception(unsigned int exc_no,
+    istate_t *istate)
+{
+	uint64_t esr_el1 = ESR_EL1_read();
+	uint64_t far_el1 = FAR_EL1_read();
+	pf_access_t access;
+
+	switch ((esr_el1 & ESR_EC_MASK) >> ESR_EC_SHIFT) {
+	case ESR_EC_DA_CURRENT_EL:
+		/* Data abort. */
+		switch ((esr_el1 & ESR_IDFSC_MASK) >> ESR_IDFSC_SHIFT) {
+		case ESR_IDA_IDFSC_TF0:
+		case ESR_IDA_IDFSC_TF1:
+		case ESR_IDA_IDFSC_TF2:
+		case ESR_IDA_IDFSC_TF3:
+			/* Translation fault. */
+			access = (esr_el1 & ESR_DA_WNR_FLAG) ? PF_ACCESS_WRITE :
+			    PF_ACCESS_READ;
+			as_page_fault(far_el1, access, istate);
+			return;
+		}
+	}
+
+	/*
+	 * Report the cached ESR/FAR values instead of re-reading the
+	 * registers, consistently with lower_el_aarch64_synch_exception().
+	 */
+	panic_badtrap(istate, exc_no, "Unhandled exception from Current EL, "
+	    "SP_SELx, Synch, ESR_EL1=%0#10" PRIx32 ", FAR_EL1=%0#18" PRIx64 ".",
+	    (uint32_t) esr_el1, far_el1);
+}
+
+/** Kernel-mode IRQ: dispatch to the machine-specific IRQ handler. */
+static void current_el_sp_selx_irq_exception(unsigned int exc_no,
+    istate_t *istate)
+{
+	machine_irq_exception(exc_no, istate);
+}
+
+/** Fatal handler: FIQ from current EL using SP_SELx. */
+static void current_el_sp_selx_fiq_exception(unsigned int exc_no,
+    istate_t *istate)
+{
+	panic_badtrap(istate, exc_no, "Unhandled exception from Current EL, "
+	    "SP_SELx, FIQ, ESR_EL1=%0#10" PRIx32 ", FAR_EL1=%0#18" PRIx64 ".",
+	    (uint32_t) ESR_EL1_read(), FAR_EL1_read());
+}
+
+/** Fatal handler: SError from current EL using SP_SELx. */
+static void current_el_sp_selx_serror_exception(unsigned int exc_no,
+    istate_t *istate)
+{
+	panic_badtrap(istate, exc_no, "Unhandled exception from Current EL, "
+	    "SP_SELx, SError, ESR_EL1=%0#10" PRIx32 ", FAR_EL1=%0#18" PRIx64
+	    ".", (uint32_t) ESR_EL1_read(), FAR_EL1_read());
+}
+
+/** Handle a synchronous exception taken from a lower EL in AArch64 state.
+ *
+ * Recognized classes: FP/SIMD access traps, SVC (syscall), and instruction/
+ * data aborts that are translation faults. Anything else falls through to
+ * fault_from_uspace() with the raw ESR/FAR values.
+ */
+static void lower_el_aarch64_synch_exception(unsigned int exc_no,
+    istate_t *istate)
+{
+	uint64_t esr_el1 = ESR_EL1_read();
+	uint64_t far_el1 = FAR_EL1_read();
+	pf_access_t access;
+	bool exec = false;
+
+	switch ((esr_el1 & ESR_EC_MASK) >> ESR_EC_SHIFT) {
+	case ESR_EC_FP:
+		/* Access to Advanced SIMD or floating-point functionality. */
+#ifdef CONFIG_FPU_LAZY
+		scheduler_fpu_lazy_request();
+#else
+		fault_from_uspace(istate, "AdvSIMD/FP fault.");
+#endif
+		return;
+	case ESR_EC_SVC:
+		/* SVC instruction: syscall arguments in x0-x6, result in x0. */
+		interrupts_enable();
+		istate->x0 = syscall_handler(istate->x0, istate->x1, istate->x2,
+		    istate->x3, istate->x4, istate->x5, istate->x6);
+		interrupts_disable();
+		return;
+	case ESR_EC_IA_LOWER_EL:
+		/* Instruction abort: treat the fault as an execute access. */
+		exec = true;
+		/* Fallthrough */
+	case ESR_EC_DA_LOWER_EL:
+		/* Data abort. */
+		switch ((esr_el1 & ESR_IDFSC_MASK) >> ESR_IDFSC_SHIFT) {
+		case ESR_IDA_IDFSC_TF0:
+		case ESR_IDA_IDFSC_TF1:
+		case ESR_IDA_IDFSC_TF2:
+		case ESR_IDA_IDFSC_TF3:
+			/* Translation fault. */
+			if (exec)
+				access = PF_ACCESS_EXEC;
+			else
+				access = (esr_el1 & ESR_DA_WNR_FLAG) ?
+				    PF_ACCESS_WRITE : PF_ACCESS_READ;
+			as_page_fault(far_el1, access, istate);
+			return;
+		}
+	}
+
+	/* Non-translation faults and unknown classes end up here. */
+	fault_from_uspace(istate, "Unhandled exception from Lower EL, AArch64, "
+	    "Synch, ESR_EL1=%0#10" PRIx32 ", FAR_EL1=%0#18" PRIx64 ".",
+	    (uint32_t) esr_el1, far_el1);
+}
+
+/** Userspace (AArch64) IRQ: dispatch to the machine-specific handler. */
+static void lower_el_aarch64_irq_exception(unsigned int exc_no,
+    istate_t *istate)
+{
+	machine_irq_exception(exc_no, istate);
+}
+
+/** Unhandled FIQ from a lower EL (AArch64); reported against the task. */
+static void lower_el_aarch64_fiq_exception(unsigned int exc_no,
+    istate_t *istate)
+{
+	fault_from_uspace(istate, "Unhandled exception from Lower EL, AArch64, "
+	    "FIQ, ESR_EL1=%0#10" PRIx32 ", FAR_EL1=%0#18" PRIx64 ".",
+	    (uint32_t) ESR_EL1_read(), FAR_EL1_read());
+}
+
+/** Unhandled SError from a lower EL (AArch64); reported against the task. */
+static void lower_el_aarch64_serror_exception(unsigned int exc_no,
+    istate_t *istate)
+{
+	fault_from_uspace(istate, "Unhandled exception from Lower EL, AArch64, "
+	    "SError, ESR_EL1=%0#10" PRIx32 ", FAR_EL1=%0#18" PRIx64 ".",
+	    (uint32_t) ESR_EL1_read(), FAR_EL1_read());
+}
+
+/* AArch32 state is not supported; all AArch32 exceptions are reported. */
+
+/** Unhandled synchronous exception from a lower EL in AArch32 state. */
+static void lower_el_aarch32_synch_exception(unsigned int exc_no,
+    istate_t *istate)
+{
+	fault_from_uspace(istate, "Unhandled exception from Lower EL, AArch32, "
+	    "Synch, ESR_EL1=%0#10" PRIx32 ", FAR_EL1=%0#18" PRIx64 ".",
+	    (uint32_t) ESR_EL1_read(), FAR_EL1_read());
+}
+
+/** Unhandled IRQ from a lower EL in AArch32 state. */
+static void lower_el_aarch32_irq_exception(unsigned int exc_no,
+    istate_t *istate)
+{
+	fault_from_uspace(istate, "Unhandled exception from Lower EL, AArch32, "
+	    "IRQ, ESR_EL1=%0#10" PRIx32 ", FAR_EL1=%0#18" PRIx64 ".",
+	    (uint32_t) ESR_EL1_read(), FAR_EL1_read());
+}
+
+/** Unhandled FIQ from a lower EL in AArch32 state. */
+static void lower_el_aarch32_fiq_exception(unsigned int exc_no,
+    istate_t *istate)
+{
+	fault_from_uspace(istate, "Unhandled exception from Lower EL, AArch32, "
+	    "FIQ, ESR_EL1=%0#10" PRIx32 ", FAR_EL1=%0#18" PRIx64 ".",
+	    (uint32_t) ESR_EL1_read(), FAR_EL1_read());
+}
+
+/** Unhandled SError from a lower EL in AArch32 state. */
+static void lower_el_aarch32_serror_exception(unsigned int exc_no,
+    istate_t *istate)
+{
+	fault_from_uspace(istate, "Unhandled exception from Lower EL, AArch32, "
+	    "SError, ESR_EL1=%0#10" PRIx32 ", FAR_EL1=%0#18" PRIx64 ".",
+	    (uint32_t) ESR_EL1_read(), FAR_EL1_read());
+}
+
+/** Initializes exception handling.
+ *
+ * Installs low-level exception handlers and then registers exceptions and their
+ * handlers to kernel exception dispatcher.
+ */
+void exception_init(void)
+{
+	/*
+	 * Register one handler per vector-table entry; the order mirrors the
+	 * AArch64 vector layout (current EL/SP_SEL0, current EL/SP_SELx,
+	 * lower EL AArch64, lower EL AArch32) x (Synch, IRQ, FIQ, SError).
+	 */
+	exc_register(EXC_CURRENT_EL_SP_SEL0_SYNCH,
+	    "current EL, SP_SEL0, Synchronous", true,
+	    (iroutine_t) current_el_sp_sel0_synch_exception);
+	exc_register(EXC_CURRENT_EL_SP_SEL0_IRQ,
+	    "current EL, SP_SEL0, IRQ", true,
+	    (iroutine_t) current_el_sp_sel0_irq_exception);
+	exc_register(EXC_CURRENT_EL_SP_SEL0_FIQ,
+	    "current EL, SP_SEL0, FIQ", true,
+	    (iroutine_t) current_el_sp_sel0_fiq_exception);
+	exc_register(EXC_CURRENT_EL_SP_SEL0_SERROR,
+	    "current EL, SP_SEL0, SError", true,
+	    (iroutine_t) current_el_sp_sel0_serror_exception);
+	exc_register(EXC_CURRENT_EL_SP_SELX_SYNCH,
+	    "current EL, SP_SELx, Synchronous", true,
+	    (iroutine_t) current_el_sp_selx_synch_exception);
+	exc_register(EXC_CURRENT_EL_SP_SELX_IRQ,
+	    "current EL, SP_SELx, IRQ", true,
+	    (iroutine_t) current_el_sp_selx_irq_exception);
+	exc_register(EXC_CURRENT_EL_SP_SELX_FIQ,
+	    "current EL, SP_SELx, FIQ", true,
+	    (iroutine_t) current_el_sp_selx_fiq_exception);
+	exc_register(EXC_CURRENT_EL_SP_SELX_SERROR,
+	    "current EL, SP_SELx, SError", true,
+	    (iroutine_t) current_el_sp_selx_serror_exception);
+	exc_register(EXC_LOWER_EL_AARCH64_SYNCH,
+	    "lower EL, AArch64, Synchronous", true,
+	    (iroutine_t) lower_el_aarch64_synch_exception);
+	exc_register(EXC_LOWER_EL_AARCH64_IRQ,
+	    "lower EL, AArch64, IRQ", true,
+	    (iroutine_t) lower_el_aarch64_irq_exception);
+	exc_register(EXC_LOWER_EL_AARCH64_FIQ,
+	    "lower EL, AArch64, FIQ", true,
+	    (iroutine_t) lower_el_aarch64_fiq_exception);
+	exc_register(EXC_LOWER_EL_AARCH64_SERROR,
+	    "lower EL, AArch64, SError", true,
+	    (iroutine_t) lower_el_aarch64_serror_exception);
+	exc_register(EXC_LOWER_EL_AARCH32_SYNCH,
+	    "lower EL, AArch32, Synchronous", true,
+	    (iroutine_t) lower_el_aarch32_synch_exception);
+	exc_register(EXC_LOWER_EL_AARCH32_IRQ,
+	    "lower EL, AArch32, IRQ", true,
+	    (iroutine_t) lower_el_aarch32_irq_exception);
+	exc_register(EXC_LOWER_EL_AARCH32_FIQ,
+	    "lower EL, AArch32, FIQ", true,
+	    (iroutine_t) lower_el_aarch32_fiq_exception);
+	exc_register(EXC_LOWER_EL_AARCH32_SERROR,
+	    "lower EL, AArch32, SError", true,
+	    (iroutine_t) lower_el_aarch32_serror_exception);
+
+	/* Point VBAR_EL1 at the low-level exception vector table. */
+	VBAR_EL1_write(((uint64_t) &exc_vector));
+}
+
+/** Print #istate_t structure content.
+ *
+ * Each value is printed with %0#18, i.e. "0x" plus 16 zero-padded hex digits,
+ * so the three columns line up.
+ *
+ * @param istate Structure to be printed.
+ */
+void istate_decode(istate_t *istate)
+{
+	printf("x0 =%0#18" PRIx64 "\tx1 =%0#18" PRIx64 "\t"
+	    "x2 =%0#18" PRIx64 "\n", istate->x0, istate->x1, istate->x2);
+	printf("x3 =%0#18" PRIx64 "\tx4 =%0#18" PRIx64 "\t"
+	    "x5 =%0#18" PRIx64 "\n", istate->x3, istate->x4, istate->x5);
+	printf("x6 =%0#18" PRIx64 "\tx7 =%0#18" PRIx64 "\t"
+	    "x8 =%0#18" PRIx64 "\n", istate->x6, istate->x7, istate->x8);
+	printf("x9 =%0#18" PRIx64 "\tx10=%0#18" PRIx64 "\t"
+	    "x11=%0#18" PRIx64 "\n", istate->x9, istate->x10, istate->x11);
+	printf("x12=%0#18" PRIx64 "\tx13=%0#18" PRIx64 "\t"
+	    "x14=%0#18" PRIx64 "\n", istate->x12, istate->x13, istate->x14);
+	printf("x15=%0#18" PRIx64 "\tx16=%0#18" PRIx64 "\t"
+	    "x17=%0#18" PRIx64 "\n", istate->x15, istate->x16, istate->x17);
+	printf("x18=%0#18" PRIx64 "\tx19=%0#18" PRIx64 "\t"
+	    "x20=%0#18" PRIx64 "\n", istate->x18, istate->x19, istate->x20);
+	printf("x21=%0#18" PRIx64 "\tx22=%0#18" PRIx64 "\t"
+	    "x23=%0#18" PRIx64 "\n", istate->x21, istate->x22, istate->x23);
+	printf("x24=%0#18" PRIx64 "\tx25=%0#18" PRIx64 "\t"
+	    "x26=%0#18" PRIx64 "\n", istate->x24, istate->x25, istate->x26);
+	printf("x27=%0#18" PRIx64 "\tx28=%0#18" PRIx64 "\t"
+	    "x29=%0#18" PRIx64 "\n", istate->x27, istate->x28, istate->x29);
+	printf("x30=%0#18" PRIx64 "\tsp =%0#18" PRIx64 "\t"
+	    "pc =%0#18" PRIx64 "\n", istate->x30, istate->sp, istate->pc);
+	printf("spsr=%0#18" PRIx64 "\ttpidr=%0#18" PRIx64 "\n", istate->spsr,
+	    istate->tpidr);
+}
+
+/** @}
+ */
Index: kernel/arch/arm64/src/fpu.S
===================================================================
--- kernel/arch/arm64/src/fpu.S	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/src/fpu.S	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <abi/asmtool.h>
+#include <arch/fpu_context_struct.h>
+
+.text
+
+FUNCTION_BEGIN(fpu_context_save)
+	/* Save FPU registers into fpu_context_t pointed by x0. */
+	stp q0, q1, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 0]
+	stp q2, q3, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 2]
+	stp q4, q5, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 4]
+	stp q6, q7, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 6]
+	stp q8, q9, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 8]
+	stp q10, q11, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 10]
+	stp q12, q13, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 12]
+	stp q14, q15, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 14]
+	stp q16, q17, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 16]
+	stp q18, q19, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 18]
+	stp q20, q21, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 20]
+	stp q22, q23, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 22]
+	stp q24, q25, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 24]
+	stp q26, q27, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 26]
+	stp q28, q29, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 28]
+	stp q30, q31, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 30]
+	/* FPCR/FPSR are stored as 32-bit values (str w1). */
+	mrs x1, fpcr
+	str w1, [x0, #FPU_CONTEXT_OFFSET_FPCR]
+	mrs x1, fpsr
+	str w1, [x0, #FPU_CONTEXT_OFFSET_FPSR]
+	ret
+FUNCTION_END(fpu_context_save)
+
+FUNCTION_BEGIN(fpu_context_restore)
+	/* Restore FPU registers from fpu_context_t pointed by x0. */
+	ldp q0, q1, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 0]
+	ldp q2, q3, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 2]
+	ldp q4, q5, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 4]
+	ldp q6, q7, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 6]
+	ldp q8, q9, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 8]
+	ldp q10, q11, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 10]
+	ldp q12, q13, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 12]
+	ldp q14, q15, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 14]
+	ldp q16, q17, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 16]
+	ldp q18, q19, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 18]
+	ldp q20, q21, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 20]
+	ldp q22, q23, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 22]
+	ldp q24, q25, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 24]
+	ldp q26, q27, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 26]
+	ldp q28, q29, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 28]
+	ldp q30, q31, [x0, #FPU_CONTEXT_OFFSET_VREGS + 16 * 30]
+	/* 32-bit load zero-extends into x1; upper FPCR/FPSR bits become 0. */
+	ldr w1, [x0, #FPU_CONTEXT_OFFSET_FPCR]
+	msr fpcr, x1
+	ldr w1, [x0, #FPU_CONTEXT_OFFSET_FPSR]
+	msr fpsr, x1
+	ret
+FUNCTION_END(fpu_context_restore)
Index: kernel/arch/arm64/src/fpu_context.c
===================================================================
--- kernel/arch/arm64/src/fpu_context.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/src/fpu_context.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ * @brief ARM64 FPU context.
+ */
+
+#include <arch/regutils.h>
+#include <fpu_context.h>
+
+/** Initialize FPU functionality. */
+void fpu_init(void)
+{
+	/*
+	 * Set initial FPU state:
+	 * o Registers v0-v31 are cleared.
+	 * o FPCR value:
+	 *   [31:27] - Reserved 0.
+	 *   [26]    - AHP=0, IEEE half-precision format selected.
+	 *   [25]    - DN=0, NaN operands propagate through to the output of a
+	 *             floating-point operation.
+	 *   [24]    - FZ=0, flush-to-zero mode disabled.
+	 *   [23:22] - RMode=00, round to nearest mode.
+	 *   [21:20] - Stride=00, this field has no function in AArch64 state.
+	 *   [19]    - FZ16=0, flush-to-zero mode disabled.
+	 *   [18:16] - Len=000, this field has no function in AArch64 state.
+	 *   [15]    - IDE=0, input denormal FP exception is untrapped.
+	 *   [14:13] - Reserved 0.
+	 *   [12]    - IXE=0, inexact FP exception is untrapped.
+	 *   [11]    - UFE=0, underflow FP exception is untrapped.
+	 *   [10]    - OFE=0, overflow FP exception is untrapped.
+	 *   [9]     - DZE=0, divide by zero FP exception is untrapped.
+	 *   [8]     - IOE=0, invalid operation FP exception is untrapped.
+	 *   [7:0]   - Reserved 0.
+	 * o FPSR value:
+	 *   [31]    - N=0, negative condition flag for AArch32.
+	 *   [30]    - Z=0, zero condition flag for AArch32.
+	 *   [29]    - C=0, carry condition flag for AArch32.
+	 *   [28]    - V=0, overflow condition flag for AArch32.
+	 *   [27]    - QC=0, cumulative saturation bit.
+	 *   [26:8]  - Reserved 0.
+	 *   [7]     - IDC=0, input denormal cumulative FP exception bit.
+	 *   [6:5]   - Reserved 0.
+	 *   [4]     - IXC=0, inexact cumulative FP exception bit.
+	 *   [3]     - UFC=0, underflow cumulative FP exception bit.
+	 *   [2]     - OFC=0, overflow cumulative FP exception bit.
+	 *   [1]     - DZC=0, divide by zero cumulative FP exception bit.
+	 *   [0]     - IOC=0, invalid operation cumulative FP exception bit.
+	 */
+	/* static keeps the large all-zero context out of the kernel stack. */
+	static fpu_context_t init = { .vregs = { 0 }, .fpcr = 0, .fpsr = 0 };
+	fpu_context_restore(&init);
+}
+
+/** Enable FPU instructions.
+ *
+ * Clears the CPACR_EL1.FPEN field and sets it to "trap nothing".
+ */
+void fpu_enable(void)
+{
+	uint64_t cpacr = CPACR_EL1_read();
+
+	cpacr &= ~CPACR_FPEN_MASK;
+	cpacr |= CPACR_FPEN_TRAP_NONE << CPACR_FPEN_SHIFT;
+	CPACR_EL1_write(cpacr);
+}
+
+/** Disable FPU instructions.
+ *
+ * Clears the CPACR_EL1.FPEN field and sets it to "trap everything".
+ */
+void fpu_disable(void)
+{
+	uint64_t cpacr = CPACR_EL1_read();
+
+	cpacr &= ~CPACR_FPEN_MASK;
+	cpacr |= CPACR_FPEN_TRAP_ALL << CPACR_FPEN_SHIFT;
+	CPACR_EL1_write(cpacr);
+}
+
+/** @}
+ */
Index: kernel/arch/arm64/src/interrupt.c
===================================================================
--- kernel/arch/arm64/src/interrupt.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/src/interrupt.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ * @brief Interrupts controlling routines.
+ */
+
+#include <arch/interrupt.h>
+#include <arch/machine_func.h>
+#include <ddi/irq.h>
+#include <interrupt.h>
+#include <time/clock.h>
+
+/* Virtual timer IRQ descriptor, registered in interrupt_init(). */
+static irq_t timer_irq;
+/* Counter ticks per clock interrupt; computed in timer_start(). */
+static uint64_t timer_increment;
+
+/** Disable interrupts.
+ *
+ * Sets the I (IRQ mask) bit in DAIF.
+ *
+ * @return Old interrupt priority level.
+ */
+ipl_t interrupts_disable(void)
+{
+	uint64_t old_daif = DAIF_read();
+	ipl_t ipl = (old_daif >> DAIF_IRQ_SHIFT) & 1;
+
+	DAIF_write(old_daif | DAIF_IRQ_FLAG);
+	return ipl;
+}
+
+/** Enable interrupts.
+ *
+ * Clears the I (IRQ mask) bit in DAIF.
+ *
+ * @return Old interrupt priority level.
+ */
+ipl_t interrupts_enable(void)
+{
+	uint64_t old_daif = DAIF_read();
+	ipl_t ipl = (old_daif >> DAIF_IRQ_SHIFT) & 1;
+
+	DAIF_write(old_daif & ~DAIF_IRQ_FLAG);
+	return ipl;
+}
+
+/** Restore interrupt priority level.
+ *
+ * Writes bit 0 of @a ipl back into the DAIF IRQ mask position.
+ *
+ * @param ipl Saved interrupt priority level.
+ */
+void interrupts_restore(ipl_t ipl)
+{
+	uint64_t masked = DAIF_read() & ~DAIF_IRQ_FLAG;
+
+	DAIF_write(masked | ((ipl & 1) << DAIF_IRQ_SHIFT));
+}
+
+/** Read interrupt priority level.
+ *
+ * @return Current interrupt priority level (the DAIF IRQ mask bit).
+ */
+ipl_t interrupts_read(void)
+{
+	uint64_t daif = DAIF_read();
+
+	return (daif >> DAIF_IRQ_SHIFT) & 1;
+}
+
+/** Check interrupts state.
+ *
+ * @return True if interrupts are disabled (DAIF IRQ mask bit set).
+ */
+bool interrupts_disabled(void)
+{
+	return (DAIF_read() & DAIF_IRQ_FLAG) != 0;
+}
+
+/** Suspend the virtual timer by masking its interrupt output. */
+static void timer_suspend(void)
+{
+	CNTV_CTL_EL0_write(CNTV_CTL_EL0_read() | CNTV_CTL_IMASK_FLAG);
+}
+
+/** Start the virtual timer.
+ *
+ * Computes the per-tick increment from the counter frequency (CNTFRQ) and the
+ * kernel clock rate HZ, programs the first absolute compare value and enables
+ * the timer with its interrupt unmasked.
+ */
+static void timer_start(void)
+{
+	uint64_t cntfrq = CNTFRQ_EL0_read();
+	uint64_t cntvct = CNTVCT_EL0_read();
+	uint64_t cntv_ctl = CNTV_CTL_EL0_read();
+
+	/* Calculate the increment; also used by timer_irq_handler(). */
+	timer_increment = cntfrq / HZ;
+
+	/* Program the timer. */
+	CNTV_CVAL_EL0_write(cntvct + timer_increment);
+	CNTV_CTL_EL0_write(
+	    (cntv_ctl & ~CNTV_CTL_IMASK_FLAG) | CNTV_CTL_ENABLE_FLAG);
+}
+
+/** Claim the virtual timer interrupt (always accepted). */
+static irq_ownership_t timer_claim(irq_t *irq)
+{
+	return IRQ_ACCEPT;
+}
+
+/** Handle the virtual timer interrupt.
+ *
+ * Accounts for missed ticks when the handler runs late and re-arms the
+ * compare register on the original tick grid.
+ */
+static void timer_irq_handler(irq_t *irq)
+{
+	uint64_t cntvct = CNTVCT_EL0_read();
+	uint64_t cntv_cval = CNTV_CVAL_EL0_read();
+
+	/*
+	 * How far past the programmed compare value the counter has run.
+	 * NOTE(review): assumes cntvct >= cntv_cval at interrupt time so the
+	 * unsigned subtraction cannot wrap — confirm against the timer spec.
+	 */
+	uint64_t drift = cntvct - cntv_cval;
+	while (drift > timer_increment) {
+		drift -= timer_increment;
+		CPU->missed_clock_ticks++;
+	}
+	/* Next deadline: one increment ahead, compensated for the drift. */
+	CNTV_CVAL_EL0_write(cntvct + timer_increment - drift);
+
+	/*
+	 * We are holding a lock which prevents preemption.
+	 * Release the lock, call clock() and reacquire the lock again.
+	 */
+	irq_spinlock_unlock(&irq->lock, false);
+	clock();
+	irq_spinlock_lock(&irq->lock, false);
+}
+
+/** Initialize basic tables for exception dispatching. */
+void interrupt_init(void)
+{
+	size_t irq_count = machine_get_irq_count();
+	irq_init(irq_count, irq_count);
+
+	/* Initialize virtual timer. */
+	/* Mask the timer before its IRQ is enabled to avoid an early tick. */
+	timer_suspend();
+	inr_t timer_inr = machine_enable_vtimer_irq();
+
+	irq_initialize(&timer_irq);
+	timer_irq.inr = timer_inr;
+	timer_irq.claim = timer_claim;
+	timer_irq.handler = timer_irq_handler;
+	irq_register(&timer_irq);
+
+	timer_start();
+}
+
+/** @}
+ */
Index: kernel/arch/arm64/src/mach/virt/virt.c
===================================================================
--- kernel/arch/arm64/src/mach/virt/virt.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/src/mach/virt/virt.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2016 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64_virt
+ * @{
+ */
+/** @file
+ * @brief QEMU virt platform driver.
+ */
+
+#include <arch/mach/virt/virt.h>
+#include <console/console.h>
+#include <genarch/drivers/gicv2/gicv2.h>
+#include <genarch/drivers/pl011/pl011.h>
+#include <genarch/srln/srln.h>
+#include <mm/km.h>
+#include <sysinfo/sysinfo.h>
+
+#define VIRT_VTIMER_IRQ         27
+#define VIRT_UART_IRQ           33
+#define VIRT_GIC_DISTR_ADDRESS  0x08000000
+#define VIRT_GIC_CPUI_ADDRESS   0x08010000
+#define VIRT_UART_ADDRESS       0x09000000
+
+/* Forward declarations for the machine operations defined below. */
+static void virt_init(void);
+static void virt_irq_exception(unsigned int exc_no, istate_t *istate);
+static void virt_output_init(void);
+static void virt_input_init(void);
+/*
+ * NOTE(review): these two are non-static while the rest are static —
+ * presumably declared in a header as well; verify this is intentional.
+ */
+inr_t virt_enable_vtimer_irq(void);
+size_t virt_get_irq_count(void);
+static const char *virt_get_platform_name(void);
+
+/* Driver instance state: interrupt controller and UART. */
+struct {
+	gicv2_t gicv2;
+	pl011_uart_t uart;
+} virt;
+
+/*
+ * NOTE(review): positional initializers — the order must match the member
+ * order of struct arm_machine_ops; designated initializers would be safer.
+ */
+struct arm_machine_ops virt_machine_ops = {
+	virt_init,
+	virt_irq_exception,
+	virt_output_init,
+	virt_input_init,
+	virt_enable_vtimer_irq,
+	virt_get_irq_count,
+	virt_get_platform_name
+};
+
+/** Initialize the virt machine: map GICv2 MMIO regions and set it up. */
+static void virt_init(void)
+{
+	/* Initialize interrupt controller. */
+	/* Device MMIO is mapped non-cacheable, kernel read/write. */
+	gicv2_distr_regs_t *distr = (void *) km_map(VIRT_GIC_DISTR_ADDRESS,
+	    ALIGN_UP(sizeof(*distr), PAGE_SIZE), KM_NATURAL_ALIGNMENT,
+	    PAGE_NOT_CACHEABLE | PAGE_READ | PAGE_WRITE | PAGE_KERNEL);
+	gicv2_cpui_regs_t *cpui = (void *) km_map(VIRT_GIC_CPUI_ADDRESS,
+	    ALIGN_UP(sizeof(*cpui), PAGE_SIZE), KM_NATURAL_ALIGNMENT,
+	    PAGE_NOT_CACHEABLE | PAGE_READ | PAGE_WRITE | PAGE_KERNEL);
+	gicv2_init(&virt.gicv2, distr, cpui);
+}
+
+/** Machine IRQ handler: acknowledge the GIC, dispatch, signal EOI. */
+static void virt_irq_exception(unsigned int exc_no, istate_t *istate)
+{
+	unsigned inum, cpuid;
+	gicv2_inum_get(&virt.gicv2, &inum, &cpuid);
+
+	/* Dispatch the interrupt. */
+	irq_t *irq = irq_dispatch_and_lock(inum);
+	if (irq) {
+		/* The IRQ handler was found. */
+		irq->handler(irq);
+		irq_spinlock_unlock(&irq->lock, false);
+	} else {
+		/* Spurious interrupt. */
+		printf("cpu%d: spurious interrupt (inum=%u)\n", CPU->id, inum);
+	}
+
+	/* Signal end of interrupt to the controller. */
+	gicv2_end(&virt.gicv2, inum, cpuid);
+}
+
+/** Wire the PL011 UART to kernel standard output, if it initializes. */
+static void virt_output_init(void)
+{
+	if (pl011_uart_init(&virt.uart, VIRT_UART_IRQ, VIRT_UART_ADDRESS))
+		stdout_wire(&virt.uart.outdev);
+}
+
+/** Wire UART input through the serial line driver to kernel stdin. */
+static void virt_input_init(void)
+{
+	srln_instance_t *srln_instance = srln_init();
+	if (srln_instance == NULL)
+		return;
+
+	indev_t *sink = stdin_wire();
+	indev_t *srln = srln_wire(srln_instance, sink);
+	pl011_uart_input_wire(&virt.uart, srln);
+	/* Unmask the UART interrupt at the GIC. */
+	gicv2_enable(&virt.gicv2, VIRT_UART_IRQ);
+}
+
+/** Enable the virtual timer interrupt at the GIC and return its number. */
+inr_t virt_enable_vtimer_irq(void)
+{
+	const inr_t vtimer_inr = VIRT_VTIMER_IRQ;
+
+	gicv2_enable(&virt.gicv2, vtimer_inr);
+	return vtimer_inr;
+}
+
+/** Return the number of interrupt lines reported by the GIC. */
+size_t virt_get_irq_count(void)
+{
+	return gicv2_inum_get_total(&virt.gicv2);
+}
+
+/** Return the platform name string.
+ *
+ * Declared static above; the definition carries static as well so the
+ * declaration and definition agree (linkage is internal either way,
+ * per C11 6.2.2, but the mismatch is error-prone).
+ */
+static const char *virt_get_platform_name(void)
+{
+	return "arm64virt";
+}
+
+/** @}
+ */
Index: kernel/arch/arm64/src/machine_func.c
===================================================================
--- kernel/arch/arm64/src/machine_func.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/src/machine_func.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2016 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ * @brief Definitions of machine specific functions.
+ *
+ * These functions make it possible to distinguish between different kinds of
+ */
+
+#include <arch/machine_func.h>
+#include <arch/mach/virt/virt.h>
+
+/** Pointer to machine_ops structure being used. */
+static struct arm_machine_ops *machine_ops;
+
+/** Initialize machine_ops pointer. */
+void machine_ops_init(void)
+{
+#if defined(MACHINE_virt)
+	machine_ops = &virt_machine_ops;
+#else
+#error Machine type not defined.
+#endif
+}
+
+/** Perform machine-specific initialization. */
+void machine_init(void)
+{
+	machine_ops->machine_init();
+}
+
+/** Interrupt exception handler.
+ *
+ * @param exc_no Interrupt exception number.
+ * @param istate Saved processor state.
+ */
+void machine_irq_exception(unsigned int exc_no, istate_t *istate)
+{
+	machine_ops->machine_irq_exception(exc_no, istate);
+}
+
+/** Configure the output device. */
+void machine_output_init(void)
+{
+	machine_ops->machine_output_init();
+}
+
+/** Configure the input device. */
+void machine_input_init(void)
+{
+	machine_ops->machine_input_init();
+}
+
+/** Get IRQ number range used by machine. */
+size_t machine_get_irq_count(void)
+{
+	return machine_ops->machine_get_irq_count();
+}
+
+/** Enable virtual timer interrupt and return its number. */
+inr_t machine_enable_vtimer_irq(void)
+{
+	return machine_ops->machine_enable_vtimer_irq();
+}
+
+/** Get platform identifier. */
+const char *machine_get_platform_name(void)
+{
+	return machine_ops->machine_get_platform_name();
+}
+
+/** @}
+ */
Index: kernel/arch/arm64/src/mm/as.c
===================================================================
--- kernel/arch/arm64/src/mm/as.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/src/mm/as.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64_mm
+ * @{
+ */
+/** @file
+ * @brief Address space functions.
+ */
+
+#include <arch/mm/as.h>
+#include <arch/regutils.h>
+#include <genarch/mm/asid_fifo.h>
+#include <genarch/mm/page_pt.h>
+#include <mm/as.h>
+#include <mm/asid.h>
+
+/** Architecture dependent address space init.
+ *
+ * Since ARM64 supports page tables, #as_pt_operations are used.
+ */
+void as_arch_init(void)
+{
+	as_operations = &as_pt_operations;
+	asid_fifo_init();
+}
+
+/** Perform ARM64-specific tasks when an address space becomes active on the
+ * processor.
+ *
+ * Change the level 0 page table (this is normally done by
+ * SET_PTL0_ADDRESS_ARCH() on other architectures) and install ASID.
+ *
+ * @param as Address space.
+ */
+void as_install_arch(as_t *as)
+{
+	uint64_t val;
+
+	val = (uint64_t) as->genarch.page_table;
+	if (as->asid != ASID_KERNEL) {
+		val |= (uint64_t) as->asid << TTBR0_ASID_SHIFT;
+		TTBR0_EL1_write(val);
+	} else
+		TTBR1_EL1_write(val);
+}
+
+/** @}
+ */
Index: kernel/arch/arm64/src/mm/frame.c
===================================================================
--- kernel/arch/arm64/src/mm/frame.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/src/mm/frame.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64_mm
+ * @{
+ */
+/** @file
+ * @brief Frame related functions.
+ */
+
+#include <arch/mm/frame.h>
+#include <mm/frame.h>
+#include <config.h>
+#include <align.h>
+#include <macros.h>
+
+/** Physical memory map received from the bootcode. */
+memmap_t memmap;
+
+/** Print memory layout. */
+void physmem_print(void)
+{
+	printf("[base            ] [size            ] [type      ]\n");
+
+	size_t i;
+	for (i = 0; i < memmap.cnt; i++) {
+		const char *type;
+		switch (memmap.zones[i].type) {
+		case MEMTYPE_AVAILABLE:
+			type = "available";
+			break;
+		case MEMTYPE_ACPI_RECLAIM:
+			type = "ACPI reclaim";
+			break;
+		default:
+			type = "unusable";
+			break;
+		}
+
+		printf("%p %#018zx %s\n", memmap.zones[i].start,
+		    memmap.zones[i].size, type);
+	}
+}
+
+/** Create memory zones according to information stored in memmap.
+ *
+ * Walk the memory map and create frame zones according to it.
+ */
+static void frame_common_arch_init(bool low)
+{
+	size_t i;
+
+	for (i = 0; i < memmap.cnt; i++) {
+		if (memmap.zones[i].type != MEMTYPE_AVAILABLE)
+			continue;
+
+		/* To be safe, make the available zone possibly smaller. */
+		uintptr_t base = ALIGN_UP((uintptr_t) memmap.zones[i].start,
+		    FRAME_SIZE);
+		size_t size = ALIGN_DOWN(memmap.zones[i].size -
+		    (base - (uintptr_t) memmap.zones[i].start), FRAME_SIZE);
+
+		if (!frame_adjust_zone_bounds(low, &base, &size))
+			continue;
+
+		pfn_t confdata;
+		pfn_t pfn = ADDR2PFN(base);
+		size_t count = SIZE2FRAMES(size);
+
+		if (low) {
+			/* Determine where to place confdata. */
+			if (pfn == 0) {
+				/*
+				 * Avoid placing confdata at the NULL address.
+				 */
+				if (count == 1)
+					continue;
+				confdata = 1;
+			} else
+				confdata = pfn;
+
+			zone_create(pfn, count, confdata,
+			    ZONE_AVAILABLE | ZONE_LOWMEM);
+		} else {
+			confdata = zone_external_conf_alloc(count);
+			if (confdata != 0)
+				zone_create(pfn, count, confdata,
+				    ZONE_AVAILABLE | ZONE_HIGHMEM);
+		}
+	}
+}
+
+/** Create low memory zones. */
+void frame_low_arch_init(void)
+{
+	if (config.cpu_active > 1)
+		return;
+
+	frame_common_arch_init(true);
+
+	/*
+	 * On ARM64, physical memory can start on a non-zero address. The
+	 * generic frame_init() only marks PFN 0 as not free, so we must mark
+	 * the physically first frame not free explicitly here, no matter what
+	 * is its address.
+	 */
+	frame_mark_unavailable(ADDR2PFN(physmem_base), 1);
+}
+
+/** Create high memory zones. */
+void frame_high_arch_init(void)
+{
+	if (config.cpu_active > 1)
+		return;
+
+	frame_common_arch_init(false);
+}
+
+/** @}
+ */
Index: kernel/arch/arm64/src/mm/km.c
===================================================================
--- kernel/arch/arm64/src/mm/km.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/src/mm/km.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64_mm
+ * @{
+ */
+
+#include <arch/mm/km.h>
+#include <config.h>
+#include <macros.h>
+#include <mm/km.h>
+#include <typedefs.h>
+
+void km_identity_arch_init(void)
+{
+	config.identity_base = KM_ARM64_IDENTITY_START;
+	config.identity_size = KM_ARM64_IDENTITY_SIZE;
+}
+
+void km_non_identity_arch_init(void)
+{
+	km_non_identity_span_add(KM_ARM64_NON_IDENTITY_START,
+	    KM_ARM64_NON_IDENTITY_SIZE);
+}
+
+bool km_is_non_identity_arch(uintptr_t addr)
+{
+	return iswithin(KM_ARM64_NON_IDENTITY_START, KM_ARM64_NON_IDENTITY_SIZE,
+	    addr, 1);
+}
+
+/** @}
+ */
Index: kernel/arch/arm64/src/mm/page.c
===================================================================
--- kernel/arch/arm64/src/mm/page.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/src/mm/page.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64_mm
+ * @{
+ */
+/** @file
+ * @brief Paging related functions.
+ */
+
+#include <arch/mm/page.h>
+#include <genarch/mm/page_pt.h>
+#include <mm/as.h>
+#include <mm/page.h>
+
+/** Initializes page tables. */
+void page_arch_init(void)
+{
+	if (config.cpu_active > 1) {
+		as_switch(NULL, AS_KERNEL);
+		return;
+	}
+
+	page_mapping_operations = &pt_mapping_operations;
+
+	page_table_lock(AS_KERNEL, true);
+
+	/* PA2KA(identity) mapping for all low-memory frames. */
+	for (uintptr_t cur = 0; cur < config.identity_size; cur += FRAME_SIZE) {
+		uintptr_t addr = physmem_base + cur;
+		page_mapping_insert(AS_KERNEL, PA2KA(addr), addr,
+		    PAGE_GLOBAL | PAGE_CACHEABLE | PAGE_EXEC | PAGE_WRITE |
+		    PAGE_READ);
+	}
+
+	page_table_unlock(AS_KERNEL, true);
+
+	as_switch(NULL, AS_KERNEL);
+}
+
+/** @}
+ */
Index: kernel/arch/arm64/src/mm/tlb.c
===================================================================
--- kernel/arch/arm64/src/mm/tlb.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/src/mm/tlb.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64_mm
+ * @{
+ */
+/** @file
+ * @brief TLB related functions.
+ */
+
+#include <arch/mm/asid.h>
+#include <arch/mm/page.h>
+#include <arch/regutils.h>
+#include <mm/tlb.h>
+#include <typedefs.h>
+
+/** Invalidate all entries in TLB. */
+void tlb_invalidate_all(void)
+{
+	asm volatile (
+	    /* TLB Invalidate by VMID, All at stage 1, EL1, Inner Shareable. */
+	    "tlbi vmalle1is\n"
+	    /* Ensure completion on all PEs. */
+	    "dsb ish\n"
+	    /* Synchronize context on this PE. */
+	    "isb\n"
+	    : : : "memory"
+	);
+}
+
+/** Invalidate all entries in TLB that belong to specified address space.
+ *
+ * @param asid Address Space ID.
+ */
+void tlb_invalidate_asid(asid_t asid)
+{
+	uintptr_t val = (uintptr_t) asid << TLBI_ASID_SHIFT;
+
+	asm volatile (
+	    /* TLB Invalidate by ASID, EL1, Inner Shareable. */
+	    "tlbi aside1is, %[val]\n"
+	    /* Ensure completion on all PEs. */
+	    "dsb ish\n"
+	    /* Synchronize context on this PE. */
+	    "isb\n"
+	    : : [val] "r" (val) : "memory"
+	);
+}
+
+/** Invalidate TLB entries for specified page range belonging to specified
+ * address space.
+ *
+ * @param asid Address Space ID.
+ * @param page Address of the first page whose entry is to be invalidated.
+ * @param cnt  Number of entries to invalidate.
+ */
+void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
+{
+	for (size_t i = 0; i < cnt; i++) {
+		uintptr_t val;
+
+		val = (page + i * PAGE_SIZE) >> PAGE_WIDTH;
+		val |= (uintptr_t) asid << TLBI_ASID_SHIFT;
+
+		asm volatile (
+		    /* TLB Invalidate by Virt. Address, EL1, Inner Shareable. */
+		    "tlbi vae1is, %[val]\n"
+		    /* Ensure completion on all PEs. */
+		    "dsb ish\n"
+		    /* Synchronize context on this PE. */
+		    "isb\n"
+		    : : [val] "r" (val) : "memory"
+		);
+	}
+}
+
+void tlb_arch_init(void)
+{
+}
+
+void tlb_print(void)
+{
+}
+
+/** @}
+ */
Index: kernel/arch/arm64/src/smc.c
===================================================================
--- kernel/arch/arm64/src/smc.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/src/smc.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2019 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch/barrier.h>
+#include <barrier.h>
+
+void smc_coherence(void *a, size_t l)
+{
+	ensure_visibility(a, l);
+}
Index: kernel/arch/arm64/src/smp/ipi.c
===================================================================
--- kernel/arch/arm64/src/smp/ipi.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/src/smp/ipi.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ */
+
+#ifdef CONFIG_SMP
+
+#include <smp/ipi.h>
+#include <panic.h>
+
+/** Deliver IPI to all processors except the current one.
+ *
+ * @param ipi IPI number.
+ */
+void ipi_broadcast_arch(int ipi)
+{
+	panic("broadcast IPI not implemented.");
+}
+
+#endif /* CONFIG_SMP */
+
+/** @}
+ */
Index: kernel/arch/arm64/src/smp/smp.c
===================================================================
--- kernel/arch/arm64/src/smp/smp.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/src/smp/smp.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64
+ * @{
+ */
+/** @file
+ */
+
+#include <smp/smp.h>
+
+#ifdef CONFIG_SMP
+
+void smp_init(void)
+{
+}
+
+void kmp(void *arg)
+{
+}
+
+#endif /* CONFIG_SMP */
+
+/** @}
+ */
Index: kernel/arch/arm64/src/start.S
===================================================================
--- kernel/arch/arm64/src/start.S	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/arch/arm64/src/start.S	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2015 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <abi/asmtool.h>
+#include <arch/boot/boot.h>
+#include <arch/mm/km.h>
+#include <arch/mm/page.h>
+#include <arch/regutils.h>
+
+.section K_TEXT_START, "ax"
+
+SYMBOL(kernel_image_start)
+	/*
+	 * Parameters:
+	 * x0 is kernel entry point (kernel_image_start).
+	 * x1 is pointer to the bootinfo structure.
+	 *
+	 * MMU must be disabled at this point.
+	 */
+
+	/* Get address of the main memory and remember it. */
+	adrp x20, kernel_image_start - BOOT_OFFSET
+	adrp x2, physmem_base
+	/* NOTE(review): lo12 offset omitted - str below hits the ADRP page base; correct only if physmem_base is page-aligned. Confirm, else restore: add x2, x2, #:lo12:physmem_base */
+	str x20, [x2]
+
+	/*
+	 * Set up address translation that identity maps the gigabyte area that
+	 * is holding the current execution page.
+	 */
+
+	/* Prepare the level 0 page table. */
+	adrp x2, lower_page_table_level0
+	lsr x3, x20, #PTL0_VA_SHIFT
+	and x3, x3, #PTL0_VA_MASK
+	add x2, x2, x3, lsl #PTL_ENTRY_SIZE_SHIFT
+	mov x3, #( \
+	    1 << PTE_ACCESS_SHIFT | \
+	    PTE_L012_TYPE_TABLE << PTE_TYPE_SHIFT | \
+	    1 << PTE_PRESENT_SHIFT)
+	adrp x4, lower_page_table_level1
+	lsr x4, x4, #FRAME_WIDTH
+	orr x3, x3, x4, lsl #PTE_NEXT_LEVEL_ADDRESS_SHIFT
+	str x3, [x2]
+
+	/* Prepare the level 1 page table. */
+	adrp x2, lower_page_table_level1
+	lsr x3, x20, #PTL1_VA_SHIFT
+	and x3, x3, #PTL1_VA_MASK
+	add x2, x2, x3, lsl #PTL_ENTRY_SIZE_SHIFT
+	mov x3, #( \
+	    1 << PTE_ACCESS_SHIFT | \
+	    MAIR_EL1_DEVICE_MEMORY_INDEX << PTE_ATTR_INDEX_SHIFT | \
+	    PTE_L012_TYPE_BLOCK << PTE_TYPE_SHIFT | \
+	    1 << PTE_PRESENT_SHIFT)
+	lsr x4, x20, #FRAME_WIDTH
+	orr x3, x3, x4, lsl #PTE_OUTPUT_ADDRESS_SHIFT
+	str x3, [x2]
+
+	/*
+	 * Set up address translation that maps the first gigabyte of the kernel
+	 * identity virtual address space to the first gigabyte of the physical
+	 * memory.
+	 */
+
+	mov x21, #KM_ARM64_IDENTITY_START
+
+	/* Prepare the level 0 page table. */
+	adrp x2, upper_page_table_level0
+	lsr x3, x21, #PTL0_VA_SHIFT
+	and x3, x3, #PTL0_VA_MASK
+	add x2, x2, x3, lsl #PTL_ENTRY_SIZE_SHIFT
+	mov x3, #( \
+	    1 << PTE_ACCESS_SHIFT | \
+	    PTE_L012_TYPE_TABLE << PTE_TYPE_SHIFT | \
+	    1 << PTE_PRESENT_SHIFT)
+	adrp x4, upper_page_table_level1
+	lsr x4, x4, #FRAME_WIDTH
+	orr x3, x3, x4, lsl #PTE_NEXT_LEVEL_ADDRESS_SHIFT
+	str x3, [x2]
+
+	/* Prepare the level 1 page table. */
+	adrp x2, upper_page_table_level1
+	lsr x3, x21, #PTL1_VA_SHIFT
+	and x3, x3, #PTL1_VA_MASK
+	add x2, x2, x3, lsl #PTL_ENTRY_SIZE_SHIFT
+	mov x3, #( \
+	    1 << PTE_ACCESS_SHIFT | \
+	    MAIR_EL1_DEVICE_MEMORY_INDEX << PTE_ATTR_INDEX_SHIFT | \
+	    PTE_L012_TYPE_BLOCK << PTE_TYPE_SHIFT | \
+	    1 << PTE_PRESENT_SHIFT)
+	lsr x4, x20, #FRAME_WIDTH
+	orr x3, x3, x4, lsl #PTE_OUTPUT_ADDRESS_SHIFT
+	str x3, [x2]
+
+	/* Make sure there are not any stale TLB entries. */
+	tlbi vmalle1is
+	dsb ish
+
+	/*
+	 * Set TCR_EL1:
+	 * [63:39] - Reserved 0.
+	 * [38]    - TBI1=0, top byte of an address is used in the address
+	 *           calculation for the TTBR1_EL1 region.
+	 * [37]    - TBI0=0, top byte of an address is used in the address
+	 *           calculation for the TTBR0_EL1 region.
+	 * [36]    - AS=1, the upper 16 bits of TTBR0_EL1 and TTBR1_EL1 are used
+	 *           for allocation and matching in the TLB.
+	 * [35]    - Reserved 0.
+	 * [34:32] - IPS=101, intermediate physical address size is 48 bits,
+	 *           256TB.
+	 * [31:30] - TG1=10, TTBR1_EL1 granule size is 4KB.
+	 * [29:28] - SH1=11, memory associated with translation table walks
+	 *           using TTBR1_EL1 is inner shareable.
+	 * [27:26] - ORGN1=01, memory associated with translation table walks
+	 *           using TTBR1_EL1 is normal memory, outer write-through
+	 *           cacheable.
+	 * [25:24] - IRGN1=01, memory associated with translation table walks
+	 *           using TTBR1_EL1 is normal memory, inner write-back
+	 *           write-allocate cacheable.
+	 * [23]    - EPD1=0, perform translation table walks using TTBR1_EL1.
+	 * [22]    - A1=0, TTBR0_EL1.ASID defines the ASID.
+	 * [21:16] - T1SZ=010000, size of the memory region addressed by
+	 *           TTBR1_EL1 is 2^(64 - 16) bytes.
+	 * [15:14] - TG0=00, TTBR0_EL1 granule size is 4KB.
+	 * [13:12] - SH0=11, memory associated with translation table walks
+	 *           using TTBR0_EL1 is inner shareable.
+	 * [11:10] - ORGN0=01, memory associated with translation table walks
+	 *           using TTBR0_EL1 is normal memory, outer write-through
+	 *           cacheable.
+	 * [9:8]   - IRGN0=01, memory associated with translation table walks
+	 *           using TTBR0_EL1 is normal memory, inner write-back
+	 *           write-allocate cacheable.
	 * [7]     - EPD0=0, perform translation table walks using TTBR0_EL1.
+	 * [6]     - Reserved 0.
+	 * [5:0]   - T0SZ=010000, size of the memory region addressed by
+	 *           TTBR0_EL1 is 2^(64 - 16) bytes.
+	 */
+	ldr x2, =0x00000015b5103510
+	msr tcr_el1, x2
+
+	/* Initialize memory attributes. */
+	ldr x2, =(MAIR_EL1_DEVICE_MEMORY_ATTR << \
+	    (MAIR_EL1_DEVICE_MEMORY_INDEX * MAIR_EL1_ATTR_SHIFT) | \
+	    MAIR_EL1_NORMAL_MEMORY_ATTR << \
+	    (MAIR_EL1_NORMAL_MEMORY_INDEX * MAIR_EL1_ATTR_SHIFT))
+	msr mair_el1, x2
+
+	/* Set translation tables. */
+	adrp x2, lower_page_table_level0
+	msr ttbr0_el1, x2
+	adrp x2, upper_page_table_level0
+	msr ttbr1_el1, x2
+	isb
+
+	/*
+	 * Set SCTLR_EL1:
+	 * [31:30] - Reserved 0.
+	 * [29:28] - Reserved 1.
+	 * [27]    - Reserved 0.
+	 * [26]    - UCI=0, any attempt to execute cache maintenance
+	 *           instructions at EL0 is trapped to EL1.
+	 * [25]    - EE=0, explicit data accesses at EL1, and stage 1
+	 *           translation table walks in the EL1&0 translation regime are
+	 *           little-endian.
	 * [24]    - E0E=0, explicit data accesses at EL0 are little-endian.
+	 * [23:22] - Reserved 1.
+	 * [21]    - Reserved 0.
+	 * [20]    - Reserved 1.
+	 * [19]    - WXN=0, regions with write permission are not forced to
+	 *           Execute Never.
+	 * [18]    - nTWE=0, any attempt to execute WFE at EL0 is trapped to
+	 *           EL1.
+	 * [17]    - Reserved 0.
+	 * [16]    - nTWI=0, any attempt to execute WFI at EL0 is trapped to
+	 *           EL1.
+	 * [15]    - UCT=0, accesses to CTR_EL0 from EL0 are trapped to EL1.
+	 * [14]    - DZE=0, any attempt to execute DC ZVA at EL0 is trapped to
+	 *           EL1.
+	 * [13]    - Reserved 0.
+	 * [12]    - I=1, this control has no effect on the cacheability of
+	 *           instruction access to normal memory.
+	 * [11]    - Reserved 1.
+	 * [10]    - Reserved 0.
+	 * [9]     - UMA=0, any attempt to execute MSR/MRS that accesses DAIF at
+	 *           EL0 is trapped to EL1.
+	 * [8]     - SED=1, SETEND is undefined at EL0 using AArch32.
+	 * [7]     - ITD=1, disables some uses of IT at EL0 using AArch32.
+	 * [6]     - Reserved 0.
+	 * [5]     - CP15BEN=0, CP15DMB/DSB/ISB is undefined at EL0 using
+	 *           AArch32.
+	 * [4]     - SA0=1, use of stack pointer with load/store at EL0 must be
+	 *           aligned to a 16-byte boundary.
+	 * [3]     - SA=1, use of stack pointer with load/store at EL1 must be
+	 *           aligned to a 16-byte boundary.
+	 * [2]     - C=1, this control has no effect on the cacheability of data
+	 *           access to normal memory from EL0 and EL1, and normal memory
+	 *           accesses to the EL1&0 stage 1 translation tables.
+	 * [1]     - A=0, instructions that load/store registers (other than
+	 *           load/store exclusive and load-acquire/store-release) do not
+	 *           check that the address being accessed is aligned to the
+	 *           size of the data element(s) being accessed.
+	 * [0]     - M=1, EL1 and EL0 stage 1 address translation enabled.
+	 */
+	ldr w2, =0x30d0199d
+	msr sctlr_el1, x2
+	isb
+
+	/*
+	 * MMU is enabled at this point (SCTLR_EL1.M=1), switch to the kernel
+	 * mapping.
+	 */
+	ldr x2, =1f
+	br x2
+1:
+
+	/* Disable access to low addresses. */
+	mov x2, #0
+	msr ttbr0_el1, x2
+	isb
+	tlbi vmalle1is
+	dsb ish
+
+	/* Jump on a temporary stack. */
+	ldr x2, =temp_stack
+	mov sp, x2
+
+	/* Create the first stack frame. */
+	mov x29, #0
+	mov x30, #0
+	stp x29, x30, [sp, #-16]!
+	mov x29, sp
+
+	/* PA2KA(bootinfo). */
+	sub x1, x1, x20
+	ldr x2, =KM_ARM64_IDENTITY_START
+	add x1, x1, x2
+
+	bl arm64_pre_main
+	bl main_bsp
+
+.section K_DATA_START, "ax"
+
+	/* Page tables. */
+.align 12
+lower_page_table_level0:
+	.space 4096
+lower_page_table_level1:
+	.space 4096
+upper_page_table_level0:
+	.space 4096
+upper_page_table_level1:
+	.space 4096
+
+	/* Physical memory base address. */
+.align 12
+SYMBOL(physmem_base)
+	.quad 0
+
+	/* Temporary stack. */
+.align 10
+	.space 1024
+temp_stack:
Index: kernel/arch/ia32/include/arch/mm/as.h
===================================================================
--- kernel/arch/ia32/include/arch/mm/as.h	(revision 3daba42eca17fefb7371eb495dcaf63d5a95ed9e)
+++ kernel/arch/ia32/include/arch/mm/as.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -37,4 +37,5 @@
 
 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH  0
+#define KERNEL_SEPARATE_PTL0_ARCH           0
 
 #define KERNEL_ADDRESS_SPACE_START_ARCH  UINT32_C(0x80000000)
Index: kernel/arch/ia64/include/arch/mm/as.h
===================================================================
--- kernel/arch/ia64/include/arch/mm/as.h	(revision 3daba42eca17fefb7371eb495dcaf63d5a95ed9e)
+++ kernel/arch/ia64/include/arch/mm/as.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -37,4 +37,5 @@
 
 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH  0
+#define KERNEL_SEPARATE_PTL0_ARCH           0
 
 #define KERNEL_ADDRESS_SPACE_START_ARCH  UINT64_C(0xe000000000000000)
Index: kernel/arch/mips32/include/arch/mm/as.h
===================================================================
--- kernel/arch/mips32/include/arch/mm/as.h	(revision 3daba42eca17fefb7371eb495dcaf63d5a95ed9e)
+++ kernel/arch/mips32/include/arch/mm/as.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -37,4 +37,5 @@
 
 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH  0
+#define KERNEL_SEPARATE_PTL0_ARCH           0
 
 #define KERNEL_ADDRESS_SPACE_START_ARCH  UINT32_C(0x80000000)
Index: kernel/arch/ppc32/include/arch/mm/as.h
===================================================================
--- kernel/arch/ppc32/include/arch/mm/as.h	(revision 3daba42eca17fefb7371eb495dcaf63d5a95ed9e)
+++ kernel/arch/ppc32/include/arch/mm/as.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -39,4 +39,5 @@
 
 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH  0
+#define KERNEL_SEPARATE_PTL0_ARCH           0
 
 #define KERNEL_ADDRESS_SPACE_START_ARCH  UINT32_C(0x80000000)
Index: kernel/arch/riscv64/include/arch/mm/as.h
===================================================================
--- kernel/arch/riscv64/include/arch/mm/as.h	(revision 3daba42eca17fefb7371eb495dcaf63d5a95ed9e)
+++ kernel/arch/riscv64/include/arch/mm/as.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -40,4 +40,5 @@
 
 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH  0
+#define KERNEL_SEPARATE_PTL0_ARCH           0
 
 #define KERNEL_ADDRESS_SPACE_START_ARCH  UINT64_C(0xffff800000000000)
Index: kernel/arch/sparc64/include/arch/mm/sun4u/as.h
===================================================================
--- kernel/arch/sparc64/include/arch/mm/sun4u/as.h	(revision 3daba42eca17fefb7371eb495dcaf63d5a95ed9e)
+++ kernel/arch/sparc64/include/arch/mm/sun4u/as.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -39,4 +39,5 @@
 
 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH  1
+#define KERNEL_SEPARATE_PTL0_ARCH           0
 
 #define KERNEL_ADDRESS_SPACE_START_ARCH  UINT64_C(0x0000000000000000)
Index: kernel/arch/sparc64/include/arch/mm/sun4v/as.h
===================================================================
--- kernel/arch/sparc64/include/arch/mm/sun4v/as.h	(revision 3daba42eca17fefb7371eb495dcaf63d5a95ed9e)
+++ kernel/arch/sparc64/include/arch/mm/sun4v/as.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -41,4 +41,5 @@
 
 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH  1
+#define KERNEL_SEPARATE_PTL0_ARCH           0
 
 #define KERNEL_ADDRESS_SPACE_START_ARCH  UINT64_C(0x0000000000000000)
Index: kernel/doc/doxygroups.h
===================================================================
--- kernel/doc/doxygroups.h	(revision 3daba42eca17fefb7371eb495dcaf63d5a95ed9e)
+++ kernel/doc/doxygroups.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -47,4 +47,11 @@
 
 /**
+ *     @cond arm64
+ *     @defgroup kernel_arm64_proc arm64
+ *     @ingroup proc
+ *     @endcond
+ */
+
+/**
  *     @cond ia32
  *     @defgroup kernel_ia32_proc ia32
@@ -123,4 +130,11 @@
  *     @cond arm32
  *     @defgroup kernel_arm32_mm arm32
+ *     @ingroup mm
+ *     @endcond
+ */
+
+/**
+ *     @cond arm64
+ *     @defgroup kernel_arm64_mm arm64
  *     @ingroup mm
  *     @endcond
@@ -209,4 +223,11 @@
 
 /**
+ *     @cond arm64
+ *     @defgroup kernel_arm64_ddi arm64
+ *     @ingroup ddi
+ *     @endcond
+ */
+
+/**
  *     @cond ia32
  *     @defgroup kernel_ia32_ddi ia32
@@ -281,4 +302,11 @@
 
 /**
+ *     @cond arm64
+ *     @defgroup kernel_arm64_debug arm64
+ *     @ingroup debug
+ *     @endcond
+ */
+
+/**
  *     @cond ia32
  *     @defgroup kernel_amd64_debug ia32/amd64
@@ -352,4 +380,11 @@
 
 /**
+ *     @cond arm64
+ *     @defgroup kernel_arm64_interrupt arm64
+ *     @ingroup interrupt
+ *     @endcond
+ */
+
+/**
  *     @cond ia32
  *     @defgroup kernel_ia32_interrupt ia32
@@ -429,4 +464,11 @@
 
 /**
+ *     @cond arm64
+ *     @defgroup kernel_arm64 arm64
+ *     @ingroup others
+ *     @endcond
+ */
+
+/**
  *     @cond ia32
  *     @defgroup kernel_ia32 ia32
Index: kernel/genarch/Makefile.inc
===================================================================
--- kernel/genarch/Makefile.inc	(revision 3daba42eca17fefb7371eb495dcaf63d5a95ed9e)
+++ kernel/genarch/Makefile.inc	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -127,4 +127,9 @@
 endif
 
+ifeq ($(CONFIG_GICV2), y)
+GENARCH_SOURCES += \
+	genarch/src/drivers/gicv2/gicv2.c
+endif
+
 ifeq ($(CONFIG_VIA_CUDA),y)
 GENARCH_SOURCES += \
Index: kernel/genarch/include/genarch/drivers/gicv2/gicv2.h
===================================================================
--- kernel/genarch/include/genarch/drivers/gicv2/gicv2.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/genarch/include/genarch/drivers/gicv2/gicv2.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2016 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup genarch
+ * @{
+ */
+/** @file
+ * @brief ARM Generic Interrupt Controller, Architecture version 2.0.
+ */
+
+#ifndef KERN_GICV2_H_
+#define KERN_GICV2_H_
+
+#include <typedefs.h>
+
+/** GICv2 distributor register map. */
+typedef struct {
+	/** Distributor control register. */
+	ioport32_t ctlr;
+#define GICV2D_CTLR_ENABLE_FLAG  0x1
+
+	/** Interrupt controller type register. */
+	const ioport32_t typer;
+#define GICV2D_TYPER_IT_LINES_NUMBER_SHIFT  0
+#define GICV2D_TYPER_IT_LINES_NUMBER_MASK \
+	(0x1f << GICV2D_TYPER_IT_LINES_NUMBER_SHIFT)
+
+	/** Distributor implementer identification register. */
+	const ioport32_t iidr;
+	/** Reserved. */
+	ioport32_t res_[5];
+	/** Implementation defined registers. */
+	ioport32_t impl[8];
+	/** Reserved. */
+	ioport32_t res2_[16];
+	/** Interrupt group registers. */
+	ioport32_t igroupr[32];
+	/** Interrupt set-enable registers. */
+	ioport32_t isenabler[32];
+	/** Interrupt clear-enable registers. */
+	ioport32_t icenabler[32];
+	/** Interrupt set-pending registers. */
+	ioport32_t ispendr[32];
+	/** Interrupt clear-pending registers. */
+	ioport32_t icpendr[32];
+	/** GICv2 interrupt set-active registers. */
+	ioport32_t isactiver[32];
+	/** Interrupt clear-active registers. */
+	ioport32_t icactiver[32];
+	/** Interrupt priority registers. */
+	ioport32_t ipriorityr[255];
+	/** Reserved. */
+	ioport32_t res3_;
+	/** Interrupt processor target registers. First 8 words are read-only.
+	 */
+	ioport32_t itargetsr[255];
+	/** Reserved. */
+	ioport32_t res4_;
+	/** Interrupt configuration registers. */
+	ioport32_t icfgr[64];
+	/** Implementation defined registers. */
+	ioport32_t impl2[64];
+	/** Non-secure access control registers. */
+	ioport32_t nsacr[64];
+	/** Software generated interrupt register. */
+	ioport32_t sgir;
+	/** Reserved. */
+	ioport32_t res5_[3];
+	/** SGI clear-pending registers. */
+	ioport32_t cpendsgir[4];
+	/** SGI set-pending registers. */
+	ioport32_t spendsgir[4];
+	/** Reserved. */
+	ioport32_t res6_[40];
+	/** Implementation defined identification registers. */
+	const ioport32_t impl3[12];
+} gicv2_distr_regs_t;
+
+/** GICv2 CPU interface register map. */
+typedef struct {
+	/** CPU interface control register. */
+	ioport32_t ctlr;
+#define GICV2C_CTLR_ENABLE_FLAG  0x1
+
+	/** Interrupt priority mask register. */
+	ioport32_t pmr;
+	/** Binary point register. */
+	ioport32_t bpr;
+	/** Interrupt acknowledge register. */
+	const ioport32_t iar;
+#define GICV2C_IAR_INTERRUPT_ID_SHIFT  0
+#define GICV2C_IAR_INTERRUPT_ID_MASK \
+	(0x3ff << GICV2C_IAR_INTERRUPT_ID_SHIFT)
+#define GICV2C_IAR_CPUID_SHIFT  10
+#define GICV2C_IAR_CPUID_MASK \
+	(0x7 << GICV2C_IAR_CPUID_SHIFT)
+
+	/** End of interrupt register. */
+	ioport32_t eoir;
+	/** Running priority register. */
+	const ioport32_t rpr;
+	/** Highest priority pending interrupt register. */
+	const ioport32_t hppir;
+	/** Aliased binary point register. */
+	ioport32_t abpr;
+	/** Aliased interrupt acknowledge register. */
+	const ioport32_t aiar;
+	/** Aliased end of interrupt register. */
+	ioport32_t aeoir;
+	/** Aliased highest priority pending interrupt register. */
+	const ioport32_t ahppir;
+	/** Reserved. */
+	ioport32_t res_[5];
+	/** Implementation defined registers. */
+	ioport32_t impl[36];
+	/** Active priorities registers. */
+	ioport32_t apr[4];
+	/** Non-secure active priorities registers. */
+	ioport32_t nsapr[4];
+	/** Reserved. */
+	ioport32_t res2_[3];
+	/** CPU interface identification register. */
+	const ioport32_t iidr;
+	/** Unallocated. */
+	ioport32_t unalloc_[960];
+	/** Deactivate interrupt register. */
+	ioport32_t dir;
+} gicv2_cpui_regs_t;
+
+/** GICv2 driver-specific device data. */
+typedef struct {
+	gicv2_distr_regs_t *distr;
+	gicv2_cpui_regs_t *cpui;
+	unsigned inum_total;
+} gicv2_t;
+
+extern void gicv2_init(gicv2_t *, gicv2_distr_regs_t *, gicv2_cpui_regs_t *);
+extern unsigned gicv2_inum_get_total(gicv2_t *);
+extern void gicv2_inum_get(gicv2_t *, unsigned *, unsigned *);
+extern void gicv2_end(gicv2_t *, unsigned, unsigned);
+extern void gicv2_enable(gicv2_t *, unsigned);
+extern void gicv2_disable(gicv2_t *, unsigned);
+
+#endif
+
+/** @}
+ */
Index: kernel/genarch/include/genarch/drivers/pl011/pl011.h
===================================================================
--- kernel/genarch/include/genarch/drivers/pl011/pl011.h	(revision 3daba42eca17fefb7371eb495dcaf63d5a95ed9e)
+++ kernel/genarch/include/genarch/drivers/pl011/pl011.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -38,4 +38,5 @@
 #define KERN_PL011_H_
 
+#include <ddi/ddi.h>
 #include <ddi/irq.h>
 #include <console/chardev.h>
@@ -150,4 +151,5 @@
 	outdev_t outdev;
 	irq_t irq;
+	parea_t parea;
 } pl011_uart_t;
 
Index: kernel/genarch/src/drivers/gicv2/gicv2.c
===================================================================
--- kernel/genarch/src/drivers/gicv2/gicv2.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
+++ kernel/genarch/src/drivers/gicv2/gicv2.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2016 Petr Pavlu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup genarch
+ * @{
+ */
+/** @file
+ * @brief ARM Generic Interrupt Controller, Architecture version 2.0.
+ *
+ * This IRQ controller is present on the QEMU virt platform for ARM.
+ */
+
+#include <arch/asm.h>
+#include <genarch/drivers/gicv2/gicv2.h>
+#include <assert.h>
+
+/** Initialize GICv2 interrupt controller.
+ *
+ * @param irqc Instance structure.
+ * @param distr Distributor registers.
+ * @param cpui CPU interface registers.
+ */
+void gicv2_init(gicv2_t *irqc, gicv2_distr_regs_t *distr,
+    gicv2_cpui_regs_t *cpui)
+{
+	irqc->distr = distr;
+	irqc->cpui = cpui;
+
+	/* Get maximum number of interrupts. */
+	uint32_t typer = pio_read_32(&distr->typer);
+	irqc->inum_total = (((typer & GICV2D_TYPER_IT_LINES_NUMBER_MASK) >>
+	    GICV2D_TYPER_IT_LINES_NUMBER_SHIFT) + 1) * 32;
+
+	/* Disable all interrupts. */
+	for (unsigned i = 0; i < irqc->inum_total / 32; i++)
+		pio_write_32(&distr->icenabler[i], 0xffffffff);
+
+	/* Enable interrupts for all priority levels. */
+	pio_write_32(&cpui->pmr, 0xff);
+
+	/* Enable signaling of interrupts. */
+	pio_write_32(&cpui->ctlr, GICV2C_CTLR_ENABLE_FLAG);
+	pio_write_32(&distr->ctlr, GICV2D_CTLR_ENABLE_FLAG);
+}
+
+/** Obtain total number of interrupts that the controller supports. */
+unsigned gicv2_inum_get_total(gicv2_t *irqc)
+{
+	return irqc->inum_total;
+}
+
+/** Obtain number of pending interrupt. */
+void gicv2_inum_get(gicv2_t *irqc, unsigned *inum, unsigned *cpuid)
+{
+	uint32_t iar = pio_read_32(&irqc->cpui->iar);
+
+	*inum = (iar & GICV2C_IAR_INTERRUPT_ID_MASK) >>
+	    GICV2C_IAR_INTERRUPT_ID_SHIFT;
+	*cpuid = (iar & GICV2C_IAR_CPUID_MASK) >> GICV2C_IAR_CPUID_SHIFT;
+}
+
+/** Signal end of interrupt to the controller. */
+void gicv2_end(gicv2_t *irqc, unsigned inum, unsigned cpuid)
+{
+	assert((inum & ~((unsigned) GICV2C_IAR_INTERRUPT_ID_MASK >>
+	    GICV2C_IAR_INTERRUPT_ID_SHIFT)) == 0);
+	assert((cpuid & ~((unsigned) GICV2C_IAR_CPUID_MASK >>
+	    GICV2C_IAR_CPUID_SHIFT)) == 0);
+
+	uint32_t eoir = (inum << GICV2C_IAR_INTERRUPT_ID_SHIFT) |
+	    (cpuid << GICV2C_IAR_CPUID_SHIFT);
+	pio_write_32(&irqc->cpui->eoir, eoir);
+}
+
+/** Enable specific interrupt. @param irqc Instance. @param inum IRQ number. */
+void gicv2_enable(gicv2_t *irqc, unsigned inum)
+{
+	assert(inum < irqc->inum_total);
+
+	pio_write_32(&irqc->distr->isenabler[inum / 32], 1U << (inum % 32));
+}
+
+/** Disable specific interrupt. @param irqc Instance. @param inum IRQ number. */
+void gicv2_disable(gicv2_t *irqc, unsigned inum)
+{
+	assert(inum < irqc->inum_total);
+
+	pio_write_32(&irqc->distr->icenabler[inum / 32], 1U << (inum % 32));
+}
+
+/** @}
+ */
Index: kernel/genarch/src/drivers/pl011/pl011.c
===================================================================
--- kernel/genarch/src/drivers/pl011/pl011.c	(revision 3daba42eca17fefb7371eb495dcaf63d5a95ed9e)
+++ kernel/genarch/src/drivers/pl011/pl011.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -60,7 +60,11 @@
 	pl011_uart_t *uart = dev->data;
 
-	if (!ascii_check(ch)) {
+	/* If the userspace owns the console, do not output anything. */
+	if (uart->parea.mapped && !console_override)
+		return;
+
+	if (!ascii_check(ch))
 		pl011_uart_sendb(uart, U_SPECIAL);
-	} else {
+	else {
 		if (ch == '\n')
 			pl011_uart_sendb(uart, (uint8_t) '\r');
@@ -100,5 +104,5 @@
 	assert(uart);
 	uart->regs = (void *)km_map(addr, sizeof(pl011_uart_regs_t),
-	    KM_NATURAL_ALIGNMENT, PAGE_NOT_CACHEABLE);
+	    KM_NATURAL_ALIGNMENT, PAGE_WRITE | PAGE_NOT_CACHEABLE);
 	assert(uart->regs);
 
@@ -131,4 +135,11 @@
 	uart->irq.instance = uart;
 
+	ddi_parea_init(&uart->parea);
+	uart->parea.pbase = addr;
+	uart->parea.frames = 1;
+	uart->parea.unpriv = false;
+	uart->parea.mapped = false;
+	ddi_parea_register(&uart->parea);
+
 	return true;
 }
Index: kernel/genarch/src/mm/as_pt.c
===================================================================
--- kernel/genarch/src/mm/as_pt.c	(revision 3daba42eca17fefb7371eb495dcaf63d5a95ed9e)
+++ kernel/genarch/src/mm/as_pt.c	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -76,7 +76,7 @@
 	    PA2KA(frame_alloc(PTL0_FRAMES, FRAME_LOWMEM, PTL0_SIZE - 1));
 
-	if (flags & FLAG_AS_KERNEL)
-		memsetb(dst_ptl0, PTL0_SIZE, 0);
-	else {
+	memsetb(dst_ptl0, PTL0_SIZE, 0);
+
+	if (!KERNEL_SEPARATE_PTL0 && !(flags & FLAG_AS_KERNEL)) {
 		/*
 		 * Copy the kernel address space portion to new PTL0.
@@ -93,5 +93,4 @@
 		    &dst_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
 
-		memsetb(dst_ptl0, PTL0_SIZE, 0);
 		memcpy((void *) dst, (void *) src,
 		    PTL0_SIZE - (src - (uintptr_t) src_ptl0));
Index: kernel/generic/include/mm/as.h
===================================================================
--- kernel/generic/include/mm/as.h	(revision 3daba42eca17fefb7371eb495dcaf63d5a95ed9e)
+++ kernel/generic/include/mm/as.h	(revision a4bd537597f8fbdaf19ad3871f0bd70b416845d9)
@@ -59,4 +59,10 @@
 #define KERNEL_ADDRESS_SPACE_SHADOWED  KERNEL_ADDRESS_SPACE_SHADOWED_ARCH
 
+/**
+ * Defined to be true if user address space and kernel address space do not
+ * share the same page table.
+ */
+#define KERNEL_SEPARATE_PTL0 KERNEL_SEPARATE_PTL0_ARCH
+
 #define KERNEL_ADDRESS_SPACE_START  KERNEL_ADDRESS_SPACE_START_ARCH
 #define KERNEL_ADDRESS_SPACE_END    KERNEL_ADDRESS_SPACE_END_ARCH
