Index: kernel/arch/arm64/include/arch/barrier.h
===================================================================
--- kernel/arch/arm64/include/arch/barrier.h	(revision d1582b502edcb7bfeef771a2019538f265e7e27c)
+++ kernel/arch/arm64/include/arch/barrier.h	(revision f7c12b31590a80fc5aa38db903e609c2055972e4)
@@ -37,44 +37,4 @@
 #define KERN_arm64_BARRIER_H_
 
-#include <stddef.h>
-
-#define COHERENCE_INVAL_MIN  4
-
-/** Ensure visibility of instruction updates for a multiprocessor.
- *
- * @param addr Address of the first instruction.
- * @param size Size of the instruction block (in bytes).
- */
-static inline void ensure_visibility(void *addr, size_t len)
-{
-	size_t i;
-
-	/*
-	 * Clean to Point of Unification to make the new instructions visible to
-	 * the instruction cache.
-	 */
-	for (i = 0; i < len; i += COHERENCE_INVAL_MIN)
-		asm volatile (
-		    "dc cvau, %[addr]\n"
-		    : : [addr] "r" ((char *) addr + i)
-		);
-
-	/* Ensure completion on all PEs. */
-	asm volatile ("dsb ish" ::: "memory");
-
-	/* Ensure instruction cache/branch predictor discards stale data. */
-	for (i = 0; i < len; i += COHERENCE_INVAL_MIN)
-		asm volatile (
-		    "ic ivau, %[addr]\n"
-		    : : [addr] "r" ((char *) addr + i)
-		);
-
-	/* Ensure completion on all PEs. */
-	asm volatile ("dsb ish" ::: "memory");
-
-	/* Synchronize context on this PE. */
-	asm volatile ("isb");
-}
-
 #endif
 
Index: kernel/arch/arm64/include/arch/istate_struct.h
===================================================================
--- kernel/arch/arm64/include/arch/istate_struct.h	(revision d1582b502edcb7bfeef771a2019538f265e7e27c)
+++ kernel/arch/arm64/include/arch/istate_struct.h	(revision f7c12b31590a80fc5aa38db903e609c2055972e4)
@@ -65,5 +65,6 @@
 #define ISTATE_OFFSET_X29    0x108
 #define ISTATE_OFFSET_X30    0x110
-#define ISTATE_SIZE          0x118
+#define ISTATE_OFFSET_PAD0   0x118
+#define ISTATE_SIZE          0x120
 
 #ifndef __ASSEMBLER__
@@ -109,6 +110,15 @@
 	/* Link Register. */
 	uint64_t x30;
+
+	/*
+	 * ARM64 mandates that the stack pointer is always aligned to
+	 * a 16-byte boundary. To satisfy this condition, the size of
+	 * this data structure also needs to be a multiple of 16 bytes.
+	 * This is the reason for this padding.
+	 */
+	uint64_t pad0;
 } istate_t;
 
 #endif
+
 #endif
Index: kernel/arch/arm64/include/arch/mach/hikey960/hikey960.h
===================================================================
--- kernel/arch/arm64/include/arch/mach/hikey960/hikey960.h	(revision f7c12b31590a80fc5aa38db903e609c2055972e4)
+++ kernel/arch/arm64/include/arch/mach/hikey960/hikey960.h	(revision f7c12b31590a80fc5aa38db903e609c2055972e4)
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2021 Martin Decky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64_hikey960
+ * @brief HiKey 960 platform.
+ * @ingroup kernel_arm64
+ * @{
+ */
+/** @file
+ * @brief HiKey 960 platform driver.
+ */
+
+#ifndef KERN_arm64_mach_hikey960_H_
+#define KERN_arm64_mach_hikey960_H_
+
+#include <arch/machine_func.h>
+
+extern struct arm_machine_ops hikey960_machine_ops;
+
+#endif
+
+/** @}
+ */
Index: kernel/arch/arm64/include/arch/mach/virt/virt.h
===================================================================
--- kernel/arch/arm64/include/arch/mach/virt/virt.h	(revision d1582b502edcb7bfeef771a2019538f265e7e27c)
+++ kernel/arch/arm64/include/arch/mach/virt/virt.h	(revision f7c12b31590a80fc5aa38db903e609c2055972e4)
@@ -36,6 +36,6 @@
  */
 
-#ifndef KERN_arm64_virt_H_
-#define KERN_arm64_virt_H_
+#ifndef KERN_arm64_mach_virt_H_
+#define KERN_arm64_mach_virt_H_
 
 #include <arch/machine_func.h>
Index: kernel/arch/arm64/include/arch/mm/km.h
===================================================================
--- kernel/arch/arm64/include/arch/mm/km.h	(revision d1582b502edcb7bfeef771a2019538f265e7e27c)
+++ kernel/arch/arm64/include/arch/mm/km.h	(revision f7c12b31590a80fc5aa38db903e609c2055972e4)
@@ -41,9 +41,9 @@
 #include <typedefs.h>
 
-#define KM_ARM64_IDENTITY_START  UINT64_C(0xffffffff80000000)
-#define KM_ARM64_IDENTITY_SIZE   UINT64_C(0x0000000080000000)
+#define KM_ARM64_IDENTITY_START  UINT64_C(0xffffffff00000000)
+#define KM_ARM64_IDENTITY_SIZE   UINT64_C(0x0000000100000000)
 
 #define KM_ARM64_NON_IDENTITY_START  UINT64_C(0xffff000000000000)
-#define KM_ARM64_NON_IDENTITY_SIZE   UINT64_C(0x0000ffff80000000)
+#define KM_ARM64_NON_IDENTITY_SIZE   UINT64_C(0x0000ffff00000000)
 
 extern void km_identity_arch_init(void);
@@ -53,9 +53,9 @@
 #else /* __ASSEMBLER__ */
 
-#define KM_ARM64_IDENTITY_START  0xffffffff80000000
-#define KM_ARM64_IDENTITY_SIZE   0x0000000080000000
+#define KM_ARM64_IDENTITY_START  0xffffffff00000000
+#define KM_ARM64_IDENTITY_SIZE   0x0000000100000000
 
 #define KM_ARM64_NON_IDENTITY_START  0xffff000000000000
-#define KM_ARM64_NON_IDENTITY_SIZE   0x0000ffff80000000
+#define KM_ARM64_NON_IDENTITY_SIZE   0x0000ffff00000000
 
 #endif /* __ASSEMBLER__ */
Index: kernel/arch/arm64/include/arch/mm/page.h
===================================================================
--- kernel/arch/arm64/include/arch/mm/page.h	(revision d1582b502edcb7bfeef771a2019538f265e7e27c)
+++ kernel/arch/arm64/include/arch/mm/page.h	(revision f7c12b31590a80fc5aa38db903e609c2055972e4)
@@ -53,7 +53,7 @@
 
 #define KA2PA(x) \
-	(((uintptr_t) (x)) - UINT64_C(0xffffffff80000000) + physmem_base)
+	(((uintptr_t) (x)) - UINT64_C(0xffffffff00000000) + physmem_base)
 #define PA2KA(x) \
-	(((uintptr_t) (x)) + UINT64_C(0xffffffff80000000) - physmem_base)
+	(((uintptr_t) (x)) + UINT64_C(0xffffffff00000000) - physmem_base)
 
 #endif /* __ASSEMBLER__ */
Index: kernel/arch/arm64/meson.build
===================================================================
--- kernel/arch/arm64/meson.build	(revision d1582b502edcb7bfeef771a2019538f265e7e27c)
+++ kernel/arch/arm64/meson.build	(revision f7c12b31590a80fc5aa38db903e609c2055972e4)
@@ -44,5 +44,4 @@
 	'src/mm/page.c',
 	'src/mm/tlb.c',
-	'src/smc.c',
 	'src/smp/ipi.c',
 	'src/smp/smp.c',
@@ -52,4 +51,8 @@
 if MACHINE == 'virt'
 	arch_src += files('src/mach/virt/virt.c')
+endif
+
+if MACHINE == 'hikey960'
+	arch_src += files('src/mach/hikey960/hikey960.c')
 endif
 
Index: kernel/arch/arm64/src/asm.S
===================================================================
--- kernel/arch/arm64/src/asm.S	(revision d1582b502edcb7bfeef771a2019538f265e7e27c)
+++ kernel/arch/arm64/src/asm.S	(revision f7c12b31590a80fc5aa38db903e609c2055972e4)
@@ -59,4 +59,42 @@
 	ret
 FUNCTION_END(early_putuchar)
+
+/** Ensure coherence of instruction and data caches
+ *
+ * @param x0 Starting address of the code region to make coherent.
+ * @param x1 Size of the region (in bytes).
+ *
+ */
+FUNCTION_BEGIN(smc_coherence)
+	/* Initialize loop */
+	mov x9, x0
+	mov x10, xzr
+
+	__dc_loop:
+		/* Clean data cache line by VA to PoU (4-byte stride is conservative vs. CTR_EL0 line size) */
+		dc cvau, x9
+		add x9, x9, #4
+		add x10, x10, #4
+		cmp x10, x1
+		blo __dc_loop
+
+	dsb ish
+
+	/* Initialize loop */
+	mov x9, x0
+	mov x10, xzr
+
+	__ic_loop:
+		/* Invalidate instruction cache line by VA to PoU */
+		ic ivau, x9
+		add x9, x9, #4
+		add x10, x10, #4
+		cmp x10, x1
+		blo __ic_loop
+
+	dsb ish
+	isb
+	ret
+FUNCTION_END(smc_coherence)
 
 /* Static checks for the istate_t save/load. */
Index: kernel/arch/arm64/src/mach/hikey960/hikey960.c
===================================================================
--- kernel/arch/arm64/src/mach/hikey960/hikey960.c	(revision f7c12b31590a80fc5aa38db903e609c2055972e4)
+++ kernel/arch/arm64/src/mach/hikey960/hikey960.c	(revision f7c12b31590a80fc5aa38db903e609c2055972e4)
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2021 Martin Decky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup kernel_arm64_hikey960
+ * @{
+ */
+/** @file
+ * @brief HiKey 960 platform driver.
+ */
+
+#include <arch/mach/hikey960/hikey960.h>
+#include <console/console.h>
+#include <genarch/drivers/gicv2/gicv2.h>
+#include <genarch/drivers/pl011/pl011.h>
+#include <genarch/srln/srln.h>
+#include <mm/km.h>
+#include <sysinfo/sysinfo.h>
+
+#define HIKEY960_VTIMER_IRQ         27
+#define HIKEY960_UART_IRQ           111
+#define HIKEY960_GIC_DISTR_ADDRESS  0xE82B1000
+#define HIKEY960_GIC_CPUI_ADDRESS   0xE82B2000
+#define HIKEY960_UART_ADDRESS       0xFFF32000
+
+struct {
+	gicv2_t gicv2;
+	pl011_uart_t uart;
+} hikey960;
+
+static void hikey960_init(void)
+{
+	/* Initialize interrupt controller. */
+	gicv2_distr_regs_t *distr = (void *) km_map(HIKEY960_GIC_DISTR_ADDRESS,
+	    ALIGN_UP(sizeof(*distr), PAGE_SIZE), KM_NATURAL_ALIGNMENT,
+	    PAGE_NOT_CACHEABLE | PAGE_READ | PAGE_WRITE | PAGE_KERNEL);
+	gicv2_cpui_regs_t *cpui = (void *) km_map(HIKEY960_GIC_CPUI_ADDRESS,
+	    ALIGN_UP(sizeof(*cpui), PAGE_SIZE), KM_NATURAL_ALIGNMENT,
+	    PAGE_NOT_CACHEABLE | PAGE_READ | PAGE_WRITE | PAGE_KERNEL);
+	gicv2_init(&hikey960.gicv2, distr, cpui);
+}
+
+static void hikey960_irq_exception(unsigned int exc_no, istate_t *istate)
+{
+	unsigned int inum, cpuid;
+	gicv2_inum_get(&hikey960.gicv2, &inum, &cpuid);
+
+	/* Dispatch the interrupt. */
+	irq_t *irq = irq_dispatch_and_lock(inum);
+	if (irq) {
+		/* The IRQ handler was found. */
+		irq->handler(irq);
+		irq_spinlock_unlock(&irq->lock, false);
+	} else {
+		/* Spurious interrupt. */
+		printf("cpu%d: spurious interrupt (inum=%u)\n", CPU->id, inum);
+	}
+
+	/* Signal end of interrupt to the controller. */
+	gicv2_end(&hikey960.gicv2, inum, cpuid);
+}
+
+static void hikey960_output_init(void)
+{
+	if (!pl011_uart_init(&hikey960.uart, HIKEY960_UART_IRQ,
+	    HIKEY960_UART_ADDRESS))
+		return;
+
+	stdout_wire(&hikey960.uart.outdev);
+}
+
+static void hikey960_input_init(void)
+{
+	srln_instance_t *srln_instance = srln_init();
+	if (srln_instance == NULL)
+		return;
+
+	indev_t *sink = stdin_wire();
+	indev_t *srln = srln_wire(srln_instance, sink);
+	pl011_uart_input_wire(&hikey960.uart, srln);
+	gicv2_enable(&hikey960.gicv2, HIKEY960_UART_IRQ);
+}
+
+static inr_t hikey960_enable_vtimer_irq(void)
+{
+	gicv2_enable(&hikey960.gicv2, HIKEY960_VTIMER_IRQ);
+	return HIKEY960_VTIMER_IRQ;
+}
+
+static size_t hikey960_get_irq_count(void)
+{
+	return gicv2_inum_get_total(&hikey960.gicv2);
+}
+
+static const char *hikey960_get_platform_name(void)
+{
+	return "hikey960";
+}
+
+struct arm_machine_ops hikey960_machine_ops = {
+	hikey960_init,
+	hikey960_irq_exception,
+	hikey960_output_init,
+	hikey960_input_init,
+	hikey960_enable_vtimer_irq,
+	hikey960_get_irq_count,
+	hikey960_get_platform_name
+};
+
+/** @}
+ */
Index: kernel/arch/arm64/src/machine_func.c
===================================================================
--- kernel/arch/arm64/src/machine_func.c	(revision d1582b502edcb7bfeef771a2019538f265e7e27c)
+++ kernel/arch/arm64/src/machine_func.c	(revision f7c12b31590a80fc5aa38db903e609c2055972e4)
@@ -37,4 +37,5 @@
 
 #include <arch/machine_func.h>
+#include <arch/mach/hikey960/hikey960.h>
 #include <arch/mach/virt/virt.h>
 
@@ -47,4 +48,6 @@
 #if defined(MACHINE_virt)
 	machine_ops = &virt_machine_ops;
+#elif defined(MACHINE_hikey960)
+	machine_ops = &hikey960_machine_ops;
 #else
 #error Machine type not defined.
Index: kernel/arch/arm64/src/smc.c
===================================================================
--- kernel/arch/arm64/src/smc.c	(revision d1582b502edcb7bfeef771a2019538f265e7e27c)
+++ 	(revision )
@@ -1,35 +1,0 @@
-/*
- * Copyright (c) 2019 Petr Pavlu
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * - The name of the author may not be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch/barrier.h>
-#include <barrier.h>
-
-void smc_coherence(void *a, size_t l)
-{
-	ensure_visibility(a, l);
-}
Index: kernel/arch/arm64/src/start.S
===================================================================
--- kernel/arch/arm64/src/start.S	(revision d1582b502edcb7bfeef771a2019538f265e7e27c)
+++ kernel/arch/arm64/src/start.S	(revision f7c12b31590a80fc5aa38db903e609c2055972e4)
@@ -35,21 +35,40 @@
 .section K_TEXT_START, "ax"
 
+.macro dcache_flush addr size temp0 temp1
+	mov \temp0, \addr
+	mov \temp1, xzr
+
+	0:
+		/* Clean data cache line by VA to PoU */
+		dc cvau, \temp0
+		add \temp0, \temp0, #4
+		add \temp1, \temp1, #4
+		cmp \temp1, \size
+		blo 0b
+
+	dsb ish
+	isb
+.endm
+
+/** Kernel entry
+ *
+ * MMU must be disabled at this point.
+ *
+ * @param x0 Kernel entry point (kernel_image_start).
+ * @param x1 Pointer to the bootinfo structure.
+ *
+ */
 SYMBOL(kernel_image_start)
-	/*
-	 * Parameters:
-	 * x0 is kernel entry point (kernel_image_start).
-	 * x1 is pointer to the bootinfo structure.
-	 *
-	 * MMU must be disabled at this point.
-	 */
-
 	/* Get address of the main memory and remember it. */
 	adrp x20, kernel_image_start - BOOT_OFFSET
 	adrp x2, physmem_base
-	/* add x2, x2, #:lo12:physmem_base */
 	str x20, [x2]
 
-	/*
-	 * Set up address translation that identity maps the gigabyte area that
+	/* Flush the data cache of physmem_base. */
+	mov x28, #8
+	dcache_flush x2 x28 x29 x30
+
+	/*
+	 * Set up address translation that identity maps the 1 GiB area that
 	 * is holding the current execution page.
 	 */
@@ -76,5 +95,5 @@
 	mov x3, #( \
 	    1 << PTE_ACCESS_SHIFT | \
-	    MAIR_EL1_DEVICE_MEMORY_INDEX << PTE_ATTR_INDEX_SHIFT | \
+	    MAIR_EL1_NORMAL_MEMORY_INDEX << PTE_ATTR_INDEX_SHIFT | \
 	    PTE_L012_TYPE_BLOCK << PTE_TYPE_SHIFT | \
 	    1 << PTE_PRESENT_SHIFT)
@@ -84,10 +103,11 @@
 
 	/*
-	 * Set up address translation that maps the first gigabyte of the kernel
-	 * identity virtual address space to the first gigabyte of the physical
+	 * Set up address translation that maps the first 4 GiB of the kernel
+	 * identity virtual address space to the first 4 GiB of the physical
 	 * memory.
 	 */
 
 	mov x21, #KM_ARM64_IDENTITY_START
+	ldr x22, =(1024 * 1024 * 1024)
 
 	/* Prepare the level 0 page table. */
@@ -112,10 +132,78 @@
 	mov x3, #( \
 	    1 << PTE_ACCESS_SHIFT | \
+	    MAIR_EL1_NORMAL_MEMORY_INDEX << PTE_ATTR_INDEX_SHIFT | \
+	    PTE_L012_TYPE_BLOCK << PTE_TYPE_SHIFT | \
+	    1 << PTE_PRESENT_SHIFT)
+	lsr x4, x20, #FRAME_WIDTH
+	orr x3, x3, x4, lsl #PTE_OUTPUT_ADDRESS_SHIFT
+	str x3, [x2]
+
+	/* 2nd GiB */
+	add x23, x20, x22
+	add x24, x21, x22
+
+	adrp x2, upper_page_table_level1
+	lsr x3, x24, #PTL1_VA_SHIFT
+	and x3, x3, #PTL1_VA_MASK
+	add x2, x2, x3, lsl #PTL_ENTRY_SIZE_SHIFT
+	mov x3, #( \
+	    1 << PTE_ACCESS_SHIFT | \
+	    MAIR_EL1_NORMAL_MEMORY_INDEX << PTE_ATTR_INDEX_SHIFT | \
+	    PTE_L012_TYPE_BLOCK << PTE_TYPE_SHIFT | \
+	    1 << PTE_PRESENT_SHIFT)
+	lsr x4, x23, #FRAME_WIDTH
+	orr x3, x3, x4, lsl #PTE_OUTPUT_ADDRESS_SHIFT
+	str x3, [x2]
+
+	/* 3rd GiB */
+	add x23, x23, x22
+	add x24, x24, x22
+
+	adrp x2, upper_page_table_level1
+	lsr x3, x24, #PTL1_VA_SHIFT
+	and x3, x3, #PTL1_VA_MASK
+	add x2, x2, x3, lsl #PTL_ENTRY_SIZE_SHIFT
+	mov x3, #( \
+	    1 << PTE_ACCESS_SHIFT | \
+	    MAIR_EL1_NORMAL_MEMORY_INDEX << PTE_ATTR_INDEX_SHIFT | \
+	    PTE_L012_TYPE_BLOCK << PTE_TYPE_SHIFT | \
+	    1 << PTE_PRESENT_SHIFT)
+	lsr x4, x23, #FRAME_WIDTH
+	orr x3, x3, x4, lsl #PTE_OUTPUT_ADDRESS_SHIFT
+	str x3, [x2]
+
+	/* 4th GiB */
+	add x23, x23, x22
+	add x24, x24, x22
+
+	adrp x2, upper_page_table_level1
+	lsr x3, x24, #PTL1_VA_SHIFT
+	and x3, x3, #PTL1_VA_MASK
+	add x2, x2, x3, lsl #PTL_ENTRY_SIZE_SHIFT
+	mov x3, #( \
+	    1 << PTE_ACCESS_SHIFT | \
 	    MAIR_EL1_DEVICE_MEMORY_INDEX << PTE_ATTR_INDEX_SHIFT | \
 	    PTE_L012_TYPE_BLOCK << PTE_TYPE_SHIFT | \
 	    1 << PTE_PRESENT_SHIFT)
-	lsr x4, x20, #FRAME_WIDTH
-	orr x3, x3, x4, lsl #PTE_OUTPUT_ADDRESS_SHIFT
-	str x3, [x2]
+	lsr x4, x23, #FRAME_WIDTH
+	orr x3, x3, x4, lsl #PTE_OUTPUT_ADDRESS_SHIFT
+	str x3, [x2]
+
+	/* Flush the data cache of page tables. */
+	adrp x27, lower_page_table_level0
+	mov x28, #4096
+	dcache_flush x27 x28 x29 x30
+
+	adrp x27, lower_page_table_level1
+	mov x28, #4096
+	dcache_flush x27 x28 x29 x30
+
+	adrp x27, upper_page_table_level0
+	mov x28, #4096
+	dcache_flush x27 x28 x29 x30
+
+	adrp x27, upper_page_table_level1
+	mov x28, #4096
+	dcache_flush x27 x28 x29 x30
 
 	/* Make sure there are not any stale TLB entries. */
@@ -271,8 +359,11 @@
 lower_page_table_level0:
 	.space 4096
+
 lower_page_table_level1:
 	.space 4096
+
 upper_page_table_level0:
 	.space 4096
+
 upper_page_table_level1:
 	.space 4096
