Index: arch/amd64/src/boot/boot.S
===================================================================
--- arch/amd64/src/boot/boot.S	(revision c245372b7a8a5edb74f7f730b4f5c269e3d0548e)
+++ arch/amd64/src/boot/boot.S	(revision db5e25fded450f437b3829aa1a7d37235477e5df)
@@ -27,8 +27,9 @@
 #
 
-.section K_TEXT_START
-.global kernel_image_start
+#include <arch/mm/ptl.h>
 
-.code16
+#define START_STACK     0x7c00	
+#define START_STACK_64  $0xffffffff80007c00
+					
 #
 # This is where we require any SPARTAN-kernel-compatible boot loader
@@ -39,4 +40,7 @@
 # switch to protected mode.
 #
+.section K_TEXT_START
+.code16
+.global kernel_image_start
 kernel_image_start:
 	cli
@@ -44,7 +48,7 @@
 	movw %ax,%ds
 	movw %ax,%ss            # initialize stack segment register
-	movl $0x7c00,%esp	# initialize stack pointer
+	movl START_STACK,%esp	# initialize stack pointer
 	
-	call memmap_arch_init
+#	call memmap_arch_init
 	
 	mov $0x80000000, %eax  
@@ -57,37 +61,98 @@
 	jnc no_long_mode
 
-# Fill out GDTR.base, IDTR.base
-	leal gdtr, %eax
-	movl gdt_addr, %ebx
-	movl %ebx, 2(%eax)
+	# Load gdtr, idtr
+	lgdt gdtr_inst
+	lidt idtr_inst
+	
+	movl %cr0,%eax
+	orl $0x1,%eax
+	movl %eax,%cr0			# switch to protected mode
 
-	movl idt_addr, %ebx
-	leal idtr, %eax
-	movl %ebx, 2(%eax)
-
-# Load gdtr, idtr	
-	lgdt gdtr
-	lidt idtr
-	
-	mov $1, %eax    # Enable protected mode (CR0.PE = 1)
-	mov %eax, %cr0 
-
-	jmpl $8, $now_in_prot
-	
-now_in_prot:
-	
+	jmpl $40, $now_in_prot
 
 no_long_mode:
 1:
 	jmp 1b
+
+# Protected 16-bit. We want to reuse the code-seg descriptor;
+# its default operand size must not be 1 when entering long mode
+now_in_prot:  
+	# Set up stack & data descriptors
+	movw $16, %ax
+	movw %ax, %ds
+	movw %ax, %fs
+	movw %ax, %gs
+	movw %ax, %ss
+
+	# Enable 64-bit page translation entries - CR4.PAE = 1.
+	# Paging is not enabled until after long mode is enabled
+	movl %cr4, %eax
+	btsl $5, %eax
+	movl %eax, %cr4
+
+	# Set up paging tables
+	leal ptl_0, %eax
+	movl %eax, %cr3
+		
+	# Enable long mode
+	movl $0xc0000080, %ecx   # EFER MSR number
+	rdmsr                   # Read EFER
+	btsl $8, %eax            # Set LME=1
+	wrmsr                   # Write EFER
+	
+	# Enable paging to activate long mode (set CR0.PG=1)
+	movl %cr0, %eax
+	btsl $31, %eax
+	movl %eax, %cr0
+	
+	# At this point we are in compatibility mode
+	jmpl $8, $start64
+
+.code64
+start64:
+	movq START_STACK_64, %rsp
+	
+	lidt idtr_inst
+	
+	call main_bsp   # never returns
+1:
+	jmp 1b
 			
 
-.section K_DATA_START	
+.section K_DATA_START
 .align 4096
-page_directory:
-	.space 4096, 0
+.global ptl_2
+ptl_2:	
+	.quad 0x0 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
+	.quad 0x200000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
+	.quad 0x400000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
+	.quad 0x600000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
+	.quad 0x800000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
+	.quad 0xa00000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
+	.quad 0xc00000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
+	.quad 0xe00000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
+	
+.align 4096
+.global ptl_1
+ptl_1:
+	.quad ptl_2 + (PTL_WRITABLE | PTL_PRESENT)
+	.fill 509,8,0
+	.quad ptl_2 + (PTL_WRITABLE | PTL_PRESENT)
+	.fill 2,8,0
+	
+.align 4096
+.global ptl_0
+ptl_0:
+	.quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT)
+	.fill 510,8,0
+	.quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT)
 
-gdt_addr:	
-	.quad gdt + 0x80000000
-idt_addr:	
-	.quad idt + 0x80000000
+.global gdtr_inst				
+gdtr_inst:
+	.word 7*8  # GDT_ITEMS * 8
+	.long gdt + 0x80000000
+
+.global idtr_inst
+idtr_inst:
+	.word 0
+	.long idt + 0x80000000
Index: arch/amd64/src/pm.c
===================================================================
--- arch/amd64/src/pm.c	(revision c245372b7a8a5edb74f7f730b4f5c269e3d0548e)
+++ arch/amd64/src/pm.c	(revision db5e25fded450f437b3829aa1a7d37235477e5df)
@@ -45,9 +45,9 @@
 	  .base_0_15   = 0, 
 	  .base_16_23  = 0, 
-	  .access      = AR_PRESENT | AR_CODE | DPL_KERNEL, 
+	  .access      = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE , 
 	  .limit_16_19 = 0xf, 
 	  .available   = 0, 
 	  .longmode    = 1, 
-	  .special     = 0, 
+	  .special     = 0,
 	  .granularity = 1, 
 	  .base_24_31  = 0 },
@@ -61,5 +61,5 @@
 	  .longmode    = 0, 
 	  .special     = 0, 
-	  .granularity = 0, 
+	  .granularity = 1, 
 	  .base_24_31  = 0 },
 	/* UTEXT descriptor */
@@ -85,4 +85,15 @@
 	  .granularity = 1, 
 	  .base_24_31  = 0 },
+	/* KTEXT 16-bit protected */
+	{ .limit_0_15  = 0xffff, 
+	  .base_0_15   = 0, 
+	  .base_16_23  = 0, 
+	  .access      = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE, 
+	  .limit_16_19 = 0xf, 
+	  .available   = 0, 
+	  .longmode    = 0, 
+	  .special     = 0,
+	  .granularity = 1, 
+	  .base_24_31  = 0 },
 	/* TSS descriptor - set up will be completed later */
 	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
@@ -93,6 +104,4 @@
 static struct tss tss;
 
-/* gdtr is changed by kmp before next CPU is initialized */
-struct ptr_16_32 gdtr __attribute__ ((section ("K_DATA_START"))) = { .limit = sizeof(gdt) };
-//struct ptr_16_32 gdtr __attribute__ ((section ("K_DATA_START"))) = { .limit = sizeof(gdt), .base = KA2PA((__address) gdt) };
-struct ptr_16_32 idtr __attribute__ ((section ("K_DATA_START"))) = { .limit = sizeof(idt) };
+/* Does not compile correctly if it does not exist */
+int __attribute__ ((section ("K_DATA_START"))) __fake;
