Index: kernel/arch/amd64/src/amd64.c
===================================================================
--- kernel/arch/amd64/src/amd64.c	(revision 22a28a696141d62f28e48ed72d1d255ff519795c)
+++ kernel/arch/amd64/src/amd64.c	(revision c5da138d8b8cc6946884f1cfd5912b1afb9fa5d6)
@@ -122,5 +122,5 @@
 	/* Enable FPU */
 	cpu_setup_fpu();
-
+	
 	/* Initialize segmentation */
 	pm_init();
@@ -132,5 +132,5 @@
 	/* Disable alignment check */
 	clean_AM_flag();
-
+	
 	if (config.cpu_active == 1) {
 		interrupt_init();
@@ -260,4 +260,5 @@
 	THREAD->arch.tls = addr;
 	write_msr(AMD_MSR_FS, addr);
+	
 	return 0;
 }
Index: kernel/arch/amd64/src/asm_utils.S
===================================================================
--- kernel/arch/amd64/src/asm_utils.S	(revision 22a28a696141d62f28e48ed72d1d255ff519795c)
+++ kernel/arch/amd64/src/asm_utils.S	(revision c5da138d8b8cc6946884f1cfd5912b1afb9fa5d6)
@@ -27,28 +27,28 @@
 #
 
-#define IREGISTER_SPACE	80
-
-#define IOFFSET_RAX	0x0
-#define IOFFSET_RCX	0x8
-#define IOFFSET_RDX	0x10
-#define IOFFSET_RSI	0x18
-#define IOFFSET_RDI	0x20
-#define IOFFSET_R8	0x28
-#define IOFFSET_R9	0x30
-#define IOFFSET_R10	0x38
-#define IOFFSET_R11	0x40
-#define IOFFSET_RBP	0x48
-
-#  Mask for interrupts 0 - 31 (bits 0 - 31) where 0 means that int has no error word
-# and 1 means interrupt with error word
-#define ERROR_WORD_INTERRUPT_LIST 0x00027D00
+#define IREGISTER_SPACE  80
+
+#define IOFFSET_RAX  0x00
+#define IOFFSET_RCX  0x08
+#define IOFFSET_RDX  0x10
+#define IOFFSET_RSI  0x18
+#define IOFFSET_RDI  0x20
+#define IOFFSET_R8   0x28
+#define IOFFSET_R9   0x30
+#define IOFFSET_R10  0x38
+#define IOFFSET_R11  0x40
+#define IOFFSET_RBP  0x48
+
+# Mask for interrupts 0 - 31 (bits 0 - 31) where 0 means that int
+# has no error word  and 1 means interrupt with error word
+
+#define ERROR_WORD_INTERRUPT_LIST  0x00027D00
 
 #include <arch/pm.h>
 #include <arch/mm/page.h>
-	
+
 .text
 .global interrupt_handlers
 .global syscall_entry
-
 .global cpuid
 .global has_cpuid
@@ -71,7 +71,7 @@
 	jmp _memsetw
 
-#define MEMCPY_DST	%rdi
-#define MEMCPY_SRC	%rsi
-#define MEMCPY_SIZE	%rdx
+#define MEMCPY_DST   %rdi
+#define MEMCPY_SRC   %rsi
+#define MEMCPY_SIZE  %rdx
 
 /**
@@ -84,9 +84,10 @@
  * or copy_to_uspace().
  *
- * @param MEMCPY_DST	Destination address.
- * @param MEMCPY_SRC	Source address.
- * @param MEMCPY_SIZE	Number of bytes to copy.
+ * @param MEMCPY_DST  Destination address.
+ * @param MEMCPY_SRC  Source address.
+ * @param MEMCPY_SIZE Number of bytes to copy.
  *
  * @retrun MEMCPY_DST on success, 0 on failure.
+ *
  */
 memcpy:
@@ -94,22 +95,22 @@
 memcpy_to_uspace:
 	movq MEMCPY_DST, %rax
-
+	
 	movq MEMCPY_SIZE, %rcx
-	shrq $3, %rcx			/* size / 8 */
-	
-	rep movsq			/* copy as much as possible word by word */
-
+	shrq $3, %rcx           /* size / 8 */
+	
+	rep movsq               /* copy as much as possible word by word */
+	
 	movq MEMCPY_SIZE, %rcx
-	andq $7, %rcx			/* size % 8 */
+	andq $7, %rcx           /* size % 8 */
 	jz 0f
 	
-	rep movsb			/* copy the rest byte by byte */
-	
-0:
-	ret				/* return MEMCPY_SRC, success */
+	rep movsb               /* copy the rest byte by byte */
+	
+	0:
+		ret                 /* return MEMCPY_DST, success */
 
 memcpy_from_uspace_failover_address:
 memcpy_to_uspace_failover_address:
-	xorq %rax, %rax			/* return 0, failure */
+	xorq %rax, %rax         /* return 0, failure */
 	ret
 
@@ -119,28 +120,28 @@
 #
 has_cpuid:
-	pushfq			# store flags
-	popq %rax		# read flags
-	movq %rax,%rdx		# copy flags
-	btcl $21,%edx		# swap the ID bit
+	pushfq                 # store flags
+	popq %rax              # read flags
+	movq %rax, %rdx        # copy flags
+	btcl $21, %edx         # swap the ID bit
 	pushq %rdx
-	popfq			# propagate the change into flags
+	popfq                  # propagate the change into flags
 	pushfq
-	popq %rdx		# read flags	
-	andl $(1<<21),%eax	# interested only in ID bit
-	andl $(1<<21),%edx
-	xorl %edx,%eax		# 0 if not supported, 1 if supported
+	popq %rdx              # read flags
+	andl $(1 << 21), %eax  # interested only in ID bit
+	andl $(1 << 21), %edx
+	xorl %edx, %eax        # 0 if not supported, 1 if supported
 	ret
 
 cpuid:
-	movq %rbx, %r10  # we have to preserve rbx across function calls
-
-	movl %edi,%eax	# load the command into %eax
-
-	cpuid	
-	movl %eax,0(%rsi)
-	movl %ebx,4(%rsi)
-	movl %ecx,8(%rsi)
-	movl %edx,12(%rsi)
-
+	movq %rbx, %r10        # we have to preserve rbx across function calls
+	
+	movl %edi,%eax         # load the command into %eax
+	
+	cpuid
+	movl %eax, 0(%rsi)
+	movl %ebx, 4(%rsi)
+	movl %ecx, 8(%rsi)
+	movl %edx, 12(%rsi)
+	
 	movq %r10, %rbx
 	ret
@@ -152,9 +153,9 @@
 	wrmsr
 	ret
-	
+
 read_efer_flag:	
 	movq $0xc0000080, %rcx
 	rdmsr
-	ret 		
+	ret
 
 # Push all volatile general purpose registers on stack
@@ -185,6 +186,6 @@
 .endm
 
-#define INTERRUPT_ALIGN 128
-	
+#define INTERRUPT_ALIGN  128
+
 ## Declare interrupt handlers
 #
@@ -195,5 +196,5 @@
 #
 .macro handler i n
-
+	
 	/*
 	 * Choose between version with error code and version without error
@@ -204,5 +205,5 @@
 	 * Therefore we align the interrupt handlers.
 	 */
-
+	
 	.iflt \i-32
 		.if (1 << \i) & ERROR_WORD_INTERRUPT_LIST
@@ -215,5 +216,5 @@
 			 * Version without error word,
 			 */
-			subq $(IREGISTER_SPACE+8), %rsp
+			subq $(IREGISTER_SPACE + 8), %rsp
 		.endif
 	.else
@@ -221,7 +222,7 @@
 		 * Version without error word,
 		 */
-		subq $(IREGISTER_SPACE+8), %rsp
-	.endif	
-
+		subq $(IREGISTER_SPACE + 8), %rsp
+	.endif
+	
 	save_all_gpr
 	cld
@@ -241,10 +242,10 @@
 	restore_all_gpr
 	# $8 = Skip error word
-	addq $(IREGISTER_SPACE+8), %rsp
+	addq $(IREGISTER_SPACE + 8), %rsp
 	iretq
-
+	
 	.align INTERRUPT_ALIGN
-	.if (\n-\i)-1
-	handler "(\i+1)",\n
+	.if (\n - \i) - 1
+		handler "(\i + 1)", \n
 	.endif
 .endm
@@ -252,35 +253,35 @@
 .align INTERRUPT_ALIGN
 interrupt_handlers:
-h_start:
-	handler 0 IDT_ITEMS
-h_end:
+	h_start:
+		handler 0 IDT_ITEMS
+	h_end:
 
 ## Low-level syscall handler
-# 
+#
 # Registers on entry:
 #
-# @param rcx		Userspace return address.
-# @param r11		Userspace RLFAGS.
-#
-# @param rax		Syscall number.
-# @param rdi		1st syscall argument.
-# @param rsi		2nd syscall argument.
-# @param rdx		3rd syscall argument.
-# @param r10		4th syscall argument. Used instead of RCX because the
-#			SYSCALL instruction clobbers it.
-# @param r8		5th syscall argument.
-# @param r9		6th syscall argument.
-#
-# @return		Return value is in rax.
+# @param rcx Userspace return address.
+# @param r11 Userspace RFLAGS.
+#
+# @param rax Syscall number.
+# @param rdi 1st syscall argument.
+# @param rsi 2nd syscall argument.
+# @param rdx 3rd syscall argument.
+# @param r10 4th syscall argument. Used instead of RCX because
+#            the SYSCALL instruction clobbers it.
+# @param r8  5th syscall argument.
+# @param r9  6th syscall argument.
+#
+# @return Return value is in rax.
 #
 syscall_entry:
-	swapgs			# Switch to hidden gs	
-	# 
-	# %gs:0			Scratch space for this thread's user RSP
-	# %gs:8			Address to be used as this thread's kernel RSP
+	swapgs            # Switch to hidden gs
 	#
-	movq %rsp, %gs:0	# Save this thread's user RSP
-	movq %gs:8, %rsp	# Set this thread's kernel RSP
-	swapgs			# Switch back to remain consistent
+	# %gs:0 Scratch space for this thread's user RSP
+	# %gs:8 Address to be used as this thread's kernel RSP
+	#
+	movq %rsp, %gs:0  # Save this thread's user RSP
+	movq %gs:8, %rsp  # Set this thread's kernel RSP
+	swapgs            # Switch back to remain consistent
 	sti
 	
@@ -299,10 +300,10 @@
 	popq %r11
 	popq %rcx
-
+	
 	cli
 	swapgs
-	movq %gs:0, %rsp	# Restore the user RSP
+	movq %gs:0, %rsp  # Restore the user RSP
 	swapgs
-
+	
 	sysretq
 
@@ -310,3 +311,3 @@
 .global interrupt_handler_size
 
-interrupt_handler_size: .quad (h_end-h_start)/IDT_ITEMS
+interrupt_handler_size: .quad (h_end - h_start) / IDT_ITEMS
Index: kernel/arch/amd64/src/boot/boot.S
===================================================================
--- kernel/arch/amd64/src/boot/boot.S	(revision 22a28a696141d62f28e48ed72d1d255ff519795c)
+++ kernel/arch/amd64/src/boot/boot.S	(revision c5da138d8b8cc6946884f1cfd5912b1afb9fa5d6)
@@ -31,5 +31,5 @@
 #include <arch/boot/boot.h>
 #include <arch/boot/memmap.h>
-#include <arch/mm/page.h>	
+#include <arch/mm/page.h>
 #include <arch/mm/ptl.h>
 #include <arch/pm.h>
@@ -172,11 +172,14 @@
 	xorq %rsi, %rsi
 	movl grub_ebx, %esi
-	call arch_pre_main
+	
+	movabsq $arch_pre_main, %rax
+	callq *%rax
 	
 	# create the first stack frame
 	pushq $0
 	movq %rsp, %rbp
-
-	call main_bsp
+	
+	movabsq $main_bsp, %rax
+	call *%rax
 	
 	# not reached
@@ -256,57 +259,73 @@
 #
 # Macro for generating initial page table contents.
-# @param cnt Number of entries to generat. Must be multiple of 8.
+# @param cnt Number of entries to generate. Must be multiple of 8.
 # @param g   Number of GB that will be added to the mapping.
 #
-.macro ptl2gen cnt g 
-.if \cnt
-	ptl2gen "\cnt - 8" \g 
-	.quad ((\cnt - 8) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
-	.quad ((\cnt - 7) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
-	.quad ((\cnt - 6) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
-	.quad ((\cnt - 5) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
-	.quad ((\cnt - 4) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
-	.quad ((\cnt - 3) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
-	.quad ((\cnt - 2) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
-	.quad ((\cnt - 1) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
-.endif
+.macro ptl2gen cnt g
+	.if \cnt
+		ptl2gen "\cnt - 8" \g
+		.quad ((\cnt - 8) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
+		.quad ((\cnt - 7) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
+		.quad ((\cnt - 6) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
+		.quad ((\cnt - 5) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
+		.quad ((\cnt - 4) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
+		.quad ((\cnt - 3) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
+		.quad ((\cnt - 2) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
+		.quad ((\cnt - 1) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
+	.endif
 .endm
 
-# Page table for pages in the first gigabyte.
-.align 4096
-.global ptl_2_0g
-ptl_2_0g:	
+# Page table for pages in the 1st gigabyte.
+.align 4096
+ptl_2_0g:
 	ptl2gen 512 0
 
-# Page table for pages in the second gigabyte.
-.align 4096
-.global ptl_2_1g
+# Page table for pages in the 2nd gigabyte.
+.align 4096
 ptl_2_1g:
 	ptl2gen 512 1
 
-# Page table for pages in the third gigabyte.
-.align 4096
-.global ptl_2_2g
+# Page table for pages in the 3rd gigabyte.
+.align 4096
 ptl_2_2g:
 	ptl2gen 512 2
 
-# Page table for pages in the fourth gigabyte.
-.align 4096
-.global ptl_2_3g
+# Page table for pages in the 4th gigabyte.
+.align 4096
 ptl_2_3g:
 	ptl2gen 512 3
 
-.align 4096
-.global ptl_1
+# Page table for pages in the 5th gigabyte.
+.align 4096
+ptl_2_4g:
+	ptl2gen 512 4
+
+# Page table for pages in the 6th gigabyte.
+.align 4096
+ptl_2_5g:
+	ptl2gen 512 5
+
+# Page table for pages in the 7th gigabyte.
+.align 4096
+ptl_2_6g:
+	ptl2gen 512 6
+
+# Page table for pages in the 8th gigabyte.
+.align 4096
+ptl_2_7g:
+	ptl2gen 512 7
+
+.align 4096
 ptl_1:
-	# Identity mapping for [0; 4G)
+	# Identity mapping for [0; 8G)
 	.quad ptl_2_0g + (PTL_WRITABLE | PTL_PRESENT)
-	.quad ptl_2_1g + (PTL_WRITABLE | PTL_PRESENT) 
+	.quad ptl_2_1g + (PTL_WRITABLE | PTL_PRESENT)
 	.quad ptl_2_2g + (PTL_WRITABLE | PTL_PRESENT)
 	.quad ptl_2_3g + (PTL_WRITABLE | PTL_PRESENT)
-	.fill 506, 8, 0
-	# Mapping of [0; 1G) at -2G
-	.quad ptl_2_0g + (PTL_WRITABLE | PTL_PRESENT)
-	.fill 1, 8, 0
+	.quad ptl_2_4g + (PTL_WRITABLE | PTL_PRESENT)
+	.quad ptl_2_5g + (PTL_WRITABLE | PTL_PRESENT)
+	.quad ptl_2_6g + (PTL_WRITABLE | PTL_PRESENT)
+	.quad ptl_2_7g + (PTL_WRITABLE | PTL_PRESENT)
+	.fill 504, 8, 0
 
 .align 4096
@@ -314,8 +333,7 @@
 ptl_0:
 	.quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT)
-	.fill 255,8,0
+	.fill 255, 8, 0
 	.quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT)
-	.fill 254,8,0
-	.quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT)
+	.fill 255, 8, 0
 
 .section K_DATA_START, "aw", @progbits
Index: kernel/arch/amd64/src/context.S
===================================================================
--- kernel/arch/amd64/src/context.S	(revision 22a28a696141d62f28e48ed72d1d255ff519795c)
+++ kernel/arch/amd64/src/context.S	(revision c5da138d8b8cc6946884f1cfd5912b1afb9fa5d6)
@@ -41,9 +41,9 @@
 context_save_arch:
 	movq (%rsp), %rdx     # the caller's return %eip
-
-	# In %edi is passed 1st argument
-	CONTEXT_SAVE_ARCH_CORE %rdi %rdx 
 	
-	xorq %rax,%rax		# context_save returns 1
+	# 1st argument passed in %edi
+	CONTEXT_SAVE_ARCH_CORE %rdi %rdx
+	
+	xorq %rax, %rax       # context_save returns 1
 	incq %rax
 	ret
@@ -55,10 +55,9 @@
 # pointed by the 1st argument. Returns 0 in EAX.
 #
-context_restore_arch:	
-
+context_restore_arch:
 	CONTEXT_RESTORE_ARCH_CORE %rdi %rdx
-
-	movq %rdx,(%rsp)
-
-	xorq %rax,%rax		# context_restore returns 0
+	
+	movq %rdx, (%rsp)
+	
+	xorq %rax, %rax       # context_restore returns 0
 	ret
Index: kernel/arch/amd64/src/cpu/cpu.c
===================================================================
--- kernel/arch/amd64/src/cpu/cpu.c	(revision 22a28a696141d62f28e48ed72d1d255ff519795c)
+++ kernel/arch/amd64/src/cpu/cpu.c	(revision c5da138d8b8cc6946884f1cfd5912b1afb9fa5d6)
@@ -47,11 +47,11 @@
  * Contains only non-MP-Specification specific SMP code.
  */
-#define AMD_CPUID_EBX	0x68747541
-#define AMD_CPUID_ECX 	0x444d4163
-#define AMD_CPUID_EDX 	0x69746e65
+#define AMD_CPUID_EBX  0x68747541
+#define AMD_CPUID_ECX  0x444d4163
+#define AMD_CPUID_EDX  0x69746e65
 
-#define INTEL_CPUID_EBX	0x756e6547
-#define INTEL_CPUID_ECX 0x6c65746e
-#define INTEL_CPUID_EDX 0x49656e69
+#define INTEL_CPUID_EBX  0x756e6547
+#define INTEL_CPUID_ECX  0x6c65746e
+#define INTEL_CPUID_EDX  0x49656e69
 
 
@@ -127,31 +127,31 @@
 {
 	cpu_info_t info;
-
+	
 	CPU->arch.vendor = VendorUnknown;
 	if (has_cpuid()) {
 		cpuid(INTEL_CPUID_LEVEL, &info);
-
+		
 		/*
 		 * Check for AMD processor.
 		 */
-		if (info.cpuid_ebx == AMD_CPUID_EBX &&
-		    info.cpuid_ecx == AMD_CPUID_ECX &&
-		    info.cpuid_edx == AMD_CPUID_EDX) {
+		if ((info.cpuid_ebx == AMD_CPUID_EBX) &&
+		    (info.cpuid_ecx == AMD_CPUID_ECX) &&
+		    (info.cpuid_edx == AMD_CPUID_EDX)) {
 			CPU->arch.vendor = VendorAMD;
 		}
-
+		
 		/*
 		 * Check for Intel processor.
-		 */		
-		if (info.cpuid_ebx == INTEL_CPUID_EBX &&
-		    info.cpuid_ecx == INTEL_CPUID_ECX &&
-		    info.cpuid_edx == INTEL_CPUID_EDX) {
+		 */
+		if ((info.cpuid_ebx == INTEL_CPUID_EBX) &&
+		    (info.cpuid_ecx == INTEL_CPUID_ECX) &&
+		    (info.cpuid_edx == INTEL_CPUID_EDX)) {
 			CPU->arch.vendor = VendorIntel;
 		}
-				
+		
 		cpuid(INTEL_CPUID_STANDARD, &info);
 		CPU->arch.family = (info.cpuid_eax >> 8) & 0xf;
 		CPU->arch.model = (info.cpuid_eax >> 4) & 0xf;
-		CPU->arch.stepping = (info.cpuid_eax >> 0) & 0xf;						
+		CPU->arch.stepping = (info.cpuid_eax >> 0) & 0xf;
 	}
 }
Index: kernel/arch/amd64/src/debug/stacktrace.c
===================================================================
--- kernel/arch/amd64/src/debug/stacktrace.c	(revision 22a28a696141d62f28e48ed72d1d255ff519795c)
+++ kernel/arch/amd64/src/debug/stacktrace.c	(revision c5da138d8b8cc6946884f1cfd5912b1afb9fa5d6)
@@ -37,6 +37,6 @@
 #include <typedefs.h>
 
-#define FRAME_OFFSET_FP_PREV	0
-#define FRAME_OFFSET_RA		1
+#define FRAME_OFFSET_FP_PREV  0
+#define FRAME_OFFSET_RA       1
 
 bool kernel_frame_pointer_validate(uintptr_t fp)
@@ -49,4 +49,5 @@
 	uint64_t *stack = (void *) fp;
 	*prev = stack[FRAME_OFFSET_FP_PREV];
+	
 	return true;
 }
@@ -56,4 +57,5 @@
 	uint64_t *stack = (void *) fp;
 	*ra = stack[FRAME_OFFSET_RA];
+	
 	return true;
 }
Index: kernel/arch/amd64/src/delay.S
===================================================================
--- kernel/arch/amd64/src/delay.S	(revision 22a28a696141d62f28e48ed72d1d255ff519795c)
+++ kernel/arch/amd64/src/delay.S	(revision c5da138d8b8cc6946884f1cfd5912b1afb9fa5d6)
@@ -37,10 +37,14 @@
 
 asm_delay_loop:
-0:	dec %rdi
-	jnz 0b
+	0:
+		dec %rdi
+		jnz 0b
+	
 	ret
 
 asm_fake_loop:
-0:	dec %rdi
-	jz 0b
+	0:
+		dec %rdi
+		jz 0b
+	
 	ret
Index: kernel/arch/amd64/src/fpu_context.c
===================================================================
--- kernel/arch/amd64/src/fpu_context.c	(revision 22a28a696141d62f28e48ed72d1d255ff519795c)
+++ kernel/arch/amd64/src/fpu_context.c	(revision c5da138d8b8cc6946884f1cfd5912b1afb9fa5d6)
@@ -27,5 +27,5 @@
  */
 
-/** @addtogroup amd64	
+/** @addtogroup amd64
  * @{
  */
Index: kernel/arch/amd64/src/interrupt.c
===================================================================
--- kernel/arch/amd64/src/interrupt.c	(revision 22a28a696141d62f28e48ed72d1d255ff519795c)
+++ kernel/arch/amd64/src/interrupt.c	(revision c5da138d8b8cc6946884f1cfd5912b1afb9fa5d6)
@@ -202,5 +202,4 @@
 	exc_register(12, "ss_fault", true, (iroutine_t) ss_fault);
 	exc_register(13, "gp_fault", true, (iroutine_t) gp_fault);
-	exc_register(14, "ident_mapper", true, (iroutine_t) ident_page_fault);
 	
 #ifdef CONFIG_SMP
Index: kernel/arch/amd64/src/mm/page.c
===================================================================
--- kernel/arch/amd64/src/mm/page.c	(revision 22a28a696141d62f28e48ed72d1d255ff519795c)
+++ kernel/arch/amd64/src/mm/page.c	(revision c5da138d8b8cc6946884f1cfd5912b1afb9fa5d6)
@@ -39,5 +39,4 @@
 #include <mm/frame.h>
 #include <mm/as.h>
-#include <arch/interrupt.h>
 #include <arch/asm.h>
 #include <config.h>
@@ -48,72 +47,23 @@
 #include <align.h>
 
-/* Definitions for identity page mapper */
-pte_t helper_ptl1[512] __attribute__((aligned (PAGE_SIZE)));
-pte_t helper_ptl2[512] __attribute__((aligned (PAGE_SIZE)));
-pte_t helper_ptl3[512] __attribute__((aligned (PAGE_SIZE)));
-extern pte_t ptl_0; /* From boot.S */
-
-#define PTL1_PRESENT(ptl0, page) (!(GET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))
-#define PTL2_PRESENT(ptl1, page) (!(GET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))
-#define PTL3_PRESENT(ptl2, page) (!(GET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))
-
-#define PTL1_ADDR(ptl0, page) ((pte_t *)PA2KA(GET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page))))
-#define PTL2_ADDR(ptl1, page) ((pte_t *)PA2KA(GET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page))))
-#define PTL3_ADDR(ptl2, page) ((pte_t *)PA2KA(GET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page))))
-
-#define SETUP_PTL1(ptl0, page, tgt)  {	\
-	SET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
-        SET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
-    }
-#define SETUP_PTL2(ptl1, page, tgt)  {	\
-	SET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
-        SET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
-    }
-#define SETUP_PTL3(ptl2, page, tgt)  {	\
-	SET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
-        SET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
-    }
-#define SETUP_FRAME(ptl3, page, tgt)  {	\
-	SET_FRAME_ADDRESS_ARCH(ptl3, PTL3_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
-        SET_FRAME_FLAGS_ARCH(ptl3, PTL3_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
-    }
-
-
 void page_arch_init(void)
 {
-	uintptr_t cur;
-	unsigned int i;
-	int identity_flags = PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE;
-
 	if (config.cpu_active == 1) {
+		uintptr_t cur;
+		unsigned int identity_flags =
+		    PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE;
+		
 		page_mapping_operations = &pt_mapping_operations;
-
+		
 		page_table_lock(AS_KERNEL, true);
-
+		
 		/*
 		 * PA2KA(identity) mapping for all frames.
 		 */
-		for (cur = 0; cur < last_frame; cur += FRAME_SIZE) {
-			/* Standard identity mapping */
+		for (cur = 0; cur < last_frame; cur += FRAME_SIZE)
 			page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, identity_flags);
-		}
 		
-		/* Upper kernel mapping
-		 * - from zero to top of kernel (include bottom addresses
-		 *   because some are needed for init)
-		 */
-		for (cur = PA2KA_CODE(0); cur < config.base + config.kernel_size; cur += FRAME_SIZE)
-			page_mapping_insert(AS_KERNEL, cur, KA2PA(cur), identity_flags);
+		page_table_unlock(AS_KERNEL, true);
 		
-		for (cur = config.stack_base; cur < config.stack_base + config.stack_size; cur += FRAME_SIZE)
-			page_mapping_insert(AS_KERNEL, cur, KA2PA(cur), identity_flags);
-		
-		for (i = 0; i < init.cnt; i++) {
-			for (cur = init.tasks[i].addr; cur < init.tasks[i].addr + init.tasks[i].size; cur += FRAME_SIZE)
-				page_mapping_insert(AS_KERNEL, PA2KA_CODE(KA2PA(cur)), KA2PA(cur), identity_flags);
-		}
-
-		page_table_unlock(AS_KERNEL, true);
-
 		exc_register(14, "page_fault", true, (iroutine_t) page_fault);
 		write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
@@ -122,68 +72,12 @@
 }
 
-
-/** Identity page mapper
- *
- * We need to map whole physical memory identically before the page subsystem
- * is initializaed. This thing clears page table and fills in the specific
- * items.
- */
-void ident_page_fault(unsigned int n, istate_t *istate)
-{
-	uintptr_t page;
-	static uintptr_t oldpage = 0;
-	pte_t *aptl_1, *aptl_2, *aptl_3;
-
-	page = read_cr2();
-	if (oldpage) {
-		/* Unmap old address */
-		aptl_1 = PTL1_ADDR(&ptl_0, oldpage);
-		aptl_2 = PTL2_ADDR(aptl_1, oldpage);
-		aptl_3 = PTL3_ADDR(aptl_2, oldpage);
-
-		SET_FRAME_FLAGS_ARCH(aptl_3, PTL3_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
-		if (KA2PA(aptl_3) == KA2PA(helper_ptl3))
-			SET_PTL3_FLAGS_ARCH(aptl_2, PTL2_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
-		if (KA2PA(aptl_2) == KA2PA(helper_ptl2))
-			SET_PTL2_FLAGS_ARCH(aptl_1, PTL1_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
-		if (KA2PA(aptl_1) == KA2PA(helper_ptl1))
-			SET_PTL1_FLAGS_ARCH(&ptl_0, PTL0_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
-	}
-	if (PTL1_PRESENT(&ptl_0, page))
-		aptl_1 = PTL1_ADDR(&ptl_0, page);
-	else {
-		SETUP_PTL1(&ptl_0, page, helper_ptl1);
-		aptl_1 = helper_ptl1;
-	}
-	    
-	if (PTL2_PRESENT(aptl_1, page)) 
-		aptl_2 = PTL2_ADDR(aptl_1, page);
-	else {
-		SETUP_PTL2(aptl_1, page, helper_ptl2);
-		aptl_2 = helper_ptl2;
-	}
-
-	if (PTL3_PRESENT(aptl_2, page))
-		aptl_3 = PTL3_ADDR(aptl_2, page);
-	else {
-		SETUP_PTL3(aptl_2, page, helper_ptl3);
-		aptl_3 = helper_ptl3;
-	}
-	
-	SETUP_FRAME(aptl_3, page, page);
-
-	oldpage = page;
-}
-
-
 void page_fault(unsigned int n, istate_t *istate)
 {
-	uintptr_t page;
-	pf_access_t access;
-	
-	page = read_cr2();
+	uintptr_t page = read_cr2();
 	
 	if (istate->error_word & PFERR_CODE_RSVD)
 		panic("Reserved bit set in page table entry.");
+	
+	pf_access_t access;
 	
 	if (istate->error_word & PFERR_CODE_RW)
@@ -200,17 +94,18 @@
 }
 
-
 uintptr_t hw_map(uintptr_t physaddr, size_t size)
 {
 	if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
-		panic("Unable to map physical memory %p (%d bytes).", physaddr,
+		panic("Unable to map physical memory %p (%" PRIs " bytes).", physaddr,
 		    size);
 	
 	uintptr_t virtaddr = PA2KA(last_frame);
 	pfn_t i;
-
+	
 	page_table_lock(AS_KERNEL, true);
+	
 	for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
 		page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE);
+	
 	page_table_unlock(AS_KERNEL, true);
 	
Index: kernel/arch/amd64/src/proc/scheduler.c
===================================================================
--- kernel/arch/amd64/src/proc/scheduler.c	(revision 22a28a696141d62f28e48ed72d1d255ff519795c)
+++ kernel/arch/amd64/src/proc/scheduler.c	(revision c5da138d8b8cc6946884f1cfd5912b1afb9fa5d6)
@@ -38,5 +38,5 @@
 #include <proc/thread.h>
 #include <arch.h>
-#include <arch/context.h>	/* SP_DELTA */
+#include <arch/context.h>
 #include <arch/asm.h>
 #include <print.h>
@@ -58,12 +58,12 @@
 	CPU->arch.tss->rsp0 =
 	    (uintptr_t) &THREAD->kstack[THREAD_STACK_SIZE - SP_DELTA];
-
+	
 	/*
 	 * Syscall support.
 	 */
 	swapgs();
-	write_msr(AMD_MSR_GS, (uintptr_t)THREAD->arch.syscall_rsp);
+	write_msr(AMD_MSR_GS, (uintptr_t) THREAD->arch.syscall_rsp);
 	swapgs();
-
+	
 	/* TLS support - set FS to thread local storage */
 	write_msr(AMD_MSR_FS, THREAD->arch.tls);
Index: kernel/arch/amd64/src/proc/task.c
===================================================================
--- kernel/arch/amd64/src/proc/task.c	(revision 22a28a696141d62f28e48ed72d1d255ff519795c)
+++ kernel/arch/amd64/src/proc/task.c	(revision c5da138d8b8cc6946884f1cfd5912b1afb9fa5d6)
@@ -39,20 +39,22 @@
 /** Perform amd64 specific task initialization.
  *
- * @param t Task to be initialized.
+ * @param task Task to be initialized.
+ *
  */
-void task_create_arch(task_t *t)
+void task_create_arch(task_t *task)
 {
-	t->arch.iomapver = 0;
-	bitmap_initialize(&t->arch.iomap, NULL, 0);
+	task->arch.iomapver = 0;
+	bitmap_initialize(&task->arch.iomap, NULL, 0);
 }
 
 /** Perform amd64 specific task destruction.
  *
- * @param t Task to be initialized.
+ * @param task Task to be initialized.
+ *
  */
-void task_destroy_arch(task_t *t)
+void task_destroy_arch(task_t *task)
 {
-	if (t->arch.iomap.map)
-		free(t->arch.iomap.map);
+	if (task->arch.iomap.map)
+		free(task->arch.iomap.map);
 }
 
Index: kernel/arch/amd64/src/proc/thread.c
===================================================================
--- kernel/arch/amd64/src/proc/thread.c	(revision 22a28a696141d62f28e48ed72d1d255ff519795c)
+++ kernel/arch/amd64/src/proc/thread.c	(revision c5da138d8b8cc6946884f1cfd5912b1afb9fa5d6)
@@ -37,15 +37,17 @@
 /** Perform amd64 specific thread initialization.
  *
- * @param t Thread to be initialized.
+ * @param thread Thread to be initialized.
+ *
  */
-void thread_create_arch(thread_t *t)
+void thread_create_arch(thread_t *thread)
 {
-	t->arch.tls = 0;
-	t->arch.syscall_rsp[SYSCALL_USTACK_RSP] = 0;
+	thread->arch.tls = 0;
+	thread->arch.syscall_rsp[SYSCALL_USTACK_RSP] = 0;
+	
 	/*
 	 * Kernel RSP can be precalculated at thread creation time.
 	 */
-	t->arch.syscall_rsp[SYSCALL_KSTACK_RSP] =
-	    (uintptr_t) &t->kstack[PAGE_SIZE - sizeof(uint64_t)];
+	thread->arch.syscall_rsp[SYSCALL_KSTACK_RSP] =
+	    (uintptr_t) &thread->kstack[PAGE_SIZE - sizeof(uint64_t)];
 }
 
Index: kernel/arch/amd64/src/smp/ap.S
===================================================================
--- kernel/arch/amd64/src/smp/ap.S	(revision 22a28a696141d62f28e48ed72d1d255ff519795c)
+++ kernel/arch/amd64/src/smp/ap.S	(revision c5da138d8b8cc6946884f1cfd5912b1afb9fa5d6)
@@ -55,12 +55,12 @@
 	xorw %ax, %ax
 	movw %ax, %ds
-
-	lgdtl ap_gdtr		# initialize Global Descriptor Table register
+	
+	lgdtl ap_gdtr       # initialize Global Descriptor Table register
 	
 	movl %cr0, %eax
 	orl $1, %eax
-	movl %eax, %cr0		# switch to protected mode
+	movl %eax, %cr0     # switch to protected mode
 	jmpl $gdtselector(KTEXT32_DES), $jump_to_kernel - BOOT_OFFSET + AP_BOOT_OFFSET
-	
+
 jump_to_kernel:
 .code32
@@ -72,5 +72,5 @@
 	movw %ax, %gs
 	
-	# Enable 64-bit page transaltion entries - CR4.PAE = 1.
+	# Enable 64-bit page translation entries (CR4.PAE = 1).
 	# Paging is not enabled until after long mode is enabled
 	
@@ -78,15 +78,15 @@
 	btsl $5, %eax
 	movl %eax, %cr4
-
+	
 	leal ptl_0, %eax
 	movl %eax, %cr3
 	
 	# Enable long mode
-	movl $EFER_MSR_NUM, %ecx	# EFER MSR number
-	rdmsr				# Read EFER
-	btsl $AMD_LME_FLAG, %eax	# Set LME=1
-	wrmsr				# Write EFER
+	movl $EFER_MSR_NUM, %ecx  # EFER MSR number
+	rdmsr                     # Read EFER
+	btsl $AMD_LME_FLAG, %eax  # Set LME=1
+	wrmsr                     # Write EFER
 	
-	# Enable paging to activate long mode (set CR0.PG=1)
+	# Enable paging to activate long mode (set CR0.PG = 1)
 	movl %cr0, %eax
 	btsl $31, %eax
@@ -98,8 +98,12 @@
 .code64
 start64:
-	movq (ctx), %rsp
+	movabsq $ctx, %rsp
+	movq (%rsp), %rsp
+	
 	pushq $0
 	movq %rsp, %rbp
-	call main_ap - AP_BOOT_OFFSET + BOOT_OFFSET   # never returns
+	
+	movabsq $main_ap, %rax
+	callq *%rax   # never returns
 
 #endif /* CONFIG_SMP */
