Index: kernel/arch/sparc64/src/start.S
===================================================================
--- kernel/arch/sparc64/src/start.S	(revision 92778f2136ec900a873b76c69db8efbdb1998fea)
+++ kernel/arch/sparc64/src/start.S	(revision b84ce45da9a88a07aa08440a60c3520e28ffd2e2)
@@ -78,11 +78,16 @@
 
 	wrpr %g0, NWINDOWS - 2, %cansave	! set maximum saveable windows
-	wrpr %g0, 0, %canrestore		! get rid of windows we will never need again
-	wrpr %g0, 0, %otherwin			! make sure the window state is consistent
-	wrpr %g0, NWINDOWS - 1, %cleanwin	! prevent needless clean_window traps for kernel
-
-	wrpr %g0, 0, %tl			! TL = 0, primary context register is used
-
-	wrpr %g0, PSTATE_PRIV_BIT, %pstate	! Disable interrupts and disable 32-bit address masking.
+	wrpr %g0, 0, %canrestore		! get rid of windows we will
+						! never need again
+	wrpr %g0, 0, %otherwin			! make sure the window state is
+						! consistent
+	wrpr %g0, NWINDOWS - 1, %cleanwin	! prevent needless clean_window
+						! traps for kernel
+
+	wrpr %g0, 0, %tl			! TL = 0, primary context
+						! register is used
+
+	wrpr %g0, PSTATE_PRIV_BIT, %pstate	! disable interrupts and disable
+						! 32-bit address masking
 
 	wrpr %g0, 0, %pil			! intialize %pil
@@ -95,12 +100,10 @@
 
 	/* 
-	 * Take over the DMMU by installing global locked
-	 * TTE entry identically mapping the first 4M
-	 * of memory.
+	 * Take over the DMMU by installing global locked TTE entry identically
+	 * mapping the first 4M of memory.
 	 *
-	 * In case of DMMU, no FLUSH instructions need to be
-	 * issued. Because of that, the old DTLB contents can
-	 * be demapped pretty straightforwardly and without
-	 * causing any traps.
+	 * In case of DMMU, no FLUSH instructions need to be issued. Because of
+	 * that, the old DTLB contents can be demapped pretty straightforwardly
+	 * and without causing any traps.
 	 */
 
@@ -108,5 +111,6 @@
 
 #define SET_TLB_DEMAP_CMD(r1, context_id) \
-	set (TLB_DEMAP_CONTEXT<<TLB_DEMAP_TYPE_SHIFT) | (context_id<<TLB_DEMAP_CONTEXT_SHIFT), %r1
+	set (TLB_DEMAP_CONTEXT << TLB_DEMAP_TYPE_SHIFT) | (context_id << \
+		TLB_DEMAP_CONTEXT_SHIFT), %r1
 	
 	! demap context 0
@@ -116,5 +120,5 @@
 
 #define SET_TLB_TAG(r1, context) \
-	set VMA | (context<<TLB_TAG_ACCESS_CONTEXT_SHIFT), %r1
+	set VMA | (context << TLB_TAG_ACCESS_CONTEXT_SHIFT), %r1
 
 	! write DTLB tag
@@ -145,9 +149,8 @@
 
 	/*
-	 * Because we cannot use global mappings (because we want to
-	 * have separate 64-bit address spaces for both the kernel
-	 * and the userspace), we prepare the identity mapping also in
-	 * context 1. This step is required by the
-	 * code installing the ITLB mapping.
+	 * Because we cannot use global mappings (because we want to have
+	 * separate 64-bit address spaces for both the kernel and the
+	 * userspace), we prepare the identity mapping also in context 1. This
+	 * step is required by the code installing the ITLB mapping.
 	 */
 	! write DTLB tag of context 1 (i.e. MEM_CONTEXT_TEMP)
@@ -162,23 +165,21 @@
 	
 	/*
-	 * Now is time to take over the IMMU.
-	 * Unfortunatelly, it cannot be done as easily as the DMMU,
-	 * because the IMMU is mapping the code it executes.
+	 * Now it is time to take over the IMMU. Unfortunately, it cannot be
+	 * done as easily as the DMMU, because the IMMU is mapping the code it
+	 * executes.
 	 *
-	 * [ Note that brave experiments with disabling the IMMU
-	 * and using the DMMU approach failed after a dozen
-	 * of desparate days with only little success. ]
+	 * [ Note that brave experiments with disabling the IMMU and using the
+	 * DMMU approach failed after a dozen desperate days with only little
+	 * success. ]
 	 *
-	 * The approach used here is inspired from OpenBSD.
-	 * First, the kernel creates IMMU mapping for itself
-	 * in context 1 (MEM_CONTEXT_TEMP) and switches to
-	 * it. Context 0 (MEM_CONTEXT_KERNEL) can be demapped
-	 * afterwards and replaced with the kernel permanent
-	 * mapping. Finally, the kernel switches back to
-	 * context 0 and demaps context 1.
+	 * The approach used here is inspired by OpenBSD. First, the kernel
+	 * creates IMMU mapping for itself in context 1 (MEM_CONTEXT_TEMP) and
+	 * switches to it. Context 0 (MEM_CONTEXT_KERNEL) can be demapped
+	 * afterwards and replaced with the kernel permanent mapping. Finally,
+	 * the kernel switches back to context 0 and demaps context 1.
 	 *
-	 * Moreover, the IMMU requires use of the FLUSH instructions.
-	 * But that is OK because we always use operands with
-	 * addresses already mapped by the taken over DTLB.
+	 * Moreover, the IMMU requires use of the FLUSH instructions. But that
+	 * is OK because we always use operands with addresses already mapped by
+	 * the taken over DTLB.
 	 */
 	
@@ -292,7 +293,6 @@
 #ifdef CONFIG_SMP
 	/*
-	 * Active loop for APs until the BSP picks them up.
-	 * A processor cannot leave the loop until the
-	 * global variable 'waking_up_mid' equals its
+	 * Active loop for APs until the BSP picks them up. A processor cannot
+	 * leave the loop until the global variable 'waking_up_mid' equals its
 	 * MID.
 	 */
@@ -327,13 +327,11 @@
 
 /*
- * Create small stack to be used by the bootstrap processor.
- * It is going to be used only for a very limited period of
- * time, but we switch to it anyway, just to be sure we are
- * properly initialized.
+ * Create small stack to be used by the bootstrap processor. It is going to be
+ * used only for a very limited period of time, but we switch to it anyway,
+ * just to be sure we are properly initialized.
  *
- * What is important is that this piece of memory is covered
- * by the 4M DTLB locked entry and therefore there will be
- * no surprises like deadly combinations of spill trap and
- * and TLB miss on the stack address.
+ * What is important is that this piece of memory is covered by the 4M DTLB
+ * locked entry and therefore there will be no surprises like deadly
+ * combinations of spill trap and TLB miss on the stack address.
  */
 
@@ -355,13 +353,15 @@
 
 /*
- * This variable is used by the fast_data_MMU_miss trap handler.
- * In runtime, it is further modified to reflect the starting address of
- * physical memory.
+ * This variable is used by the fast_data_MMU_miss trap handler. In runtime, it
+ * is further modified to reflect the starting address of physical memory.
  */
 .global kernel_8k_tlb_data_template
 kernel_8k_tlb_data_template:
 #ifdef CONFIG_VIRT_IDX_DCACHE
-	.quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | TTE_CV | TTE_P | TTE_W)
+	.quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | \
+		 TTE_CV | TTE_P | TTE_W)
 #else /* CONFIG_VIRT_IDX_DCACHE */
-	.quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | TTE_P | TTE_W)
+	.quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | \
+		TTE_P | TTE_W)
 #endif /* CONFIG_VIRT_IDX_DCACHE */
+
