source: mainline/kernel/arch/sparc64/src/start.S@ 2bc137c2

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 2bc137c2 was 79f119b9, checked in by Jakub Jermar <jakub@…>, 19 years ago

Modify the sparc64 startup code to not cause MMU traps before it takes over the TLB and
the trap table. Fix several PA2KA and KA2PA omissions or errors. Fix configuration to pass
the DEFS variable along.

  • Property mode set to 100644
File size: 9.6 KB
Line 
1#
2# Copyright (C) 2005 Jakub Jermar
3# All rights reserved.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions
7# are met:
8#
9# - Redistributions of source code must retain the above copyright
10# notice, this list of conditions and the following disclaimer.
11# - Redistributions in binary form must reproduce the above copyright
12# notice, this list of conditions and the following disclaimer in the
13# documentation and/or other materials provided with the distribution.
14# - The name of the author may not be used to endorse or promote products
15# derived from this software without specific prior written permission.
16#
17# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28
29#include <arch/arch.h>
30#include <arch/regdef.h>
31#include <arch/boot/boot.h>
32#include <arch/stack.h>
33
34#include <arch/mm/mmu.h>
35#include <arch/mm/tlb.h>
36#include <arch/mm/tte.h>
37
38#ifdef CONFIG_SMP
39#include <arch/context_offset.h>
40#endif
41
! Declare %g2/%g3 as scratch so the assembler accepts their use
! here without application-register warnings.
42.register %g2, #scratch
43.register %g3, #scratch
44
45.section K_TEXT_START, "ax"
46
! Bit 0 of the incoming %o0 flags the bootstrap processor (see the
! register contract documented below).
47#define BSP_FLAG 1
48
49/*
50 * Here is where the kernel is passed control from the boot loader.
51 *
52 * The registers are expected to be in this state:
53 * - %o0 starting address of physical memory + bootstrap processor flag
54 * bits 63...1: physical memory starting address / 2
55 * bit 0: non-zero on BSP processor, zero on AP processors
56 * - %o1 bootinfo structure address (BSP only)
57 * - %o2 bootinfo structure size (BSP only)
58 *
59 * Moreover, we depend on boot having established the following environment:
60 * - TLBs are on
61 * - identity mapping for the kernel image
62 */
63
64.global kernel_image_start
65kernel_image_start:
! Entered from the boot loader with the register contract described in the
! comment above (%o0 = physmem base | BSP flag, %o1/%o2 = bootinfo on BSP).
66 mov BSP_FLAG, %l0
67 and %o0, %l0, %l7 ! l7 <= bootstrap processor?
68 andn %o0, %l0, %l6 ! l6 <= start of physical memory
69
70 ! Get bits 40:13 of physmem_base.
71 srlx %l6, 13, %l5
72 sllx %l5, 13 + (63 - 40), %l5
73 srlx %l5, 63 - 40, %l5 ! l5 <= physmem_base[40:13]
74
75 /*
76 * Setup basic runtime environment.
77 */
78
! SPARC V9 requires CANSAVE + CANRESTORE + OTHERWIN == NWINDOWS - 2;
! with CANRESTORE = OTHERWIN = 0, CANSAVE gets its maximum, NWINDOWS - 2.
79 wrpr %g0, NWINDOWS - 2, %cansave ! set maximum saveable windows
80 wrpr %g0, 0, %canrestore ! get rid of windows we will never need again
81 wrpr %g0, 0, %otherwin ! make sure the window state is consistent
82 wrpr %g0, NWINDOWS - 1, %cleanwin ! prevent needless clean_window traps for kernel
83
84 wrpr %g0, 0, %tl ! TL = 0, primary context register is used
85
86 wrpr %g0, PSTATE_PRIV_BIT, %pstate ! Disable interrupts and disable 32-bit address masking.
87
88 wrpr %g0, 0, %pil ! initialize %pil (interrupt priority level)
89
90 /*
91 * Switch to kernel trap table.
92 */
93 sethi %hi(trap_table), %g1
94 wrpr %g1, %lo(trap_table), %tba
95
96 /*
97 * Take over the DMMU by installing global locked
98 * TTE entry identically mapping the first 4M
99 * of memory.
100 *
101 * In case of DMMU, no FLUSH instructions need to be
102 * issued. Because of that, the old DTLB contents can
103 * be demapped pretty straightforwardly and without
104 * causing any traps.
105 */
106
107 wr %g0, ASI_DMMU, %asi ! default ASI for the "stxa ... %asi" accesses below
108
109#define SET_TLB_DEMAP_CMD(r1, context_id) \
110 set (TLB_DEMAP_CONTEXT<<TLB_DEMAP_TYPE_SHIFT) | (context_id<<TLB_DEMAP_CONTEXT_SHIFT), %r1
111
112 ! demap context 0
113 SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS)
114 stxa %g0, [%g1] ASI_DMMU_DEMAP
115 membar #Sync
116
117#define SET_TLB_TAG(r1, context) \
118 set VMA | (context<<TLB_TAG_ACCESS_CONTEXT_SHIFT), %r1
119
120 ! write DTLB tag
121 SET_TLB_TAG(g1, MEM_CONTEXT_KERNEL)
122 stxa %g1, [VA_DMMU_TAG_ACCESS] %asi
123 membar #Sync
124
! Builds a 4M-page TTE in r1: valid bit, TTE_CV|TTE_CP|TTE_P, LMA base and
! the caller-supplied flags, with physmem_base[40:13] (%l5) OR-ed in.
! r2 is used purely as a scratch register.
125#define SET_TLB_DATA(r1, r2, imm) \
126 set TTE_CV | TTE_CP | TTE_P | LMA | imm, %r1; \
127 or %r1, %l5, %r1; \
128 mov PAGESIZE_4M, %r2; \
129 sllx %r2, TTE_SIZE_SHIFT, %r2; \
130 or %r1, %r2, %r1; \
131 mov 1, %r2; \
132 sllx %r2, TTE_V_SHIFT, %r2; \
133 or %r1, %r2, %r1;
134
135 ! write DTLB data and install the kernel mapping
136 SET_TLB_DATA(g1, g2, TTE_L | TTE_W) ! use non-global mapping
137 stxa %g1, [%g0] ASI_DTLB_DATA_IN_REG
138 membar #Sync
139
140 /*
141 * Because we cannot use global mappings (because we want to
142 * have separate 64-bit address spaces for both the kernel
143 * and the userspace), we prepare the identity mapping also in
144 * context 1. This step is required by the
145 * code installing the ITLB mapping.
146 */
147 ! write DTLB tag of context 1 (i.e. MEM_CONTEXT_TEMP)
148 SET_TLB_TAG(g1, MEM_CONTEXT_TEMP)
149 stxa %g1, [VA_DMMU_TAG_ACCESS] %asi
150 membar #Sync
151
152 ! write DTLB data and install the kernel mapping in context 1
153 SET_TLB_DATA(g1, g2, TTE_W) ! use non-global mapping
154 stxa %g1, [%g0] ASI_DTLB_DATA_IN_REG
155 membar #Sync
156
157 /*
158 * Now it is time to take over the IMMU.
159 * Unfortunately, it cannot be done as easily as the DMMU,
160 * because the IMMU is mapping the code it executes.
161 *
162 * [ Note that brave experiments with disabling the IMMU
163 * and using the DMMU approach failed after a dozen
164 * of desperate days with only little success. ]
165 *
166 * The approach used here is inspired by OpenBSD.
167 * First, the kernel creates IMMU mapping for itself
168 * in context 1 (MEM_CONTEXT_TEMP) and switches to
169 * it. Context 0 (MEM_CONTEXT_KERNEL) can be demapped
170 * afterwards and replaced with the kernel permanent
171 * mapping. Finally, the kernel switches back to
172 * context 0 and demaps context 1.
173 *
174 * Moreover, the IMMU requires use of the FLUSH instructions.
175 * But that is OK because we always use operands with
176 * addresses already mapped by the taken over DTLB.
177 */
178
179 set kernel_image_start, %g5 ! %g5 <= an address covered by the DTLB mapping; FLUSH operand
180
181 ! write ITLB tag of context 1
182 SET_TLB_TAG(g1, MEM_CONTEXT_TEMP)
183 mov VA_DMMU_TAG_ACCESS, %g2
184 stxa %g1, [%g2] ASI_IMMU
185 flush %g5
186
187 ! write ITLB data and install the temporary mapping in context 1
188 SET_TLB_DATA(g1, g2, 0) ! use non-global mapping
189 stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG
190 flush %g5
191
192 ! switch to context 1
193 mov MEM_CONTEXT_TEMP, %g1
194 stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi ! ASI_DMMU is correct here !!!
195 flush %g5
196
197 ! demap context 0
198 SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS)
199 stxa %g0, [%g1] ASI_IMMU_DEMAP
200 flush %g5
201
202 ! write ITLB tag of context 0
203 SET_TLB_TAG(g1, MEM_CONTEXT_KERNEL)
204 mov VA_DMMU_TAG_ACCESS, %g2
205 stxa %g1, [%g2] ASI_IMMU
206 flush %g5
207
208 ! write ITLB data and install the permanent kernel mapping in context 0
209 SET_TLB_DATA(g1, g2, TTE_L) ! use non-global mapping
210 stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG
211 flush %g5
212
213 ! enter nucleus - using context 0
214 wrpr %g0, 1, %tl
215
216 ! demap context 1
217 SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_PRIMARY)
218 stxa %g0, [%g1] ASI_IMMU_DEMAP
219 flush %g5
220
221 ! set context 0 in the primary context register
222 stxa %g0, [VA_PRIMARY_CONTEXT_REG] %asi ! ASI_DMMU is correct here !!!
223 flush %g5
224
225 ! leave nucleus - using primary context, i.e. context 0
226 wrpr %g0, 0, %tl
227
228 brz %l7, 1f ! skip if you are not the bootstrap CPU
229 nop
230
231 /*
232 * Save physmem_base for use by the mm subsystem.
233 * %l6 contains starting physical address
234 */
235 sethi %hi(physmem_base), %l4
236 stx %l6, [%l4 + %lo(physmem_base)]
237
238 /*
239 * Precompute kernel 8K TLB data template.
240 * %l5 contains starting physical address bits [40:13]
241 */
242 sethi %hi(kernel_8k_tlb_data_template), %l4
243 ldx [%l4 + %lo(kernel_8k_tlb_data_template)], %l3
244 or %l3, %l5, %l3
245 stx %l3, [%l4 + %lo(kernel_8k_tlb_data_template)]
246
247 /*
248 * So far, we have not touched the stack.
249 * It is a good idea to set the kernel stack to a known state now.
250 */
251 sethi %hi(temporary_boot_stack), %sp
252 or %sp, %lo(temporary_boot_stack), %sp
253 sub %sp, STACK_BIAS, %sp
254
! %o1 (source) and %o2 (size) still hold the bootinfo address and size
! passed in by the boot loader; only the destination needs to be set up.
255 sethi %hi(bootinfo), %o0
256 call memcpy ! copy bootinfo
257 or %o0, %lo(bootinfo), %o0 ! (delay slot) finish the destination argument
258
259 call arch_pre_main
260 nop
261
262 call main_bsp
263 nop
264
265 /* Not reached. */
266
! Safety net: spin forever should main_bsp ever return.
2670:
268 ba 0b
269 nop
270
271
272 /*
273 * Read MID from the processor.
274 */
! Application processors branch here (brz %l7, 1f above).
2751:
276 ldxa [%g0] ASI_UPA_CONFIG, %g1
277 srlx %g1, UPA_CONFIG_MID_SHIFT, %g1
278 and %g1, UPA_CONFIG_MID_MASK, %g1 ! g1 <= this CPU's MID
279
280#ifdef CONFIG_SMP
281 /*
282 * Active loop for APs until the BSP picks them up.
283 * A processor cannot leave the loop until the
284 * global variable 'waking_up_mid' equals its
285 * MID.
286 */
287 set waking_up_mid, %g2
2882:
289 ldx [%g2], %g3
290 cmp %g3, %g1
291 bne 2b
292 nop
293
294 /*
295 * Configure stack for the AP.
296 * The AP is expected to use the stack saved
297 * in the ctx global variable.
298 */
299 set ctx, %g1
300 add %g1, OFFSET_SP, %g1
301 ldx [%g1], %o6 ! %o6 is %sp on SPARC
302
303 call main_ap
304 nop
305
306 /* Not reached. */
307#endif
308
! Without CONFIG_SMP, APs fall through to this loop and spin forever.
3090:
310 ba 0b
311 nop
312
313
314.section K_DATA_START, "aw", @progbits
315
316/*
317 * Create small stack to be used by the bootstrap processor.
318 * It is going to be used only for a very limited period of
319 * time, but we switch to it anyway, just to be sure we are
320 * properly initialized.
321 *
322 * What is important is that this piece of memory is covered
323 * by the 4M DTLB locked entry and therefore there will be
324 * no surprises like deadly combinations of spill trap
325 * and TLB miss on the stack address.
326 */
327
328#define INITIAL_STACK_SIZE 1024
329
! The stack grows downwards, so the usable area lies below the
! temporary_boot_stack label; the space reserved above the label is the
! register-window save area (STACK_WINDOW_SAVE_AREA_SIZE).
330.align STACK_ALIGNMENT
331 .space INITIAL_STACK_SIZE
332.align STACK_ALIGNMENT
333temporary_boot_stack:
334 .space STACK_WINDOW_SAVE_AREA_SIZE
335
336
337.data
338
339.align 8
340.global physmem_base ! copy of the physical memory base address
341physmem_base:
342 .quad 0
343
344/*
345 * This variable is used by the fast_data_MMU_miss trap handler.
346 * At runtime, it is further modified to reflect the starting address of
347 * physical memory: the BSP startup code above ORs physmem_base[40:13]
348 * into this template before the first use.
349 */
349.global kernel_8k_tlb_data_template
350kernel_8k_tlb_data_template:
351 .quad ((1 << TTE_V_SHIFT) | TTE_CV | TTE_CP | TTE_P | TTE_W)
Note: See TracBrowser for help on using the repository browser.