source: mainline/kernel/arch/ia64/src/start.S@ bdd1600

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since bdd1600 was d9ee2ea, checked in by Jakub Jermar <jakub@…>, 16 years ago

Change the way how RIDs are mapped onto ASIDs. Instead of 7 RIDs per ASID, now
there will be 8 RIDs per one ASID. This will slightly reduce the number of
available ASIDs, but not significantly.

The kernel will now have all 8 RIDs instead of only one. RIDs 0 - 7 belong to
the kernel, but only RID 7, accessible from VRN 7, is actually used by the
kernel. This allows us to use RID 0 for VRN 0 and thus differentiate between
0x0000000000000000 and 0xe000000000000000 in a more elegant way. The fault1
test will now associate the kernel bad trap with RID 0, which maps to
ASID_KERNEL.

User tasks will also be given 8 RIDs, but will use only the lower 7 that fit
into VRN 0 - 6, because the last VRN needs to be reserved for the kernel. The
eighth RID will be unused for now. It can be used for something completely
different one day or if the task needs to establish some special mappings.

So with this change, the kernel now has a 64-bit address space compared to
previous 61 bits, but still makes use only of the highest 1/8 (i.e. 61-bits).
Applications continue to have an address space composed of 7 61-bit blocks which
are arranged in a consecutive way. Each application now has one hidden and
currently unused 61-bit segment.

  • Property mode set to 100644
File size: 5.9 KB
Line 
1#
2# Copyright (c) 2005 Jakub Jermar
3# All rights reserved.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions
7# are met:
8#
9# - Redistributions of source code must retain the above copyright
10# notice, this list of conditions and the following disclaimer.
11# - Redistributions in binary form must reproduce the above copyright
12# notice, this list of conditions and the following disclaimer in the
13# documentation and/or other materials provided with the distribution.
14# - The name of the author may not be used to endorse or promote products
15# derived from this software without specific prior written permission.
16#
17# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28
29#include <arch/register.h>
30#include <arch/mm/page.h>
31#include <arch/mm/asid.h>
32#include <mm/asid.h>
33
# Region Register helpers.  AND-ing an RR value with RR_MASK clears the
# ve bit (bit 0), the ps field (bits 2..7) and the rid field (bits 8..31)
# while preserving the upper half and bit 1; fresh rid/ps values are then
# OR-ed in at the shifts below.
34#define RR_MASK (0xFFFFFFFF00000002)
35#define RID_SHIFT 8
36#define PS_SHIFT 2
37
# Pre-cooked words for the pinned translation-register inserts (itr.i/itr.d)
# performed in kernel_image_start below.  NOTE(review): 0x661 presumably
# encodes present + write-back memory attribute + accessed/dirty + PL0 +
# read/write/execute, while 0x671 (VIO/IO/FW) selects an uncacheable memory
# attribute for device space; the high bits carry the physical page number.
# TODO: confirm the exact bit layout against the Itanium SDM / page.h.
38#define KERNEL_TRANSLATION_I 0x0010000000000661
39#define KERNEL_TRANSLATION_D 0x0010000000000661
40#define KERNEL_TRANSLATION_VIO 0x0010000000000671
41#define KERNEL_TRANSLATION_IO 0x00100FFFFC000671
42#define KERNEL_TRANSLATION_FW 0x00100000F0000671
43
44.section K_TEXT_START, "ax"
45
46.global kernel_image_start
47
# Boot entry point.  Paging is turned off right below (psr.l cleared), so
# this code runs with physical addressing.  It installs pinned translation
# registers for the kernel image, virtual I/O, I/O ports and firmware, and
# finally enables paging by rfi-ing to paging_start.
#
# stack0 doubles as the initial memory-stack top: paging_start sets
# r12 = stack0 - 16, i.e. the stack grows down from the image start.
48stack0:
49kernel_image_start:
50 .auto
51
52#ifdef CONFIG_SMP
53 # Identify self(CPU) in OS structures by ID / EID
54
	# cr64 is cr.lid; its id/eid fields start at bit 16, so after the
	# shift r9 indexes this CPU's byte slot.  The 0xffffffff mask strips
	# the upper address bits of the symbol so the st1 uses a physical
	# address (paging is not enabled yet) -- NOTE(review): verify.
55 mov r9 = cr64
56 mov r10 = 1
57 movl r12 = 0xffffffff
58 movl r8 = cpu_by_id_eid_list
59 and r8 = r8, r12
60 shr r9 = r9, 16
61 add r8 = r8, r9
62 st1 [r8] = r10
63#endif
64
	# Clear the lower half of PSR (disables interruption collection,
	# translation etc.) and serialize before touching the MMU state.
65 mov psr.l = r0
66 srlz.i
67 srlz.d
68
69 # Fill TR.i and TR.d using Region Register #VRN_KERNEL
70
	# Rewrite the kernel region register: keep its preserved bits,
	# install RID_KERNEL7 and the kernel page size.
71 movl r8 = (VRN_KERNEL << VRN_SHIFT)
72 mov r9 = rr[r8]
73
74 movl r10 = (RR_MASK)
75 and r9 = r10, r9
76 movl r10 = (((RID_KERNEL7) << RID_SHIFT) | (KERNEL_PAGE_WIDTH << PS_SHIFT))
77 or r9 = r10, r9
78
79 mov rr[r8] = r9
80
	# ifa = virtual address to be mapped, itir.ps = page size; then insert
	# the pinned instruction and data translations into slot 0.
81 movl r8 = (VRN_KERNEL << VRN_SHIFT)
82 mov cr.ifa = r8
83
84 mov r11 = cr.itir
85 movl r10 = (KERNEL_PAGE_WIDTH << PS_SHIFT)
86 or r10 = r10, r11
87 mov cr.itir = r10
88
89 movl r10 = (KERNEL_TRANSLATION_I)
90 itr.i itr[r0] = r10
91 movl r10 = (KERNEL_TRANSLATION_D)
92 itr.d dtr[r0] = r10
93
	# dtr slot 1: virtual I/O area.  Reuses the itir programmed above
	# (same page size as the kernel mapping).
94 movl r7 = 1
95 movl r8 = (VRN_KERNEL << VRN_SHIFT) | VIO_OFFSET
96 mov cr.ifa = r8
97 movl r10 = (KERNEL_TRANSLATION_VIO)
98 itr.d dtr[r7] = r10
99
	# Replace the ps field of itir (bits 2..7, hence the ~0xfc mask)
	# with the I/O page width, then insert dtr slot 2 for the I/O area.
100 mov r11 = cr.itir
101 movl r10 = ~0xfc
102 and r10 = r10, r11
103 movl r11 = (IO_PAGE_WIDTH << PS_SHIFT)
104 or r10 = r10, r11
105 mov cr.itir = r10
106
107 movl r7 = 2
108 movl r8 = (VRN_KERNEL << VRN_SHIFT) | IO_OFFSET
109 mov cr.ifa = r8
110 movl r10 = (KERNEL_TRANSLATION_IO)
111 itr.d dtr[r7] = r10
112
113 # Setup mapping for firmware area (also SAPIC)
114
	# Same ps-field surgery as above, this time with the firmware page
	# width; dtr slot 3 maps the firmware/SAPIC area.
115 mov r11 = cr.itir
116 movl r10 = ~0xfc
117 and r10 = r10, r11
118 movl r11 = (FW_PAGE_WIDTH << PS_SHIFT)
119 or r10 = r10, r11
120 mov cr.itir = r10
121
122 movl r7 = 3
123 movl r8 = (VRN_KERNEL << VRN_SHIFT) | FW_OFFSET
124 mov cr.ifa = r8
125 movl r10 = (KERNEL_TRANSLATION_FW)
126 itr.d dtr[r7] = r10
127
128 # Initialize DCR (Default Control Register)
129
130 movl r10 = (DCR_DP_MASK | DCR_DK_MASK | DCR_DX_MASK | DCR_DR_MASK | DCR_DA_MASK | DCR_DD_MASK | DCR_LC_MASK)
131 mov r9 = cr.dcr
132 or r10 = r10, r9
133 mov cr.dcr = r10
134
135 # Initialize PSR
136
	# Build the PSR value that rfi will load: current PSR plus data/
	# register/instruction translation and interruption collection.
	# ipsr/iip/ifs form the interruption state that rfi "returns" to.
137 movl r10 = (PSR_DT_MASK | PSR_RT_MASK | PSR_IT_MASK | PSR_IC_MASK) /* Enable paging */
138 mov r9 = psr
139
140 or r10 = r10, r9
141 mov cr.ipsr = r10
142 mov cr.ifs = r0
143 movl r8 = paging_start
144 mov cr.iip = r8
145 srlz.d
146 srlz.i
147
148 .explicit
149
150 /*
151 * Return From Interrupt is the only way to
152 * fill the upper half word of PSR.
153 */
154 rfi ;;
155
156.global paging_start
157paging_start:
158
159 /*
160 * Now we are paging.
161 */
# Entered via the rfi in kernel_image_start with translation enabled.
# Distinguishes the bootstrap processor (BSP) from application processors
# (APs), sets up the register stack engine, memory stack and gp, and hands
# control over to the C entry points (arch_pre_main/main_bsp, or main_ap).
162
163 # Switch to register bank 1
164 bsw.1
165
166#ifdef CONFIG_SMP
167 # Am I BSP or AP?
	# bsp_started is still zero when the BSP first gets here (it is set
	# to 1 further below), so: p3 = BSP, p2 = AP.
168 movl r20 = bsp_started ;;
169 ld8 r20 = [r20] ;;
170 cmp.eq p3, p2 = r20, r0 ;;
171#else
172 cmp.eq p3, p2 = r0, r0 ;; /* you are BSP */
#endif /* CONFIG_SMP */
173
174
175 # Initialize register stack
	# ar.rsc = 0 stops the RSE; bspstore is pointed at the bottom of the
	# kernel region and loadrs invalidates any stale stacked registers.
176 mov ar.rsc = r0
177 movl r8 = (VRN_KERNEL << VRN_SHIFT) ;;
178 mov ar.bspstore = r8
179 loadrs
180
181 # Initialize memory stack to some sane value
182 movl r12 = stack0 ;;
183 add r12 = -16, r12 /* allocate a scratch area on the stack */
184
185 # Initialize gp (Global Pointer) register
	# r20 = virtualized copy of the incoming r1 -- NOTE(review): the boot
	# loader presumably passed a bootinfo pointer in r1; confirm.  gp (r1)
	# itself is pointed at kernel_image_start for @gprel addressing.
186 movl r20 = (VRN_KERNEL << VRN_SHIFT) ;;
187 or r20 = r20, r1 ;;
188 movl r1 = kernel_image_start
189
190 /*
191 * Initialize bootinfo on BSP.
192 */
193(p3) addl r21 = @gprel(bootinfo), gp ;;
194(p3) st8 [r21] = r20
195
196 ssm (1 << 19) ;; /* Disable f32 - f127 */
197 srlz.i
198 srlz.d ;;
199
200#ifdef CONFIG_SMP
	# APs branch off to main_ap here and never return.
201(p2) movl r18 = main_ap ;;
202(p2) mov b1 = r18 ;;
203(p2) br.call.sptk.many b0 = b1
204
205 # Mark that BSP is on
206
207 mov r20 = 1 ;;
208 movl r21 = bsp_started ;;
209 st8 [r21] = r20 ;;
210#endif
211
	# BSP path: architecture pre-initialization, then the kernel proper.
212 br.call.sptk.many b0 = arch_pre_main
213
214 movl r18 = main_bsp ;;
215 mov b1 = r18 ;;
216 br.call.sptk.many b0 = b1
217
	# main_bsp should never return; spin forever if it does.
2180:
219 br 0b
220
#ifdef CONFIG_SMP
221
222
# Application-processor wakeup entry point (page aligned).  Each AP records
# itself in cpu_by_id_eid_list, spins until the BSP writes the wakeup value
# 3 into its slot, and then enters the common startup path at
# kernel_image_start.  Runs with paging disabled, hence the 0xffffffff
# masking of code/data addresses before use.
223.align 4096
224kernel_image_ap_start:
225 .auto
226
227 # Identify self(CPU) in OS structures by ID / EID
228
	# Same id/eid slot computation as in kernel_image_start: cr64 is
	# cr.lid, id/eid start at bit 16; store 1 = "I am here".
229 mov r9 = cr64
230 mov r10 = 1
231 movl r12 = 0xffffffff
232 movl r8 = cpu_by_id_eid_list
233 and r8 = r8, r12
234 shr r9 = r9, 16
235 add r8 = r8, r9
236 st1 [r8] = r10
237
238 # Wait for wakeup synchro signal (#3 in cpu_by_id_eid_list)
239
240kernel_image_ap_start_loop:
	# b1 = physical address of the loop head (r12 still 0xffffffff).
241 movl r11 = kernel_image_ap_start_loop
242 and r11 = r11, r12
243 mov b1 = r11
244
	# Re-read our slot; while it is not yet 3, (p3) branches back to
	# the loop head.  Once it equals 3, fall through to the jump below.
245 ld1 r20 = [r8]
246 movl r21 = 3
247 cmp.eq p2, p3 = r20, r21
248(p3) br.call.sptk.many b0 = b1
249
	# Wakeup received: enter the common startup code (physical address).
250 movl r11 = kernel_image_start
251 and r11 = r11, r12
252 mov b1 = r11
253 br.call.sptk.many b0 = b1
254
# Non-zero once the BSP has finished its early startup; read by
# paging_start to tell BSP and APs apart.
255.align 16
256.global bsp_started
257bsp_started:
258 .space 8
259
# One byte per CPU, indexed by the 16-bit id/eid taken from cr.lid
# (hence 65536 slots): 1 = CPU present, 3 = wakeup signal from the BSP.
260.align 4096
261.global cpu_by_id_eid_list
262cpu_by_id_eid_list:
263 .space 65536
264
#endif /* CONFIG_SMP */
Note: See TracBrowser for help on using the repository browser.