source: mainline/arch/ia64/src/ivt.S@ 83d2d0e

Last change on this file since 83d2d0e was 83d2d0e, checked in by Jakub Jermar <jakub@…>, 20 years ago

ia64 work.

ivt.S:
Detect userspace stack in heavyweight handler and switch to kernel stack.
Remember the old stack pointer.
For the register stack, the kernel backing store is still assumed.
Fix alignment issues that showed up when STACK_ITEMS was odd.
Fix an ld8 instruction that subtracted 8 instead of adding 8.

scheduler.c:
Change before_thread_runs_arch() to calculate address of top of the stack
for the interrupt handler.
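
The kernel stack address that the handler picks up in R_KSTACK (r23) has to be published by the scheduler before the thread runs. Below is a minimal, hypothetical sketch of what before_thread_runs_arch() is expected to do; THREAD, THREAD_STACK_SIZE, the byte-array kstack and the exact inline-assembly form are assumptions, not the actual scheduler.c change.

void before_thread_runs_arch(void)
{
	/*
	 * Sketch only. Bind the value to r8 so the compiler cannot pick one
	 * of the banked registers r16-r31, whose contents change across bsw.
	 */
	register uint64_t kstack_top asm("r8") =
	    (uint64_t) &THREAD->kstack[THREAD_STACK_SIZE];

	asm volatile (
		"bsw.0 ;;\n"		/* switch to register bank 0 */
		"mov r23 = %0 ;;\n"	/* R_KSTACK, keep in sync with ivt.S */
		"bsw.1 ;;\n"		/* switch back to register bank 1 */
		:
		: "r" (kstack_top)
	);
}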

  • Property mode set to 100644
File size: 11.4 KB
#
# Copyright (C) 2005 Jakub Vana
# Copyright (C) 2005 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
#   derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

#include <arch/stack.h>
#include <arch/register.h>
#include <arch/mm/page.h>
#include <align.h>

#define STACK_ITEMS		13
#define STACK_FRAME_SIZE	ALIGN_UP((STACK_ITEMS * STACK_ITEM_SIZE) + STACK_SCRATCH_AREA_SIZE, STACK_ALIGNMENT)

#if (STACK_ITEMS % 2 == 0)
#	define STACK_FRAME_BIAS	8
#else
#	define STACK_FRAME_BIAS	16
#endif
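
/*
 * Assuming an 8-byte STACK_ITEM_SIZE and a 16-byte STACK_SCRATCH_AREA_SIZE
 * and STACK_ALIGNMENT (see <arch/stack.h>), the 13 items plus the scratch
 * area make 120 bytes, which ALIGN_UP rounds to a 128-byte STACK_FRAME_SIZE.
 * The bias makes the items that are saved downwards from
 * old_sp - STACK_FRAME_BIAS end exactly at new_sp + STACK_SCRATCH_AREA_SIZE,
 * where the restore sequence starts reading them back: 8 bytes suffice when
 * STACK_ITEMS is even, 16 are needed when it is odd.
 */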

/** Partitioning of bank 0 registers. */
#define R_OFFS		r16
#define R_HANDLER	r17
#define R_RET		r18
#define R_KSTACK	r23	/* keep in sync with before_thread_runs_arch() */

/** Heavyweight interrupt handler
 *
 * This macro roughly follows steps 1 through 19 described in the
 * Intel Itanium Architecture Software Developer's Manual, Chapter 3.4.2.
 *
 * The HEAVYWEIGHT_HANDLER macro must fit into 16 bundles (48 instructions).
 * This goal is achieved by using procedure calls after the RSE becomes operational.
 *
 * Some steps are skipped (enabling and disabling interrupts).
 * Some steps are not fully supported yet (e.g. interruptions
 * from userspace and the floating-point context).
 *
 * @param offs Offset from the beginning of the IVT.
 * @param handler Interrupt handler address.
 */
.macro HEAVYWEIGHT_HANDLER offs, handler=universal_handler
	.org ivt + \offs
	mov R_OFFS = \offs
	movl R_HANDLER = \handler ;;
	br heavyweight_handler
.endm
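
/*
 * Each expansion of HEAVYWEIGHT_HANDLER above emits only the three
 * instructions placed at its vector offset; the actual saving of state and
 * dispatching of the handler is shared by all vectors in
 * heavyweight_handler below.
 */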

.global heavyweight_handler
heavyweight_handler:
	/* 1. copy interrupt registers into bank 0 */

	/*
	 * Note that r24-r31 from bank 0 can be used only as long as PSR.ic = 0.
	 */
	mov r24 = cr.iip
	mov r25 = cr.ipsr
	mov r26 = cr.iipa
	mov r27 = cr.isr
	mov r28 = cr.ifa

	/* 2. preserve predicate register into bank 0 */
	mov r29 = pr ;;

	/* 3. switch to kernel memory stack */
	mov r30 = cr.ipsr
	shr.u r31 = r12, VRN_SHIFT ;;

	/*
	 * Set p6 to true and p7 to false if the stack register references
	 * kernel address space; otherwise set p6 to false and p7 to true.
	 */
	cmp.eq p6, p7 = VRN_KERNEL, r31 ;;

	(p6) shr.u r30 = r30, PSR_CPL_SHIFT ;;
	(p6) and r30 = PSR_CPL_MASK_SHIFTED, r30 ;;

	/*
	 * Set p6 to true and p7 to false if the interrupted context executed
	 * in kernel mode; otherwise set p6 to false and p7 to true.
	 */
	(p6) cmp.eq p6, p7 = r30, r0 ;;

	/*
	 * Now, p7 is true iff the stack needs to be switched to the kernel stack.
	 */
	mov r30 = r12
	(p7) mov r12 = R_KSTACK ;;

	add r31 = -STACK_FRAME_BIAS, r12 ;;
	add r12 = -STACK_FRAME_SIZE, r12
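
	/*
	 * The frame carved out above consists of the scratch area at
	 * [r12, r12 + STACK_SCRATCH_AREA_SIZE) and STACK_ITEMS slots that
	 * are filled downwards from r31, starting with the old stack
	 * pointer remembered in r30.
	 */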

	/* 4. save registers in bank 0 into memory stack */
	st8 [r31] = r30, -8 ;;	/* save old stack pointer */

	st8 [r31] = r29, -8 ;;	/* save predicate registers */

	st8 [r31] = r24, -8 ;;	/* save cr.iip */
	st8 [r31] = r25, -8 ;;	/* save cr.ipsr */
	st8 [r31] = r26, -8 ;;	/* save cr.iipa */
	st8 [r31] = r27, -8 ;;	/* save cr.isr */
	st8 [r31] = r28, -8 ;;	/* save cr.ifa */

	/* 5. RSE switch from interrupted context */
	mov r24 = ar.rsc
	mov r25 = ar.pfs
	cover
	mov r26 = cr.ifs

	st8 [r31] = r24, -8 ;;	/* save ar.rsc */
	st8 [r31] = r25, -8 ;;	/* save ar.pfs */
	st8 [r31] = r26, -8	/* save cr.ifs */

	and r30 = ~3, r24 ;;
	mov ar.rsc = r30 ;;	/* place RSE in enforced lazy mode */
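
	/*
	 * The RSE must be in enforced lazy mode (ar.rsc.mode == 0) before
	 * ar.rnat is read below; the same would hold for a write to
	 * ar.bspstore if the backing store were switched here.
	 */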

	mov r27 = ar.rnat
	mov r28 = ar.bspstore ;;

	/* assume kernel backing store */
	/* mov ar.bspstore = r28 ;; */

	mov r29 = ar.bsp

	st8 [r31] = r27, -8 ;;	/* save ar.rnat */
	st8 [r31] = r28, -8 ;;	/* save ar.bspstore */
	st8 [r31] = r29, -8	/* save ar.bsp */

	mov ar.rsc = r24	/* restore RSE's setting */

	/* steps 6 - 15 are done by heavyweight_handler_inner() */
	mov R_RET = b0		/* save b0 belonging to interrupted context */
	br.call.sptk.many b0 = heavyweight_handler_inner
0:	mov b0 = R_RET		/* restore b0 belonging to the interrupted context */

	/* 16. RSE switch to interrupted context */
	cover			/* allocate zero-size frame (step 1 from Intel docs) */

	add r31 = STACK_SCRATCH_AREA_SIZE, r12 ;;

	mov r28 = ar.bspstore	/* calculate loadrs (step 2) */
	ld8 r29 = [r31], +8 ;;	/* load ar.bsp */
	sub r27 = r29, r28 ;;
	shl r27 = r27, 16
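
	/*
	 * r27 now holds the distance in bytes between the saved ar.bsp and
	 * the current ar.bspstore, shifted into the position of the
	 * ar.rsc.loadrs field (which starts at bit 16); the loadrs below
	 * then reloads exactly that many bytes of stacked registers from
	 * the backing store.
	 */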

	mov r24 = ar.rsc ;;
	and r30 = ~3, r24 ;;
	or r24 = r30, r27 ;;
	mov ar.rsc = r24 ;;	/* place RSE in enforced lazy mode */

	loadrs			/* (step 3) */

	ld8 r28 = [r31], +8 ;;	/* load ar.bspstore */
	ld8 r27 = [r31], +8 ;;	/* load ar.rnat */
	ld8 r26 = [r31], +8 ;;	/* load cr.ifs */
	ld8 r25 = [r31], +8 ;;	/* load ar.pfs */
	ld8 r24 = [r31], +8 ;;	/* load ar.rsc */

	/* mov ar.bspstore = r28 ;; */	/* (step 4) */
	/* mov ar.rnat = r27 */		/* (step 5) */

	mov ar.pfs = r25	/* (step 6) */
	mov cr.ifs = r26

	mov ar.rsc = r24	/* (step 7) */

	/* 17. restore interruption state from memory stack */
	ld8 r28 = [r31], +8 ;;	/* load cr.ifa */
	ld8 r27 = [r31], +8 ;;	/* load cr.isr */
	ld8 r26 = [r31], +8 ;;	/* load cr.iipa */
	ld8 r25 = [r31], +8 ;;	/* load cr.ipsr */
	ld8 r24 = [r31], +8 ;;	/* load cr.iip */

	mov cr.iip = r24
	mov cr.ipsr = r25
	mov cr.iipa = r26
	mov cr.isr = r27
	mov cr.ifa = r28

	/* 18. restore predicate registers from memory stack */
	ld8 r29 = [r31], +8 ;;	/* load predicate registers */
	mov pr = r29

	/* 19. return from interruption */
	ld8 r12 = [r31]		/* load stack pointer */
	rfi ;;

.global heavyweight_handler_inner
heavyweight_handler_inner:
	/*
	 * From this point, the rest of the interrupted context
	 * will be preserved in stacked registers and backing store.
	 */
	alloc loc0 = ar.pfs, 0, 47, 2, 0 ;;
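
	/*
	 * The register frame allocated above has no inputs, 47 locals and
	 * 2 outputs: loc0 holds ar.pfs, loc1 the saved b0, loc2 the handler
	 * address, loc3-loc46 the rest of the preserved context, and
	 * out0/out1 become the arguments of the handler called in step 10
	 * (vector offset and pointer to the saved registers on the memory
	 * stack).
	 */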

	/* bank 0 is going to be shadowed, copy essential data from there */
	mov loc1 = R_RET	/* b0 belonging to interrupted context */
	mov loc2 = R_HANDLER
	mov out0 = R_OFFS

	add out1 = STACK_SCRATCH_AREA_SIZE, r12

	/* 6. switch to bank 1 and reenable PSR.ic */
	ssm PSR_IC_MASK
	bsw.1 ;;
	srlz.d

	/* 7. preserve branch and application registers */
	mov loc3 = ar.unat
	mov loc4 = ar.lc
	mov loc5 = ar.ec
	mov loc6 = ar.ccv
	mov loc7 = ar.csd
	mov loc8 = ar.ssd

	mov loc9 = b0
	mov loc10 = b1
	mov loc11 = b2
	mov loc12 = b3
	mov loc13 = b4
	mov loc14 = b5
	mov loc15 = b6
	mov loc16 = b7

	/* 8. preserve general and floating-point registers */
	/* TODO: save floating-point context */
	mov loc17 = r1
	mov loc18 = r2
	mov loc19 = r3
	mov loc20 = r4
	mov loc21 = r5
	mov loc22 = r6
	mov loc23 = r7
	mov loc24 = r8
	mov loc25 = r9
	mov loc26 = r10
	mov loc27 = r11
	/* skip r12 (stack pointer) */
	mov loc28 = r13
	mov loc29 = r14
	mov loc30 = r15
	mov loc31 = r16
	mov loc32 = r17
	mov loc33 = r18
	mov loc34 = r19
	mov loc35 = r20
	mov loc36 = r21
	mov loc37 = r22
	mov loc38 = r23
	mov loc39 = r24
	mov loc40 = r25
	mov loc41 = r26
	mov loc42 = r27
	mov loc43 = r28
	mov loc44 = r29
	mov loc45 = r30
	mov loc46 = r31

	/* 9. skipped (will not enable interrupts) */
	/*
	 * ssm PSR_I_MASK
	 * ;;
	 * srlz.d
	 */

	/* 10. call handler */
	mov b1 = loc2
	br.call.sptk.many b0 = b1

	/* 11. return from handler */
0:

	/* 12. skipped (will not disable interrupts) */
	/*
	 * rsm PSR_I_MASK
	 * ;;
	 * srlz.d
	 */

	/* 13. restore general and floating-point registers */
	/* TODO: restore floating-point context */
	mov r1 = loc17
	mov r2 = loc18
	mov r3 = loc19
	mov r4 = loc20
	mov r5 = loc21
	mov r6 = loc22
	mov r7 = loc23
	mov r8 = loc24
	mov r9 = loc25
	mov r10 = loc26
	mov r11 = loc27
	/* skip r12 (stack pointer) */
	mov r13 = loc28
	mov r14 = loc29
	mov r15 = loc30
	mov r16 = loc31
	mov r17 = loc32
	mov r18 = loc33
	mov r19 = loc34
	mov r20 = loc35
	mov r21 = loc36
	mov r22 = loc37
	mov r23 = loc38
	mov r24 = loc39
	mov r25 = loc40
	mov r26 = loc41
	mov r27 = loc42
	mov r28 = loc43
	mov r29 = loc44
	mov r30 = loc45
	mov r31 = loc46

	/* 14. restore branch and application registers */
	mov ar.unat = loc3
	mov ar.lc = loc4
	mov ar.ec = loc5
	mov ar.ccv = loc6
	mov ar.csd = loc7
	mov ar.ssd = loc8

	mov b0 = loc9
	mov b1 = loc10
	mov b2 = loc11
	mov b3 = loc12
	mov b4 = loc13
	mov b5 = loc14
	mov b6 = loc15
	mov b7 = loc16

	/* 15. disable PSR.ic and switch to bank 0 */
	rsm PSR_IC_MASK
	bsw.0 ;;
	srlz.d

	mov R_RET = loc1
	mov ar.pfs = loc0
	br.ret.sptk.many b0

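/*
 * The IVT below is aligned on a 32 KB boundary, as the architecture
 * requires. The first 20 vectors occupy 0x400 bytes (64 bundles) each and
 * the remaining vectors 0x100 bytes (16 bundles) each, which is why the
 * offsets advance by 0x400 up to 0x4c00 and by 0x100 from 0x5000 onwards.
 */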
.global ivt
.align 32768
ivt:
	HEAVYWEIGHT_HANDLER 0x0000
	HEAVYWEIGHT_HANDLER 0x0400
	HEAVYWEIGHT_HANDLER 0x0800
	HEAVYWEIGHT_HANDLER 0x0c00 alternate_instruction_tlb_fault
	HEAVYWEIGHT_HANDLER 0x1000 alternate_data_tlb_fault
	HEAVYWEIGHT_HANDLER 0x1400 data_nested_tlb_fault
	HEAVYWEIGHT_HANDLER 0x1800
	HEAVYWEIGHT_HANDLER 0x1c00
	HEAVYWEIGHT_HANDLER 0x2000 data_dirty_bit_fault
	HEAVYWEIGHT_HANDLER 0x2400 instruction_access_bit_fault
	HEAVYWEIGHT_HANDLER 0x2800 data_access_bit_fault
	HEAVYWEIGHT_HANDLER 0x2c00 break_instruction
	HEAVYWEIGHT_HANDLER 0x3000 external_interrupt	/* For external interrupt, heavyweight handler is used. */
	HEAVYWEIGHT_HANDLER 0x3400
	HEAVYWEIGHT_HANDLER 0x3800
	HEAVYWEIGHT_HANDLER 0x3c00
	HEAVYWEIGHT_HANDLER 0x4000
	HEAVYWEIGHT_HANDLER 0x4400
	HEAVYWEIGHT_HANDLER 0x4800
	HEAVYWEIGHT_HANDLER 0x4c00

	HEAVYWEIGHT_HANDLER 0x5000 page_not_present
	HEAVYWEIGHT_HANDLER 0x5100
	HEAVYWEIGHT_HANDLER 0x5200
	HEAVYWEIGHT_HANDLER 0x5300
	HEAVYWEIGHT_HANDLER 0x5400 general_exception
	HEAVYWEIGHT_HANDLER 0x5500
	HEAVYWEIGHT_HANDLER 0x5600
	HEAVYWEIGHT_HANDLER 0x5700
	HEAVYWEIGHT_HANDLER 0x5800
	HEAVYWEIGHT_HANDLER 0x5900
	HEAVYWEIGHT_HANDLER 0x5a00
	HEAVYWEIGHT_HANDLER 0x5b00
	HEAVYWEIGHT_HANDLER 0x5c00
	HEAVYWEIGHT_HANDLER 0x5d00
	HEAVYWEIGHT_HANDLER 0x5e00
	HEAVYWEIGHT_HANDLER 0x5f00

	HEAVYWEIGHT_HANDLER 0x6000
	HEAVYWEIGHT_HANDLER 0x6100
	HEAVYWEIGHT_HANDLER 0x6200
	HEAVYWEIGHT_HANDLER 0x6300
	HEAVYWEIGHT_HANDLER 0x6400
	HEAVYWEIGHT_HANDLER 0x6500
	HEAVYWEIGHT_HANDLER 0x6600
	HEAVYWEIGHT_HANDLER 0x6700
	HEAVYWEIGHT_HANDLER 0x6800
	HEAVYWEIGHT_HANDLER 0x6900
	HEAVYWEIGHT_HANDLER 0x6a00
	HEAVYWEIGHT_HANDLER 0x6b00
	HEAVYWEIGHT_HANDLER 0x6c00
	HEAVYWEIGHT_HANDLER 0x6d00
	HEAVYWEIGHT_HANDLER 0x6e00
	HEAVYWEIGHT_HANDLER 0x6f00

	HEAVYWEIGHT_HANDLER 0x7000
	HEAVYWEIGHT_HANDLER 0x7100
	HEAVYWEIGHT_HANDLER 0x7200
	HEAVYWEIGHT_HANDLER 0x7300
	HEAVYWEIGHT_HANDLER 0x7400
	HEAVYWEIGHT_HANDLER 0x7500
	HEAVYWEIGHT_HANDLER 0x7600
	HEAVYWEIGHT_HANDLER 0x7700
	HEAVYWEIGHT_HANDLER 0x7800
	HEAVYWEIGHT_HANDLER 0x7900
	HEAVYWEIGHT_HANDLER 0x7a00
	HEAVYWEIGHT_HANDLER 0x7b00
	HEAVYWEIGHT_HANDLER 0x7c00
	HEAVYWEIGHT_HANDLER 0x7d00
	HEAVYWEIGHT_HANDLER 0x7e00
	HEAVYWEIGHT_HANDLER 0x7f00