source: mainline/kernel/arch/ia64/src/ivt.S@ 11b285d

Last change on this file since 11b285d was a35b458, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

style: Remove trailing whitespace on _all_ lines, including empty ones, for particular file types.

Command used: tools/srepl '\s\+$' '' -- *.c *.h *.py *.sh *.s *.S *.ag

Currently, whitespace on empty lines is very inconsistent.
There are two basic choices: Either remove the whitespace, or keep empty lines
indented to the level of surrounding code. The former is AFAICT more common,
and also much easier to do automatically.

Alternatively, we could write a script for automatic indentation and use that
instead. However, if such a script exists, it's possible to use the indented
style locally, by having the editor apply the relevant conversions on load/save,
without affecting the remote repository. IMO, it makes more sense to adopt
the simpler rule.

#
# Copyright (c) 2005 Jakub Vana
# Copyright (c) 2005 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
#   derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

#include <abi/asmtool.h>
#include <arch/stack.h>
#include <arch/register.h>
#include <arch/mm/page.h>
#include <arch/interrupt.h>
#include <arch/istate_struct.h>
#include <align.h>

#define STACK_FRAME_SIZE ALIGN_UP(ISTATE_SIZE + STACK_SCRATCH_AREA_SIZE, STACK_ALIGNMENT)

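/*
 * Floating-point registers are spilled with stf.spill and reloaded with
 * ldf.fill, which transfer the full 16-byte register image, i.e. two
 * stack items per register.
 */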
#define FLOAT_ITEM_SIZE (STACK_ITEM_SIZE * 2)

/** Partitioning of bank 0 registers. */
#define R_VECTOR r16
#define R_HANDLER r17
#define R_RET r18
#define R_KSTACK_BSP r22 /* keep in sync with before_thread_runs_arch() */
#define R_KSTACK r23 /* keep in sync with before_thread_runs_arch() */

/* Speculation vector handler */
.macro SPECULATION_VECTOR_HANDLER vector
        .org ivt + \vector * 0x100

        /* 1. Save predicates, IIM, IIP, IPSR and ISR CR's in bank 0 registers. */
        mov r16 = pr
        mov r17 = cr.iim
        mov r18 = cr.iip
        mov r19 = cr.ipsr
        mov r20 = cr.isr ;;

        /* 2. Move IIP to IIPA. */
        mov cr.iipa = r18

        /* 3. Sign extend IIM[20:0], shift left by 4 and add to IIP. */
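        /*
         * The pair of shifts below first places bit 20 of the immediate into
         * bit 63 and then shifts it arithmetically back down to bit 24,
         * leaving the sign-extended 21-bit immediate already multiplied by 16.
         */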
        shl r17 = r17, 43 ;; /* shift bit 20 to bit 63 */
        shr r17 = r17, 39 ;; /* signed shift right to bit 24 */
        add r18 = r18, r17 ;;
        mov cr.iip = r18

        /* 4. Set IPSR.ri to 0. */
        dep r19 = 0, r19, PSR_RI_SHIFT, PSR_RI_LEN ;;
        mov cr.ipsr = r19

        /* 5. Check whether IPSR.tb or IPSR.ss is set. */

        /* TODO:
         * Implement this when Taken Branch and Single Step traps can occur.
         */

        /* 6. Restore predicates and return from interruption. */
        mov pr = r16 ;;
        rfi
.endm

/** Heavyweight interrupt handler
 *
 * This macro roughly follows steps 1 to 19 described in the
 * Intel Itanium Architecture Software Developer's Manual, Chapter 3.4.2.
 *
 * The HEAVYWEIGHT_HANDLER macro must fit into 16 bundles (48 instructions).
 * This goal is achieved by using procedure calls after the RSE becomes
 * operational.
 *
 * Some steps are skipped (enabling and disabling interrupts).
 *
 * @param vector  Interruption vector number (the entry's offset from the
 *                beginning of the IVT divided by 0x100).
 * @param handler Interrupt handler address.
 */
.macro HEAVYWEIGHT_HANDLER vector, handler=exc_dispatch
        .org ivt + \vector * 0x100
        mov R_VECTOR = \vector
        movl R_HANDLER = \handler ;;
        br heavyweight_handler
.endm
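
/*
 * Every IVT entry generated by HEAVYWEIGHT_HANDLER only records the vector
 * number and the handler address in bank 0 registers and branches to the
 * common heavyweight_handler below, so the per-vector code easily fits into
 * its IVT slot.
 */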

SYMBOL(heavyweight_handler)
        /* 1. copy interrupt registers into bank 0 */

        /*
         * Note that r24-r31 from bank 0 can be used only as long as PSR.ic = 0.
         */

        /* Set up FPU as in interrupted context. */
        mov r24 = psr
        mov r25 = cr.ipsr
        mov r26 = PSR_DFH_MASK
        mov r27 = ~PSR_DFH_MASK ;;
        and r26 = r25, r26
        and r24 = r24, r27 ;;
        or r24 = r24, r26 ;;
        mov psr.l = r24 ;;
        srlz.i
        srlz.d ;;

        mov r24 = cr.iip
        mov r25 = cr.ipsr
        mov r26 = cr.iipa
        mov r27 = cr.isr
        mov r28 = cr.ifa

        /* 2. preserve predicate register into bank 0 */
        mov r29 = pr ;;

        /* 3. switch to kernel memory stack */
        mov r30 = cr.ipsr
        shr.u r31 = r12, VRN_SHIFT ;;

        shr.u r30 = r30, PSR_CPL_SHIFT ;;
        and r30 = PSR_CPL_MASK_SHIFTED, r30 ;;

        /*
         * Set p3 to true if the interrupted context executed in kernel mode.
         * Set p4 to true if the interrupted context didn't execute in kernel mode.
         */
        cmp.eq p3, p4 = r30, r0 ;;
        cmp.eq p1, p2 = r30, r0 ;; /* remember IPSR setting in p1 and p2 */

        /*
         * Set p3 to true if the stack register references kernel address space.
         * Set p4 to true if the stack register doesn't reference kernel address space.
         */
(p3)    cmp.eq p3, p4 = VRN_KERNEL, r31 ;;

        /*
         * Now, p4 is true iff the stack needs to be switched to the kernel stack.
         */
        mov r30 = r12
(p4)    mov r12 = R_KSTACK ;;

        add r12 = -STACK_FRAME_SIZE, r12 ;;
        add r31 = STACK_SCRATCH_AREA_SIZE + ISTATE_OFFSET_IN6, r12
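        /*
         * r31 now points at the in6 slot of the istate structure being built
         * on the stack; the stores below walk the structure downwards, one
         * STACK_ITEM_SIZE at a time.
         */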

        /* 4. save registers in bank 0 into memory stack */

        /*
         * If this is the break_instruction handler,
         * copy input parameters to the stack.
         */
        cmp.eq p6, p5 = EXC_BREAK_INSTRUCTION, R_VECTOR ;;

        /*
         * From now on, if this is the break_instruction handler, p6 is true
         * and p5 is false. Otherwise p6 is false and p5 is true.
         * Note that p5 is a preserved predicate register and we make use of it.
         */

(p6)    st8 [r31] = r38, -STACK_ITEM_SIZE ;; /* save in6 */
(p6)    st8 [r31] = r37, -STACK_ITEM_SIZE ;; /* save in5 */
(p6)    st8 [r31] = r36, -STACK_ITEM_SIZE ;; /* save in4 */
(p6)    st8 [r31] = r35, -STACK_ITEM_SIZE ;; /* save in3 */
(p6)    st8 [r31] = r34, -STACK_ITEM_SIZE ;; /* save in2 */
(p6)    st8 [r31] = r33, -STACK_ITEM_SIZE ;; /* save in1 */
(p6)    st8 [r31] = r32, -STACK_ITEM_SIZE ;; /* save in0 */
(p5)    add r31 = -(7 * STACK_ITEM_SIZE), r31 ;;

        st8 [r31] = r30, -STACK_ITEM_SIZE ;; /* save old stack pointer */

        st8 [r31] = r29, -STACK_ITEM_SIZE ;; /* save predicate registers */

        st8 [r31] = r24, -STACK_ITEM_SIZE ;; /* save cr.iip */
        st8 [r31] = r25, -STACK_ITEM_SIZE ;; /* save cr.ipsr */
        st8 [r31] = r26, -STACK_ITEM_SIZE ;; /* save cr.iipa */
        st8 [r31] = r27, -STACK_ITEM_SIZE ;; /* save cr.isr */
        st8 [r31] = r28, -STACK_ITEM_SIZE ;; /* save cr.ifa */

        /* 5. RSE switch from interrupted context */
        mov r24 = ar.rsc
        mov r25 = ar.pfs
        cover
        mov r26 = cr.ifs

        st8 [r31] = r24, -STACK_ITEM_SIZE ;; /* save ar.rsc */
        st8 [r31] = r25, -STACK_ITEM_SIZE ;; /* save ar.pfs */
        st8 [r31] = r26, -STACK_ITEM_SIZE /* save cr.ifs */

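        /*
         * Clearing the RSC mode bits below puts the RSE into enforced lazy
         * mode; clearing the privilege-level bits gives the register backing
         * store kernel privileges.
         */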
        and r24 = ~(RSC_PL_MASK), r24 ;;
        and r30 = ~(RSC_MODE_MASK), r24 ;;
        mov ar.rsc = r30 ;; /* update RSE state */

        mov r27 = ar.rnat
        mov r28 = ar.bspstore ;;

        /*
         * Inspect BSPSTORE to figure out whether it is necessary to switch to
         * kernel BSPSTORE.
         */
(p1)    shr.u r30 = r28, VRN_SHIFT ;;
(p1)    cmp.eq p1, p2 = VRN_KERNEL, r30 ;;

        /*
         * If BSPSTORE needs to be switched, p1 is false and p2 is true.
         */
(p1)    mov r30 = r28
(p2)    mov r30 = R_KSTACK_BSP ;;
(p2)    mov ar.bspstore = r30 ;;

        mov r29 = ar.bsp

        st8 [r31] = r27, -STACK_ITEM_SIZE ;; /* save ar.rnat */
        st8 [r31] = r30, -STACK_ITEM_SIZE ;; /* save new value written to ar.bspstore */
        st8 [r31] = r28, -STACK_ITEM_SIZE ;; /* save ar.bspstore */
        st8 [r31] = r29, -STACK_ITEM_SIZE /* save ar.bsp */

        mov ar.rsc = r24 /* restore RSE's setting + kernel privileges */

        /* steps 6 - 15 are done by heavyweight_handler_inner() */
        mov R_RET = b0 /* save b0 belonging to interrupted context */
        br.call.sptk.many b0 = heavyweight_handler_inner
0:      mov b0 = R_RET /* restore b0 belonging to the interrupted context */

        /* 16. RSE switch to interrupted context */
        cover /* allocate zero size frame (step 1 (from Intel Docs)) */

        add r31 = STACK_SCRATCH_AREA_SIZE + ISTATE_OFFSET_AR_BSP, r12 ;;

        ld8 r30 = [r31], +STACK_ITEM_SIZE ;; /* load ar.bsp */
        ld8 r29 = [r31], +STACK_ITEM_SIZE ;; /* load ar.bspstore */
        ld8 r28 = [r31], +STACK_ITEM_SIZE ;; /* load ar.bspstore_new */
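        /*
         * The distance between the saved ar.bsp and the value that was written
         * to ar.bspstore gives the loadrs value; it is shifted left by 16
         * below because the RSC.loadrs field starts at bit 16 of ar.rsc.
         */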
        sub r27 = r30, r28 ;; /* calculate loadrs (step 2) */
        shl r27 = r27, 16

        mov r24 = ar.rsc ;;
        and r30 = ~3, r24 ;;
        or r24 = r30, r27 ;;
        mov ar.rsc = r24 ;; /* place RSE in enforced lazy mode */

        loadrs /* (step 3) */

        ld8 r27 = [r31], +STACK_ITEM_SIZE ;; /* load ar.rnat */
        ld8 r26 = [r31], +STACK_ITEM_SIZE ;; /* load cr.ifs */
        ld8 r25 = [r31], +STACK_ITEM_SIZE ;; /* load ar.pfs */
        ld8 r24 = [r31], +STACK_ITEM_SIZE ;; /* load ar.rsc */

        mov ar.bspstore = r29 ;; /* (step 4) */
        mov ar.rnat = r27 /* (step 5) */

        mov ar.pfs = r25 /* (step 6) */
        mov cr.ifs = r26

        mov ar.rsc = r24 /* (step 7) */

        /* 17. restore interruption state from memory stack */
        ld8 r28 = [r31], +STACK_ITEM_SIZE ;; /* load cr.ifa */
        ld8 r27 = [r31], +STACK_ITEM_SIZE ;; /* load cr.isr */
        ld8 r26 = [r31], +STACK_ITEM_SIZE ;; /* load cr.iipa */
        ld8 r25 = [r31], +STACK_ITEM_SIZE ;; /* load cr.ipsr */
        ld8 r24 = [r31], +STACK_ITEM_SIZE ;; /* load cr.iip */

        mov cr.iip = r24 ;;
        mov cr.iipa = r26
        mov cr.isr = r27
        mov cr.ifa = r28

        /* Set up FPU as in exception. */
        mov r24 = psr
        mov r26 = PSR_DFH_MASK
        mov r27 = ~PSR_DFH_MASK ;;
        and r25 = r25, r27
        and r24 = r24, r26 ;;
        or r25 = r25, r24 ;;
        mov cr.ipsr = r25

        /* 18. restore predicate registers from memory stack */
        ld8 r29 = [r31], +STACK_ITEM_SIZE ;; /* load predicate registers */
        mov pr = r29

        /* 19. return from interruption */
        ld8 r12 = [r31] /* load stack pointer */
        rfi ;;

FUNCTION_BEGIN(heavyweight_handler_inner)
        /*
         * From this point, the rest of the interrupted context
         * will be preserved in stacked registers and backing store.
         */
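        /*
         * The register frame requested below has no inputs, 48 locals (loc0
         * through loc47), 2 outputs and no rotating registers.
         */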
        alloc loc0 = ar.pfs, 0, 48, 2, 0 ;;

        /* bank 0 is going to be shadowed, copy essential data from there */
        mov loc1 = R_RET /* b0 belonging to interrupted context */
        mov loc2 = R_HANDLER
        mov out0 = R_VECTOR

        add out1 = STACK_SCRATCH_AREA_SIZE, r12

        /* 6. switch to bank 1 and reenable PSR.ic */
        ssm PSR_IC_MASK
        bsw.1 ;;
        srlz.d

        /* 7. preserve branch and application registers */
        mov loc3 = ar.unat
        mov loc4 = ar.lc
        mov loc5 = ar.ec
        mov loc6 = ar.ccv
        mov loc7 = ar.csd
        mov loc8 = ar.ssd

        mov loc9 = b0
        mov loc10 = b1
        mov loc11 = b2
        mov loc12 = b3
        mov loc13 = b4
        mov loc14 = b5
        mov loc15 = b6
        mov loc16 = b7

        /* 8. preserve general and floating-point registers */
        mov loc17 = r1
        mov loc18 = r2
        mov loc19 = r3
        mov loc20 = r4
        mov loc21 = r5
        mov loc22 = r6
        mov loc23 = r7
(p5)    mov loc24 = r8 /* only if not in break_instruction handler */
        mov loc25 = r9
        mov loc26 = r10
        mov loc27 = r11
        /* skip r12 (stack pointer) */
        mov loc28 = r13
        mov loc29 = r14
        mov loc30 = r15
        mov loc31 = r16
        mov loc32 = r17
        mov loc33 = r18
        mov loc34 = r19
        mov loc35 = r20
        mov loc36 = r21
        mov loc37 = r22
        mov loc38 = r23
        mov loc39 = r24
        mov loc40 = r25
        mov loc41 = r26
        mov loc42 = r27
        mov loc43 = r28
        mov loc44 = r29
        mov loc45 = r30
        mov loc46 = r31

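        /*
         * Eight pointers are used for the floating-point spill area so that
         * the stf.spill instructions need not be serialized on a single
         * address register; each pointer advances by eight register slots
         * and therefore covers every eighth register of f2-f31.
         */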
        add r24 = ISTATE_OFFSET_F8 + STACK_SCRATCH_AREA_SIZE, r12
        add r25 = ISTATE_OFFSET_F9 + STACK_SCRATCH_AREA_SIZE, r12
        add r26 = ISTATE_OFFSET_F2 + STACK_SCRATCH_AREA_SIZE, r12
        add r27 = ISTATE_OFFSET_F3 + STACK_SCRATCH_AREA_SIZE, r12
        add r28 = ISTATE_OFFSET_F4 + STACK_SCRATCH_AREA_SIZE, r12
        add r29 = ISTATE_OFFSET_F5 + STACK_SCRATCH_AREA_SIZE, r12
        add r30 = ISTATE_OFFSET_F6 + STACK_SCRATCH_AREA_SIZE, r12
        add r31 = ISTATE_OFFSET_F7 + STACK_SCRATCH_AREA_SIZE, r12 ;;

        stf.spill [r26] = f2, 8 * FLOAT_ITEM_SIZE
        stf.spill [r27] = f3, 8 * FLOAT_ITEM_SIZE
        stf.spill [r28] = f4, 8 * FLOAT_ITEM_SIZE
        stf.spill [r29] = f5, 8 * FLOAT_ITEM_SIZE
        stf.spill [r30] = f6, 8 * FLOAT_ITEM_SIZE
        stf.spill [r31] = f7, 8 * FLOAT_ITEM_SIZE ;;

        stf.spill [r24] = f8, 8 * FLOAT_ITEM_SIZE
        stf.spill [r25] = f9, 8 * FLOAT_ITEM_SIZE
        stf.spill [r26] = f10, 8 * FLOAT_ITEM_SIZE
        stf.spill [r27] = f11, 8 * FLOAT_ITEM_SIZE
        stf.spill [r28] = f12, 8 * FLOAT_ITEM_SIZE
        stf.spill [r29] = f13, 8 * FLOAT_ITEM_SIZE
        stf.spill [r30] = f14, 8 * FLOAT_ITEM_SIZE
        stf.spill [r31] = f15, 8 * FLOAT_ITEM_SIZE ;;

        stf.spill [r24] = f16, 8 * FLOAT_ITEM_SIZE
        stf.spill [r25] = f17, 8 * FLOAT_ITEM_SIZE
        stf.spill [r26] = f18, 8 * FLOAT_ITEM_SIZE
        stf.spill [r27] = f19, 8 * FLOAT_ITEM_SIZE
        stf.spill [r28] = f20, 8 * FLOAT_ITEM_SIZE
        stf.spill [r29] = f21, 8 * FLOAT_ITEM_SIZE
        stf.spill [r30] = f22, 8 * FLOAT_ITEM_SIZE
        stf.spill [r31] = f23, 8 * FLOAT_ITEM_SIZE ;;

        stf.spill [r24] = f24
        stf.spill [r25] = f25
        stf.spill [r26] = f26
        stf.spill [r27] = f27
        stf.spill [r28] = f28
        stf.spill [r29] = f29
        stf.spill [r30] = f30
        stf.spill [r31] = f31 ;;

        mov loc47 = ar.fpsr /* preserve floating point status register */

        /* 9. skipped (will not enable interrupts) */
        /*
         * ssm PSR_I_MASK
         * ;;
         * srlz.d
         */

        /* 10. call handler */
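        /*
         * r1 serves as the global pointer in the ia64 ABI; it is reloaded
         * with the kernel's __gp before C code is entered through b1.
         */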
        movl r1 = __gp

        mov b1 = loc2
        br.call.sptk.many b0 = b1

        /* 11. return from handler */
0:

        /* 12. skipped (will not disable interrupts) */
        /*
         * rsm PSR_I_MASK
         * ;;
         * srlz.d
         */

        /* 13. restore general and floating-point registers */
        add r24 = ISTATE_OFFSET_F8 + STACK_SCRATCH_AREA_SIZE, r12
        add r25 = ISTATE_OFFSET_F9 + STACK_SCRATCH_AREA_SIZE, r12
        add r26 = ISTATE_OFFSET_F2 + STACK_SCRATCH_AREA_SIZE, r12
        add r27 = ISTATE_OFFSET_F3 + STACK_SCRATCH_AREA_SIZE, r12
        add r28 = ISTATE_OFFSET_F4 + STACK_SCRATCH_AREA_SIZE, r12
        add r29 = ISTATE_OFFSET_F5 + STACK_SCRATCH_AREA_SIZE, r12
        add r30 = ISTATE_OFFSET_F6 + STACK_SCRATCH_AREA_SIZE, r12
        add r31 = ISTATE_OFFSET_F7 + STACK_SCRATCH_AREA_SIZE, r12 ;;

        ldf.fill f2 = [r26], 8 * FLOAT_ITEM_SIZE
        ldf.fill f3 = [r27], 8 * FLOAT_ITEM_SIZE
        ldf.fill f4 = [r28], 8 * FLOAT_ITEM_SIZE
        ldf.fill f5 = [r29], 8 * FLOAT_ITEM_SIZE
        ldf.fill f6 = [r30], 8 * FLOAT_ITEM_SIZE
        ldf.fill f7 = [r31], 8 * FLOAT_ITEM_SIZE ;;

        ldf.fill f8 = [r24], 8 * FLOAT_ITEM_SIZE
        ldf.fill f9 = [r25], 8 * FLOAT_ITEM_SIZE
        ldf.fill f10 = [r26], 8 * FLOAT_ITEM_SIZE
        ldf.fill f11 = [r27], 8 * FLOAT_ITEM_SIZE
        ldf.fill f12 = [r28], 8 * FLOAT_ITEM_SIZE
        ldf.fill f13 = [r29], 8 * FLOAT_ITEM_SIZE
        ldf.fill f14 = [r30], 8 * FLOAT_ITEM_SIZE
        ldf.fill f15 = [r31], 8 * FLOAT_ITEM_SIZE ;;

        ldf.fill f16 = [r24], 8 * FLOAT_ITEM_SIZE
        ldf.fill f17 = [r25], 8 * FLOAT_ITEM_SIZE
        ldf.fill f18 = [r26], 8 * FLOAT_ITEM_SIZE
        ldf.fill f19 = [r27], 8 * FLOAT_ITEM_SIZE
        ldf.fill f20 = [r28], 8 * FLOAT_ITEM_SIZE
        ldf.fill f21 = [r29], 8 * FLOAT_ITEM_SIZE
        ldf.fill f22 = [r30], 8 * FLOAT_ITEM_SIZE
        ldf.fill f23 = [r31], 8 * FLOAT_ITEM_SIZE ;;

        ldf.fill f24 = [r24]
        ldf.fill f25 = [r25]
        ldf.fill f26 = [r26]
        ldf.fill f27 = [r27]
        ldf.fill f28 = [r28]
        ldf.fill f29 = [r29]
        ldf.fill f30 = [r30]
        ldf.fill f31 = [r31] ;;

        mov r1 = loc17
        mov r2 = loc18
        mov r3 = loc19
        mov r4 = loc20
        mov r5 = loc21
        mov r6 = loc22
        mov r7 = loc23
(p5)    mov r8 = loc24 /* only if not in break_instruction handler */
        mov r9 = loc25
        mov r10 = loc26
        mov r11 = loc27
        /* skip r12 (stack pointer) */
        mov r13 = loc28
        mov r14 = loc29
        mov r15 = loc30
        mov r16 = loc31
        mov r17 = loc32
        mov r18 = loc33
        mov r19 = loc34
        mov r20 = loc35
        mov r21 = loc36
        mov r22 = loc37
        mov r23 = loc38
        mov r24 = loc39
        mov r25 = loc40
        mov r26 = loc41
        mov r27 = loc42
        mov r28 = loc43
        mov r29 = loc44
        mov r30 = loc45
        mov r31 = loc46

        mov ar.fpsr = loc47 /* restore floating point status register */

        /* 14. restore branch and application registers */
        mov ar.unat = loc3
        mov ar.lc = loc4
        mov ar.ec = loc5
        mov ar.ccv = loc6
        mov ar.csd = loc7
        mov ar.ssd = loc8

        mov b0 = loc9
        mov b1 = loc10
        mov b2 = loc11
        mov b3 = loc12
        mov b4 = loc13
        mov b5 = loc14
        mov b6 = loc15
        mov b7 = loc16

        /* 15. disable PSR.ic and switch to bank 0 */
        rsm PSR_IC_MASK
        bsw.0 ;;
        srlz.d

        mov R_RET = loc1
        mov ar.pfs = loc0
        br.ret.sptk.many b0
FUNCTION_END(heavyweight_handler_inner)

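/*
 * The IVT itself must be aligned on a 32 KB boundary. The vector numbers
 * below encode the entry offsets in units of 0x100 bytes, so the first 20
 * vectors (numbered in steps of 4) occupy 1 KB slots and the remaining
 * vectors occupy 256-byte slots, matching the layout prescribed by the
 * architecture.
 */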
.align 32768
SYMBOL(ivt)
        HEAVYWEIGHT_HANDLER 0x00
        HEAVYWEIGHT_HANDLER 0x04
        HEAVYWEIGHT_HANDLER 0x08
        HEAVYWEIGHT_HANDLER 0x0c
        HEAVYWEIGHT_HANDLER 0x10
        HEAVYWEIGHT_HANDLER 0x14
        HEAVYWEIGHT_HANDLER 0x18
        HEAVYWEIGHT_HANDLER 0x1c
        HEAVYWEIGHT_HANDLER 0x20
        HEAVYWEIGHT_HANDLER 0x24
        HEAVYWEIGHT_HANDLER 0x28
        HEAVYWEIGHT_HANDLER 0x2c break_instruction
        HEAVYWEIGHT_HANDLER 0x30
        HEAVYWEIGHT_HANDLER 0x34
        HEAVYWEIGHT_HANDLER 0x38
        HEAVYWEIGHT_HANDLER 0x3c
        HEAVYWEIGHT_HANDLER 0x40
        HEAVYWEIGHT_HANDLER 0x44
        HEAVYWEIGHT_HANDLER 0x48
        HEAVYWEIGHT_HANDLER 0x4c

        HEAVYWEIGHT_HANDLER 0x50
        HEAVYWEIGHT_HANDLER 0x51
        HEAVYWEIGHT_HANDLER 0x52
        HEAVYWEIGHT_HANDLER 0x53
        HEAVYWEIGHT_HANDLER 0x54
        HEAVYWEIGHT_HANDLER 0x55
        HEAVYWEIGHT_HANDLER 0x56
        SPECULATION_VECTOR_HANDLER 0x57
        HEAVYWEIGHT_HANDLER 0x58
        HEAVYWEIGHT_HANDLER 0x59
        HEAVYWEIGHT_HANDLER 0x5a
        HEAVYWEIGHT_HANDLER 0x5b
        HEAVYWEIGHT_HANDLER 0x5c
        HEAVYWEIGHT_HANDLER 0x5d
        HEAVYWEIGHT_HANDLER 0x5e
        HEAVYWEIGHT_HANDLER 0x5f

        HEAVYWEIGHT_HANDLER 0x60
        HEAVYWEIGHT_HANDLER 0x61
        HEAVYWEIGHT_HANDLER 0x62
        HEAVYWEIGHT_HANDLER 0x63
        HEAVYWEIGHT_HANDLER 0x64
        HEAVYWEIGHT_HANDLER 0x65
        HEAVYWEIGHT_HANDLER 0x66
        HEAVYWEIGHT_HANDLER 0x67
        HEAVYWEIGHT_HANDLER 0x68
        HEAVYWEIGHT_HANDLER 0x69
        HEAVYWEIGHT_HANDLER 0x6a
        HEAVYWEIGHT_HANDLER 0x6b
        HEAVYWEIGHT_HANDLER 0x6c
        HEAVYWEIGHT_HANDLER 0x6d
        HEAVYWEIGHT_HANDLER 0x6e
        HEAVYWEIGHT_HANDLER 0x6f

        HEAVYWEIGHT_HANDLER 0x70
        HEAVYWEIGHT_HANDLER 0x71
        HEAVYWEIGHT_HANDLER 0x72
        HEAVYWEIGHT_HANDLER 0x73
        HEAVYWEIGHT_HANDLER 0x74
        HEAVYWEIGHT_HANDLER 0x75
        HEAVYWEIGHT_HANDLER 0x76
        HEAVYWEIGHT_HANDLER 0x77
        HEAVYWEIGHT_HANDLER 0x78
        HEAVYWEIGHT_HANDLER 0x79
        HEAVYWEIGHT_HANDLER 0x7a
        HEAVYWEIGHT_HANDLER 0x7b
        HEAVYWEIGHT_HANDLER 0x7c
        HEAVYWEIGHT_HANDLER 0x7d
        HEAVYWEIGHT_HANDLER 0x7e
        HEAVYWEIGHT_HANDLER 0x7f