Changeset 46c20c8 in mainline for kernel/arch/mips32/src
- Timestamp:
- 2010-11-26T20:08:10Z (15 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 45df59a
- Parents:
- fb150d78 (diff), ffdd2b9 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Location:
- kernel/arch/mips32/src
- Files:
-
- 1 deleted
- 15 edited
-
asm.S (modified) (8 diffs)
-
cache.c (modified) (1 diff)
-
context.S (modified) (2 diffs)
-
cpu/cpu.c (modified) (1 diff)
-
ddi/ddi.c (modified) (1 diff)
-
debug/stacktrace.c (modified) (1 diff)
-
debug/stacktrace_asm.S (modified) (1 diff)
-
debugger.c (modified) (9 diffs)
-
exception.c (modified) (9 diffs)
-
interrupt.c (modified) (6 diffs)
-
mips32.c (modified) (3 diffs)
-
mm/frame.c (modified) (1 diff)
-
mm/tlb.c (modified) (5 diffs)
-
panic.S (deleted)
-
smp/dorder.c (modified) (1 diff)
-
start.S (modified) (3 diffs)
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/mips32/src/asm.S
rfb150d78 r46c20c8 1 # 2 # Copyright (c) 2003-2004Jakub Jermar3 #All rights reserved.4 # 5 #Redistribution and use in source and binary forms, with or without6 #modification, are permitted provided that the following conditions7 #are met:8 # 9 #- Redistributions of source code must retain the above copyright10 #notice, this list of conditions and the following disclaimer.11 #- Redistributions in binary form must reproduce the above copyright12 #notice, this list of conditions and the following disclaimer in the13 #documentation and/or other materials provided with the distribution.14 #- The name of the author may not be used to endorse or promote products15 #derived from this software without specific prior written permission.16 # 17 #THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR18 #IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES19 #OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.20 #IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,21 #INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT22 #NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,23 #DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY24 #THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT25 #(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF26 #THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.27 # 1 /* 2 * Copyright (c) 2003 Jakub Jermar 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * - Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 
11 * - Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * - The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 */ 28 28 29 29 #include <arch/asm/regname.h> … … 57 57 nop 58 58 59 60 59 .global memsetb 61 60 memsetb: … … 63 62 nop 64 63 65 66 64 .global memsetw 67 65 memsetw: 68 66 j _memsetw 69 67 nop 70 71 68 72 69 .global memcpy … … 78 75 memcpy_from_uspace: 79 76 memcpy_to_uspace: 80 move $t2, $a0 # save dst77 move $t2, $a0 /* save dst */ 81 78 82 79 addiu $v0, $a1, 3 83 li $v1, -4 # 0xfffffffffffffffc80 li $v1, -4 /* 0xfffffffffffffffc */ 84 81 and $v0, $v0, $v1 85 82 beq $a1, $v0, 3f … … 149 146 move $v0, $zero 150 147 151 152 153 148 .macro fpu_gp_save reg ctx 154 149 mfc1 $t0, $\reg … … 164 159 cfc1 $t0, $1 165 160 sw $t0, (\reg + 32) * 4(\ctx) 166 .endm 161 .endm 167 162 168 163 .macro fpu_ct_restore reg ctx … … 170 165 ctc1 $t0, $\reg 171 166 .endm 172 173 167 174 168 .global fpu_context_save … … 313 307 j $ra 314 308 nop 309 310 .global early_putchar 311 early_putchar: 312 j $ra 313 nop -
kernel/arch/mips32/src/cache.c
rfb150d78 r46c20c8 39 39 void cache_error(istate_t *istate) 40 40 { 41 panic("cache_error exception (epc=%p).", istate->epc);41 panic("cache_error exception (epc=%p).", (void *) istate->epc); 42 42 } 43 43 -
kernel/arch/mips32/src/context.S
rfb150d78 r46c20c8 28 28 29 29 #include <arch/context_offset.h> 30 31 .text 30 31 .text 32 32 33 33 .set noat … … 38 38 .global context_restore_arch 39 39 40 41 40 context_save_arch: 42 41 CONTEXT_SAVE_ARCH_CORE $a0 43 42 44 43 # context_save returns 1 45 44 j $31 46 li $2, 1 47 45 li $2, 1 46 48 47 context_restore_arch: 49 48 CONTEXT_RESTORE_ARCH_CORE $a0 50 49 51 50 # context_restore returns 0 52 51 j $31 53 xor $2, $2 52 xor $2, $2 -
kernel/arch/mips32/src/cpu/cpu.c
rfb150d78 r46c20c8 40 40 41 41 struct data_t { 42 c har *vendor;43 c har *model;42 const char *vendor; 43 const char *model; 44 44 }; 45 45 -
kernel/arch/mips32/src/ddi/ddi.c
rfb150d78 r46c20c8 35 35 #include <ddi/ddi.h> 36 36 #include <proc/task.h> 37 #include < arch/types.h>37 #include <typedefs.h> 38 38 #include <security/cap.h> 39 39 #include <arch.h> -
kernel/arch/mips32/src/debug/stacktrace.c
rfb150d78 r46c20c8 33 33 */ 34 34 35 /* 36 * This stack tracing code is based on the suggested algorithm described on page 37 * 3-27 and 3-28 of: 38 * 39 * SYSTEM V 40 * APPLICATION BINARY INTERFACE 41 * 42 * MIPS RISC Processor 43 * Supplement 44 * 3rd Edition 45 * 46 * Unfortunately, GCC generates code which is not entirely compliant with this 47 * method. For example, it places the "jr ra" instruction quite arbitrarily in 48 * the middle of the function which makes the original algorithm unapplicable. 49 * 50 * We deal with this problem by simply not using those parts of the algorithm 51 * that rely on the "jr ra" instruction occurring in the last basic block of a 52 * function, which gives us still usable, but less reliable stack tracer. The 53 * unreliability stems from the fact that under some circumstances it can become 54 * confused and produce incorrect or incomplete stack trace. We apply extra 55 * sanity checks so that the algorithm is still safe and should not crash the 56 * system. 57 * 58 * Even though not perfect, our solution is pretty lightweight, especially when 59 * compared with a prospective alternative solution based on additional 60 * debugging information stored directly in the kernel image. 
61 */ 62 35 63 #include <stacktrace.h> 36 64 #include <syscall/copy.h> 37 #include <arch/types.h>38 65 #include <typedefs.h> 39 40 bool kernel_frame_pointer_validate(uintptr_t fp) 66 #include <arch/debugger.h> 67 #include <print.h> 68 69 #define R0 0U 70 #define SP 29U 71 #define RA 31U 72 73 #define OP_SHIFT 26 74 #define RS_SHIFT 21 75 #define RT_SHIFT 16 76 #define RD_SHIFT 11 77 78 #define HINT_SHIFT 6 79 #define BASE_SHIFT RS_SHIFT 80 #define IMM_SHIFT 0 81 #define OFFSET_SHIFT IMM_SHIFT 82 83 #define RS_MASK (0x1f << RS_SHIFT) 84 #define RT_MASK (0x1f << RT_SHIFT) 85 #define RD_MASK (0x1f << RD_SHIFT) 86 #define HINT_MASK (0x1f << HINT_SHIFT) 87 #define BASE_MASK RS_MASK 88 #define IMM_MASK (0xffff << IMM_SHIFT) 89 #define OFFSET_MASK IMM_MASK 90 91 #define RS_GET(inst) (((inst) & RS_MASK) >> RS_SHIFT) 92 #define RD_GET(inst) (((inst) & RD_MASK) >> RD_SHIFT) 93 #define IMM_GET(inst) (int16_t)(((inst) & IMM_MASK) >> IMM_SHIFT) 94 #define BASE_GET(inst) RS_GET(inst) 95 #define OFFSET_GET(inst) IMM_GET(inst) 96 97 #define ADDU_R_SP_R0_TEMPL \ 98 ((0x0 << OP_SHIFT) | (SP << RS_SHIFT) | (R0 << RT_SHIFT) | 0x21) 99 #define ADDU_SP_R_R0_TEMPL \ 100 ((0x0 << OP_SHIFT) | (SP << RD_SHIFT) | (R0 << RT_SHIFT) | 0x21) 101 #define ADDI_SP_SP_IMM_TEMPL \ 102 ((0x8 << OP_SHIFT) | (SP << RS_SHIFT) | (SP << RT_SHIFT)) 103 #define ADDIU_SP_SP_IMM_TEMPL \ 104 ((0x9 << OP_SHIFT) | (SP << RS_SHIFT) | (SP << RT_SHIFT)) 105 #define JR_RA_TEMPL \ 106 ((0x0 << OP_SHIFT) | (RA << RS_SHIFT) | (0x0 << HINT_SHIFT) | 0x8) 107 #define SW_RA_TEMPL \ 108 ((0x2b << OP_SHIFT) | (RA << RT_SHIFT)) 109 110 #define IS_ADDU_R_SP_R0(inst) \ 111 (((inst) & ~RD_MASK) == ADDU_R_SP_R0_TEMPL) 112 #define IS_ADDU_SP_R_R0(inst) \ 113 (((inst) & ~RS_MASK) == ADDU_SP_R_R0_TEMPL) 114 #define IS_ADDI_SP_SP_IMM(inst) \ 115 (((inst) & ~IMM_MASK) == ADDI_SP_SP_IMM_TEMPL) 116 #define IS_ADDIU_SP_SP_IMM(inst) \ 117 (((inst) & ~IMM_MASK) == ADDIU_SP_SP_IMM_TEMPL) 118 #define IS_JR_RA(inst) \ 119 (((inst) & 
~HINT_MASK) == JR_RA_TEMPL) 120 #define IS_SW_RA(inst) \ 121 (((inst) & ~(BASE_MASK | OFFSET_MASK)) == SW_RA_TEMPL) 122 123 extern char ktext_start; 124 extern char ktext_end; 125 126 static bool bounds_check(uintptr_t pc) 127 { 128 return (pc >= (uintptr_t) &ktext_start) && 129 (pc < (uintptr_t) &ktext_end); 130 } 131 132 static bool 133 scan(stack_trace_context_t *ctx, uintptr_t *prev_fp, uintptr_t *prev_ra) 134 { 135 uint32_t *inst = (void *) ctx->pc; 136 bool has_fp = false; 137 size_t frame_size; 138 unsigned int fp = SP; 139 140 do { 141 inst--; 142 if (!bounds_check((uintptr_t) inst)) 143 return false; 144 #if 0 145 /* 146 * This is one of the situations in which the theory (ABI) does 147 * not meet the practice (GCC). GCC simply does not place the 148 * JR $ra instruction as dictated by the ABI, rendering the 149 * official stack tracing algorithm somewhat unapplicable. 150 */ 151 152 if (IS_ADDU_R_SP_R0(*inst)) { 153 uint32_t *cur; 154 fp = RD_GET(*inst); 155 /* 156 * We have a candidate for frame pointer. 157 */ 158 159 /* Seek to the end of this function. */ 160 for (cur = inst + 1; !IS_JR_RA(*cur); cur++) 161 ; 162 /* Scan the last basic block */ 163 for (cur--; !is_jump(*(cur - 1)); cur--) { 164 if (IS_ADDU_SP_R_R0(*cur) && 165 (fp == RS_GET(*cur))) { 166 has_fp = true; 167 } 168 } 169 continue; 170 } 171 172 if (IS_JR_RA(*inst)) { 173 if (!ctx->istate) 174 return false; 175 /* 176 * No stack frame has been allocated yet. 177 * Use the values stored in istate. 178 */ 179 if (prev_fp) 180 *prev_fp = ctx->istate->sp; 181 if (prev_ra) 182 *prev_ra = ctx->istate->ra - 8; 183 ctx->istate = NULL; 184 return true; 185 } 186 #endif 187 188 } while ((!IS_ADDIU_SP_SP_IMM(*inst) && !IS_ADDI_SP_SP_IMM(*inst)) || 189 (IMM_GET(*inst) >= 0)); 190 191 /* 192 * We are at the instruction which allocates the space for the current 193 * stack frame. 
194 */ 195 frame_size = -IMM_GET(*inst); 196 if (prev_fp) 197 *prev_fp = ctx->fp + frame_size; 198 199 /* 200 * Scan the first basic block for the occurrence of 201 * SW $ra, OFFSET($base). 202 */ 203 for (inst++; !is_jump(*(inst - 1)) && (uintptr_t) inst < ctx->pc; 204 inst++) { 205 if (IS_SW_RA(*inst)) { 206 unsigned int base = BASE_GET(*inst); 207 int16_t offset = OFFSET_GET(*inst); 208 209 if (base == SP || (has_fp && base == fp)) { 210 uint32_t *addr = (void *) (ctx->fp + offset); 211 212 if (offset % 4 != 0) 213 return false; 214 /* cannot store below current stack pointer */ 215 if (offset < 0) 216 return false; 217 /* too big offsets are suspicious */ 218 if ((size_t) offset > sizeof(istate_t)) 219 return false; 220 221 if (prev_ra) 222 *prev_ra = *addr; 223 return true; 224 } 225 } 226 } 227 228 /* 229 * The first basic block does not save the return address or saves it 230 * after ctx->pc, which means that the correct value is in istate. 231 */ 232 if (prev_ra) { 233 if (!ctx->istate) 234 return false; 235 *prev_ra = ctx->istate->ra - 8; 236 ctx->istate = NULL; 237 } 238 return true; 239 } 240 241 242 bool kernel_stack_trace_context_validate(stack_trace_context_t *ctx) 243 { 244 return !((ctx->fp == 0) || ((ctx->fp % 8) != 0) || 245 (ctx->pc % 4 != 0) || !bounds_check(ctx->pc)); 246 } 247 248 bool kernel_frame_pointer_prev(stack_trace_context_t *ctx, uintptr_t *prev) 249 { 250 return scan(ctx, prev, NULL); 251 } 252 253 bool kernel_return_address_get(stack_trace_context_t *ctx, uintptr_t *ra) 254 { 255 return scan(ctx, NULL, ra); 256 } 257 258 bool uspace_stack_trace_context_validate(stack_trace_context_t *ctx) 41 259 { 42 260 return false; 43 261 } 44 262 45 bool kernel_frame_pointer_prev(uintptr_t fp, uintptr_t *prev)263 bool uspace_frame_pointer_prev(stack_trace_context_t *ctx, uintptr_t *prev) 46 264 { 47 265 return false; 48 266 } 49 267 50 bool kernel_return_address_get(uintptr_t fp, uintptr_t *ra)268 bool 
uspace_return_address_get(stack_trace_context_t *ctx, uintptr_t *ra) 51 269 { 52 270 return false; 53 271 } 54 272 55 bool uspace_frame_pointer_validate(uintptr_t fp)56 {57 return false;58 }59 60 bool uspace_frame_pointer_prev(uintptr_t fp, uintptr_t *prev)61 {62 return false;63 }64 65 bool uspace_return_address_get(uintptr_t fp, uintptr_t *ra)66 {67 return false;68 }69 70 273 /** @} 71 274 */ -
kernel/arch/mips32/src/debug/stacktrace_asm.S
rfb150d78 r46c20c8 37 37 frame_pointer_get: 38 38 j $ra 39 xor $v0, $v039 move $v0, $sp 40 40 41 41 program_counter_get: 42 42 j $ra 43 xor $v0, $v043 move $v0, $ra -
kernel/arch/mips32/src/debugger.c
rfb150d78 r46c20c8 46 46 47 47 bpinfo_t breakpoints[BKPOINTS_MAX]; 48 SPINLOCK_INITIALIZE(bkpoint_lock);48 IRQ_SPINLOCK_STATIC_INITIALIZE(bkpoint_lock); 49 49 50 50 #ifdef CONFIG_KCONSOLE 51 51 52 static int cmd_print_breakpoints(cmd_arg_t *argv); 52 static int cmd_print_breakpoints(cmd_arg_t *); 53 static int cmd_del_breakpoint(cmd_arg_t *); 54 static int cmd_add_breakpoint(cmd_arg_t *); 55 53 56 static cmd_info_t bkpts_info = { 54 57 .name = "bkpts", … … 58 61 }; 59 62 60 static int cmd_del_breakpoint(cmd_arg_t *argv);61 63 static cmd_arg_t del_argv = { 62 64 .type = ARG_TYPE_INT 63 65 }; 66 64 67 static cmd_info_t delbkpt_info = { 65 68 .name = "delbkpt", 66 .description = " delbkpt <number> -Delete breakpoint.",69 .description = "Delete breakpoint.", 67 70 .func = cmd_del_breakpoint, 68 71 .argc = 1, … … 70 73 }; 71 74 72 static int cmd_add_breakpoint(cmd_arg_t *argv);73 75 static cmd_arg_t add_argv = { 74 76 .type = ARG_TYPE_INT 75 77 }; 78 76 79 static cmd_info_t addbkpt_info = { 77 80 .name = "addbkpt", 78 .description = "addbkpt <&symbol> - new bkpoint. Break on J/Branch " 79 "insts unsupported.", 81 .description = "Add bkpoint (break on J/Branch insts unsupported).", 80 82 .func = cmd_add_breakpoint, 81 83 .argc = 1, … … 89 91 static cmd_info_t addbkpte_info = { 90 92 .name = "addbkpte", 91 .description = "addebkpte <&symbol> <&func> - new bkpoint. 
Call " 92 "func(or Nothing if 0).", 93 .description = "Add bkpoint with a trigger function.", 93 94 .func = cmd_add_breakpoint, 94 95 .argc = 2, … … 100 101 uint32_t value; 101 102 } jmpinstr[] = { 102 {0xf3ff0000, 0x41000000}, /* BCzF */ 103 {0xf3ff0000, 0x41020000}, /* BCzFL */ 104 {0xf3ff0000, 0x41010000}, /* BCzT */ 105 {0xf3ff0000, 0x41030000}, /* BCzTL */ 106 {0xfc000000, 0x10000000}, /* BEQ */ 107 {0xfc000000, 0x50000000}, /* BEQL */ 108 {0xfc1f0000, 0x04010000}, /* BEQL */ 109 {0xfc1f0000, 0x04110000}, /* BGEZAL */ 110 {0xfc1f0000, 0x04130000}, /* BGEZALL */ 111 {0xfc1f0000, 0x04030000}, /* BGEZL */ 112 {0xfc1f0000, 0x1c000000}, /* BGTZ */ 113 {0xfc1f0000, 0x5c000000}, /* BGTZL */ 114 {0xfc1f0000, 0x18000000}, /* BLEZ */ 115 {0xfc1f0000, 0x58000000}, /* BLEZL */ 116 {0xfc1f0000, 0x04000000}, /* BLTZ */ 117 {0xfc1f0000, 0x04100000}, /* BLTZAL */ 118 {0xfc1f0000, 0x04120000}, /* BLTZALL */ 119 {0xfc1f0000, 0x04020000}, /* BLTZL */ 120 {0xfc000000, 0x14000000}, /* BNE */ 121 {0xfc000000, 0x54000000}, /* BNEL */ 122 {0xfc000000, 0x08000000}, /* J */ 123 {0xfc000000, 0x0c000000}, /* JAL */ 124 {0xfc1f07ff, 0x00000009}, /* JALR */ 125 {0, 0} /* EndOfTable */ 126 }; 127 103 {0xf3ff0000, 0x41000000}, /* BCzF */ 104 {0xf3ff0000, 0x41020000}, /* BCzFL */ 105 {0xf3ff0000, 0x41010000}, /* BCzT */ 106 {0xf3ff0000, 0x41030000}, /* BCzTL */ 107 {0xfc000000, 0x10000000}, /* BEQ */ 108 {0xfc000000, 0x50000000}, /* BEQL */ 109 {0xfc1f0000, 0x04010000}, /* BEQL */ 110 {0xfc1f0000, 0x04110000}, /* BGEZAL */ 111 {0xfc1f0000, 0x04130000}, /* BGEZALL */ 112 {0xfc1f0000, 0x04030000}, /* BGEZL */ 113 {0xfc1f0000, 0x1c000000}, /* BGTZ */ 114 {0xfc1f0000, 0x5c000000}, /* BGTZL */ 115 {0xfc1f0000, 0x18000000}, /* BLEZ */ 116 {0xfc1f0000, 0x58000000}, /* BLEZL */ 117 {0xfc1f0000, 0x04000000}, /* BLTZ */ 118 {0xfc1f0000, 0x04100000}, /* BLTZAL */ 119 {0xfc1f0000, 0x04120000}, /* BLTZALL */ 120 {0xfc1f0000, 0x04020000}, /* BLTZL */ 121 {0xfc000000, 0x14000000}, /* BNE */ 122 {0xfc000000, 
0x54000000}, /* BNEL */ 123 {0xfc000000, 0x08000000}, /* J */ 124 {0xfc000000, 0x0c000000}, /* JAL */ 125 {0xfc1f07ff, 0x00000009}, /* JALR */ 126 {0, 0} /* end of table */ 127 }; 128 128 129 129 /** Test, if the given instruction is a jump or branch instruction 130 130 * 131 131 * @param instr Instruction code 132 * @return true - it is jump instruction, false otherwise 133 * 134 */ 135 static bool is_jump(unative_t instr) 136 { 137 int i; 138 132 * 133 * @return true if it is jump instruction, false otherwise 134 * 135 */ 136 bool is_jump(unative_t instr) 137 { 138 unsigned int i; 139 139 140 for (i = 0; jmpinstr[i].andmask; i++) { 140 141 if ((instr & jmpinstr[i].andmask) == jmpinstr[i].value) 141 142 return true; 142 143 } 143 144 144 145 return false; 145 146 } 146 147 147 /** Add new breakpoint to table */ 148 /** Add new breakpoint to table 149 * 150 */ 148 151 int cmd_add_breakpoint(cmd_arg_t *argv) 149 152 { 150 bpinfo_t *cur = NULL;151 ipl_t ipl;152 int i;153 154 153 if (argv->intval & 0x3) { 155 154 printf("Not aligned instruction, forgot to use &symbol?\n"); 156 155 return 1; 157 156 } 158 ipl = interrupts_disable();159 spinlock_lock(&bkpoint_lock);160 157 158 irq_spinlock_lock(&bkpoint_lock, true); 159 161 160 /* Check, that the breakpoints do not conflict */ 161 unsigned int i; 162 162 for (i = 0; i < BKPOINTS_MAX; i++) { 163 if (breakpoints[i].address == (uintptr_t) argv->intval) {163 if (breakpoints[i].address == (uintptr_t) argv->intval) { 164 164 printf("Duplicate breakpoint %d.\n", i); 165 spinlock_unlock(&bkpoint_lock);165 irq_spinlock_unlock(&bkpoint_lock, true); 166 166 return 0; 167 } else if ( breakpoints[i].address == (uintptr_t)argv->intval +168 sizeof(unative_t) ||breakpoints[i].address ==169 (uintptr_t) argv->intval - sizeof(unative_t)) {167 } else if ((breakpoints[i].address == (uintptr_t) argv->intval + 168 sizeof(unative_t)) || (breakpoints[i].address == 169 (uintptr_t) argv->intval - sizeof(unative_t))) { 170 170 printf("Adjacent 
breakpoints not supported, conflict " 171 171 "with %d.\n", i); 172 spinlock_unlock(&bkpoint_lock);172 irq_spinlock_unlock(&bkpoint_lock, true); 173 173 return 0; 174 174 } 175 175 176 176 } 177 178 for (i = 0; i < BKPOINTS_MAX; i++) 177 178 bpinfo_t *cur = NULL; 179 180 for (i = 0; i < BKPOINTS_MAX; i++) { 179 181 if (!breakpoints[i].address) { 180 182 cur = &breakpoints[i]; 181 183 break; 182 184 } 185 } 186 183 187 if (!cur) { 184 188 printf("Too many breakpoints.\n"); 185 spinlock_unlock(&bkpoint_lock); 186 interrupts_restore(ipl); 189 irq_spinlock_unlock(&bkpoint_lock, true); 187 190 return 0; 188 191 } 192 193 printf("Adding breakpoint on address %p\n", (void *) argv->intval); 194 189 195 cur->address = (uintptr_t) argv->intval; 190 printf("Adding breakpoint on address: %p\n", argv->intval); 191 cur->instruction = ((unative_t *)cur->address)[0]; 192 cur->nextinstruction = ((unative_t *)cur->address)[1]; 196 cur->instruction = ((unative_t *) cur->address)[0]; 197 cur->nextinstruction = ((unative_t *) cur->address)[1]; 193 198 if (argv == &add_argv) { 194 199 cur->flags = 0; 195 } else { /* We are add extended */200 } else { /* We are add extended */ 196 201 cur->flags = BKPOINT_FUNCCALL; 197 202 cur->bkfunc = (void (*)(void *, istate_t *)) argv[1].intval; 198 203 } 204 199 205 if (is_jump(cur->instruction)) 200 206 cur->flags |= BKPOINT_ONESHOT; 207 201 208 cur->counter = 0; 202 209 203 210 /* Set breakpoint */ 204 *((unative_t *) cur->address) = 0x0d;211 *((unative_t *) cur->address) = 0x0d; 205 212 smc_coherence(cur->address); 206 207 spinlock_unlock(&bkpoint_lock); 208 interrupts_restore(ipl); 209 213 214 irq_spinlock_unlock(&bkpoint_lock, true); 215 210 216 return 1; 211 217 } 212 218 213 /** Remove breakpoint from table */ 219 /** Remove breakpoint from table 220 * 221 */ 214 222 int cmd_del_breakpoint(cmd_arg_t *argv) 215 223 { 216 bpinfo_t *cur;217 ipl_t ipl;218 219 224 if (argv->intval > BKPOINTS_MAX) { 220 225 printf("Invalid breakpoint number.\n"); 
221 226 return 0; 222 227 } 223 ipl = interrupts_disable();224 spinlock_lock(&bkpoint_lock);225 226 cur = &breakpoints[argv->intval];228 229 irq_spinlock_lock(&bkpoint_lock, true); 230 231 bpinfo_t *cur = &breakpoints[argv->intval]; 227 232 if (!cur->address) { 228 233 printf("Breakpoint does not exist.\n"); 229 spinlock_unlock(&bkpoint_lock); 230 interrupts_restore(ipl); 234 irq_spinlock_unlock(&bkpoint_lock, true); 231 235 return 0; 232 236 } 237 233 238 if ((cur->flags & BKPOINT_INPROG) && (cur->flags & BKPOINT_ONESHOT)) { 234 239 printf("Cannot remove one-shot breakpoint in-progress\n"); 235 spinlock_unlock(&bkpoint_lock); 236 interrupts_restore(ipl); 240 irq_spinlock_unlock(&bkpoint_lock, true); 237 241 return 0; 238 242 } 239 ((uint32_t *)cur->address)[0] = cur->instruction;240 smc_coherence(((uint32_t *)cur->address)[0]);241 ((uint32_t *)cur->address)[1] = cur->nextinstruction;242 smc_coherence(((uint32_t *)cur->address)[1]);243 244 cur->address = NULL;245 246 spinlock_unlock(&bkpoint_lock);247 i nterrupts_restore(ipl);243 244 ((uint32_t *) cur->address)[0] = cur->instruction; 245 smc_coherence(((uint32_t *) cur->address)[0]); 246 ((uint32_t *) cur->address)[1] = cur->nextinstruction; 247 smc_coherence(((uint32_t *) cur->address)[1]); 248 249 cur->address = (uintptr_t) NULL; 250 251 irq_spinlock_unlock(&bkpoint_lock, true); 248 252 return 1; 249 253 } 250 254 251 /** Print table of active breakpoints */ 255 /** Print table of active breakpoints 256 * 257 */ 252 258 int cmd_print_breakpoints(cmd_arg_t *argv) 253 259 { 254 260 unsigned int i; 255 char *symbol; 256 257 printf("# Count Address INPROG ONESHOT FUNCCALL In symbol\n"); 258 printf("-- ----- ---------- ------ ------- -------- ---------\n"); 259 260 for (i = 0; i < BKPOINTS_MAX; i++) 261 262 printf("[nr] [count] [address ] [inprog] [oneshot] [funccall] [in symbol\n"); 263 264 for (i = 0; i < BKPOINTS_MAX; i++) { 261 265 if (breakpoints[i].address) { 262 symbol = symtab_fmt_name_lookup(266 const char 
*symbol = symtab_fmt_name_lookup( 263 267 breakpoints[i].address); 264 265 printf("%- 2u %-5d %#10zx %-6s %-7s %-8s %s\n", i,266 breakpoints[i].counter, breakpoints[i].address,268 269 printf("%-4u %7zu %p %-8s %-9s %-10s %s\n", i, 270 breakpoints[i].counter, (void *) breakpoints[i].address, 267 271 ((breakpoints[i].flags & BKPOINT_INPROG) ? "true" : 268 272 "false"), ((breakpoints[i].flags & BKPOINT_ONESHOT) … … 270 274 BKPOINT_FUNCCALL) ? "true" : "false"), symbol); 271 275 } 276 } 277 272 278 return 1; 273 279 } 274 280 275 #endif 276 277 /** Initialize debugger */ 281 #endif /* CONFIG_KCONSOLE */ 282 283 /** Initialize debugger 284 * 285 */ 278 286 void debugger_init() 279 287 { 280 int i;281 288 unsigned int i; 289 282 290 for (i = 0; i < BKPOINTS_MAX; i++) 283 breakpoints[i].address = NULL;284 291 breakpoints[i].address = (uintptr_t) NULL; 292 285 293 #ifdef CONFIG_KCONSOLE 286 294 cmd_initialize(&bkpts_info); 287 295 if (!cmd_register(&bkpts_info)) 288 296 printf("Cannot register command %s\n", bkpts_info.name); 289 297 290 298 cmd_initialize(&delbkpt_info); 291 299 if (!cmd_register(&delbkpt_info)) 292 300 printf("Cannot register command %s\n", delbkpt_info.name); 293 301 294 302 cmd_initialize(&addbkpt_info); 295 303 if (!cmd_register(&addbkpt_info)) 296 304 printf("Cannot register command %s\n", addbkpt_info.name); 297 305 298 306 cmd_initialize(&addbkpte_info); 299 307 if (!cmd_register(&addbkpte_info)) 300 308 printf("Cannot register command %s\n", addbkpte_info.name); 301 #endif 309 #endif /* CONFIG_KCONSOLE */ 302 310 } 303 311 304 312 /** Handle breakpoint 305 313 * 306 * Find breakpoint in breakpoint table. 314 * Find breakpoint in breakpoint table. 307 315 * If found, call kconsole, set break on next instruction and reexecute. 308 316 * If we are on "next instruction", set it back on the first and reexecute. 309 317 * If breakpoint not found in breakpoint table, call kconsole and start 310 318 * next instruction. 
319 * 311 320 */ 312 321 void debugger_bpoint(istate_t *istate) 313 322 { 314 bpinfo_t *cur = NULL;315 uintptr_t fireaddr = istate->epc;316 int i;317 318 323 /* test branch delay slot */ 319 324 if (cp0_cause_read() & 0x80000000) 320 325 panic("Breakpoint in branch delay slot not supported."); 321 322 spinlock_lock(&bkpoint_lock); 326 327 irq_spinlock_lock(&bkpoint_lock, false); 328 329 bpinfo_t *cur = NULL; 330 uintptr_t fireaddr = istate->epc; 331 unsigned int i; 332 323 333 for (i = 0; i < BKPOINTS_MAX; i++) { 324 334 /* Normal breakpoint */ 325 if ( fireaddr == breakpoints[i].address&&326 !(breakpoints[i].flags & BKPOINT_REINST)) {335 if ((fireaddr == breakpoints[i].address) && 336 (!(breakpoints[i].flags & BKPOINT_REINST))) { 327 337 cur = &breakpoints[i]; 328 338 break; 329 339 } 340 330 341 /* Reinst only breakpoint */ 331 342 if ((breakpoints[i].flags & BKPOINT_REINST) && … … 335 346 } 336 347 } 348 337 349 if (cur) { 338 350 if (cur->flags & BKPOINT_REINST) { 339 351 /* Set breakpoint on first instruction */ 340 ((uint32_t *) cur->address)[0] = 0x0d;352 ((uint32_t *) cur->address)[0] = 0x0d; 341 353 smc_coherence(((uint32_t *)cur->address)[0]); 354 342 355 /* Return back the second */ 343 ((uint32_t *)cur->address)[1] = cur->nextinstruction; 344 smc_coherence(((uint32_t *)cur->address)[1]); 356 ((uint32_t *) cur->address)[1] = cur->nextinstruction; 357 smc_coherence(((uint32_t *) cur->address)[1]); 358 345 359 cur->flags &= ~BKPOINT_REINST; 346 spinlock_unlock(&bkpoint_lock);360 irq_spinlock_unlock(&bkpoint_lock, false); 347 361 return; 348 } 362 } 363 349 364 if (cur->flags & BKPOINT_INPROG) 350 365 printf("Warning: breakpoint recursion\n"); 351 366 352 367 if (!(cur->flags & BKPOINT_FUNCCALL)) { 353 printf("***Breakpoint %d: %p in %s.\n", i, fireaddr, 354 symtab_fmt_name_lookup(istate->epc)); 355 } 356 368 printf("***Breakpoint %u: %p in %s.\n", i, 369 (void *) fireaddr, 370 symtab_fmt_name_lookup(fireaddr)); 371 } 372 357 373 /* Return first instruction 
back */ 358 374 ((uint32_t *)cur->address)[0] = cur->instruction; … … 366 382 cur->flags |= BKPOINT_INPROG; 367 383 } else { 368 printf("***Breakpoint %d: %p in %s.\n", i, fireaddr, 384 printf("***Breakpoint %d: %p in %s.\n", i, 385 (void *) fireaddr, 369 386 symtab_fmt_name_lookup(fireaddr)); 370 387 371 388 /* Move on to next instruction */ 372 389 istate->epc += 4; 373 390 } 391 374 392 if (cur) 375 393 cur->counter++; 394 376 395 if (cur && (cur->flags & BKPOINT_FUNCCALL)) { 377 396 /* Allow zero bkfunc, just for counting */ … … 380 399 } else { 381 400 #ifdef CONFIG_KCONSOLE 382 /* This disables all other processors - we are not SMP, 401 /* 402 * This disables all other processors - we are not SMP, 383 403 * actually this gets us to cpu_halt, if scheduler() is run 384 404 * - we generally do not want scheduler to be run from debug, 385 405 * so this is a good idea 386 */ 406 */ 387 407 atomic_set(&haltstate, 1); 388 spinlock_unlock(&bkpoint_lock);408 irq_spinlock_unlock(&bkpoint_lock, false); 389 409 390 410 kconsole("debug", "Debug console ready.\n", false); 391 411 392 spinlock_lock(&bkpoint_lock);412 irq_spinlock_lock(&bkpoint_lock, false); 393 413 atomic_set(&haltstate, 0); 394 414 #endif 395 415 } 396 if (cur && cur->address == fireaddr && (cur->flags & BKPOINT_INPROG)) { 416 417 if ((cur) && (cur->address == fireaddr) 418 && ((cur->flags & BKPOINT_INPROG))) { 397 419 /* Remove one-shot breakpoint */ 398 420 if ((cur->flags & BKPOINT_ONESHOT)) 399 cur->address = NULL; 421 cur->address = (uintptr_t) NULL; 422 400 423 /* Remove in-progress flag */ 401 424 cur->flags &= ~BKPOINT_INPROG; 402 } 403 spinlock_unlock(&bkpoint_lock); 425 } 426 427 irq_spinlock_unlock(&bkpoint_lock, false); 404 428 } 405 429 -
kernel/arch/mips32/src/exception.c
rfb150d78 r46c20c8 27 27 */ 28 28 29 /** @addtogroup mips32 29 /** @addtogroup mips32 30 30 * @{ 31 31 */ … … 38 38 #include <panic.h> 39 39 #include <arch/cp0.h> 40 #include < arch/types.h>40 #include <typedefs.h> 41 41 #include <arch.h> 42 42 #include <debug.h> … … 49 49 #include <symtab.h> 50 50 51 static c har *exctable[] = {51 static const char *exctable[] = { 52 52 "Interrupt", 53 53 "TLB Modified", … … 67 67 "Floating Point", 68 68 NULL, NULL, NULL, NULL, NULL, NULL, NULL, 69 "WatchHi/WatchLo", /* 23 */69 "WatchHi/WatchLo", /* 23 */ 70 70 NULL, NULL, NULL, NULL, NULL, NULL, NULL, 71 71 "Virtual Coherency - data", 72 72 }; 73 73 74 static void print_regdump(istate_t *istate) 75 { 76 char *pcsymbol, *rasymbol; 77 78 pcsymbol = symtab_fmt_name_lookup(istate->epc); 79 rasymbol = symtab_fmt_name_lookup(istate->ra); 80 81 printf("PC: %#x(%s) RA: %#x(%s), SP(%p)\n", istate->epc, pcsymbol, 82 istate->ra, rasymbol, istate->sp); 83 } 84 85 static void unhandled_exception(int n, istate_t *istate) 74 void istate_decode(istate_t *istate) 75 { 76 printf("epc=%p\tsta=%#0" PRIx32 "\t" 77 "lo =%#0" PRIx32 "\thi =%#0" PRIx32 "\n", 78 (void *) istate->epc, istate->status, 79 istate->lo, istate->hi); 80 81 printf("a0 =%#0" PRIx32 "\ta1 =%#0" PRIx32 "\t" 82 "a2 =%#0" PRIx32 "\ta3 =%#0" PRIx32 "\n", 83 istate->a0, istate->a1, istate->a2, istate->a3); 84 85 printf("t0 =%#0" PRIx32 "\tt1 =%#0" PRIx32 "\t" 86 "t2 =%#0" PRIx32 "\tt3 =%#0" PRIx32 "\n", 87 istate->t0, istate->t1, istate->t2, istate->t3); 88 89 printf("t4 =%#0" PRIx32 "\tt5 =%#0" PRIx32 "\t" 90 "t6 =%#0" PRIx32 "\tt7 =%#0" PRIx32 "\n", 91 istate->t4, istate->t5, istate->t6, istate->t7); 92 93 printf("t8 =%#0" PRIx32 "\tt9 =%#0" PRIx32 "\t" 94 "v0 =%#0" PRIx32 "\tv1 =%#0" PRIx32 "\n", 95 istate->t8, istate->t9, istate->v0, istate->v1); 96 97 printf("s0 =%#0" PRIx32 "\ts1 =%#0" PRIx32 "\t" 98 "s2 =%#0" PRIx32 "\ts3 =%#0" PRIx32 "\n", 99 istate->s0, istate->s1, istate->s2, istate->s3); 100 101 printf("s4 =%#0" PRIx32 "\ts5 
=%#0" PRIx32 "\t" 102 "s6 =%#0" PRIx32 "\ts7 =%#0" PRIx32 "\n", 103 istate->s4, istate->s5, istate->s6, istate->s7); 104 105 printf("s8 =%#0" PRIx32 "\tat =%#0" PRIx32 "\t" 106 "kt0=%#0" PRIx32 "\tkt1=%#0" PRIx32 "\n", 107 istate->s8, istate->at, istate->kt0, istate->kt1); 108 109 printf("sp =%p\tra =%p\tgp =%p\n", 110 (void *) istate->sp, (void *) istate->ra, 111 (void *) istate->gp); 112 } 113 114 static void unhandled_exception(unsigned int n, istate_t *istate) 86 115 { 87 116 fault_if_from_uspace(istate, "Unhandled exception %s.", exctable[n]); 88 89 print_regdump(istate); 90 panic("Unhandled exception %s.", exctable[n]); 91 } 92 93 static void reserved_instr_exception(int n, istate_t *istate) 94 { 95 if (*((uint32_t *)istate->epc) == 0x7c03e83b) { 117 panic_badtrap(istate, n, "Unhandled exception %s.", exctable[n]); 118 } 119 120 static void reserved_instr_exception(unsigned int n, istate_t *istate) 121 { 122 if (*((uint32_t *) istate->epc) == 0x7c03e83b) { 96 123 ASSERT(THREAD); 97 124 istate->epc += 4; 98 istate->v1 = istate->k 1;99 } else 125 istate->v1 = istate->kt1; 126 } else 100 127 unhandled_exception(n, istate); 101 128 } 102 129 103 static void breakpoint_exception( int n, istate_t *istate)130 static void breakpoint_exception(unsigned int n, istate_t *istate) 104 131 { 105 132 #ifdef CONFIG_DEBUG … … 113 140 } 114 141 115 static void tlbmod_exception( int n, istate_t *istate)142 static void tlbmod_exception(unsigned int n, istate_t *istate) 116 143 { 117 144 tlb_modified(istate); 118 145 } 119 146 120 static void tlbinv_exception( int n, istate_t *istate)147 static void tlbinv_exception(unsigned int n, istate_t *istate) 121 148 { 122 149 tlb_invalid(istate); … … 124 151 125 152 #ifdef CONFIG_FPU_LAZY 126 static void cpuns_exception( int n, istate_t *istate)153 static void cpuns_exception(unsigned int n, istate_t *istate) 127 154 { 128 155 if (cp0_cause_coperr(cp0_cause_read()) == fpu_cop_id) 129 156 scheduler_fpu_lazy_request(); 130 157 else { 131 
fault_if_from_uspace(istate, "Unhandled Coprocessor Unusable Exception."); 132 panic("Unhandled Coprocessor Unusable Exception."); 158 fault_if_from_uspace(istate, 159 "Unhandled Coprocessor Unusable Exception."); 160 panic_badtrap(istate, n, 161 "Unhandled Coprocessor Unusable Exception."); 133 162 } 134 163 } 135 164 #endif 136 165 137 static void interrupt_exception(int n, istate_t *istate) 138 { 139 uint32_t cause; 140 int i; 141 142 /* decode interrupt number and process the interrupt */ 143 cause = (cp0_cause_read() >> 8) & 0xff; 144 166 static void interrupt_exception(unsigned int n, istate_t *istate) 167 { 168 /* Decode interrupt number and process the interrupt */ 169 uint32_t cause = (cp0_cause_read() >> 8) & 0xff; 170 171 unsigned int i; 145 172 for (i = 0; i < 8; i++) { 146 173 if (cause & (1 << i)) { … … 151 178 */ 152 179 irq->handler(irq); 153 spinlock_unlock(&irq->lock);180 irq_spinlock_unlock(&irq->lock, false); 154 181 } else { 155 182 /* … … 157 184 */ 158 185 #ifdef CONFIG_DEBUG 159 printf("cpu%u: spurious interrupt (inum=% d)\n",186 printf("cpu%u: spurious interrupt (inum=%u)\n", 160 187 CPU->id, i); 161 188 #endif … … 166 193 167 194 /** Handle syscall userspace call */ 168 static void syscall_exception( int n, istate_t *istate)169 { 170 panic("Syscall is handled through shortcut.");195 static void syscall_exception(unsigned int n, istate_t *istate) 196 { 197 fault_if_from_uspace(istate, "Syscall is handled through shortcut."); 171 198 } 172 199 173 200 void exception_init(void) 174 201 { 175 int i;176 202 unsigned int i; 203 177 204 /* Clear exception table */ 178 205 for (i = 0; i < IVT_ITEMS; i++) 179 exc_register(i, "undef", (iroutine) unhandled_exception); 180 181 exc_register(EXC_Bp, "bkpoint", (iroutine) breakpoint_exception); 182 exc_register(EXC_RI, "resinstr", (iroutine) reserved_instr_exception); 183 exc_register(EXC_Mod, "tlb_mod", (iroutine) tlbmod_exception); 184 exc_register(EXC_TLBL, "tlbinvl", (iroutine) tlbinv_exception); 185 
exc_register(EXC_TLBS, "tlbinvl", (iroutine) tlbinv_exception); 186 exc_register(EXC_Int, "interrupt", (iroutine) interrupt_exception); 206 exc_register(i, "undef", false, 207 (iroutine_t) unhandled_exception); 208 209 exc_register(EXC_Bp, "bkpoint", true, 210 (iroutine_t) breakpoint_exception); 211 exc_register(EXC_RI, "resinstr", true, 212 (iroutine_t) reserved_instr_exception); 213 exc_register(EXC_Mod, "tlb_mod", true, 214 (iroutine_t) tlbmod_exception); 215 exc_register(EXC_TLBL, "tlbinvl", true, 216 (iroutine_t) tlbinv_exception); 217 exc_register(EXC_TLBS, "tlbinvl", true, 218 (iroutine_t) tlbinv_exception); 219 exc_register(EXC_Int, "interrupt", true, 220 (iroutine_t) interrupt_exception); 221 187 222 #ifdef CONFIG_FPU_LAZY 188 exc_register(EXC_CpU, "cpunus", (iroutine) cpuns_exception); 189 #endif 190 exc_register(EXC_Sys, "syscall", (iroutine) syscall_exception); 223 exc_register(EXC_CpU, "cpunus", true, 224 (iroutine_t) cpuns_exception); 225 #endif 226 227 exc_register(EXC_Sys, "syscall", true, 228 (iroutine_t) syscall_exception); 191 229 } 192 230 -
kernel/arch/mips32/src/interrupt.c
rfb150d78 r46c20c8 35 35 #include <interrupt.h> 36 36 #include <arch/interrupt.h> 37 #include < arch/types.h>37 #include <typedefs.h> 38 38 #include <arch.h> 39 39 #include <arch/cp0.h> 40 #include <arch/smp/dorder.h> 40 41 #include <time/clock.h> 41 42 #include <ipc/sysipc.h> … … 48 49 function virtual_timer_fnc = NULL; 49 50 static irq_t timer_irq; 51 static irq_t dorder_irq; 52 53 // TODO: This is SMP unsafe!!! 54 55 uint32_t count_hi = 0; 56 static unsigned long nextcount; 57 static unsigned long lastcount; 50 58 51 59 /** Disable interrupts. … … 89 97 } 90 98 91 /* TODO: This is SMP unsafe!!! */ 92 uint32_t count_hi = 0; 93 static unsigned long nextcount; 94 static unsigned long lastcount; 99 /** Check interrupts state. 100 * 101 * @return True if interrupts are disabled. 102 * 103 */ 104 bool interrupts_disabled(void) 105 { 106 return !(cp0_status_read() & cp0_status_ie_enabled_bit); 107 } 95 108 96 /** Start hardware clock */ 109 /** Start hardware clock 110 * 111 */ 97 112 static void timer_start(void) 98 113 { … … 109 124 static void timer_irq_handler(irq_t *irq) 110 125 { 111 unsigned long drift;112 113 126 if (cp0_count_read() < lastcount) 114 127 /* Count overflow detected */ 115 128 count_hi++; 129 116 130 lastcount = cp0_count_read(); 117 131 118 drift = cp0_count_read() - nextcount;132 unsigned long drift = cp0_count_read() - nextcount; 119 133 while (drift > cp0_compare_value) { 120 134 drift -= cp0_compare_value; 121 135 CPU->missed_clock_ticks++; 122 136 } 137 123 138 nextcount = cp0_count_read() + cp0_compare_value - drift; 124 139 cp0_compare_write(nextcount); … … 128 143 * Release the lock, call clock() and reacquire the lock again. 
129 144 */ 130 spinlock_unlock(&irq->lock);145 irq_spinlock_unlock(&irq->lock, false); 131 146 clock(); 132 spinlock_lock(&irq->lock);147 irq_spinlock_lock(&irq->lock, false); 133 148 134 149 if (virtual_timer_fnc != NULL) 135 150 virtual_timer_fnc(); 151 } 152 153 static irq_ownership_t dorder_claim(irq_t *irq) 154 { 155 return IRQ_ACCEPT; 156 } 157 158 static void dorder_irq_handler(irq_t *irq) 159 { 160 dorder_ipi_ack(1 << dorder_cpuid()); 136 161 } 137 162 … … 150 175 timer_start(); 151 176 cp0_unmask_int(TIMER_IRQ); 177 178 irq_initialize(&dorder_irq); 179 dorder_irq.devno = device_assign_devno(); 180 dorder_irq.inr = DORDER_IRQ; 181 dorder_irq.claim = dorder_claim; 182 dorder_irq.handler = dorder_irq_handler; 183 irq_register(&dorder_irq); 184 185 cp0_unmask_int(DORDER_IRQ); 152 186 } 153 187 -
kernel/arch/mips32/src/mips32.c
rfb150d78 r46c20c8 36 36 #include <arch/cp0.h> 37 37 #include <arch/exception.h> 38 #include <arch/debug.h> 38 39 #include <mm/as.h> 39 40 #include <userspace.h> … … 57 58 #include <macros.h> 58 59 #include <config.h> 59 #include <str ing.h>60 #include <str.h> 60 61 #include <arch/drivers/msim.h> 61 62 #include <arch/asm/regname.h> … … 83 84 void arch_pre_main(void *entry __attribute__((unused)), bootinfo_t *bootinfo) 84 85 { 85 /* Setup usermode */ 86 init.cnt = bootinfo->cnt; 86 init.cnt = min3(bootinfo->cnt, TASKMAP_MAX_RECORDS, CONFIG_INIT_TASKS); 87 87 88 88 size_t i; 89 for (i = 0; i < min3(bootinfo->cnt, TASKMAP_MAX_RECORDS, CONFIG_INIT_TASKS); i++) {90 init.tasks[i].addr = bootinfo->tasks[i].addr;89 for (i = 0; i < init.cnt; i++) { 90 init.tasks[i].addr = (uintptr_t) bootinfo->tasks[i].addr; 91 91 init.tasks[i].size = bootinfo->tasks[i].size; 92 92 str_cpy(init.tasks[i].name, CONFIG_TASK_NAME_BUFLEN, -
kernel/arch/mips32/src/mm/frame.c
rfb150d78 r46c20c8 249 249 void physmem_print(void) 250 250 { 251 printf("Base Size\n"); 252 printf("---------- ----------\n"); 251 printf("[base ] [size ]\n"); 253 252 254 253 size_t i; -
kernel/arch/mips32/src/mm/tlb.c
rfb150d78 r46c20c8 321 321 void tlb_refill_fail(istate_t *istate) 322 322 { 323 char *symbol, *sym2; 324 325 symbol = symtab_fmt_name_lookup(istate->epc); 326 sym2 = symtab_fmt_name_lookup(istate->ra); 323 uintptr_t va = cp0_badvaddr_read(); 327 324 328 325 fault_if_from_uspace(istate, "TLB Refill Exception on %p.", 329 cp0_badvaddr_read()); 330 panic("%x: TLB Refill Exception at %x (%s<-%s).", cp0_badvaddr_read(), 331 istate->epc, symbol, sym2); 326 (void *) va); 327 panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, "TLB Refill Exception."); 332 328 } 333 329 … … 335 331 void tlb_invalid_fail(istate_t *istate) 336 332 { 337 char *symbol; 338 339 symbol = symtab_fmt_name_lookup(istate->epc); 340 333 uintptr_t va = cp0_badvaddr_read(); 334 341 335 fault_if_from_uspace(istate, "TLB Invalid Exception on %p.", 342 cp0_badvaddr_read()); 343 panic("%x: TLB Invalid Exception at %x (%s).", cp0_badvaddr_read(), 344 istate->epc, symbol); 336 (void *) va); 337 panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, "TLB Invalid Exception."); 345 338 } 346 339 347 340 void tlb_modified_fail(istate_t *istate) 348 341 { 349 char *symbol; 350 351 symbol = symtab_fmt_name_lookup(istate->epc); 352 342 uintptr_t va = cp0_badvaddr_read(); 343 353 344 fault_if_from_uspace(istate, "TLB Modified Exception on %p.", 354 cp0_badvaddr_read()); 355 panic("%x: TLB Modified Exception at %x (%s).", cp0_badvaddr_read(), 356 istate->epc, symbol); 345 (void *) va); 346 panic_memtrap(istate, PF_ACCESS_WRITE, va, "TLB Modified Exception."); 357 347 } 358 348 359 349 /** Try to find PTE for faulting address. 360 *361 * The AS->lock must be held on entry to this function.362 350 * 363 351 * @param badvaddr Faulting virtual address. 
… … 375 363 entry_hi_t hi; 376 364 pte_t *pte; 365 366 ASSERT(mutex_locked(&AS->lock)); 377 367 378 368 hi.value = cp0_entry_hi_read(); … … 461 451 hi_save.value = cp0_entry_hi_read(); 462 452 463 printf("# ASID VPN2 MASK G V D C PFN\n"); 464 printf("-- ---- ------ ---- - - - - ------\n"); 453 printf("[nr] [asid] [vpn2] [mask] [gvdc] [pfn ]\n"); 465 454 466 455 for (i = 0; i < TLB_ENTRY_COUNT; i++) { … … 473 462 lo1.value = cp0_entry_lo1_read(); 474 463 475 printf("%- 2u %-4u %#6x %#4x %1u %1u %1u %1u%#6x\n",464 printf("%-4u %-6u %#6x %#6x %1u%1u%1u%1u %#6x\n", 476 465 i, hi.asid, hi.vpn2, mask.mask, 477 466 lo0.g, lo0.v, lo0.d, lo0.c, lo0.pfn); 478 printf(" %1u %1u %1u %1u%#6x\n",467 printf(" %1u%1u%1u%1u %#6x\n", 479 468 lo1.g, lo1.v, lo1.d, lo1.c, lo1.pfn); 480 469 } -
kernel/arch/mips32/src/smp/dorder.c
rfb150d78 r46c20c8 33 33 */ 34 34 35 #include <typedefs.h> 36 #include <smp/ipi.h> 35 37 #include <arch/smp/dorder.h> 36 38 37 39 #define MSIM_DORDER_ADDRESS 0xB0000004 38 40 41 #ifdef CONFIG_SMP 42 39 43 void ipi_broadcast_arch(int ipi) 40 44 { 41 #ifdef CONFIG_SMP 42 *((volatile unsigned int *) MSIM_DORDER_ADDRESS) = 0x7FFFFFFF; 45 *((volatile uint32_t *) MSIM_DORDER_ADDRESS) = 0x7fffffff; 46 } 47 43 48 #endif 49 50 uint32_t dorder_cpuid(void) 51 { 52 return *((volatile uint32_t *) MSIM_DORDER_ADDRESS); 53 } 54 55 void dorder_ipi_ack(uint32_t mask) 56 { 57 *((volatile uint32_t *) (MSIM_DORDER_ADDRESS + 4)) = mask; 44 58 } 45 59 -
kernel/arch/mips32/src/start.S
rfb150d78 r46c20c8 45 45 .global userspace_asm 46 46 47 # Which status bits should are thread-local 48 #define REG_SAVE_MASK 0x1f # KSU(UM), EXL, ERL, IE 49 50 # Save registers to space defined by \r 51 # We will change status: Disable ERL,EXL,UM,IE 52 # These changes will be automatically reversed in REGISTER_LOAD 53 # SP is NOT saved as part of these registers 47 /* 48 * Which status bits are thread-local: 49 * KSU(UM), EXL, ERL, IE 50 */ 51 #define REG_SAVE_MASK 0x1f 52 53 #define ISTATE_OFFSET_A0 0 54 #define ISTATE_OFFSET_A1 4 55 #define ISTATE_OFFSET_A2 8 56 #define ISTATE_OFFSET_A3 12 57 #define ISTATE_OFFSET_T0 16 58 #define ISTATE_OFFSET_T1 20 59 #define ISTATE_OFFSET_V0 24 60 #define ISTATE_OFFSET_V1 28 61 #define ISTATE_OFFSET_AT 32 62 #define ISTATE_OFFSET_T2 36 63 #define ISTATE_OFFSET_T3 40 64 #define ISTATE_OFFSET_T4 44 65 #define ISTATE_OFFSET_T5 48 66 #define ISTATE_OFFSET_T6 52 67 #define ISTATE_OFFSET_T7 56 68 #define ISTATE_OFFSET_S0 60 69 #define ISTATE_OFFSET_S1 64 70 #define ISTATE_OFFSET_S2 68 71 #define ISTATE_OFFSET_S3 72 72 #define ISTATE_OFFSET_S4 76 73 #define ISTATE_OFFSET_S5 80 74 #define ISTATE_OFFSET_S6 84 75 #define ISTATE_OFFSET_S7 88 76 #define ISTATE_OFFSET_T8 92 77 #define ISTATE_OFFSET_T9 96 78 #define ISTATE_OFFSET_KT0 100 79 #define ISTATE_OFFSET_KT1 104 80 #define ISTATE_OFFSET_GP 108 81 #define ISTATE_OFFSET_SP 112 82 #define ISTATE_OFFSET_S8 116 83 #define ISTATE_OFFSET_RA 120 84 #define ISTATE_OFFSET_LO 124 85 #define ISTATE_OFFSET_HI 128 86 #define ISTATE_OFFSET_STATUS 132 87 #define ISTATE_OFFSET_EPC 136 88 #define ISTATE_OFFSET_ALIGNMENT 140 89 90 #define ISTATE_SOFT_SIZE 144 91 92 /* 93 * The fake ABI prologue is never executed and may not be part of the 94 * procedure's body. Instead, it should be immediately preceding the procedure's 95 * body. Its only purpose is to trick the stack trace walker into thinking that 96 * the exception is more or less just a normal function call. 
97 */ 98 .macro FAKE_ABI_PROLOGUE 99 sub $sp, ISTATE_SOFT_SIZE 100 sw $ra, ISTATE_OFFSET_EPC($sp) 101 .endm 102 103 /* 104 * Save registers to space defined by \r 105 * We will change status: Disable ERL, EXL, UM, IE 106 * These changes will be automatically reversed in REGISTER_LOAD 107 * %sp is NOT saved as part of these registers 108 */ 54 109 .macro REGISTERS_STORE_AND_EXC_RESET r 55 sw $at, EOFFSET_AT(\r) 56 sw $v0, EOFFSET_V0(\r) 57 sw $v1, EOFFSET_V1(\r) 58 sw $a0, EOFFSET_A0(\r) 59 sw $a1, EOFFSET_A1(\r) 60 sw $a2, EOFFSET_A2(\r) 61 sw $a3, EOFFSET_A3(\r) 62 sw $t0, EOFFSET_T0(\r) 63 sw $t1, EOFFSET_T1(\r) 64 sw $t2, EOFFSET_T2(\r) 65 sw $t3, EOFFSET_T3(\r) 66 sw $t4, EOFFSET_T4(\r) 67 sw $t5, EOFFSET_T5(\r) 68 sw $t6, EOFFSET_T6(\r) 69 sw $t7, EOFFSET_T7(\r) 70 sw $t8, EOFFSET_T8(\r) 71 sw $t9, EOFFSET_T9(\r) 72 110 sw $at, ISTATE_OFFSET_AT(\r) 111 sw $v0, ISTATE_OFFSET_V0(\r) 112 sw $v1, ISTATE_OFFSET_V1(\r) 113 sw $a0, ISTATE_OFFSET_A0(\r) 114 sw $a1, ISTATE_OFFSET_A1(\r) 115 sw $a2, ISTATE_OFFSET_A2(\r) 116 sw $a3, ISTATE_OFFSET_A3(\r) 117 sw $t0, ISTATE_OFFSET_T0(\r) 118 sw $t1, ISTATE_OFFSET_T1(\r) 119 sw $t2, ISTATE_OFFSET_T2(\r) 120 sw $t3, ISTATE_OFFSET_T3(\r) 121 sw $t4, ISTATE_OFFSET_T4(\r) 122 sw $t5, ISTATE_OFFSET_T5(\r) 123 sw $t6, ISTATE_OFFSET_T6(\r) 124 sw $t7, ISTATE_OFFSET_T7(\r) 125 sw $t8, ISTATE_OFFSET_T8(\r) 126 sw $t9, ISTATE_OFFSET_T9(\r) 127 sw $s0, ISTATE_OFFSET_S0(\r) 128 sw $s1, ISTATE_OFFSET_S1(\r) 129 sw $s2, ISTATE_OFFSET_S2(\r) 130 sw $s3, ISTATE_OFFSET_S3(\r) 131 sw $s4, ISTATE_OFFSET_S4(\r) 132 sw $s5, ISTATE_OFFSET_S5(\r) 133 sw $s6, ISTATE_OFFSET_S6(\r) 134 sw $s7, ISTATE_OFFSET_S7(\r) 135 sw $s8, ISTATE_OFFSET_S8(\r) 136 73 137 mflo $at 74 sw $at, EOFFSET_LO(\r)138 sw $at, ISTATE_OFFSET_LO(\r) 75 139 mfhi $at 76 sw $at, EOFFSET_HI(\r) 77 78 sw $gp, EOFFSET_GP(\r) 79 sw $ra, EOFFSET_RA(\r) 80 sw $k1, EOFFSET_K1(\r) 81 140 sw $at, ISTATE_OFFSET_HI(\r) 141 142 sw $gp, ISTATE_OFFSET_GP(\r) 143 sw $ra, ISTATE_OFFSET_RA(\r) 
144 sw $k0, ISTATE_OFFSET_KT0(\r) 145 sw $k1, ISTATE_OFFSET_KT1(\r) 146 82 147 mfc0 $t0, $status 83 148 mfc0 $t1, $epc 84 149 85 and $t2, $t0, REG_SAVE_MASK # Save only KSU,EXL,ERL,IE 86 li $t3, ~(0x1f) 87 and $t0, $t0, $t3 # Clear KSU,EXL,ERL,IE 88 89 sw $t2,EOFFSET_STATUS(\r) 90 sw $t1,EOFFSET_EPC(\r) 150 /* save only KSU, EXL, ERL, IE */ 151 and $t2, $t0, REG_SAVE_MASK 152 153 /* clear KSU, EXL, ERL, IE */ 154 li $t3, ~(REG_SAVE_MASK) 155 and $t0, $t0, $t3 156 157 sw $t2, ISTATE_OFFSET_STATUS(\r) 158 sw $t1, ISTATE_OFFSET_EPC(\r) 91 159 mtc0 $t0, $status 92 160 .endm 93 161 94 162 .macro REGISTERS_LOAD r 95 # Update only UM,EXR,IE from status, the rest 96 # is controlled by OS and not bound to task 163 /* 164 * Update only UM, EXR, IE from status, the rest 165 * is controlled by OS and not bound to task. 166 */ 97 167 mfc0 $t0, $status 98 lw $t1,EOFFSET_STATUS(\r) 99 100 li $t2, ~REG_SAVE_MASK # Mask UM,EXL,ERL,IE 168 lw $t1, ISTATE_OFFSET_STATUS(\r) 169 170 /* mask UM, EXL, ERL, IE */ 171 li $t2, ~REG_SAVE_MASK 101 172 and $t0, $t0, $t2 102 173 103 or $t0, $t0, $t1 # Copy UM,EXL, ERL, IE from saved status 174 /* copy UM, EXL, ERL, IE from saved status */ 175 or $t0, $t0, $t1 104 176 mtc0 $t0, $status 105 177 106 lw $v0, EOFFSET_V0(\r)107 lw $v1, EOFFSET_V1(\r)108 lw $a0, EOFFSET_A0(\r)109 lw $a1, EOFFSET_A1(\r)110 lw $a2, EOFFSET_A2(\r)111 lw $a3, EOFFSET_A3(\r)112 lw $t0, EOFFSET_T0(\r)113 lw $t1, EOFFSET_T1(\r)114 lw $t2, EOFFSET_T2(\r)115 lw $t3, EOFFSET_T3(\r)116 lw $t4, EOFFSET_T4(\r)117 lw $t5, EOFFSET_T5(\r)118 lw $t6, EOFFSET_T6(\r)119 lw $t7, EOFFSET_T7(\r)120 lw $t8, EOFFSET_T8(\r)121 lw $t9, EOFFSET_T9(\r)122 123 lw $gp, EOFFSET_GP(\r)124 lw $ra, EOFFSET_RA(\r)125 lw $k1, EOFFSET_K1(\r)126 127 lw $at, EOFFSET_LO(\r)178 lw $v0, ISTATE_OFFSET_V0(\r) 179 lw $v1, ISTATE_OFFSET_V1(\r) 180 lw $a0, ISTATE_OFFSET_A0(\r) 181 lw $a1, ISTATE_OFFSET_A1(\r) 182 lw $a2, ISTATE_OFFSET_A2(\r) 183 lw $a3, ISTATE_OFFSET_A3(\r) 184 lw $t0, ISTATE_OFFSET_T0(\r) 185 lw 
$t1, ISTATE_OFFSET_T1(\r) 186 lw $t2, ISTATE_OFFSET_T2(\r) 187 lw $t3, ISTATE_OFFSET_T3(\r) 188 lw $t4, ISTATE_OFFSET_T4(\r) 189 lw $t5, ISTATE_OFFSET_T5(\r) 190 lw $t6, ISTATE_OFFSET_T6(\r) 191 lw $t7, ISTATE_OFFSET_T7(\r) 192 lw $t8, ISTATE_OFFSET_T8(\r) 193 lw $t9, ISTATE_OFFSET_T9(\r) 194 195 lw $gp, ISTATE_OFFSET_GP(\r) 196 lw $ra, ISTATE_OFFSET_RA(\r) 197 lw $k1, ISTATE_OFFSET_KT1(\r) 198 199 lw $at, ISTATE_OFFSET_LO(\r) 128 200 mtlo $at 129 lw $at, EOFFSET_HI(\r)201 lw $at, ISTATE_OFFSET_HI(\r) 130 202 mthi $at 131 132 lw $at, EOFFSET_EPC(\r)203 204 lw $at, ISTATE_OFFSET_EPC(\r) 133 205 mtc0 $at, $epc 134 206 135 lw $at, EOFFSET_AT(\r)136 lw $sp, EOFFSET_SP(\r)207 lw $at, ISTATE_OFFSET_AT(\r) 208 lw $sp, ISTATE_OFFSET_SP(\r) 137 209 .endm 138 210 139 # Move kernel stack pointer address to register K0 140 # - if we are in user mode, load the appropriate stack 141 # address 211 /* 212 * Move kernel stack pointer address to register $k0. 213 * If we are in user mode, load the appropriate stack address. 
214 */ 142 215 .macro KERNEL_STACK_TO_K0 143 # If we are in user mode216 /* if we are in user mode */ 144 217 mfc0 $k0, $status 145 218 andi $k0, 0x10 146 219 147 220 beq $k0, $0, 1f 148 add $k0, $sp, 0149 150 # Move $k0 pointer to kernel stack221 move $k0, $sp 222 223 /* move $k0 pointer to kernel stack */ 151 224 lui $k0, %hi(supervisor_sp) 152 225 ori $k0, $k0, %lo(supervisor_sp) 153 # Move $k0 (superveisor_sp) 154 lw $k0, 0($k0) 155 1: 226 227 /* move $k0 (supervisor_sp) */ 228 lw $k0, ($k0) 229 230 1: 156 231 .endm 157 232 158 233 .org 0x0 159 234 kernel_image_start: 160 /* Load temporary stack */235 /* load temporary stack */ 161 236 lui $sp, %hi(end_stack) 162 237 ori $sp, $sp, %lo(end_stack) 163 238 164 /* Not sure about this, but might 165 be needed for PIC code */ 239 /* not sure about this, but might be needed for PIC code */ 166 240 lui $gp, 0x8000 167 241 168 242 /* $a1 contains physical address of bootinfo_t */ 169 170 243 jal arch_pre_main 171 244 nop … … 174 247 nop 175 248 176 .space TEMP_STACK_SIZE249 .space TEMP_STACK_SIZE 177 250 end_stack: 178 251 … … 189 262 nop 190 263 264 FAKE_ABI_PROLOGUE 191 265 exception_handler: 192 266 KERNEL_STACK_TO_K0 193 sub $k0, REGISTER_SPACE 194 sw $sp, EOFFSET_SP($k0) 267 268 sub $k0, ISTATE_SOFT_SIZE 269 sw $sp, ISTATE_OFFSET_SP($k0) 195 270 move $sp, $k0 196 271 197 272 mfc0 $k0, $cause 198 273 199 sra $k0, $k0, 0x2 # cp0_exc_cause() part 1200 andi $k0, $k0, 0x1f # cp0_exc_cause() part 2201 sub $k0, 8 # 8 = SYSCALL274 sra $k0, $k0, 0x2 /* cp0_exc_cause() part 1 */ 275 andi $k0, $k0, 0x1f /* cp0_exc_cause() part 2 */ 276 sub $k0, 8 /* 8 = SYSCALL */ 202 277 203 278 beqz $k0, syscall_shortcut 204 add $k0, 8 # Revert $k0 back to correct exc number279 add $k0, 8 /* revert $k0 back to correct exc number */ 205 280 206 281 REGISTERS_STORE_AND_EXC_RESET $sp 207 282 208 283 move $a1, $sp 209 jal exc_dispatch # exc_dispatch(excno, register_space)284 jal exc_dispatch /* exc_dispatch(excno, register_space) */ 210 285 
move $a0, $k0 211 286 212 287 REGISTERS_LOAD $sp 213 # The $sp is automatically restored to former value 214 eret 215 216 ## Syscall entry 217 # 218 # Registers: 219 # 220 # @param v0 Syscall number. 221 # @param a0 1st argument. 222 # @param a1 2nd argument. 223 # @param a2 3rd argument. 224 # @param a3 4th argument. 225 # @param t0 5th argument. 226 # @param t1 6th argument. 227 # 228 # @return The return value will be stored in v0. 229 # 230 #define SS_SP EOFFSET_SP 231 #define SS_STATUS EOFFSET_STATUS 232 #define SS_EPC EOFFSET_EPC 233 #define SS_K1 EOFFSET_K1 288 /* the $sp is automatically restored to former value */ 289 eret 290 291 /** Syscall entry 292 * 293 * Registers: 294 * 295 * @param $v0 Syscall number. 296 * @param $a0 1st argument. 297 * @param $a1 2nd argument. 298 * @param $a2 3rd argument. 299 * @param $a3 4th argument. 300 * @param $t0 5th argument. 301 * @param $t1 6th argument. 302 * 303 * @return The return value will be stored in $v0. 304 * 305 */ 234 306 syscall_shortcut: 235 # We have a lot of space on the stack, with free use236 307 mfc0 $t3, $epc 237 308 mfc0 $t2, $status 238 sw $t3, SS_EPC($sp) # Save EPC239 sw $k1, SS_K1($sp) # Save k1 not saved on context switch240 241 and $t4, $t2, REG_SAVE_MASK # Save only KSU, EXL, ERL, IE309 sw $t3, ISTATE_OFFSET_EPC($sp) /* save EPC */ 310 sw $k1, ISTATE_OFFSET_KT1($sp) /* save $k1 not saved on context switch */ 311 312 and $t4, $t2, REG_SAVE_MASK /* save only KSU, EXL, ERL, IE */ 242 313 li $t5, ~(0x1f) 243 and $t2, $t2, $t5 # Clear KSU, EXL, ERL244 ori $t2, $t2, 0x1 # Set IE245 246 sw $t4, SS_STATUS($sp)314 and $t2, $t2, $t5 /* clear KSU, EXL, ERL */ 315 ori $t2, $t2, 0x1 /* set IE */ 316 317 sw $t4, ISTATE_OFFSET_STATUS($sp) 247 318 mtc0 $t2, $status 248 249 #250 # Call the higher level system call handler251 # We are going to reuse part of the unused exception stack frame252 #253 sw $t0, STACK_ARG4($sp) # save the 5th argument on the stack254 sw $t1, STACK_ARG5($sp) # save the 6th argument 
on the stack319 320 /* 321 * Call the higher level system call handler. 322 * 323 */ 324 sw $t0, ISTATE_OFFSET_T0($sp) /* save the 5th argument on the stack */ 325 sw $t1, ISTATE_OFFSET_T1($sp) /* save the 6th argument on the stack */ 255 326 jal syscall_handler 256 sw $v0, STACK_ARG6($sp) # save the syscall number on the stack257 258 # restore status327 sw $v0, ISTATE_OFFSET_V0($sp) /* save the syscall number on the stack */ 328 329 /* restore status */ 259 330 mfc0 $t2, $status 260 lw $t3, SS_STATUS($sp) 261 262 # Change back to EXL = 1 (from last exception), otherwise 263 # an interrupt could rewrite the CP0 - EPC 264 li $t4, ~REG_SAVE_MASK # Mask UM, EXL, ERL, IE 331 lw $t3, ISTATE_OFFSET_STATUS($sp) 332 333 /* 334 * Change back to EXL = 1 (from last exception), otherwise 335 * an interrupt could rewrite the CP0 - EPC. 336 * 337 */ 338 li $t4, ~REG_SAVE_MASK /* mask UM, EXL, ERL, IE */ 265 339 and $t2, $t2, $t4 266 or $t2, $t2, $t3 # Copy saved UM, EXL, ERL, IE340 or $t2, $t2, $t3 /* copy saved UM, EXL, ERL, IE */ 267 341 mtc0 $t2, $status 268 269 # restore epc + 4270 lw $t2, SS_EPC($sp)271 lw $k1, SS_K1($sp)342 343 /* restore epc + 4 */ 344 lw $t2, ISTATE_OFFSET_EPC($sp) 345 lw $k1, ISTATE_OFFSET_KT1($sp) 272 346 addi $t2, $t2, 4 273 347 mtc0 $t2, $epc 274 348 275 lw $sp, SS_SP($sp) # restore sp276 277 eret 278 349 lw $sp, ISTATE_OFFSET_SP($sp) /* restore $sp */ 350 eret 351 352 FAKE_ABI_PROLOGUE 279 353 tlb_refill_handler: 280 354 KERNEL_STACK_TO_K0 281 sub $k0, REGISTER_SPACE355 sub $k0, ISTATE_SOFT_SIZE 282 356 REGISTERS_STORE_AND_EXC_RESET $k0 283 sw $sp, EOFFSET_SP($k0)284 add $sp, $k0,0285 357 sw $sp, ISTATE_OFFSET_SP($k0) 358 move $sp, $k0 359 286 360 jal tlb_refill 287 add $a0, $sp, 0288 361 move $a0, $sp 362 289 363 REGISTERS_LOAD $sp 290 291 eret 292 364 eret 365 366 FAKE_ABI_PROLOGUE 293 367 cache_error_handler: 294 368 KERNEL_STACK_TO_K0 295 sub $k0, REGISTER_SPACE369 sub $k0, ISTATE_SOFT_SIZE 296 370 REGISTERS_STORE_AND_EXC_RESET $k0 297 sw $sp, 
EOFFSET_SP($k0)298 add $sp, $k0,0299 371 sw $sp, ISTATE_OFFSET_SP($k0) 372 move $sp, $k0 373 300 374 jal cache_error 301 add $a0, $sp, 0302 375 move $a0, $sp 376 303 377 REGISTERS_LOAD $sp 304 305 378 eret 306 379 307 380 userspace_asm: 308 add $sp, $a0,0309 add $v0, $a1, 0310 add $t9, $a2, 0 # Set up correct entry into PIC code311 xor $a0, $a0, $a0 # $a0 is defined to hold pcb_ptr312 # set it to 0313 eret 381 move $sp, $a0 382 move $v0, $a1 383 move $t9, $a2 /* set up correct entry into PIC code */ 384 xor $a0, $a0, $a0 /* $a0 is defined to hold pcb_ptr */ 385 /* set it to 0 */ 386 eret
Note:
See TracChangeset
for help on using the changeset viewer.
