Changeset 46c20c8 in mainline for kernel/arch/mips32
- Timestamp:
- 2010-11-26T20:08:10Z (15 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 45df59a
- Parents:
- fb150d78 (diff), ffdd2b9 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location:
- kernel/arch/mips32
- Files:
-
- 1 added
- 1 deleted
- 39 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/mips32/Makefile.inc
rfb150d78 r46c20c8 27 27 # 28 28 29 ## Toolchain configuration30 #31 32 29 BFD_ARCH = mips 33 30 BFD = binary 34 TARGET = mipsel-linux-gnu35 TOOLCHAIN_DIR = $(CROSS_PREFIX)/mips3236 37 31 GCC_CFLAGS += -mno-abicalls -G 0 -fno-zero-initialized-in-bss -mips3 38 32 … … 48 42 ifeq ($(MACHINE),bgxemul) 49 43 BFD_NAME = elf32-tradbigmips 50 TOOLCHAIN_DIR = $(CROSS_PREFIX)/mips32eb51 TARGET = mips-linux-gnu52 44 ENDIANESS = BE 53 45 GCC_CFLAGS += -D__BE__ … … 62 54 arch/$(KARCH)/src/start.S \ 63 55 arch/$(KARCH)/src/context.S \ 64 arch/$(KARCH)/src/panic.S \65 56 arch/$(KARCH)/src/mips32.c \ 66 57 arch/$(KARCH)/src/asm.S \ -
kernel/arch/mips32/include/arch.h
rfb150d78 r46c20c8 36 36 #define KERN_mips32_ARCH_H_ 37 37 38 #define TASKMAP_MAX_RECORDS 32 39 #define CPUMAP_MAX_RECORDS 32 38 #include <typedefs.h> 40 39 41 #define BOOTINFO_TASK_NAME_BUFLEN3242 43 # include <typedefs.h>40 #define TASKMAP_MAX_RECORDS 32 41 #define CPUMAP_MAX_RECORDS 32 42 #define BOOTINFO_TASK_NAME_BUFLEN 32 44 43 45 44 extern size_t cpu_count; 46 45 47 46 typedef struct { 48 uintptr_taddr;49 uint32_t size;47 void *addr; 48 size_t size; 50 49 char name[BOOTINFO_TASK_NAME_BUFLEN]; 51 50 } utask_t; … … 53 52 typedef struct { 54 53 uint32_t cpumap; 55 uint32_t cnt;54 size_t cnt; 56 55 utask_t tasks[TASKMAP_MAX_RECORDS]; 57 56 } bootinfo_t; -
kernel/arch/mips32/include/asm.h
rfb150d78 r46c20c8 27 27 */ 28 28 29 /** @addtogroup mips32 29 /** @addtogroup mips32 30 30 * @{ 31 31 */ … … 36 36 #define KERN_mips32_ASM_H_ 37 37 38 #include <arch/types.h>39 38 #include <typedefs.h> 40 39 #include <config.h> 40 #include <trace.h> 41 41 42 43 static inline void cpu_sleep(void) 42 NO_TRACE static inline void cpu_sleep(void) 44 43 { 45 /* Most of the simulators do not support */ 46 /* asm volatile ("wait"); */ 44 /* 45 * Unfortunatelly most of the simulators do not support 46 * 47 * asm volatile ( 48 * "wait" 49 * ); 50 * 51 */ 47 52 } 48 53 49 54 /** Return base address of current stack 50 * 55 * 51 56 * Return the base address of the current stack. 52 57 * The stack is assumed to be STACK_SIZE bytes long. 53 58 * The stack must start on page boundary. 59 * 54 60 */ 55 static inline uintptr_t get_stack_base(void)61 NO_TRACE static inline uintptr_t get_stack_base(void) 56 62 { 57 uintptr_t v;63 uintptr_t base; 58 64 59 65 asm volatile ( 60 "and % 0, $29, %1\n"61 : "=r" (v)62 : "r" (~(STACK_SIZE-1))66 "and %[base], $29, %[mask]\n" 67 : [base] "=r" (base) 68 : [mask] "r" (~(STACK_SIZE - 1)) 63 69 ); 64 70 65 return v;71 return base; 66 72 } 67 73 68 extern void cpu_halt(void) __attribute__((noreturn)); 69 extern void asm_delay_loop(uint32_t t); 70 extern void userspace_asm(uintptr_t ustack, uintptr_t uspace_uarg, 71 uintptr_t entry); 72 73 extern ipl_t interrupts_disable(void); 74 extern ipl_t interrupts_enable(void); 75 extern void interrupts_restore(ipl_t ipl); 76 extern ipl_t interrupts_read(void); 77 extern void asm_delay_loop(uint32_t t); 78 79 static inline void pio_write_8(ioport8_t *port, uint8_t v) 74 NO_TRACE static inline void pio_write_8(ioport8_t *port, uint8_t v) 80 75 { 81 *port = v; 76 *port = v; 82 77 } 83 78 84 static inline void pio_write_16(ioport16_t *port, uint16_t v)79 NO_TRACE static inline void pio_write_16(ioport16_t *port, uint16_t v) 85 80 { 86 *port = v; 81 *port = v; 87 82 } 88 83 89 static inline void 
pio_write_32(ioport32_t *port, uint32_t v)84 NO_TRACE static inline void pio_write_32(ioport32_t *port, uint32_t v) 90 85 { 91 *port = v; 86 *port = v; 92 87 } 93 88 94 static inline uint8_t pio_read_8(ioport8_t *port)89 NO_TRACE static inline uint8_t pio_read_8(ioport8_t *port) 95 90 { 96 91 return *port; 97 92 } 98 93 99 static inline uint16_t pio_read_16(ioport16_t *port)94 NO_TRACE static inline uint16_t pio_read_16(ioport16_t *port) 100 95 { 101 96 return *port; 102 97 } 103 98 104 static inline uint32_t pio_read_32(ioport32_t *port)99 NO_TRACE static inline uint32_t pio_read_32(ioport32_t *port) 105 100 { 106 101 return *port; 107 102 } 103 104 extern void cpu_halt(void) __attribute__((noreturn)); 105 extern void asm_delay_loop(uint32_t); 106 extern void userspace_asm(uintptr_t, uintptr_t, uintptr_t); 107 108 extern ipl_t interrupts_disable(void); 109 extern ipl_t interrupts_enable(void); 110 extern void interrupts_restore(ipl_t); 111 extern ipl_t interrupts_read(void); 112 extern bool interrupts_disabled(void); 108 113 109 114 #endif -
kernel/arch/mips32/include/asm/boot.h
rfb150d78 r46c20c8 36 36 #define KERN_mips32_BOOT_H_ 37 37 38 39 38 /* Temporary stack size for boot process */ 40 #define TEMP_STACK_SIZE 0x10039 #define TEMP_STACK_SIZE 0x100 41 40 42 41 #endif -
kernel/arch/mips32/include/asm/regname.h
rfb150d78 r46c20c8 27 27 */ 28 28 29 /** @addtogroup mips32 29 /** @addtogroup mips32 30 30 * @{ 31 31 */ … … 69 69 #define ra 31 70 70 71 #define rindex 72 #define rrandom 73 #define entrylo0 74 #define entrylo1 75 #define context 76 #define pagemask 77 #define wired 78 #define badvaddr 79 #define count 80 #define entryhi 81 #define compare 82 #define status 83 #define cause 84 #define epc 85 #define rconfig 86 #define lladdr 87 #define watchlo 88 #define watchhi 89 #define xcontext 90 #define rdebug 91 #define depc 92 #define eepc 71 #define rindex 0 72 #define rrandom 1 73 #define entrylo0 2 74 #define entrylo1 3 75 #define context 4 76 #define pagemask 5 77 #define wired 6 78 #define badvaddr 8 79 #define count 9 80 #define entryhi 10 81 #define compare 11 82 #define status 12 83 #define cause 13 84 #define epc 14 85 #define rconfig 16 86 #define lladdr 17 87 #define watchlo 18 88 #define watchhi 19 89 #define xcontext 20 90 #define rdebug 23 91 #define depc 24 92 #define eepc 30 93 93 94 #endif /* KERN_mips32_REGNAME_H_ */94 #endif 95 95 96 96 /** @} -
kernel/arch/mips32/include/atomic.h
rfb150d78 r46c20c8 27 27 */ 28 28 29 /** @addtogroup mips32 29 /** @addtogroup mips32 30 30 * @{ 31 31 */ … … 35 35 #ifndef KERN_mips32_ATOMIC_H_ 36 36 #define KERN_mips32_ATOMIC_H_ 37 38 #include <trace.h> 37 39 38 40 #define atomic_inc(x) ((void) atomic_add(x, 1)) … … 51 53 * 52 54 * @return Value after addition. 55 * 53 56 */ 54 static inline long atomic_add(atomic_t *val, int i) 57 NO_TRACE static inline atomic_count_t atomic_add(atomic_t *val, 58 atomic_count_t i) 55 59 { 56 long tmp, v; 60 atomic_count_t tmp; 61 atomic_count_t v; 57 62 58 63 asm volatile ( … … 64 69 " beq %0, %4, 1b\n" /* if the atomic operation failed, try again */ 65 70 " nop\n" 66 : "=&r" (tmp), "+m" (val->count), "=&r" (v) 67 : "r" (i), "i" (0) 71 : "=&r" (tmp), 72 "+m" (val->count), 73 "=&r" (v) 74 : "r" (i), 75 "i" (0) 68 76 ); 69 77 … … 71 79 } 72 80 73 static inline uint32_t test_and_set(atomic_t *val) { 74 uint32_t tmp, v; 81 NO_TRACE static inline atomic_count_t test_and_set(atomic_t *val) 82 { 83 atomic_count_t tmp; 84 atomic_count_t v; 75 85 76 86 asm volatile ( … … 82 92 " beqz %0, 1b\n" 83 93 "2:\n" 84 : "=&r" (tmp), "+m" (val->count), "=&r" (v) 94 : "=&r" (tmp), 95 "+m" (val->count), 96 "=&r" (v) 85 97 : "i" (1) 86 98 ); … … 89 101 } 90 102 91 static inline void atomic_lock_arch(atomic_t *val) { 103 NO_TRACE static inline void atomic_lock_arch(atomic_t *val) 104 { 92 105 do { 93 while (val->count) 94 ; 106 while (val->count); 95 107 } while (test_and_set(val)); 96 108 } -
kernel/arch/mips32/include/barrier.h
rfb150d78 r46c20c8 27 27 */ 28 28 29 /** @addtogroup mips32 29 /** @addtogroup mips32 30 30 * @{ 31 31 */ … … 39 39 * TODO: implement true MIPS memory barriers for macros below. 40 40 */ 41 #define CS_ENTER_BARRIER() 42 #define CS_LEAVE_BARRIER() 41 #define CS_ENTER_BARRIER() asm volatile ("" ::: "memory") 42 #define CS_LEAVE_BARRIER() asm volatile ("" ::: "memory") 43 43 44 #define memory_barrier() 45 #define read_barrier() 46 #define write_barrier() 44 #define memory_barrier() asm volatile ("" ::: "memory") 45 #define read_barrier() asm volatile ("" ::: "memory") 46 #define write_barrier() asm volatile ("" ::: "memory") 47 47 48 48 #define smc_coherence(a) -
kernel/arch/mips32/include/context.h
rfb150d78 r46c20c8 46 46 #ifndef __ASM__ 47 47 48 #include < arch/types.h>48 #include <typedefs.h> 49 49 50 50 #define context_set(ctx, pc, stack, size) \ -
kernel/arch/mips32/include/context_offset.h
rfb150d78 r46c20c8 60 60 # define OFFSET_F30 0x5c 61 61 #endif /* KERNEL */ 62 63 /* istate_t */64 #define EOFFSET_AT 0x065 #define EOFFSET_V0 0x466 #define EOFFSET_V1 0x867 #define EOFFSET_A0 0xc68 #define EOFFSET_A1 0x1069 #define EOFFSET_A2 0x1470 #define EOFFSET_A3 0x1871 #define EOFFSET_T0 0x1c72 #define EOFFSET_T1 0x2073 #define EOFFSET_T2 0x2474 #define EOFFSET_T3 0x2875 #define EOFFSET_T4 0x2c76 #define EOFFSET_T5 0x3077 #define EOFFSET_T6 0x3478 #define EOFFSET_T7 0x3879 #define EOFFSET_T8 0x3c80 #define EOFFSET_T9 0x4081 #define EOFFSET_GP 0x4482 #define EOFFSET_SP 0x4883 #define EOFFSET_RA 0x4c84 #define EOFFSET_LO 0x5085 #define EOFFSET_HI 0x5486 #define EOFFSET_STATUS 0x5887 #define EOFFSET_EPC 0x5c88 #define EOFFSET_K1 0x6089 #define REGISTER_SPACE 104 /* respect stack alignment */90 62 91 63 #ifdef __ASM__ -
kernel/arch/mips32/include/cp0.h
rfb150d78 r46c20c8 36 36 #define KERN_mips32_CP0_H_ 37 37 38 #include <arch/types.h> 38 #ifdef KERNEL 39 #include <typedefs.h> 40 #else 41 #include <sys/types.h> 42 #endif 39 43 40 #define cp0_status_ie_enabled_bit 41 #define cp0_status_exl_exception_bit 42 #define cp0_status_erl_error_bit 43 #define cp0_status_um_bit 44 #define cp0_status_bev_bootstrap_bit 45 #define cp0_status_fpu_bit 44 #define cp0_status_ie_enabled_bit (1 << 0) 45 #define cp0_status_exl_exception_bit (1 << 1) 46 #define cp0_status_erl_error_bit (1 << 2) 47 #define cp0_status_um_bit (1 << 4) 48 #define cp0_status_bev_bootstrap_bit (1 << 22) 49 #define cp0_status_fpu_bit (1 << 29) 46 50 47 51 #define cp0_status_im_shift 8 -
kernel/arch/mips32/include/cpu.h
rfb150d78 r46c20c8 36 36 #define KERN_mips32_CPU_H_ 37 37 38 #include < arch/types.h>38 #include <typedefs.h> 39 39 #include <arch/asm.h> 40 40 -
kernel/arch/mips32/include/cycle.h
rfb150d78 r46c20c8 38 38 #include <arch/cp0.h> 39 39 #include <arch/interrupt.h> 40 #include <trace.h> 40 41 41 static inline uint64_t get_cycle(void)42 NO_TRACE static inline uint64_t get_cycle(void) 42 43 { 43 44 return ((uint64_t) count_hi << 32) + ((uint64_t) cp0_count_read()); -
kernel/arch/mips32/include/debug.h
rfb150d78 r46c20c8 36 36 #define KERN_mips23_DEBUG_H_ 37 37 38 /** simulator enters the trace mode */ 39 #define ___traceon() asm volatile ( "\t.word\t0x39\n"); 40 /** simulator leaves the trace mode */ 41 #define ___traceoff() asm volatile ( "\t.word\t0x3d\n"); 42 /** register dump */ 43 #define ___regview() asm volatile ( "\t.word\t0x37\n"); 44 /** halt the simulator */ 45 #define ___halt() asm volatile ( "\t.word\t0x28\n"); 46 /** simulator enters interactive mode */ 47 #define ___intmode() asm volatile ( "\t.word\t0x29\n"); 38 /** Enter the simulator trace mode */ 39 #define ___traceon() asm volatile ( "\t.word\t0x39\n"); 40 41 /** Leave the simulator trace mode */ 42 #define ___traceoff() asm volatile ( "\t.word\t0x3d\n"); 43 44 /** Ask the simulator to dump registers */ 45 #define ___regview() asm volatile ( "\t.word\t0x37\n"); 46 47 /** Halt the simulator */ 48 #define ___halt() asm volatile ( "\t.word\t0x28\n"); 49 50 /** Enter the simulator interactive mode */ 51 #define ___intmode() asm volatile ( "\t.word\t0x29\n"); 48 52 49 53 #endif -
kernel/arch/mips32/include/debugger.h
rfb150d78 r46c20c8 37 37 38 38 #include <arch/exception.h> 39 #include < arch/types.h>39 #include <typedefs.h> 40 40 41 41 #define BKPOINTS_MAX 10 42 42 43 #define BKPOINT_INPROG (1 << 0) /**< Breakpoint was shot */ 44 #define BKPOINT_ONESHOT (1 << 1) /**< One-time breakpoint,mandatory for j/b 45 instructions */ 46 #define BKPOINT_REINST (1 << 2) /**< Breakpoint is set on the next 47 instruction, so that it could be 48 reinstalled on the previous one */ 49 #define BKPOINT_FUNCCALL (1 << 3) /**< Call a predefined function */ 43 /** Breakpoint was shot */ 44 #define BKPOINT_INPROG (1 << 0) 45 46 /** One-time breakpoint, mandatory for j/b instructions */ 47 #define BKPOINT_ONESHOT (1 << 1) 48 49 /** 50 * Breakpoint is set on the next instruction, so that it 51 * could be reinstalled on the previous one 52 */ 53 #define BKPOINT_REINST (1 << 2) 54 55 /** Call a predefined function */ 56 #define BKPOINT_FUNCCALL (1 << 3) 57 50 58 51 59 typedef struct { 52 uintptr_t address; /**< Breakpoint address */53 unative_t instruction; /**< Original instruction */60 uintptr_t address; /**< Breakpoint address */ 61 unative_t instruction; /**< Original instruction */ 54 62 unative_t nextinstruction; /**< Original instruction following break */ 55 int flags;/**< Flags regarding breakpoint */63 unsigned int flags; /**< Flags regarding breakpoint */ 56 64 size_t counter; 57 void (*bkfunc)(void * b, istate_t *istate);65 void (*bkfunc)(void *, istate_t *); 58 66 } bpinfo_t; 59 67 68 extern bpinfo_t breakpoints[BKPOINTS_MAX]; 69 70 extern bool is_jump(unative_t); 71 60 72 extern void debugger_init(void); 61 void debugger_bpoint(istate_t *istate); 62 63 extern bpinfo_t breakpoints[BKPOINTS_MAX]; 73 extern void debugger_bpoint(istate_t *); 64 74 65 75 #endif -
kernel/arch/mips32/include/exception.h
rfb150d78 r46c20c8 27 27 */ 28 28 29 /** @addtogroup mips32 29 /** @addtogroup mips32 30 30 * @{ 31 31 */ … … 36 36 #define KERN_mips32_EXCEPTION_H_ 37 37 38 #include < arch/types.h>39 #include <arch/ cp0.h>38 #include <typedefs.h> 39 #include <arch/istate.h> 40 40 41 #define EXC_Int 0 42 #define EXC_Mod 1 43 #define EXC_TLBL 2 44 #define EXC_TLBS 3 45 #define EXC_AdEL 4 46 #define EXC_AdES 5 47 #define EXC_IBE 6 48 #define EXC_DBE 7 49 #define EXC_Sys 8 50 #define EXC_Bp 9 51 #define EXC_RI 10 52 #define EXC_CpU 11 53 #define EXC_Ov 12 54 #define EXC_Tr 13 55 #define EXC_VCEI 14 56 #define EXC_FPE 15 57 #define EXC_WATCH 23 58 #define EXC_VCED 31 59 60 typedef struct istate { 61 uint32_t at; 62 uint32_t v0; 63 uint32_t v1; 64 uint32_t a0; 65 uint32_t a1; 66 uint32_t a2; 67 uint32_t a3; 68 uint32_t t0; 69 uint32_t t1; 70 uint32_t t2; 71 uint32_t t3; 72 uint32_t t4; 73 uint32_t t5; 74 uint32_t t6; 75 uint32_t t7; 76 uint32_t t8; 77 uint32_t t9; 78 uint32_t gp; 79 uint32_t sp; 80 uint32_t ra; 81 82 uint32_t lo; 83 uint32_t hi; 84 85 uint32_t status; /* cp0_status */ 86 uint32_t epc; /* cp0_epc */ 87 uint32_t k1; /* We use it as thread-local pointer */ 88 } istate_t; 89 90 static inline void istate_set_retaddr(istate_t *istate, uintptr_t retaddr) 91 { 92 istate->epc = retaddr; 93 } 94 95 /** Return true if exception happened while in userspace */ 96 static inline int istate_from_uspace(istate_t *istate) 97 { 98 return istate->status & cp0_status_um_bit; 99 } 100 static inline unative_t istate_get_pc(istate_t *istate) 101 { 102 return istate->epc; 103 } 104 static inline unative_t istate_get_fp(istate_t *istate) 105 { 106 return 0; /* FIXME */ 107 } 41 #define EXC_Int 0 42 #define EXC_Mod 1 43 #define EXC_TLBL 2 44 #define EXC_TLBS 3 45 #define EXC_AdEL 4 46 #define EXC_AdES 5 47 #define EXC_IBE 6 48 #define EXC_DBE 7 49 #define EXC_Sys 8 50 #define EXC_Bp 9 51 #define EXC_RI 10 52 #define EXC_CpU 11 53 #define EXC_Ov 12 54 #define EXC_Tr 13 55 #define EXC_VCEI 14 56 
#define EXC_FPE 15 57 #define EXC_WATCH 23 58 #define EXC_VCED 31 108 59 109 60 extern void exception(istate_t *istate); -
kernel/arch/mips32/include/faddr.h
rfb150d78 r46c20c8 27 27 */ 28 28 29 /** @addtogroup mips32 29 /** @addtogroup mips32 30 30 * @{ 31 31 */ … … 36 36 #define KERN_mips32_FADDR_H_ 37 37 38 #include < arch/types.h>38 #include <typedefs.h> 39 39 40 #define FADDR(fptr) 40 #define FADDR(fptr) ((uintptr_t) (fptr)) 41 41 42 42 #endif -
kernel/arch/mips32/include/fpu_context.h
rfb150d78 r46c20c8 36 36 #define KERN_mips32_FPU_CONTEXT_H_ 37 37 38 #include < arch/types.h>38 #include <typedefs.h> 39 39 40 40 #define FPU_CONTEXT_ALIGN sizeof(unative_t) -
kernel/arch/mips32/include/memstr.h
rfb150d78 r46c20c8 27 27 */ 28 28 29 /** @addtogroup mips32 29 /** @addtogroup mips32 30 30 * @{ 31 31 */ … … 38 38 #define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) 39 39 40 extern void memsetw(void *dst, size_t cnt, uint16_t x); 41 extern void memsetb(void *dst, size_t cnt, uint8_t x); 42 43 extern int memcmp(const void *a, const void *b, size_t cnt); 40 extern void memsetw(void *, size_t, uint16_t); 41 extern void memsetb(void *, size_t, uint8_t); 44 42 45 43 #endif -
kernel/arch/mips32/include/mm/as.h
rfb150d78 r46c20c8 27 27 */ 28 28 29 /** @addtogroup mips32mm 29 /** @addtogroup mips32mm 30 30 * @{ 31 31 */ … … 36 36 #define KERN_mips32_AS_H_ 37 37 38 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH 38 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH 0 39 39 40 40 #define KERNEL_ADDRESS_SPACE_START_ARCH (unsigned long) 0x80000000 -
kernel/arch/mips32/include/mm/asid.h
rfb150d78 r46c20c8 36 36 #define KERN_mips32_ASID_H_ 37 37 38 #include < arch/types.h>38 #include <typedefs.h> 39 39 40 40 #define ASID_MAX_ARCH 255 /* 2^8 - 1 */ -
kernel/arch/mips32/include/mm/page.h
rfb150d78 r46c20c8 37 37 38 38 #include <arch/mm/frame.h> 39 #include <trace.h> 39 40 40 41 #define PAGE_WIDTH FRAME_WIDTH … … 155 156 156 157 157 static inline unsigned int get_pt_flags(pte_t *pt, size_t i)158 NO_TRACE static inline unsigned int get_pt_flags(pte_t *pt, size_t i) 158 159 { 159 160 pte_t *p = &pt[i]; … … 168 169 } 169 170 170 static inline void set_pt_flags(pte_t *pt, size_t i, int flags)171 NO_TRACE static inline void set_pt_flags(pte_t *pt, size_t i, int flags) 171 172 { 172 173 pte_t *p = &pt[i]; -
kernel/arch/mips32/include/mm/tlb.h
rfb150d78 r46c20c8 36 36 #define KERN_mips32_TLB_H_ 37 37 38 #include <arch/types.h>39 38 #include <typedefs.h> 40 39 #include <arch/mm/asid.h> 41 40 #include <arch/exception.h> 41 #include <trace.h> 42 42 43 43 #define TLB_ENTRY_COUNT 48 … … 127 127 * Probe TLB for Matching Entry. 128 128 */ 129 static inline void tlbp(void)129 NO_TRACE static inline void tlbp(void) 130 130 { 131 131 asm volatile ("tlbp\n\t"); … … 137 137 * Read Indexed TLB Entry. 138 138 */ 139 static inline void tlbr(void)139 NO_TRACE static inline void tlbr(void) 140 140 { 141 141 asm volatile ("tlbr\n\t"); … … 146 146 * Write Indexed TLB Entry. 147 147 */ 148 static inline void tlbwi(void)148 NO_TRACE static inline void tlbwi(void) 149 149 { 150 150 asm volatile ("tlbwi\n\t"); … … 155 155 * Write Random TLB Entry. 156 156 */ 157 static inline void tlbwr(void)157 NO_TRACE static inline void tlbwr(void) 158 158 { 159 159 asm volatile ("tlbwr\n\t"); -
kernel/arch/mips32/include/smp/dorder.h
rfb150d78 r46c20c8 27 27 */ 28 28 29 /** @addtogroup mips32 30 * @{ 31 */ 32 /** @file 33 */ 34 29 35 #ifndef KERN_mips32_DORDER_H_ 30 36 #define KERN_mips32_DORDER_H_ 31 37 32 extern void ipi_broadcast_arch(int ipi); 38 #include <typedefs.h> 39 40 extern uint32_t dorder_cpuid(void); 41 extern void dorder_ipi_ack(uint32_t); 33 42 34 43 #endif 44 45 /** @} 46 */ -
kernel/arch/mips32/include/types.h
rfb150d78 r46c20c8 27 27 */ 28 28 29 /** @addtogroup mips32 29 /** @addtogroup mips32 30 30 * @{ 31 31 */ … … 35 35 #ifndef KERN_mips32_TYPES_H_ 36 36 #define KERN_mips32_TYPES_H_ 37 38 typedef signed char int8_t;39 typedef signed short int16_t;40 typedef signed long int32_t;41 typedef signed long long int64_t;42 43 typedef unsigned char uint8_t;44 typedef unsigned short uint16_t;45 typedef unsigned long uint32_t;46 typedef unsigned long long uint64_t;47 37 48 38 typedef uint32_t size_t; … … 55 45 typedef uint32_t unative_t; 56 46 typedef int32_t native_t; 47 typedef uint32_t atomic_count_t; 57 48 58 49 typedef struct { 59 50 } fncptr_t; 60 51 61 #define PRIp "x" /**< Format for uintptr_t. */62 #define PRIs "u" /**< Format for size_t. */52 #define INTN_C(c) INT32_C(c) 53 #define UINTN_C(c) UINT32_C(c) 63 54 64 #define PRId8 "d" /**< Format for int8_t. */ 65 #define PRId16 "d" /**< Format for int16_t. */ 66 #define PRId32 "ld" /**< Format for int32_t. */ 67 #define PRId64 "lld" /**< Format for int64_t. */ 68 #define PRIdn "d" /**< Format for native_t. */ 69 70 #define PRIu8 "u" /**< Format for uint8_t. */ 71 #define PRIu16 "u" /**< Format for uint16_t. */ 72 #define PRIu32 "u" /**< Format for uint32_t. */ 73 #define PRIu64 "llu" /**< Format for uint64_t. */ 74 #define PRIun "u" /**< Format for unative_t. */ 75 76 #define PRIx8 "x" /**< Format for hexadecimal (u)int8_t. */ 77 #define PRIx16 "x" /**< Format for hexadecimal (u)int16_t. */ 78 #define PRIx32 "x" /**< Format for hexadecimal (u)uint32_t. */ 79 #define PRIx64 "llx" /**< Format for hexadecimal (u)int64_t. */ 80 #define PRIxn "x" /**< Format for hexadecimal (u)native_t. */ 55 #define PRIdn PRId32 /**< Format for native_t. */ 56 #define PRIun PRIu32 /**< Format for unative_t. */ 57 #define PRIxn PRIx32 /**< Format for hexadecimal unative_t. */ 58 #define PRIua PRIu32 /**< Format for atomic_count_t. */ 81 59 82 60 #endif -
kernel/arch/mips32/src/asm.S
rfb150d78 r46c20c8 1 # 2 # Copyright (c) 2003-2004Jakub Jermar3 #All rights reserved.4 # 5 #Redistribution and use in source and binary forms, with or without6 #modification, are permitted provided that the following conditions7 #are met:8 # 9 #- Redistributions of source code must retain the above copyright10 #notice, this list of conditions and the following disclaimer.11 #- Redistributions in binary form must reproduce the above copyright12 #notice, this list of conditions and the following disclaimer in the13 #documentation and/or other materials provided with the distribution.14 #- The name of the author may not be used to endorse or promote products15 #derived from this software without specific prior written permission.16 # 17 #THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR18 #IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES19 #OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.20 #IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,21 #INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT22 #NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,23 #DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY24 #THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT25 #(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF26 #THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.27 # 1 /* 2 * Copyright (c) 2003 Jakub Jermar 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * - Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 
11 * - Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * - The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 */ 28 28 29 29 #include <arch/asm/regname.h> … … 57 57 nop 58 58 59 60 59 .global memsetb 61 60 memsetb: … … 63 62 nop 64 63 65 66 64 .global memsetw 67 65 memsetw: 68 66 j _memsetw 69 67 nop 70 71 68 72 69 .global memcpy … … 78 75 memcpy_from_uspace: 79 76 memcpy_to_uspace: 80 move $t2, $a0 # save dst77 move $t2, $a0 /* save dst */ 81 78 82 79 addiu $v0, $a1, 3 83 li $v1, -4 # 0xfffffffffffffffc80 li $v1, -4 /* 0xfffffffffffffffc */ 84 81 and $v0, $v0, $v1 85 82 beq $a1, $v0, 3f … … 149 146 move $v0, $zero 150 147 151 152 153 148 .macro fpu_gp_save reg ctx 154 149 mfc1 $t0, $\reg … … 164 159 cfc1 $t0, $1 165 160 sw $t0, (\reg + 32) * 4(\ctx) 166 .endm 161 .endm 167 162 168 163 .macro fpu_ct_restore reg ctx … … 170 165 ctc1 $t0, $\reg 171 166 .endm 172 173 167 174 168 .global fpu_context_save … … 313 307 j $ra 314 308 nop 309 310 .global early_putchar 311 early_putchar: 312 j $ra 313 nop -
kernel/arch/mips32/src/cache.c
rfb150d78 r46c20c8 39 39 void cache_error(istate_t *istate) 40 40 { 41 panic("cache_error exception (epc=%p).", istate->epc);41 panic("cache_error exception (epc=%p).", (void *) istate->epc); 42 42 } 43 43 -
kernel/arch/mips32/src/context.S
rfb150d78 r46c20c8 28 28 29 29 #include <arch/context_offset.h> 30 31 .text 30 31 .text 32 32 33 33 .set noat … … 38 38 .global context_restore_arch 39 39 40 41 40 context_save_arch: 42 41 CONTEXT_SAVE_ARCH_CORE $a0 43 42 44 43 # context_save returns 1 45 44 j $31 46 li $2, 1 47 45 li $2, 1 46 48 47 context_restore_arch: 49 48 CONTEXT_RESTORE_ARCH_CORE $a0 50 49 51 50 # context_restore returns 0 52 51 j $31 53 xor $2, $2 52 xor $2, $2 -
kernel/arch/mips32/src/cpu/cpu.c
rfb150d78 r46c20c8 40 40 41 41 struct data_t { 42 c har *vendor;43 c har *model;42 const char *vendor; 43 const char *model; 44 44 }; 45 45 -
kernel/arch/mips32/src/ddi/ddi.c
rfb150d78 r46c20c8 35 35 #include <ddi/ddi.h> 36 36 #include <proc/task.h> 37 #include < arch/types.h>37 #include <typedefs.h> 38 38 #include <security/cap.h> 39 39 #include <arch.h> -
kernel/arch/mips32/src/debug/stacktrace.c
rfb150d78 r46c20c8 33 33 */ 34 34 35 /* 36 * This stack tracing code is based on the suggested algorithm described on page 37 * 3-27 and 3-28 of: 38 * 39 * SYSTEM V 40 * APPLICATION BINARY INTERFACE 41 * 42 * MIPS RISC Processor 43 * Supplement 44 * 3rd Edition 45 * 46 * Unfortunately, GCC generates code which is not entirely compliant with this 47 * method. For example, it places the "jr ra" instruction quite arbitrarily in 48 * the middle of the function which makes the original algorithm unapplicable. 49 * 50 * We deal with this problem by simply not using those parts of the algorithm 51 * that rely on the "jr ra" instruction occurring in the last basic block of a 52 * function, which gives us still usable, but less reliable stack tracer. The 53 * unreliability stems from the fact that under some circumstances it can become 54 * confused and produce incorrect or incomplete stack trace. We apply extra 55 * sanity checks so that the algorithm is still safe and should not crash the 56 * system. 57 * 58 * Even though not perfect, our solution is pretty lightweight, especially when 59 * compared with a prospective alternative solution based on additional 60 * debugging information stored directly in the kernel image. 
61 */ 62 35 63 #include <stacktrace.h> 36 64 #include <syscall/copy.h> 37 #include <arch/types.h>38 65 #include <typedefs.h> 39 40 bool kernel_frame_pointer_validate(uintptr_t fp) 66 #include <arch/debugger.h> 67 #include <print.h> 68 69 #define R0 0U 70 #define SP 29U 71 #define RA 31U 72 73 #define OP_SHIFT 26 74 #define RS_SHIFT 21 75 #define RT_SHIFT 16 76 #define RD_SHIFT 11 77 78 #define HINT_SHIFT 6 79 #define BASE_SHIFT RS_SHIFT 80 #define IMM_SHIFT 0 81 #define OFFSET_SHIFT IMM_SHIFT 82 83 #define RS_MASK (0x1f << RS_SHIFT) 84 #define RT_MASK (0x1f << RT_SHIFT) 85 #define RD_MASK (0x1f << RD_SHIFT) 86 #define HINT_MASK (0x1f << HINT_SHIFT) 87 #define BASE_MASK RS_MASK 88 #define IMM_MASK (0xffff << IMM_SHIFT) 89 #define OFFSET_MASK IMM_MASK 90 91 #define RS_GET(inst) (((inst) & RS_MASK) >> RS_SHIFT) 92 #define RD_GET(inst) (((inst) & RD_MASK) >> RD_SHIFT) 93 #define IMM_GET(inst) (int16_t)(((inst) & IMM_MASK) >> IMM_SHIFT) 94 #define BASE_GET(inst) RS_GET(inst) 95 #define OFFSET_GET(inst) IMM_GET(inst) 96 97 #define ADDU_R_SP_R0_TEMPL \ 98 ((0x0 << OP_SHIFT) | (SP << RS_SHIFT) | (R0 << RT_SHIFT) | 0x21) 99 #define ADDU_SP_R_R0_TEMPL \ 100 ((0x0 << OP_SHIFT) | (SP << RD_SHIFT) | (R0 << RT_SHIFT) | 0x21) 101 #define ADDI_SP_SP_IMM_TEMPL \ 102 ((0x8 << OP_SHIFT) | (SP << RS_SHIFT) | (SP << RT_SHIFT)) 103 #define ADDIU_SP_SP_IMM_TEMPL \ 104 ((0x9 << OP_SHIFT) | (SP << RS_SHIFT) | (SP << RT_SHIFT)) 105 #define JR_RA_TEMPL \ 106 ((0x0 << OP_SHIFT) | (RA << RS_SHIFT) | (0x0 << HINT_SHIFT) | 0x8) 107 #define SW_RA_TEMPL \ 108 ((0x2b << OP_SHIFT) | (RA << RT_SHIFT)) 109 110 #define IS_ADDU_R_SP_R0(inst) \ 111 (((inst) & ~RD_MASK) == ADDU_R_SP_R0_TEMPL) 112 #define IS_ADDU_SP_R_R0(inst) \ 113 (((inst) & ~RS_MASK) == ADDU_SP_R_R0_TEMPL) 114 #define IS_ADDI_SP_SP_IMM(inst) \ 115 (((inst) & ~IMM_MASK) == ADDI_SP_SP_IMM_TEMPL) 116 #define IS_ADDIU_SP_SP_IMM(inst) \ 117 (((inst) & ~IMM_MASK) == ADDIU_SP_SP_IMM_TEMPL) 118 #define IS_JR_RA(inst) \ 119 (((inst) & 
~HINT_MASK) == JR_RA_TEMPL) 120 #define IS_SW_RA(inst) \ 121 (((inst) & ~(BASE_MASK | OFFSET_MASK)) == SW_RA_TEMPL) 122 123 extern char ktext_start; 124 extern char ktext_end; 125 126 static bool bounds_check(uintptr_t pc) 127 { 128 return (pc >= (uintptr_t) &ktext_start) && 129 (pc < (uintptr_t) &ktext_end); 130 } 131 132 static bool 133 scan(stack_trace_context_t *ctx, uintptr_t *prev_fp, uintptr_t *prev_ra) 134 { 135 uint32_t *inst = (void *) ctx->pc; 136 bool has_fp = false; 137 size_t frame_size; 138 unsigned int fp = SP; 139 140 do { 141 inst--; 142 if (!bounds_check((uintptr_t) inst)) 143 return false; 144 #if 0 145 /* 146 * This is one of the situations in which the theory (ABI) does 147 * not meet the practice (GCC). GCC simply does not place the 148 * JR $ra instruction as dictated by the ABI, rendering the 149 * official stack tracing algorithm somewhat unapplicable. 150 */ 151 152 if (IS_ADDU_R_SP_R0(*inst)) { 153 uint32_t *cur; 154 fp = RD_GET(*inst); 155 /* 156 * We have a candidate for frame pointer. 157 */ 158 159 /* Seek to the end of this function. */ 160 for (cur = inst + 1; !IS_JR_RA(*cur); cur++) 161 ; 162 /* Scan the last basic block */ 163 for (cur--; !is_jump(*(cur - 1)); cur--) { 164 if (IS_ADDU_SP_R_R0(*cur) && 165 (fp == RS_GET(*cur))) { 166 has_fp = true; 167 } 168 } 169 continue; 170 } 171 172 if (IS_JR_RA(*inst)) { 173 if (!ctx->istate) 174 return false; 175 /* 176 * No stack frame has been allocated yet. 177 * Use the values stored in istate. 178 */ 179 if (prev_fp) 180 *prev_fp = ctx->istate->sp; 181 if (prev_ra) 182 *prev_ra = ctx->istate->ra - 8; 183 ctx->istate = NULL; 184 return true; 185 } 186 #endif 187 188 } while ((!IS_ADDIU_SP_SP_IMM(*inst) && !IS_ADDI_SP_SP_IMM(*inst)) || 189 (IMM_GET(*inst) >= 0)); 190 191 /* 192 * We are at the instruction which allocates the space for the current 193 * stack frame. 
194 */ 195 frame_size = -IMM_GET(*inst); 196 if (prev_fp) 197 *prev_fp = ctx->fp + frame_size; 198 199 /* 200 * Scan the first basic block for the occurrence of 201 * SW $ra, OFFSET($base). 202 */ 203 for (inst++; !is_jump(*(inst - 1)) && (uintptr_t) inst < ctx->pc; 204 inst++) { 205 if (IS_SW_RA(*inst)) { 206 unsigned int base = BASE_GET(*inst); 207 int16_t offset = OFFSET_GET(*inst); 208 209 if (base == SP || (has_fp && base == fp)) { 210 uint32_t *addr = (void *) (ctx->fp + offset); 211 212 if (offset % 4 != 0) 213 return false; 214 /* cannot store below current stack pointer */ 215 if (offset < 0) 216 return false; 217 /* too big offsets are suspicious */ 218 if ((size_t) offset > sizeof(istate_t)) 219 return false; 220 221 if (prev_ra) 222 *prev_ra = *addr; 223 return true; 224 } 225 } 226 } 227 228 /* 229 * The first basic block does not save the return address or saves it 230 * after ctx->pc, which means that the correct value is in istate. 231 */ 232 if (prev_ra) { 233 if (!ctx->istate) 234 return false; 235 *prev_ra = ctx->istate->ra - 8; 236 ctx->istate = NULL; 237 } 238 return true; 239 } 240 241 242 bool kernel_stack_trace_context_validate(stack_trace_context_t *ctx) 243 { 244 return !((ctx->fp == 0) || ((ctx->fp % 8) != 0) || 245 (ctx->pc % 4 != 0) || !bounds_check(ctx->pc)); 246 } 247 248 bool kernel_frame_pointer_prev(stack_trace_context_t *ctx, uintptr_t *prev) 249 { 250 return scan(ctx, prev, NULL); 251 } 252 253 bool kernel_return_address_get(stack_trace_context_t *ctx, uintptr_t *ra) 254 { 255 return scan(ctx, NULL, ra); 256 } 257 258 bool uspace_stack_trace_context_validate(stack_trace_context_t *ctx) 41 259 { 42 260 return false; 43 261 } 44 262 45 bool kernel_frame_pointer_prev(uintptr_t fp, uintptr_t *prev)263 bool uspace_frame_pointer_prev(stack_trace_context_t *ctx, uintptr_t *prev) 46 264 { 47 265 return false; 48 266 } 49 267 50 bool kernel_return_address_get(uintptr_t fp, uintptr_t *ra)268 bool 
uspace_return_address_get(stack_trace_context_t *ctx, uintptr_t *ra) 51 269 { 52 270 return false; 53 271 } 54 272 55 bool uspace_frame_pointer_validate(uintptr_t fp)56 {57 return false;58 }59 60 bool uspace_frame_pointer_prev(uintptr_t fp, uintptr_t *prev)61 {62 return false;63 }64 65 bool uspace_return_address_get(uintptr_t fp, uintptr_t *ra)66 {67 return false;68 }69 70 273 /** @} 71 274 */ -
kernel/arch/mips32/src/debug/stacktrace_asm.S
rfb150d78 r46c20c8 37 37 frame_pointer_get: 38 38 j $ra 39 xor $v0, $v039 move $v0, $sp 40 40 41 41 program_counter_get: 42 42 j $ra 43 xor $v0, $v043 move $v0, $ra -
kernel/arch/mips32/src/debugger.c
rfb150d78 r46c20c8 46 46 47 47 bpinfo_t breakpoints[BKPOINTS_MAX]; 48 SPINLOCK_INITIALIZE(bkpoint_lock);48 IRQ_SPINLOCK_STATIC_INITIALIZE(bkpoint_lock); 49 49 50 50 #ifdef CONFIG_KCONSOLE 51 51 52 static int cmd_print_breakpoints(cmd_arg_t *argv); 52 static int cmd_print_breakpoints(cmd_arg_t *); 53 static int cmd_del_breakpoint(cmd_arg_t *); 54 static int cmd_add_breakpoint(cmd_arg_t *); 55 53 56 static cmd_info_t bkpts_info = { 54 57 .name = "bkpts", … … 58 61 }; 59 62 60 static int cmd_del_breakpoint(cmd_arg_t *argv);61 63 static cmd_arg_t del_argv = { 62 64 .type = ARG_TYPE_INT 63 65 }; 66 64 67 static cmd_info_t delbkpt_info = { 65 68 .name = "delbkpt", 66 .description = " delbkpt <number> -Delete breakpoint.",69 .description = "Delete breakpoint.", 67 70 .func = cmd_del_breakpoint, 68 71 .argc = 1, … … 70 73 }; 71 74 72 static int cmd_add_breakpoint(cmd_arg_t *argv);73 75 static cmd_arg_t add_argv = { 74 76 .type = ARG_TYPE_INT 75 77 }; 78 76 79 static cmd_info_t addbkpt_info = { 77 80 .name = "addbkpt", 78 .description = "addbkpt <&symbol> - new bkpoint. Break on J/Branch " 79 "insts unsupported.", 81 .description = "Add bkpoint (break on J/Branch insts unsupported).", 80 82 .func = cmd_add_breakpoint, 81 83 .argc = 1, … … 89 91 static cmd_info_t addbkpte_info = { 90 92 .name = "addbkpte", 91 .description = "addebkpte <&symbol> <&func> - new bkpoint. 
Call " 92 "func(or Nothing if 0).", 93 .description = "Add bkpoint with a trigger function.", 93 94 .func = cmd_add_breakpoint, 94 95 .argc = 2, … … 100 101 uint32_t value; 101 102 } jmpinstr[] = { 102 {0xf3ff0000, 0x41000000}, /* BCzF */ 103 {0xf3ff0000, 0x41020000}, /* BCzFL */ 104 {0xf3ff0000, 0x41010000}, /* BCzT */ 105 {0xf3ff0000, 0x41030000}, /* BCzTL */ 106 {0xfc000000, 0x10000000}, /* BEQ */ 107 {0xfc000000, 0x50000000}, /* BEQL */ 108 {0xfc1f0000, 0x04010000}, /* BEQL */ 109 {0xfc1f0000, 0x04110000}, /* BGEZAL */ 110 {0xfc1f0000, 0x04130000}, /* BGEZALL */ 111 {0xfc1f0000, 0x04030000}, /* BGEZL */ 112 {0xfc1f0000, 0x1c000000}, /* BGTZ */ 113 {0xfc1f0000, 0x5c000000}, /* BGTZL */ 114 {0xfc1f0000, 0x18000000}, /* BLEZ */ 115 {0xfc1f0000, 0x58000000}, /* BLEZL */ 116 {0xfc1f0000, 0x04000000}, /* BLTZ */ 117 {0xfc1f0000, 0x04100000}, /* BLTZAL */ 118 {0xfc1f0000, 0x04120000}, /* BLTZALL */ 119 {0xfc1f0000, 0x04020000}, /* BLTZL */ 120 {0xfc000000, 0x14000000}, /* BNE */ 121 {0xfc000000, 0x54000000}, /* BNEL */ 122 {0xfc000000, 0x08000000}, /* J */ 123 {0xfc000000, 0x0c000000}, /* JAL */ 124 {0xfc1f07ff, 0x00000009}, /* JALR */ 125 {0, 0} /* EndOfTable */ 126 }; 127 103 {0xf3ff0000, 0x41000000}, /* BCzF */ 104 {0xf3ff0000, 0x41020000}, /* BCzFL */ 105 {0xf3ff0000, 0x41010000}, /* BCzT */ 106 {0xf3ff0000, 0x41030000}, /* BCzTL */ 107 {0xfc000000, 0x10000000}, /* BEQ */ 108 {0xfc000000, 0x50000000}, /* BEQL */ 109 {0xfc1f0000, 0x04010000}, /* BEQL */ 110 {0xfc1f0000, 0x04110000}, /* BGEZAL */ 111 {0xfc1f0000, 0x04130000}, /* BGEZALL */ 112 {0xfc1f0000, 0x04030000}, /* BGEZL */ 113 {0xfc1f0000, 0x1c000000}, /* BGTZ */ 114 {0xfc1f0000, 0x5c000000}, /* BGTZL */ 115 {0xfc1f0000, 0x18000000}, /* BLEZ */ 116 {0xfc1f0000, 0x58000000}, /* BLEZL */ 117 {0xfc1f0000, 0x04000000}, /* BLTZ */ 118 {0xfc1f0000, 0x04100000}, /* BLTZAL */ 119 {0xfc1f0000, 0x04120000}, /* BLTZALL */ 120 {0xfc1f0000, 0x04020000}, /* BLTZL */ 121 {0xfc000000, 0x14000000}, /* BNE */ 122 {0xfc000000, 
0x54000000}, /* BNEL */ 123 {0xfc000000, 0x08000000}, /* J */ 124 {0xfc000000, 0x0c000000}, /* JAL */ 125 {0xfc1f07ff, 0x00000009}, /* JALR */ 126 {0, 0} /* end of table */ 127 }; 128 128 129 129 /** Test, if the given instruction is a jump or branch instruction 130 130 * 131 131 * @param instr Instruction code 132 * @return true - it is jump instruction, false otherwise 133 * 134 */ 135 static bool is_jump(unative_t instr) 136 { 137 int i; 138 132 * 133 * @return true if it is jump instruction, false otherwise 134 * 135 */ 136 bool is_jump(unative_t instr) 137 { 138 unsigned int i; 139 139 140 for (i = 0; jmpinstr[i].andmask; i++) { 140 141 if ((instr & jmpinstr[i].andmask) == jmpinstr[i].value) 141 142 return true; 142 143 } 143 144 144 145 return false; 145 146 } 146 147 147 /** Add new breakpoint to table */ 148 /** Add new breakpoint to table 149 * 150 */ 148 151 int cmd_add_breakpoint(cmd_arg_t *argv) 149 152 { 150 bpinfo_t *cur = NULL;151 ipl_t ipl;152 int i;153 154 153 if (argv->intval & 0x3) { 155 154 printf("Not aligned instruction, forgot to use &symbol?\n"); 156 155 return 1; 157 156 } 158 ipl = interrupts_disable();159 spinlock_lock(&bkpoint_lock);160 157 158 irq_spinlock_lock(&bkpoint_lock, true); 159 161 160 /* Check, that the breakpoints do not conflict */ 161 unsigned int i; 162 162 for (i = 0; i < BKPOINTS_MAX; i++) { 163 if (breakpoints[i].address == (uintptr_t) argv->intval) {163 if (breakpoints[i].address == (uintptr_t) argv->intval) { 164 164 printf("Duplicate breakpoint %d.\n", i); 165 spinlock_unlock(&bkpoint_lock);165 irq_spinlock_unlock(&bkpoint_lock, true); 166 166 return 0; 167 } else if ( breakpoints[i].address == (uintptr_t)argv->intval +168 sizeof(unative_t) ||breakpoints[i].address ==169 (uintptr_t) argv->intval - sizeof(unative_t)) {167 } else if ((breakpoints[i].address == (uintptr_t) argv->intval + 168 sizeof(unative_t)) || (breakpoints[i].address == 169 (uintptr_t) argv->intval - sizeof(unative_t))) { 170 170 printf("Adjacent 
breakpoints not supported, conflict " 171 171 "with %d.\n", i); 172 spinlock_unlock(&bkpoint_lock);172 irq_spinlock_unlock(&bkpoint_lock, true); 173 173 return 0; 174 174 } 175 175 176 176 } 177 178 for (i = 0; i < BKPOINTS_MAX; i++) 177 178 bpinfo_t *cur = NULL; 179 180 for (i = 0; i < BKPOINTS_MAX; i++) { 179 181 if (!breakpoints[i].address) { 180 182 cur = &breakpoints[i]; 181 183 break; 182 184 } 185 } 186 183 187 if (!cur) { 184 188 printf("Too many breakpoints.\n"); 185 spinlock_unlock(&bkpoint_lock); 186 interrupts_restore(ipl); 189 irq_spinlock_unlock(&bkpoint_lock, true); 187 190 return 0; 188 191 } 192 193 printf("Adding breakpoint on address %p\n", (void *) argv->intval); 194 189 195 cur->address = (uintptr_t) argv->intval; 190 printf("Adding breakpoint on address: %p\n", argv->intval); 191 cur->instruction = ((unative_t *)cur->address)[0]; 192 cur->nextinstruction = ((unative_t *)cur->address)[1]; 196 cur->instruction = ((unative_t *) cur->address)[0]; 197 cur->nextinstruction = ((unative_t *) cur->address)[1]; 193 198 if (argv == &add_argv) { 194 199 cur->flags = 0; 195 } else { /* We are add extended */200 } else { /* We are add extended */ 196 201 cur->flags = BKPOINT_FUNCCALL; 197 202 cur->bkfunc = (void (*)(void *, istate_t *)) argv[1].intval; 198 203 } 204 199 205 if (is_jump(cur->instruction)) 200 206 cur->flags |= BKPOINT_ONESHOT; 207 201 208 cur->counter = 0; 202 209 203 210 /* Set breakpoint */ 204 *((unative_t *) cur->address) = 0x0d;211 *((unative_t *) cur->address) = 0x0d; 205 212 smc_coherence(cur->address); 206 207 spinlock_unlock(&bkpoint_lock); 208 interrupts_restore(ipl); 209 213 214 irq_spinlock_unlock(&bkpoint_lock, true); 215 210 216 return 1; 211 217 } 212 218 213 /** Remove breakpoint from table */ 219 /** Remove breakpoint from table 220 * 221 */ 214 222 int cmd_del_breakpoint(cmd_arg_t *argv) 215 223 { 216 bpinfo_t *cur;217 ipl_t ipl;218 219 224 if (argv->intval > BKPOINTS_MAX) { 220 225 printf("Invalid breakpoint number.\n"); 
221 226 return 0; 222 227 } 223 ipl = interrupts_disable();224 spinlock_lock(&bkpoint_lock);225 226 cur = &breakpoints[argv->intval];228 229 irq_spinlock_lock(&bkpoint_lock, true); 230 231 bpinfo_t *cur = &breakpoints[argv->intval]; 227 232 if (!cur->address) { 228 233 printf("Breakpoint does not exist.\n"); 229 spinlock_unlock(&bkpoint_lock); 230 interrupts_restore(ipl); 234 irq_spinlock_unlock(&bkpoint_lock, true); 231 235 return 0; 232 236 } 237 233 238 if ((cur->flags & BKPOINT_INPROG) && (cur->flags & BKPOINT_ONESHOT)) { 234 239 printf("Cannot remove one-shot breakpoint in-progress\n"); 235 spinlock_unlock(&bkpoint_lock); 236 interrupts_restore(ipl); 240 irq_spinlock_unlock(&bkpoint_lock, true); 237 241 return 0; 238 242 } 239 ((uint32_t *)cur->address)[0] = cur->instruction;240 smc_coherence(((uint32_t *)cur->address)[0]);241 ((uint32_t *)cur->address)[1] = cur->nextinstruction;242 smc_coherence(((uint32_t *)cur->address)[1]);243 244 cur->address = NULL;245 246 spinlock_unlock(&bkpoint_lock);247 i nterrupts_restore(ipl);243 244 ((uint32_t *) cur->address)[0] = cur->instruction; 245 smc_coherence(((uint32_t *) cur->address)[0]); 246 ((uint32_t *) cur->address)[1] = cur->nextinstruction; 247 smc_coherence(((uint32_t *) cur->address)[1]); 248 249 cur->address = (uintptr_t) NULL; 250 251 irq_spinlock_unlock(&bkpoint_lock, true); 248 252 return 1; 249 253 } 250 254 251 /** Print table of active breakpoints */ 255 /** Print table of active breakpoints 256 * 257 */ 252 258 int cmd_print_breakpoints(cmd_arg_t *argv) 253 259 { 254 260 unsigned int i; 255 char *symbol; 256 257 printf("# Count Address INPROG ONESHOT FUNCCALL In symbol\n"); 258 printf("-- ----- ---------- ------ ------- -------- ---------\n"); 259 260 for (i = 0; i < BKPOINTS_MAX; i++) 261 262 printf("[nr] [count] [address ] [inprog] [oneshot] [funccall] [in symbol\n"); 263 264 for (i = 0; i < BKPOINTS_MAX; i++) { 261 265 if (breakpoints[i].address) { 262 symbol = symtab_fmt_name_lookup(266 const char 
*symbol = symtab_fmt_name_lookup( 263 267 breakpoints[i].address); 264 265 printf("%- 2u %-5d %#10zx %-6s %-7s %-8s %s\n", i,266 breakpoints[i].counter, breakpoints[i].address,268 269 printf("%-4u %7zu %p %-8s %-9s %-10s %s\n", i, 270 breakpoints[i].counter, (void *) breakpoints[i].address, 267 271 ((breakpoints[i].flags & BKPOINT_INPROG) ? "true" : 268 272 "false"), ((breakpoints[i].flags & BKPOINT_ONESHOT) … … 270 274 BKPOINT_FUNCCALL) ? "true" : "false"), symbol); 271 275 } 276 } 277 272 278 return 1; 273 279 } 274 280 275 #endif 276 277 /** Initialize debugger */ 281 #endif /* CONFIG_KCONSOLE */ 282 283 /** Initialize debugger 284 * 285 */ 278 286 void debugger_init() 279 287 { 280 int i;281 288 unsigned int i; 289 282 290 for (i = 0; i < BKPOINTS_MAX; i++) 283 breakpoints[i].address = NULL;284 291 breakpoints[i].address = (uintptr_t) NULL; 292 285 293 #ifdef CONFIG_KCONSOLE 286 294 cmd_initialize(&bkpts_info); 287 295 if (!cmd_register(&bkpts_info)) 288 296 printf("Cannot register command %s\n", bkpts_info.name); 289 297 290 298 cmd_initialize(&delbkpt_info); 291 299 if (!cmd_register(&delbkpt_info)) 292 300 printf("Cannot register command %s\n", delbkpt_info.name); 293 301 294 302 cmd_initialize(&addbkpt_info); 295 303 if (!cmd_register(&addbkpt_info)) 296 304 printf("Cannot register command %s\n", addbkpt_info.name); 297 305 298 306 cmd_initialize(&addbkpte_info); 299 307 if (!cmd_register(&addbkpte_info)) 300 308 printf("Cannot register command %s\n", addbkpte_info.name); 301 #endif 309 #endif /* CONFIG_KCONSOLE */ 302 310 } 303 311 304 312 /** Handle breakpoint 305 313 * 306 * Find breakpoint in breakpoint table. 314 * Find breakpoint in breakpoint table. 307 315 * If found, call kconsole, set break on next instruction and reexecute. 308 316 * If we are on "next instruction", set it back on the first and reexecute. 309 317 * If breakpoint not found in breakpoint table, call kconsole and start 310 318 * next instruction. 
319 * 311 320 */ 312 321 void debugger_bpoint(istate_t *istate) 313 322 { 314 bpinfo_t *cur = NULL;315 uintptr_t fireaddr = istate->epc;316 int i;317 318 323 /* test branch delay slot */ 319 324 if (cp0_cause_read() & 0x80000000) 320 325 panic("Breakpoint in branch delay slot not supported."); 321 322 spinlock_lock(&bkpoint_lock); 326 327 irq_spinlock_lock(&bkpoint_lock, false); 328 329 bpinfo_t *cur = NULL; 330 uintptr_t fireaddr = istate->epc; 331 unsigned int i; 332 323 333 for (i = 0; i < BKPOINTS_MAX; i++) { 324 334 /* Normal breakpoint */ 325 if ( fireaddr == breakpoints[i].address&&326 !(breakpoints[i].flags & BKPOINT_REINST)) {335 if ((fireaddr == breakpoints[i].address) && 336 (!(breakpoints[i].flags & BKPOINT_REINST))) { 327 337 cur = &breakpoints[i]; 328 338 break; 329 339 } 340 330 341 /* Reinst only breakpoint */ 331 342 if ((breakpoints[i].flags & BKPOINT_REINST) && … … 335 346 } 336 347 } 348 337 349 if (cur) { 338 350 if (cur->flags & BKPOINT_REINST) { 339 351 /* Set breakpoint on first instruction */ 340 ((uint32_t *) cur->address)[0] = 0x0d;352 ((uint32_t *) cur->address)[0] = 0x0d; 341 353 smc_coherence(((uint32_t *)cur->address)[0]); 354 342 355 /* Return back the second */ 343 ((uint32_t *)cur->address)[1] = cur->nextinstruction; 344 smc_coherence(((uint32_t *)cur->address)[1]); 356 ((uint32_t *) cur->address)[1] = cur->nextinstruction; 357 smc_coherence(((uint32_t *) cur->address)[1]); 358 345 359 cur->flags &= ~BKPOINT_REINST; 346 spinlock_unlock(&bkpoint_lock);360 irq_spinlock_unlock(&bkpoint_lock, false); 347 361 return; 348 } 362 } 363 349 364 if (cur->flags & BKPOINT_INPROG) 350 365 printf("Warning: breakpoint recursion\n"); 351 366 352 367 if (!(cur->flags & BKPOINT_FUNCCALL)) { 353 printf("***Breakpoint %d: %p in %s.\n", i, fireaddr, 354 symtab_fmt_name_lookup(istate->epc)); 355 } 356 368 printf("***Breakpoint %u: %p in %s.\n", i, 369 (void *) fireaddr, 370 symtab_fmt_name_lookup(fireaddr)); 371 } 372 357 373 /* Return first instruction 
back */ 358 374 ((uint32_t *)cur->address)[0] = cur->instruction; … … 366 382 cur->flags |= BKPOINT_INPROG; 367 383 } else { 368 printf("***Breakpoint %d: %p in %s.\n", i, fireaddr, 384 printf("***Breakpoint %d: %p in %s.\n", i, 385 (void *) fireaddr, 369 386 symtab_fmt_name_lookup(fireaddr)); 370 387 371 388 /* Move on to next instruction */ 372 389 istate->epc += 4; 373 390 } 391 374 392 if (cur) 375 393 cur->counter++; 394 376 395 if (cur && (cur->flags & BKPOINT_FUNCCALL)) { 377 396 /* Allow zero bkfunc, just for counting */ … … 380 399 } else { 381 400 #ifdef CONFIG_KCONSOLE 382 /* This disables all other processors - we are not SMP, 401 /* 402 * This disables all other processors - we are not SMP, 383 403 * actually this gets us to cpu_halt, if scheduler() is run 384 404 * - we generally do not want scheduler to be run from debug, 385 405 * so this is a good idea 386 */ 406 */ 387 407 atomic_set(&haltstate, 1); 388 spinlock_unlock(&bkpoint_lock);408 irq_spinlock_unlock(&bkpoint_lock, false); 389 409 390 410 kconsole("debug", "Debug console ready.\n", false); 391 411 392 spinlock_lock(&bkpoint_lock);412 irq_spinlock_lock(&bkpoint_lock, false); 393 413 atomic_set(&haltstate, 0); 394 414 #endif 395 415 } 396 if (cur && cur->address == fireaddr && (cur->flags & BKPOINT_INPROG)) { 416 417 if ((cur) && (cur->address == fireaddr) 418 && ((cur->flags & BKPOINT_INPROG))) { 397 419 /* Remove one-shot breakpoint */ 398 420 if ((cur->flags & BKPOINT_ONESHOT)) 399 cur->address = NULL; 421 cur->address = (uintptr_t) NULL; 422 400 423 /* Remove in-progress flag */ 401 424 cur->flags &= ~BKPOINT_INPROG; 402 } 403 spinlock_unlock(&bkpoint_lock); 425 } 426 427 irq_spinlock_unlock(&bkpoint_lock, false); 404 428 } 405 429 -
kernel/arch/mips32/src/exception.c
rfb150d78 r46c20c8 27 27 */ 28 28 29 /** @addtogroup mips32 29 /** @addtogroup mips32 30 30 * @{ 31 31 */ … … 38 38 #include <panic.h> 39 39 #include <arch/cp0.h> 40 #include < arch/types.h>40 #include <typedefs.h> 41 41 #include <arch.h> 42 42 #include <debug.h> … … 49 49 #include <symtab.h> 50 50 51 static c har *exctable[] = {51 static const char *exctable[] = { 52 52 "Interrupt", 53 53 "TLB Modified", … … 67 67 "Floating Point", 68 68 NULL, NULL, NULL, NULL, NULL, NULL, NULL, 69 "WatchHi/WatchLo", /* 23 */69 "WatchHi/WatchLo", /* 23 */ 70 70 NULL, NULL, NULL, NULL, NULL, NULL, NULL, 71 71 "Virtual Coherency - data", 72 72 }; 73 73 74 static void print_regdump(istate_t *istate) 75 { 76 char *pcsymbol, *rasymbol; 77 78 pcsymbol = symtab_fmt_name_lookup(istate->epc); 79 rasymbol = symtab_fmt_name_lookup(istate->ra); 80 81 printf("PC: %#x(%s) RA: %#x(%s), SP(%p)\n", istate->epc, pcsymbol, 82 istate->ra, rasymbol, istate->sp); 83 } 84 85 static void unhandled_exception(int n, istate_t *istate) 74 void istate_decode(istate_t *istate) 75 { 76 printf("epc=%p\tsta=%#0" PRIx32 "\t" 77 "lo =%#0" PRIx32 "\thi =%#0" PRIx32 "\n", 78 (void *) istate->epc, istate->status, 79 istate->lo, istate->hi); 80 81 printf("a0 =%#0" PRIx32 "\ta1 =%#0" PRIx32 "\t" 82 "a2 =%#0" PRIx32 "\ta3 =%#0" PRIx32 "\n", 83 istate->a0, istate->a1, istate->a2, istate->a3); 84 85 printf("t0 =%#0" PRIx32 "\tt1 =%#0" PRIx32 "\t" 86 "t2 =%#0" PRIx32 "\tt3 =%#0" PRIx32 "\n", 87 istate->t0, istate->t1, istate->t2, istate->t3); 88 89 printf("t4 =%#0" PRIx32 "\tt5 =%#0" PRIx32 "\t" 90 "t6 =%#0" PRIx32 "\tt7 =%#0" PRIx32 "\n", 91 istate->t4, istate->t5, istate->t6, istate->t7); 92 93 printf("t8 =%#0" PRIx32 "\tt9 =%#0" PRIx32 "\t" 94 "v0 =%#0" PRIx32 "\tv1 =%#0" PRIx32 "\n", 95 istate->t8, istate->t9, istate->v0, istate->v1); 96 97 printf("s0 =%#0" PRIx32 "\ts1 =%#0" PRIx32 "\t" 98 "s2 =%#0" PRIx32 "\ts3 =%#0" PRIx32 "\n", 99 istate->s0, istate->s1, istate->s2, istate->s3); 100 101 printf("s4 =%#0" PRIx32 "\ts5 
=%#0" PRIx32 "\t" 102 "s6 =%#0" PRIx32 "\ts7 =%#0" PRIx32 "\n", 103 istate->s4, istate->s5, istate->s6, istate->s7); 104 105 printf("s8 =%#0" PRIx32 "\tat =%#0" PRIx32 "\t" 106 "kt0=%#0" PRIx32 "\tkt1=%#0" PRIx32 "\n", 107 istate->s8, istate->at, istate->kt0, istate->kt1); 108 109 printf("sp =%p\tra =%p\tgp =%p\n", 110 (void *) istate->sp, (void *) istate->ra, 111 (void *) istate->gp); 112 } 113 114 static void unhandled_exception(unsigned int n, istate_t *istate) 86 115 { 87 116 fault_if_from_uspace(istate, "Unhandled exception %s.", exctable[n]); 88 89 print_regdump(istate); 90 panic("Unhandled exception %s.", exctable[n]); 91 } 92 93 static void reserved_instr_exception(int n, istate_t *istate) 94 { 95 if (*((uint32_t *)istate->epc) == 0x7c03e83b) { 117 panic_badtrap(istate, n, "Unhandled exception %s.", exctable[n]); 118 } 119 120 static void reserved_instr_exception(unsigned int n, istate_t *istate) 121 { 122 if (*((uint32_t *) istate->epc) == 0x7c03e83b) { 96 123 ASSERT(THREAD); 97 124 istate->epc += 4; 98 istate->v1 = istate->k 1;99 } else 125 istate->v1 = istate->kt1; 126 } else 100 127 unhandled_exception(n, istate); 101 128 } 102 129 103 static void breakpoint_exception( int n, istate_t *istate)130 static void breakpoint_exception(unsigned int n, istate_t *istate) 104 131 { 105 132 #ifdef CONFIG_DEBUG … … 113 140 } 114 141 115 static void tlbmod_exception( int n, istate_t *istate)142 static void tlbmod_exception(unsigned int n, istate_t *istate) 116 143 { 117 144 tlb_modified(istate); 118 145 } 119 146 120 static void tlbinv_exception( int n, istate_t *istate)147 static void tlbinv_exception(unsigned int n, istate_t *istate) 121 148 { 122 149 tlb_invalid(istate); … … 124 151 125 152 #ifdef CONFIG_FPU_LAZY 126 static void cpuns_exception( int n, istate_t *istate)153 static void cpuns_exception(unsigned int n, istate_t *istate) 127 154 { 128 155 if (cp0_cause_coperr(cp0_cause_read()) == fpu_cop_id) 129 156 scheduler_fpu_lazy_request(); 130 157 else { 131 
fault_if_from_uspace(istate, "Unhandled Coprocessor Unusable Exception."); 132 panic("Unhandled Coprocessor Unusable Exception."); 158 fault_if_from_uspace(istate, 159 "Unhandled Coprocessor Unusable Exception."); 160 panic_badtrap(istate, n, 161 "Unhandled Coprocessor Unusable Exception."); 133 162 } 134 163 } 135 164 #endif 136 165 137 static void interrupt_exception(int n, istate_t *istate) 138 { 139 uint32_t cause; 140 int i; 141 142 /* decode interrupt number and process the interrupt */ 143 cause = (cp0_cause_read() >> 8) & 0xff; 144 166 static void interrupt_exception(unsigned int n, istate_t *istate) 167 { 168 /* Decode interrupt number and process the interrupt */ 169 uint32_t cause = (cp0_cause_read() >> 8) & 0xff; 170 171 unsigned int i; 145 172 for (i = 0; i < 8; i++) { 146 173 if (cause & (1 << i)) { … … 151 178 */ 152 179 irq->handler(irq); 153 spinlock_unlock(&irq->lock);180 irq_spinlock_unlock(&irq->lock, false); 154 181 } else { 155 182 /* … … 157 184 */ 158 185 #ifdef CONFIG_DEBUG 159 printf("cpu%u: spurious interrupt (inum=% d)\n",186 printf("cpu%u: spurious interrupt (inum=%u)\n", 160 187 CPU->id, i); 161 188 #endif … … 166 193 167 194 /** Handle syscall userspace call */ 168 static void syscall_exception( int n, istate_t *istate)169 { 170 panic("Syscall is handled through shortcut.");195 static void syscall_exception(unsigned int n, istate_t *istate) 196 { 197 fault_if_from_uspace(istate, "Syscall is handled through shortcut."); 171 198 } 172 199 173 200 void exception_init(void) 174 201 { 175 int i;176 202 unsigned int i; 203 177 204 /* Clear exception table */ 178 205 for (i = 0; i < IVT_ITEMS; i++) 179 exc_register(i, "undef", (iroutine) unhandled_exception); 180 181 exc_register(EXC_Bp, "bkpoint", (iroutine) breakpoint_exception); 182 exc_register(EXC_RI, "resinstr", (iroutine) reserved_instr_exception); 183 exc_register(EXC_Mod, "tlb_mod", (iroutine) tlbmod_exception); 184 exc_register(EXC_TLBL, "tlbinvl", (iroutine) tlbinv_exception); 185 
exc_register(EXC_TLBS, "tlbinvl", (iroutine) tlbinv_exception); 186 exc_register(EXC_Int, "interrupt", (iroutine) interrupt_exception); 206 exc_register(i, "undef", false, 207 (iroutine_t) unhandled_exception); 208 209 exc_register(EXC_Bp, "bkpoint", true, 210 (iroutine_t) breakpoint_exception); 211 exc_register(EXC_RI, "resinstr", true, 212 (iroutine_t) reserved_instr_exception); 213 exc_register(EXC_Mod, "tlb_mod", true, 214 (iroutine_t) tlbmod_exception); 215 exc_register(EXC_TLBL, "tlbinvl", true, 216 (iroutine_t) tlbinv_exception); 217 exc_register(EXC_TLBS, "tlbinvl", true, 218 (iroutine_t) tlbinv_exception); 219 exc_register(EXC_Int, "interrupt", true, 220 (iroutine_t) interrupt_exception); 221 187 222 #ifdef CONFIG_FPU_LAZY 188 exc_register(EXC_CpU, "cpunus", (iroutine) cpuns_exception); 189 #endif 190 exc_register(EXC_Sys, "syscall", (iroutine) syscall_exception); 223 exc_register(EXC_CpU, "cpunus", true, 224 (iroutine_t) cpuns_exception); 225 #endif 226 227 exc_register(EXC_Sys, "syscall", true, 228 (iroutine_t) syscall_exception); 191 229 } 192 230 -
kernel/arch/mips32/src/interrupt.c
rfb150d78 r46c20c8 35 35 #include <interrupt.h> 36 36 #include <arch/interrupt.h> 37 #include < arch/types.h>37 #include <typedefs.h> 38 38 #include <arch.h> 39 39 #include <arch/cp0.h> 40 #include <arch/smp/dorder.h> 40 41 #include <time/clock.h> 41 42 #include <ipc/sysipc.h> … … 48 49 function virtual_timer_fnc = NULL; 49 50 static irq_t timer_irq; 51 static irq_t dorder_irq; 52 53 // TODO: This is SMP unsafe!!! 54 55 uint32_t count_hi = 0; 56 static unsigned long nextcount; 57 static unsigned long lastcount; 50 58 51 59 /** Disable interrupts. … … 89 97 } 90 98 91 /* TODO: This is SMP unsafe!!! */ 92 uint32_t count_hi = 0; 93 static unsigned long nextcount; 94 static unsigned long lastcount; 99 /** Check interrupts state. 100 * 101 * @return True if interrupts are disabled. 102 * 103 */ 104 bool interrupts_disabled(void) 105 { 106 return !(cp0_status_read() & cp0_status_ie_enabled_bit); 107 } 95 108 96 /** Start hardware clock */ 109 /** Start hardware clock 110 * 111 */ 97 112 static void timer_start(void) 98 113 { … … 109 124 static void timer_irq_handler(irq_t *irq) 110 125 { 111 unsigned long drift;112 113 126 if (cp0_count_read() < lastcount) 114 127 /* Count overflow detected */ 115 128 count_hi++; 129 116 130 lastcount = cp0_count_read(); 117 131 118 drift = cp0_count_read() - nextcount;132 unsigned long drift = cp0_count_read() - nextcount; 119 133 while (drift > cp0_compare_value) { 120 134 drift -= cp0_compare_value; 121 135 CPU->missed_clock_ticks++; 122 136 } 137 123 138 nextcount = cp0_count_read() + cp0_compare_value - drift; 124 139 cp0_compare_write(nextcount); … … 128 143 * Release the lock, call clock() and reacquire the lock again. 
129 144 */ 130 spinlock_unlock(&irq->lock);145 irq_spinlock_unlock(&irq->lock, false); 131 146 clock(); 132 spinlock_lock(&irq->lock);147 irq_spinlock_lock(&irq->lock, false); 133 148 134 149 if (virtual_timer_fnc != NULL) 135 150 virtual_timer_fnc(); 151 } 152 153 static irq_ownership_t dorder_claim(irq_t *irq) 154 { 155 return IRQ_ACCEPT; 156 } 157 158 static void dorder_irq_handler(irq_t *irq) 159 { 160 dorder_ipi_ack(1 << dorder_cpuid()); 136 161 } 137 162 … … 150 175 timer_start(); 151 176 cp0_unmask_int(TIMER_IRQ); 177 178 irq_initialize(&dorder_irq); 179 dorder_irq.devno = device_assign_devno(); 180 dorder_irq.inr = DORDER_IRQ; 181 dorder_irq.claim = dorder_claim; 182 dorder_irq.handler = dorder_irq_handler; 183 irq_register(&dorder_irq); 184 185 cp0_unmask_int(DORDER_IRQ); 152 186 } 153 187 -
kernel/arch/mips32/src/mips32.c
rfb150d78 r46c20c8 36 36 #include <arch/cp0.h> 37 37 #include <arch/exception.h> 38 #include <arch/debug.h> 38 39 #include <mm/as.h> 39 40 #include <userspace.h> … … 57 58 #include <macros.h> 58 59 #include <config.h> 59 #include <str ing.h>60 #include <str.h> 60 61 #include <arch/drivers/msim.h> 61 62 #include <arch/asm/regname.h> … … 83 84 void arch_pre_main(void *entry __attribute__((unused)), bootinfo_t *bootinfo) 84 85 { 85 /* Setup usermode */ 86 init.cnt = bootinfo->cnt; 86 init.cnt = min3(bootinfo->cnt, TASKMAP_MAX_RECORDS, CONFIG_INIT_TASKS); 87 87 88 88 size_t i; 89 for (i = 0; i < min3(bootinfo->cnt, TASKMAP_MAX_RECORDS, CONFIG_INIT_TASKS); i++) {90 init.tasks[i].addr = bootinfo->tasks[i].addr;89 for (i = 0; i < init.cnt; i++) { 90 init.tasks[i].addr = (uintptr_t) bootinfo->tasks[i].addr; 91 91 init.tasks[i].size = bootinfo->tasks[i].size; 92 92 str_cpy(init.tasks[i].name, CONFIG_TASK_NAME_BUFLEN, -
kernel/arch/mips32/src/mm/frame.c
rfb150d78 r46c20c8 249 249 void physmem_print(void) 250 250 { 251 printf("Base Size\n"); 252 printf("---------- ----------\n"); 251 printf("[base ] [size ]\n"); 253 252 254 253 size_t i; -
kernel/arch/mips32/src/mm/tlb.c
rfb150d78 r46c20c8 321 321 void tlb_refill_fail(istate_t *istate) 322 322 { 323 char *symbol, *sym2; 324 325 symbol = symtab_fmt_name_lookup(istate->epc); 326 sym2 = symtab_fmt_name_lookup(istate->ra); 323 uintptr_t va = cp0_badvaddr_read(); 327 324 328 325 fault_if_from_uspace(istate, "TLB Refill Exception on %p.", 329 cp0_badvaddr_read()); 330 panic("%x: TLB Refill Exception at %x (%s<-%s).", cp0_badvaddr_read(), 331 istate->epc, symbol, sym2); 326 (void *) va); 327 panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, "TLB Refill Exception."); 332 328 } 333 329 … … 335 331 void tlb_invalid_fail(istate_t *istate) 336 332 { 337 char *symbol; 338 339 symbol = symtab_fmt_name_lookup(istate->epc); 340 333 uintptr_t va = cp0_badvaddr_read(); 334 341 335 fault_if_from_uspace(istate, "TLB Invalid Exception on %p.", 342 cp0_badvaddr_read()); 343 panic("%x: TLB Invalid Exception at %x (%s).", cp0_badvaddr_read(), 344 istate->epc, symbol); 336 (void *) va); 337 panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, "TLB Invalid Exception."); 345 338 } 346 339 347 340 void tlb_modified_fail(istate_t *istate) 348 341 { 349 char *symbol; 350 351 symbol = symtab_fmt_name_lookup(istate->epc); 352 342 uintptr_t va = cp0_badvaddr_read(); 343 353 344 fault_if_from_uspace(istate, "TLB Modified Exception on %p.", 354 cp0_badvaddr_read()); 355 panic("%x: TLB Modified Exception at %x (%s).", cp0_badvaddr_read(), 356 istate->epc, symbol); 345 (void *) va); 346 panic_memtrap(istate, PF_ACCESS_WRITE, va, "TLB Modified Exception."); 357 347 } 358 348 359 349 /** Try to find PTE for faulting address. 360 *361 * The AS->lock must be held on entry to this function.362 350 * 363 351 * @param badvaddr Faulting virtual address. 
… … 375 363 entry_hi_t hi; 376 364 pte_t *pte; 365 366 ASSERT(mutex_locked(&AS->lock)); 377 367 378 368 hi.value = cp0_entry_hi_read(); … … 461 451 hi_save.value = cp0_entry_hi_read(); 462 452 463 printf("# ASID VPN2 MASK G V D C PFN\n"); 464 printf("-- ---- ------ ---- - - - - ------\n"); 453 printf("[nr] [asid] [vpn2] [mask] [gvdc] [pfn ]\n"); 465 454 466 455 for (i = 0; i < TLB_ENTRY_COUNT; i++) { … … 473 462 lo1.value = cp0_entry_lo1_read(); 474 463 475 printf("%- 2u %-4u %#6x %#4x %1u %1u %1u %1u%#6x\n",464 printf("%-4u %-6u %#6x %#6x %1u%1u%1u%1u %#6x\n", 476 465 i, hi.asid, hi.vpn2, mask.mask, 477 466 lo0.g, lo0.v, lo0.d, lo0.c, lo0.pfn); 478 printf(" %1u %1u %1u %1u%#6x\n",467 printf(" %1u%1u%1u%1u %#6x\n", 479 468 lo1.g, lo1.v, lo1.d, lo1.c, lo1.pfn); 480 469 } -
kernel/arch/mips32/src/smp/dorder.c
rfb150d78 r46c20c8 33 33 */ 34 34 35 #include <typedefs.h> 36 #include <smp/ipi.h> 35 37 #include <arch/smp/dorder.h> 36 38 37 39 #define MSIM_DORDER_ADDRESS 0xB0000004 38 40 41 #ifdef CONFIG_SMP 42 39 43 void ipi_broadcast_arch(int ipi) 40 44 { 41 #ifdef CONFIG_SMP 42 *((volatile unsigned int *) MSIM_DORDER_ADDRESS) = 0x7FFFFFFF; 45 *((volatile uint32_t *) MSIM_DORDER_ADDRESS) = 0x7fffffff; 46 } 47 43 48 #endif 49 50 uint32_t dorder_cpuid(void) 51 { 52 return *((volatile uint32_t *) MSIM_DORDER_ADDRESS); 53 } 54 55 void dorder_ipi_ack(uint32_t mask) 56 { 57 *((volatile uint32_t *) (MSIM_DORDER_ADDRESS + 4)) = mask; 44 58 } 45 59 -
kernel/arch/mips32/src/start.S
rfb150d78 r46c20c8 45 45 .global userspace_asm 46 46 47 # Which status bits should are thread-local 48 #define REG_SAVE_MASK 0x1f # KSU(UM), EXL, ERL, IE 49 50 # Save registers to space defined by \r 51 # We will change status: Disable ERL,EXL,UM,IE 52 # These changes will be automatically reversed in REGISTER_LOAD 53 # SP is NOT saved as part of these registers 47 /* 48 * Which status bits are thread-local: 49 * KSU(UM), EXL, ERL, IE 50 */ 51 #define REG_SAVE_MASK 0x1f 52 53 #define ISTATE_OFFSET_A0 0 54 #define ISTATE_OFFSET_A1 4 55 #define ISTATE_OFFSET_A2 8 56 #define ISTATE_OFFSET_A3 12 57 #define ISTATE_OFFSET_T0 16 58 #define ISTATE_OFFSET_T1 20 59 #define ISTATE_OFFSET_V0 24 60 #define ISTATE_OFFSET_V1 28 61 #define ISTATE_OFFSET_AT 32 62 #define ISTATE_OFFSET_T2 36 63 #define ISTATE_OFFSET_T3 40 64 #define ISTATE_OFFSET_T4 44 65 #define ISTATE_OFFSET_T5 48 66 #define ISTATE_OFFSET_T6 52 67 #define ISTATE_OFFSET_T7 56 68 #define ISTATE_OFFSET_S0 60 69 #define ISTATE_OFFSET_S1 64 70 #define ISTATE_OFFSET_S2 68 71 #define ISTATE_OFFSET_S3 72 72 #define ISTATE_OFFSET_S4 76 73 #define ISTATE_OFFSET_S5 80 74 #define ISTATE_OFFSET_S6 84 75 #define ISTATE_OFFSET_S7 88 76 #define ISTATE_OFFSET_T8 92 77 #define ISTATE_OFFSET_T9 96 78 #define ISTATE_OFFSET_KT0 100 79 #define ISTATE_OFFSET_KT1 104 80 #define ISTATE_OFFSET_GP 108 81 #define ISTATE_OFFSET_SP 112 82 #define ISTATE_OFFSET_S8 116 83 #define ISTATE_OFFSET_RA 120 84 #define ISTATE_OFFSET_LO 124 85 #define ISTATE_OFFSET_HI 128 86 #define ISTATE_OFFSET_STATUS 132 87 #define ISTATE_OFFSET_EPC 136 88 #define ISTATE_OFFSET_ALIGNMENT 140 89 90 #define ISTATE_SOFT_SIZE 144 91 92 /* 93 * The fake ABI prologue is never executed and may not be part of the 94 * procedure's body. Instead, it should be immediately preceding the procedure's 95 * body. Its only purpose is to trick the stack trace walker into thinking that 96 * the exception is more or less just a normal function call. 
97 */ 98 .macro FAKE_ABI_PROLOGUE 99 sub $sp, ISTATE_SOFT_SIZE 100 sw $ra, ISTATE_OFFSET_EPC($sp) 101 .endm 102 103 /* 104 * Save registers to space defined by \r 105 * We will change status: Disable ERL, EXL, UM, IE 106 * These changes will be automatically reversed in REGISTER_LOAD 107 * %sp is NOT saved as part of these registers 108 */ 54 109 .macro REGISTERS_STORE_AND_EXC_RESET r 55 sw $at, EOFFSET_AT(\r) 56 sw $v0, EOFFSET_V0(\r) 57 sw $v1, EOFFSET_V1(\r) 58 sw $a0, EOFFSET_A0(\r) 59 sw $a1, EOFFSET_A1(\r) 60 sw $a2, EOFFSET_A2(\r) 61 sw $a3, EOFFSET_A3(\r) 62 sw $t0, EOFFSET_T0(\r) 63 sw $t1, EOFFSET_T1(\r) 64 sw $t2, EOFFSET_T2(\r) 65 sw $t3, EOFFSET_T3(\r) 66 sw $t4, EOFFSET_T4(\r) 67 sw $t5, EOFFSET_T5(\r) 68 sw $t6, EOFFSET_T6(\r) 69 sw $t7, EOFFSET_T7(\r) 70 sw $t8, EOFFSET_T8(\r) 71 sw $t9, EOFFSET_T9(\r) 72 110 sw $at, ISTATE_OFFSET_AT(\r) 111 sw $v0, ISTATE_OFFSET_V0(\r) 112 sw $v1, ISTATE_OFFSET_V1(\r) 113 sw $a0, ISTATE_OFFSET_A0(\r) 114 sw $a1, ISTATE_OFFSET_A1(\r) 115 sw $a2, ISTATE_OFFSET_A2(\r) 116 sw $a3, ISTATE_OFFSET_A3(\r) 117 sw $t0, ISTATE_OFFSET_T0(\r) 118 sw $t1, ISTATE_OFFSET_T1(\r) 119 sw $t2, ISTATE_OFFSET_T2(\r) 120 sw $t3, ISTATE_OFFSET_T3(\r) 121 sw $t4, ISTATE_OFFSET_T4(\r) 122 sw $t5, ISTATE_OFFSET_T5(\r) 123 sw $t6, ISTATE_OFFSET_T6(\r) 124 sw $t7, ISTATE_OFFSET_T7(\r) 125 sw $t8, ISTATE_OFFSET_T8(\r) 126 sw $t9, ISTATE_OFFSET_T9(\r) 127 sw $s0, ISTATE_OFFSET_S0(\r) 128 sw $s1, ISTATE_OFFSET_S1(\r) 129 sw $s2, ISTATE_OFFSET_S2(\r) 130 sw $s3, ISTATE_OFFSET_S3(\r) 131 sw $s4, ISTATE_OFFSET_S4(\r) 132 sw $s5, ISTATE_OFFSET_S5(\r) 133 sw $s6, ISTATE_OFFSET_S6(\r) 134 sw $s7, ISTATE_OFFSET_S7(\r) 135 sw $s8, ISTATE_OFFSET_S8(\r) 136 73 137 mflo $at 74 sw $at, EOFFSET_LO(\r)138 sw $at, ISTATE_OFFSET_LO(\r) 75 139 mfhi $at 76 sw $at, EOFFSET_HI(\r) 77 78 sw $gp, EOFFSET_GP(\r) 79 sw $ra, EOFFSET_RA(\r) 80 sw $k1, EOFFSET_K1(\r) 81 140 sw $at, ISTATE_OFFSET_HI(\r) 141 142 sw $gp, ISTATE_OFFSET_GP(\r) 143 sw $ra, ISTATE_OFFSET_RA(\r) 
144 sw $k0, ISTATE_OFFSET_KT0(\r) 145 sw $k1, ISTATE_OFFSET_KT1(\r) 146 82 147 mfc0 $t0, $status 83 148 mfc0 $t1, $epc 84 149 85 and $t2, $t0, REG_SAVE_MASK # Save only KSU,EXL,ERL,IE 86 li $t3, ~(0x1f) 87 and $t0, $t0, $t3 # Clear KSU,EXL,ERL,IE 88 89 sw $t2,EOFFSET_STATUS(\r) 90 sw $t1,EOFFSET_EPC(\r) 150 /* save only KSU, EXL, ERL, IE */ 151 and $t2, $t0, REG_SAVE_MASK 152 153 /* clear KSU, EXL, ERL, IE */ 154 li $t3, ~(REG_SAVE_MASK) 155 and $t0, $t0, $t3 156 157 sw $t2, ISTATE_OFFSET_STATUS(\r) 158 sw $t1, ISTATE_OFFSET_EPC(\r) 91 159 mtc0 $t0, $status 92 160 .endm 93 161 94 162 .macro REGISTERS_LOAD r 95 # Update only UM,EXR,IE from status, the rest 96 # is controlled by OS and not bound to task 163 /* 164 * Update only UM, EXR, IE from status, the rest 165 * is controlled by OS and not bound to task. 166 */ 97 167 mfc0 $t0, $status 98 lw $t1,EOFFSET_STATUS(\r) 99 100 li $t2, ~REG_SAVE_MASK # Mask UM,EXL,ERL,IE 168 lw $t1, ISTATE_OFFSET_STATUS(\r) 169 170 /* mask UM, EXL, ERL, IE */ 171 li $t2, ~REG_SAVE_MASK 101 172 and $t0, $t0, $t2 102 173 103 or $t0, $t0, $t1 # Copy UM,EXL, ERL, IE from saved status 174 /* copy UM, EXL, ERL, IE from saved status */ 175 or $t0, $t0, $t1 104 176 mtc0 $t0, $status 105 177 106 lw $v0, EOFFSET_V0(\r)107 lw $v1, EOFFSET_V1(\r)108 lw $a0, EOFFSET_A0(\r)109 lw $a1, EOFFSET_A1(\r)110 lw $a2, EOFFSET_A2(\r)111 lw $a3, EOFFSET_A3(\r)112 lw $t0, EOFFSET_T0(\r)113 lw $t1, EOFFSET_T1(\r)114 lw $t2, EOFFSET_T2(\r)115 lw $t3, EOFFSET_T3(\r)116 lw $t4, EOFFSET_T4(\r)117 lw $t5, EOFFSET_T5(\r)118 lw $t6, EOFFSET_T6(\r)119 lw $t7, EOFFSET_T7(\r)120 lw $t8, EOFFSET_T8(\r)121 lw $t9, EOFFSET_T9(\r)122 123 lw $gp, EOFFSET_GP(\r)124 lw $ra, EOFFSET_RA(\r)125 lw $k1, EOFFSET_K1(\r)126 127 lw $at, EOFFSET_LO(\r)178 lw $v0, ISTATE_OFFSET_V0(\r) 179 lw $v1, ISTATE_OFFSET_V1(\r) 180 lw $a0, ISTATE_OFFSET_A0(\r) 181 lw $a1, ISTATE_OFFSET_A1(\r) 182 lw $a2, ISTATE_OFFSET_A2(\r) 183 lw $a3, ISTATE_OFFSET_A3(\r) 184 lw $t0, ISTATE_OFFSET_T0(\r) 185 lw 
$t1, ISTATE_OFFSET_T1(\r) 186 lw $t2, ISTATE_OFFSET_T2(\r) 187 lw $t3, ISTATE_OFFSET_T3(\r) 188 lw $t4, ISTATE_OFFSET_T4(\r) 189 lw $t5, ISTATE_OFFSET_T5(\r) 190 lw $t6, ISTATE_OFFSET_T6(\r) 191 lw $t7, ISTATE_OFFSET_T7(\r) 192 lw $t8, ISTATE_OFFSET_T8(\r) 193 lw $t9, ISTATE_OFFSET_T9(\r) 194 195 lw $gp, ISTATE_OFFSET_GP(\r) 196 lw $ra, ISTATE_OFFSET_RA(\r) 197 lw $k1, ISTATE_OFFSET_KT1(\r) 198 199 lw $at, ISTATE_OFFSET_LO(\r) 128 200 mtlo $at 129 lw $at, EOFFSET_HI(\r)201 lw $at, ISTATE_OFFSET_HI(\r) 130 202 mthi $at 131 132 lw $at, EOFFSET_EPC(\r)203 204 lw $at, ISTATE_OFFSET_EPC(\r) 133 205 mtc0 $at, $epc 134 206 135 lw $at, EOFFSET_AT(\r)136 lw $sp, EOFFSET_SP(\r)207 lw $at, ISTATE_OFFSET_AT(\r) 208 lw $sp, ISTATE_OFFSET_SP(\r) 137 209 .endm 138 210 139 # Move kernel stack pointer address to register K0 140 # - if we are in user mode, load the appropriate stack 141 # address 211 /* 212 * Move kernel stack pointer address to register $k0. 213 * If we are in user mode, load the appropriate stack address. 
214 */ 142 215 .macro KERNEL_STACK_TO_K0 143 # If we are in user mode216 /* if we are in user mode */ 144 217 mfc0 $k0, $status 145 218 andi $k0, 0x10 146 219 147 220 beq $k0, $0, 1f 148 add $k0, $sp, 0149 150 # Move $k0 pointer to kernel stack221 move $k0, $sp 222 223 /* move $k0 pointer to kernel stack */ 151 224 lui $k0, %hi(supervisor_sp) 152 225 ori $k0, $k0, %lo(supervisor_sp) 153 # Move $k0 (superveisor_sp) 154 lw $k0, 0($k0) 155 1: 226 227 /* move $k0 (supervisor_sp) */ 228 lw $k0, ($k0) 229 230 1: 156 231 .endm 157 232 158 233 .org 0x0 159 234 kernel_image_start: 160 /* Load temporary stack */235 /* load temporary stack */ 161 236 lui $sp, %hi(end_stack) 162 237 ori $sp, $sp, %lo(end_stack) 163 238 164 /* Not sure about this, but might 165 be needed for PIC code */ 239 /* not sure about this, but might be needed for PIC code */ 166 240 lui $gp, 0x8000 167 241 168 242 /* $a1 contains physical address of bootinfo_t */ 169 170 243 jal arch_pre_main 171 244 nop … … 174 247 nop 175 248 176 249 .space TEMP_STACK_SIZE 177 250 end_stack: 178 251 … … 189 262 nop 190 263 264 FAKE_ABI_PROLOGUE 191 265 exception_handler: 192 266 KERNEL_STACK_TO_K0 193 sub $k0, REGISTER_SPACE 194 sw $sp, EOFFSET_SP($k0) 267 268 sub $k0, ISTATE_SOFT_SIZE 269 sw $sp, ISTATE_OFFSET_SP($k0) 195 270 move $sp, $k0 196 271 197 272 mfc0 $k0, $cause 198 273 199 sra $k0, $k0, 0x2 # cp0_exc_cause() part 1200 andi $k0, $k0, 0x1f # cp0_exc_cause() part 2201 sub $k0, 8 # 8 = SYSCALL274 sra $k0, $k0, 0x2 /* cp0_exc_cause() part 1 */ 275 andi $k0, $k0, 0x1f /* cp0_exc_cause() part 2 */ 276 sub $k0, 8 /* 8 = SYSCALL */ 202 277 203 278 beqz $k0, syscall_shortcut 204 add $k0, 8 # Revert $k0 back to correct exc number279 add $k0, 8 /* revert $k0 back to correct exc number */ 205 280 206 281 REGISTERS_STORE_AND_EXC_RESET $sp 207 282 208 283 move $a1, $sp 209 jal exc_dispatch # exc_dispatch(excno, register_space)284 jal exc_dispatch /* exc_dispatch(excno, register_space) */ 210 285 move $a0, $k0 211 286 212 
287 REGISTERS_LOAD $sp 213 # The $sp is automatically restored to former value 214 eret 215 216 ## Syscall entry 217 # 218 # Registers: 219 # 220 # @param v0 Syscall number. 221 # @param a0 1st argument. 222 # @param a1 2nd argument. 223 # @param a2 3rd argument. 224 # @param a3 4th argument. 225 # @param t0 5th argument. 226 # @param t1 6th argument. 227 # 228 # @return The return value will be stored in v0. 229 # 230 #define SS_SP EOFFSET_SP 231 #define SS_STATUS EOFFSET_STATUS 232 #define SS_EPC EOFFSET_EPC 233 #define SS_K1 EOFFSET_K1 288 /* the $sp is automatically restored to former value */ 289 eret 290 291 /** Syscall entry 292 * 293 * Registers: 294 * 295 * @param $v0 Syscall number. 296 * @param $a0 1st argument. 297 * @param $a1 2nd argument. 298 * @param $a2 3rd argument. 299 * @param $a3 4th argument. 300 * @param $t0 5th argument. 301 * @param $t1 6th argument. 302 * 303 * @return The return value will be stored in $v0. 304 * 305 */ 234 306 syscall_shortcut: 235 # We have a lot of space on the stack, with free use236 307 mfc0 $t3, $epc 237 308 mfc0 $t2, $status 238 sw $t3, SS_EPC($sp) # Save EPC239 sw $k1, SS_K1($sp) # Save k1 not saved on context switch240 241 and $t4, $t2, REG_SAVE_MASK # Save only KSU, EXL, ERL, IE309 sw $t3, ISTATE_OFFSET_EPC($sp) /* save EPC */ 310 sw $k1, ISTATE_OFFSET_KT1($sp) /* save $k1 not saved on context switch */ 311 312 and $t4, $t2, REG_SAVE_MASK /* save only KSU, EXL, ERL, IE */ 242 313 li $t5, ~(0x1f) 243 and $t2, $t2, $t5 # Clear KSU, EXL, ERL244 ori $t2, $t2, 0x1 # Set IE245 246 sw $t4, SS_STATUS($sp)314 and $t2, $t2, $t5 /* clear KSU, EXL, ERL */ 315 ori $t2, $t2, 0x1 /* set IE */ 316 317 sw $t4, ISTATE_OFFSET_STATUS($sp) 247 318 mtc0 $t2, $status 248 249 #250 # Call the higher level system call handler251 # We are going to reuse part of the unused exception stack frame252 #253 sw $t0, STACK_ARG4($sp) # save the 5th argument on the stack254 sw $t1, STACK_ARG5($sp) # save the 6th argument on the stack319 320 /* 321 
* Call the higher level system call handler. 322 * 323 */ 324 sw $t0, ISTATE_OFFSET_T0($sp) /* save the 5th argument on the stack */ 325 sw $t1, ISTATE_OFFSET_T1($sp) /* save the 6th argument on the stack */ 255 326 jal syscall_handler 256 sw $v0, STACK_ARG6($sp) # save the syscall number on the stack257 258 # restore status327 sw $v0, ISTATE_OFFSET_V0($sp) /* save the syscall number on the stack */ 328 329 /* restore status */ 259 330 mfc0 $t2, $status 260 lw $t3, SS_STATUS($sp) 261 262 # Change back to EXL = 1 (from last exception), otherwise 263 # an interrupt could rewrite the CP0 - EPC 264 li $t4, ~REG_SAVE_MASK # Mask UM, EXL, ERL, IE 331 lw $t3, ISTATE_OFFSET_STATUS($sp) 332 333 /* 334 * Change back to EXL = 1 (from last exception), otherwise 335 * an interrupt could rewrite the CP0 - EPC. 336 * 337 */ 338 li $t4, ~REG_SAVE_MASK /* mask UM, EXL, ERL, IE */ 265 339 and $t2, $t2, $t4 266 or $t2, $t2, $t3 # Copy saved UM, EXL, ERL, IE340 or $t2, $t2, $t3 /* copy saved UM, EXL, ERL, IE */ 267 341 mtc0 $t2, $status 268 269 # restore epc + 4270 lw $t2, SS_EPC($sp)271 lw $k1, SS_K1($sp)342 343 /* restore epc + 4 */ 344 lw $t2, ISTATE_OFFSET_EPC($sp) 345 lw $k1, ISTATE_OFFSET_KT1($sp) 272 346 addi $t2, $t2, 4 273 347 mtc0 $t2, $epc 274 348 275 lw $sp, SS_SP($sp) # restore sp276 277 eret 278 349 lw $sp, ISTATE_OFFSET_SP($sp) /* restore $sp */ 350 eret 351 352 FAKE_ABI_PROLOGUE 279 353 tlb_refill_handler: 280 354 KERNEL_STACK_TO_K0 281 sub $k0, REGISTER_SPACE355 sub $k0, ISTATE_SOFT_SIZE 282 356 REGISTERS_STORE_AND_EXC_RESET $k0 283 sw $sp, EOFFSET_SP($k0)284 add $sp, $k0,0285 357 sw $sp, ISTATE_OFFSET_SP($k0) 358 move $sp, $k0 359 286 360 jal tlb_refill 287 add $a0, $sp, 0288 361 move $a0, $sp 362 289 363 REGISTERS_LOAD $sp 290 291 eret 292 364 eret 365 366 FAKE_ABI_PROLOGUE 293 367 cache_error_handler: 294 368 KERNEL_STACK_TO_K0 295 sub $k0, REGISTER_SPACE369 sub $k0, ISTATE_SOFT_SIZE 296 370 REGISTERS_STORE_AND_EXC_RESET $k0 297 sw $sp, EOFFSET_SP($k0)298 add $sp, 
$k0,0299 371 sw $sp, ISTATE_OFFSET_SP($k0) 372 move $sp, $k0 373 300 374 jal cache_error 301 add $a0, $sp, 0302 375 move $a0, $sp 376 303 377 REGISTERS_LOAD $sp 304 305 378 eret 306 379 307 380 userspace_asm: 308 add $sp, $a0,0309 add $v0, $a1, 0310 add $t9, $a2, 0 # Set up correct entry into PIC code311 xor $a0, $a0, $a0 # $a0 is defined to hold pcb_ptr312 # set it to 0313 eret 381 move $sp, $a0 382 move $v0, $a1 383 move $t9, $a2 /* set up correct entry into PIC code */ 384 xor $a0, $a0, $a0 /* $a0 is defined to hold pcb_ptr */ 385 /* set it to 0 */ 386 eret
Note:
See TracChangeset
for help on using the changeset viewer.