Changeset aa85487 in mainline for kernel/arch
- Timestamp:
- 2010-03-07T15:11:56Z (16 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- aadf01e
- Parents:
- 2e99277 (diff), 137691a (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Location:
- kernel/arch
- Files:
-
- 9 added
- 65 edited
- 2 moved
-
abs32le/Makefile.inc (modified) (1 diff)
-
abs32le/include/atomic.h (modified) (5 diffs)
-
abs32le/include/types.h (modified) (1 diff)
-
abs32le/src/abs32le.c (modified) (1 diff)
-
amd64/include/atomic.h (modified) (7 diffs)
-
amd64/include/types.h (modified) (1 diff)
-
amd64/src/cpu/cpu.c (modified) (1 diff)
-
amd64/src/debugger.c (modified) (2 diffs)
-
amd64/src/interrupt.c (modified) (1 diff)
-
arm32/include/atomic.h (modified) (8 diffs)
-
arm32/include/types.h (modified) (3 diffs)
-
arm32/src/cpu/cpu.c (modified) (3 diffs)
-
ia32/include/atomic.h (modified) (7 diffs)
-
ia32/include/types.h (modified) (1 diff)
-
ia32/src/cpu/cpu.c (modified) (1 diff)
-
ia32/src/interrupt.c (modified) (2 diffs)
-
ia32/src/mm/frame.c (modified) (3 diffs)
-
ia64/include/atomic.h (modified) (8 diffs)
-
ia64/include/types.h (modified) (2 diffs)
-
ia64/src/cpu/cpu.c (modified) (1 diff)
-
ia64/src/interrupt.c (modified) (6 diffs)
-
mips32/include/atomic.h (modified) (6 diffs)
-
mips32/include/types.h (modified) (2 diffs)
-
mips32/src/cpu/cpu.c (modified) (1 diff)
-
mips32/src/debugger.c (modified) (2 diffs)
-
mips32/src/exception.c (modified) (3 diffs)
-
mips32/src/mm/tlb.c (modified) (3 diffs)
-
ppc32/include/atomic.h (modified) (7 diffs)
-
ppc32/include/types.h (modified) (2 diffs)
-
ppc32/src/cpu/cpu.c (modified) (1 diff)
-
ppc32/src/mm/tlb.c (modified) (1 diff)
-
sparc64/Makefile.inc (modified) (3 diffs)
-
sparc64/include/atomic.h (modified) (4 diffs)
-
sparc64/include/mm/as.h (modified) (1 diff)
-
sparc64/include/mm/sun4u/as.h (added)
-
sparc64/include/mm/sun4u/tlb.h (modified) (1 diff)
-
sparc64/include/mm/sun4u/tsb.h (added)
-
sparc64/include/mm/sun4v/frame.h (modified) (2 diffs)
-
sparc64/include/mm/sun4v/mmu.h (modified) (2 diffs)
-
sparc64/include/mm/sun4v/tlb.h (modified) (1 diff)
-
sparc64/include/mm/sun4v/tsb.h (modified) (2 diffs)
-
sparc64/include/mm/sun4v/tte.h (modified) (1 diff)
-
sparc64/include/mm/tlb.h (modified) (1 diff)
-
sparc64/include/mm/tsb.h (modified) (1 diff)
-
sparc64/include/smp/sun4v/smp.h (added)
-
sparc64/include/sun4v/arch.h (modified) (1 diff)
-
sparc64/include/sun4v/cpu.h (modified) (3 diffs)
-
sparc64/include/sun4v/hypercall.h (modified) (2 diffs)
-
sparc64/include/sun4v/ipi.h (modified) (1 diff)
-
sparc64/include/trap/exception.h (modified) (2 diffs)
-
sparc64/include/trap/interrupt.h (modified) (4 diffs)
-
sparc64/include/trap/regwin.h (modified) (2 diffs)
-
sparc64/include/trap/sun4u/interrupt.h (added)
-
sparc64/include/trap/sun4v/interrupt.h (added)
-
sparc64/include/trap/sun4v/mmu.h (modified) (2 diffs)
-
sparc64/include/trap/trap_table.h (modified) (1 diff)
-
sparc64/include/types.h (modified) (2 diffs)
-
sparc64/src/cpu/sun4u/cpu.c (modified) (1 diff)
-
sparc64/src/cpu/sun4v/cpu.c (modified) (5 diffs)
-
sparc64/src/drivers/niagara.c (modified) (3 diffs)
-
sparc64/src/mm/sun4u/tlb.c (modified) (3 diffs)
-
sparc64/src/mm/sun4v/as.c (modified) (7 diffs)
-
sparc64/src/mm/sun4v/frame.c (modified) (3 diffs)
-
sparc64/src/mm/sun4v/tlb.c (modified) (3 diffs)
-
sparc64/src/mm/sun4v/tsb.c (modified) (7 diffs)
-
sparc64/src/smp/sun4u/ipi.c (moved) (moved from kernel/arch/sparc64/src/smp/ipi.c ) (2 diffs)
-
sparc64/src/smp/sun4u/smp.c (moved) (moved from kernel/arch/sparc64/src/smp/smp.c ) (1 diff)
-
sparc64/src/smp/sun4v/ipi.c (added)
-
sparc64/src/smp/sun4v/smp.c (added)
-
sparc64/src/sun4v/md.c (modified) (7 diffs)
-
sparc64/src/sun4v/sparc64.c (modified) (1 diff)
-
sparc64/src/trap/exception.c (modified) (2 diffs)
-
sparc64/src/trap/interrupt.c (modified) (3 diffs)
-
sparc64/src/trap/sun4u/interrupt.c (added)
-
sparc64/src/trap/sun4v/interrupt.c (added)
-
sparc64/src/trap/sun4v/trap_table.S (modified) (18 diffs)
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/abs32le/Makefile.inc
r2e99277 raa85487 50 50 endif 51 51 52 ifeq ($(COMPILER),clang) 53 CLANG_ARCH = i386 54 endif 55 52 56 BITS = 32 53 57 ENDIANESS = LE -
kernel/arch/abs32le/include/atomic.h
r2e99277 raa85487 54 54 } 55 55 56 static inline longatomic_postinc(atomic_t *val)56 static inline atomic_count_t atomic_postinc(atomic_t *val) 57 57 { 58 58 /* On real hardware both the storing of the previous … … 60 60 atomic action. */ 61 61 62 longprev = val->count;62 atomic_count_t prev = val->count; 63 63 64 64 val->count++; … … 66 66 } 67 67 68 static inline longatomic_postdec(atomic_t *val)68 static inline atomic_count_t atomic_postdec(atomic_t *val) 69 69 { 70 70 /* On real hardware both the storing of the previous … … 72 72 atomic action. */ 73 73 74 longprev = val->count;74 atomic_count_t prev = val->count; 75 75 76 76 val->count--; … … 81 81 #define atomic_predec(val) (atomic_postdec(val) - 1) 82 82 83 static inline uint32_t test_and_set(atomic_t *val)83 static inline atomic_count_t test_and_set(atomic_t *val) 84 84 { 85 uint32_t prev = val->count;85 atomic_count_t prev = val->count; 86 86 val->count = 1; 87 87 return prev; -
kernel/arch/abs32le/include/types.h
r2e99277 raa85487 55 55 typedef uint32_t unative_t; 56 56 typedef int32_t native_t; 57 typedef uint32_t atomic_count_t; 57 58 58 59 typedef struct { -
kernel/arch/abs32le/src/abs32le.c
r2e99277 raa85487 114 114 } 115 115 116 void panic_printf(c har *fmt, ...)116 void panic_printf(const char *fmt, ...) 117 117 { 118 118 va_list args; -
kernel/arch/amd64/include/atomic.h
r2e99277 raa85487 40 40 #include <preemption.h> 41 41 42 static inline void atomic_inc(atomic_t *val) { 42 static inline void atomic_inc(atomic_t *val) 43 { 43 44 #ifdef CONFIG_SMP 44 45 asm volatile ( … … 54 55 } 55 56 56 static inline void atomic_dec(atomic_t *val) { 57 static inline void atomic_dec(atomic_t *val) 58 { 57 59 #ifdef CONFIG_SMP 58 60 asm volatile ( … … 68 70 } 69 71 70 static inline long atomic_postinc(atomic_t *val)72 static inline atomic_count_t atomic_postinc(atomic_t *val) 71 73 { 72 longr = 1;74 atomic_count_t r = 1; 73 75 74 76 asm volatile ( 75 77 "lock xaddq %[r], %[count]\n" 76 : [count] "+m" (val->count), [r] "+r" (r) 78 : [count] "+m" (val->count), 79 [r] "+r" (r) 77 80 ); 78 81 … … 80 83 } 81 84 82 static inline long atomic_postdec(atomic_t *val)85 static inline atomic_count_t atomic_postdec(atomic_t *val) 83 86 { 84 longr = -1;87 atomic_count_t r = -1; 85 88 86 89 asm volatile ( 87 90 "lock xaddq %[r], %[count]\n" 88 : [count] "+m" (val->count), [r] "+r" (r) 91 : [count] "+m" (val->count), 92 [r] "+r" (r) 89 93 ); 90 94 … … 95 99 #define atomic_predec(val) (atomic_postdec(val) - 1) 96 100 97 static inline uint64_t test_and_set(atomic_t *val) { 98 uint64_t v; 101 static inline atomic_count_t test_and_set(atomic_t *val) 102 { 103 atomic_count_t v = 1; 99 104 100 105 asm volatile ( 101 "movq $1, %[v]\n"102 106 "xchgq %[v], %[count]\n" 103 : [v] "=r" (v), [count] "+m" (val->count) 107 : [v] "+r" (v), 108 [count] "+m" (val->count) 104 109 ); 105 110 … … 107 112 } 108 113 109 110 114 /** amd64 specific fast spinlock */ 111 115 static inline void atomic_lock_arch(atomic_t *val) 112 116 { 113 uint64_t tmp;117 atomic_count_t tmp; 114 118 115 119 preemption_disable(); … … 125 129 "testq %[tmp], %[tmp]\n" 126 130 "jnz 0b\n" 127 : [count] "+m" (val->count), [tmp] "=&r" (tmp) 131 : [count] "+m" (val->count), 132 [tmp] "=&r" (tmp) 128 133 ); 134 129 135 /* 130 136 * Prevent critical section code from bleeding out this way up. -
kernel/arch/amd64/include/types.h
r2e99277 raa85487 55 55 typedef uint64_t unative_t; 56 56 typedef int64_t native_t; 57 typedef uint64_t atomic_count_t; 57 58 58 59 typedef struct { -
kernel/arch/amd64/src/cpu/cpu.c
r2e99277 raa85487 62 62 }; 63 63 64 static c har *vendor_str[] = {64 static const char *vendor_str[] = { 65 65 "Unknown Vendor", 66 66 "AuthenticAMD", -
kernel/arch/amd64/src/debugger.c
r2e99277 raa85487 339 339 { 340 340 unsigned int i; 341 char *symbol;342 341 343 342 #ifdef __32_BITS__ … … 353 352 for (i = 0; i < BKPOINTS_MAX; i++) 354 353 if (breakpoints[i].address) { 355 symbol = symtab_fmt_name_lookup(354 const char *symbol = symtab_fmt_name_lookup( 356 355 breakpoints[i].address); 357 356 -
kernel/arch/amd64/src/interrupt.c
r2e99277 raa85487 65 65 void decode_istate(int n, istate_t *istate) 66 66 { 67 char *symbol; 68 69 symbol = symtab_fmt_name_lookup(istate->rip); 70 67 const char *symbol = symtab_fmt_name_lookup(istate->rip); 68 71 69 printf("-----EXCEPTION(%d) OCCURED----- ( %s )\n", n, __func__); 72 70 printf("%%rip: %#llx (%s)\n", istate->rip, symbol); -
kernel/arch/arm32/include/atomic.h
r2e99277 raa85487 47 47 * 48 48 */ 49 static inline long atomic_add(atomic_t *val, int i)49 static inline atomic_count_t atomic_add(atomic_t *val, atomic_count_t i) 50 50 { 51 long ret;52 53 51 /* 54 52 * This implementation is for UP pre-ARMv6 systems where we do not have … … 57 55 ipl_t ipl = interrupts_disable(); 58 56 val->count += i; 59 ret = val->count;57 atomic_count_t ret = val->count; 60 58 interrupts_restore(ipl); 61 59 … … 66 64 * 67 65 * @param val Variable to be incremented. 66 * 68 67 */ 69 68 static inline void atomic_inc(atomic_t *val) … … 75 74 * 76 75 * @param val Variable to be decremented. 76 * 77 77 */ 78 78 static inline void atomic_dec(atomic_t *val) { … … 84 84 * @param val Variable to be incremented. 85 85 * @return Value after incrementation. 86 * 86 87 */ 87 static inline longatomic_preinc(atomic_t *val)88 static inline atomic_count_t atomic_preinc(atomic_t *val) 88 89 { 89 90 return atomic_add(val, 1); … … 94 95 * @param val Variable to be decremented. 95 96 * @return Value after decrementation. 97 * 96 98 */ 97 static inline longatomic_predec(atomic_t *val)99 static inline atomic_count_t atomic_predec(atomic_t *val) 98 100 { 99 101 return atomic_add(val, -1); … … 104 106 * @param val Variable to be incremented. 105 107 * @return Value before incrementation. 108 * 106 109 */ 107 static inline longatomic_postinc(atomic_t *val)110 static inline atomic_count_t atomic_postinc(atomic_t *val) 108 111 { 109 112 return atomic_add(val, 1) - 1; … … 114 117 * @param val Variable to be decremented. 115 118 * @return Value before decrementation. 119 * 116 120 */ 117 static inline longatomic_postdec(atomic_t *val)121 static inline atomic_count_t atomic_postdec(atomic_t *val) 118 122 { 119 123 return atomic_add(val, -1) + 1; -
kernel/arch/arm32/include/types.h
r2e99277 raa85487 27 27 */ 28 28 29 /** @addtogroup arm32 29 /** @addtogroup arm32 30 30 * @{ 31 31 */ … … 38 38 39 39 #ifndef DOXYGEN 40 # define ATTRIBUTE_PACKED __attribute__((packed))40 #define ATTRIBUTE_PACKED __attribute__((packed)) 41 41 #else 42 #define ATTRIBUTE_PACKED42 #define ATTRIBUTE_PACKED 43 43 #endif 44 44 … … 62 62 typedef uint32_t unative_t; 63 63 typedef int32_t native_t; 64 typedef uint32_t atomic_count_t; 64 65 65 66 typedef struct { -
kernel/arch/arm32/src/cpu/cpu.c
r2e99277 raa85487 43 43 44 44 /** Implementators (vendor) names */ 45 static c har *imp_data[] = {45 static const char *imp_data[] = { 46 46 "?", /* IMP_DATA_START_OFFSET */ 47 47 "ARM Ltd", /* 0x41 */ … … 60 60 61 61 /** Architecture names */ 62 static c har *arch_data[] = {62 static const char *arch_data[] = { 63 63 "?", /* 0x0 */ 64 64 "4", /* 0x1 */ … … 108 108 void cpu_print_report(cpu_t *m) 109 109 { 110 c har *vendor = imp_data[0];111 c har *architecture = arch_data[0];110 const char *vendor = imp_data[0]; 111 const char *architecture = arch_data[0]; 112 112 cpu_arch_t * cpu_arch = &m->arch; 113 113 -
kernel/arch/ia32/include/atomic.h
r2e99277 raa85487 40 40 #include <preemption.h> 41 41 42 static inline void atomic_inc(atomic_t *val) { 42 static inline void atomic_inc(atomic_t *val) 43 { 43 44 #ifdef CONFIG_SMP 44 45 asm volatile ( … … 54 55 } 55 56 56 static inline void atomic_dec(atomic_t *val) { 57 static inline void atomic_dec(atomic_t *val) 58 { 57 59 #ifdef CONFIG_SMP 58 60 asm volatile ( … … 68 70 } 69 71 70 static inline long atomic_postinc(atomic_t *val)72 static inline atomic_count_t atomic_postinc(atomic_t *val) 71 73 { 72 longr = 1;74 atomic_count_t r = 1; 73 75 74 76 asm volatile ( 75 77 "lock xaddl %[r], %[count]\n" 76 : [count] "+m" (val->count), [r] "+r" (r) 78 : [count] "+m" (val->count), 79 [r] "+r" (r) 77 80 ); 78 81 … … 80 83 } 81 84 82 static inline long atomic_postdec(atomic_t *val)85 static inline atomic_count_t atomic_postdec(atomic_t *val) 83 86 { 84 longr = -1;87 atomic_count_t r = -1; 85 88 86 89 asm volatile ( 87 90 "lock xaddl %[r], %[count]\n" 88 : [count] "+m" (val->count), [r] "+r"(r) 91 : [count] "+m" (val->count), 92 [r] "+r" (r) 89 93 ); 90 94 … … 95 99 #define atomic_predec(val) (atomic_postdec(val) - 1) 96 100 97 static inline uint32_t test_and_set(atomic_t *val) { 98 uint32_t v; 101 static inline atomic_count_t test_and_set(atomic_t *val) 102 { 103 atomic_count_t v = 1; 99 104 100 105 asm volatile ( 101 "movl $1, %[v]\n"102 106 "xchgl %[v], %[count]\n" 103 : [v] "=r" (v), [count] "+m" (val->count) 107 : [v] "+r" (v), 108 [count] "+m" (val->count) 104 109 ); 105 110 … … 110 115 static inline void atomic_lock_arch(atomic_t *val) 111 116 { 112 uint32_t tmp;117 atomic_count_t tmp; 113 118 114 119 preemption_disable(); … … 124 129 "testl %[tmp], %[tmp]\n" 125 130 "jnz 0b\n" 126 : [count] "+m" (val->count), [tmp] "=&r" (tmp) 131 : [count] "+m" (val->count), 132 [tmp] "=&r" (tmp) 127 133 ); 134 128 135 /* 129 136 * Prevent critical section code from bleeding out this way up. -
kernel/arch/ia32/include/types.h
r2e99277 raa85487 55 55 typedef uint32_t unative_t; 56 56 typedef int32_t native_t; 57 typedef uint32_t atomic_count_t; 57 58 58 59 typedef struct { -
kernel/arch/ia32/src/cpu/cpu.c
r2e99277 raa85487 64 64 }; 65 65 66 static c har *vendor_str[] = {66 static const char *vendor_str[] = { 67 67 "Unknown Vendor", 68 68 "AMD", -
kernel/arch/ia32/src/interrupt.c
r2e99277 raa85487 65 65 void decode_istate(istate_t *istate) 66 66 { 67 char *symbol; 68 69 symbol = symtab_fmt_name_lookup(istate->eip); 70 67 const char *symbol = symtab_fmt_name_lookup(istate->eip); 68 71 69 if (CPU) 72 70 printf("----------------EXCEPTION OCCURED (cpu%u)----------------\n", CPU->id); 73 71 else 74 72 printf("----------------EXCEPTION OCCURED----------------\n"); 75 73 76 74 printf("%%eip: %#lx (%s)\n", istate->eip, symbol); 77 75 printf("ERROR_WORD=%#lx\n", istate->error_word); … … 80 78 printf("stack: %#lx, %#lx, %#lx, %#lx\n", istate->stack[0], istate->stack[1], istate->stack[2], istate->stack[3]); 81 79 printf(" %#lx, %#lx, %#lx, %#lx\n", istate->stack[4], istate->stack[5], istate->stack[6], istate->stack[7]); 82 80 83 81 stack_trace_istate(istate); 84 82 } -
kernel/arch/ia32/src/mm/frame.c
r2e99277 raa85487 27 27 */ 28 28 29 /** @addtogroup ia32mm 29 /** @addtogroup ia32mm 30 30 * @{ 31 31 */ … … 109 109 } 110 110 111 static c har *e820names[] = {111 static const char *e820names[] = { 112 112 "invalid", 113 113 "available", … … 122 122 { 123 123 unsigned int i; 124 c har *name;124 const char *name; 125 125 126 126 printf("Base Size Name\n"); -
kernel/arch/ia64/include/atomic.h
r2e99277 raa85487 36 36 #define KERN_ia64_ATOMIC_H_ 37 37 38 static inline uint64_t test_and_set(atomic_t *val)38 static inline atomic_count_t test_and_set(atomic_t *val) 39 39 { 40 uint64_t v;41 40 atomic_count_t v; 41 42 42 asm volatile ( 43 43 "movl %[v] = 0x1;;\n" … … 53 53 { 54 54 do { 55 while (val->count) 56 ; 55 while (val->count); 57 56 } while (test_and_set(val)); 58 57 } … … 60 59 static inline void atomic_inc(atomic_t *val) 61 60 { 62 longv;61 atomic_count_t v; 63 62 64 63 asm volatile ( … … 71 70 static inline void atomic_dec(atomic_t *val) 72 71 { 73 longv;72 atomic_count_t v; 74 73 75 74 asm volatile ( … … 80 79 } 81 80 82 static inline longatomic_preinc(atomic_t *val)81 static inline atomic_count_t atomic_preinc(atomic_t *val) 83 82 { 84 longv;83 atomic_count_t v; 85 84 86 85 asm volatile ( … … 93 92 } 94 93 95 static inline longatomic_predec(atomic_t *val)94 static inline atomic_count_t atomic_predec(atomic_t *val) 96 95 { 97 longv;96 atomic_count_t v; 98 97 99 98 asm volatile ( … … 106 105 } 107 106 108 static inline longatomic_postinc(atomic_t *val)107 static inline atomic_count_t atomic_postinc(atomic_t *val) 109 108 { 110 longv;109 atomic_count_t v; 111 110 112 111 asm volatile ( … … 119 118 } 120 119 121 static inline longatomic_postdec(atomic_t *val)120 static inline atomic_count_t atomic_postdec(atomic_t *val) 122 121 { 123 longv;122 atomic_count_t v; 124 123 125 124 asm volatile ( -
kernel/arch/ia64/include/types.h
r2e99277 raa85487 27 27 */ 28 28 29 /** @addtogroup ia64 29 /** @addtogroup ia64 30 30 * @{ 31 31 */ … … 63 63 typedef uint64_t unative_t; 64 64 typedef int64_t native_t; 65 typedef uint64_t atomic_count_t; 65 66 66 67 typedef struct { -
kernel/arch/ia64/src/cpu/cpu.c
r2e99277 raa85487 52 52 void cpu_print_report(cpu_t *m) 53 53 { 54 c har *family_str;54 const char *family_str; 55 55 char vendor[2 * sizeof(uint64_t) + 1]; 56 56 -
kernel/arch/ia64/src/interrupt.c
r2e99277 raa85487 64 64 #define BUNDLE_SIZE 16 65 65 66 char *vector_names_64_bundle[VECTORS_64_BUNDLE] = {66 static const char *vector_names_64_bundle[VECTORS_64_BUNDLE] = { 67 67 "VHPT Translation vector", 68 68 "Instruction TLB vector", … … 87 87 }; 88 88 89 char *vector_names_16_bundle[VECTORS_16_BUNDLE] = {89 static const char *vector_names_16_bundle[VECTORS_16_BUNDLE] = { 90 90 "Page Not Present vector", 91 91 "Key Permission vector", … … 121 121 }; 122 122 123 static char *vector_to_string(uint16_t vector); 124 static void dump_interrupted_context(istate_t *istate); 125 126 char *vector_to_string(uint16_t vector) 123 static const char *vector_to_string(uint16_t vector) 127 124 { 128 125 ASSERT(vector <= VECTOR_MAX); … … 135 132 } 136 133 137 void dump_interrupted_context(istate_t *istate) 138 { 139 char *ifa, *iipa, *iip; 140 141 ifa = symtab_fmt_name_lookup(istate->cr_ifa); 142 iipa = symtab_fmt_name_lookup(istate->cr_iipa); 143 iip = symtab_fmt_name_lookup(istate->cr_iip); 144 134 static void dump_interrupted_context(istate_t *istate) 135 { 136 const char *ifa = symtab_fmt_name_lookup(istate->cr_ifa); 137 const char *iipa = symtab_fmt_name_lookup(istate->cr_iipa); 138 const char *iip = symtab_fmt_name_lookup(istate->cr_iip); 139 145 140 putchar('\n'); 146 141 printf("Interrupted context dump:\n"); … … 162 157 void general_exception(uint64_t vector, istate_t *istate) 163 158 { 164 c har *desc = "";165 159 const char *desc; 160 166 161 switch (istate->cr_isr.ge_code) { 167 162 case GE_ILLEGALOP: … … 187 182 break; 188 183 } 189 184 190 185 fault_if_from_uspace(istate, "General Exception (%s).", desc); 191 186 192 187 dump_interrupted_context(istate); 193 188 panic("General Exception (%s).", desc); -
kernel/arch/mips32/include/atomic.h
r2e99277 raa85487 27 27 */ 28 28 29 /** @addtogroup mips32 29 /** @addtogroup mips32 30 30 * @{ 31 31 */ … … 51 51 * 52 52 * @return Value after addition. 53 * 53 54 */ 54 static inline long atomic_add(atomic_t *val, int i)55 static inline atomic_count_t atomic_add(atomic_t *val, atomic_count_t i) 55 56 { 56 long tmp, v; 57 atomic_count_t tmp; 58 atomic_count_t v; 57 59 58 60 asm volatile ( … … 64 66 " beq %0, %4, 1b\n" /* if the atomic operation failed, try again */ 65 67 " nop\n" 66 : "=&r" (tmp), "+m" (val->count), "=&r" (v) 67 : "r" (i), "i" (0) 68 : "=&r" (tmp), 69 "+m" (val->count), 70 "=&r" (v) 71 : "r" (i), 72 "i" (0) 68 73 ); 69 74 … … 71 76 } 72 77 73 static inline uint32_t test_and_set(atomic_t *val) { 74 uint32_t tmp, v; 78 static inline atomic_count_t test_and_set(atomic_t *val) 79 { 80 atomic_count_t tmp; 81 atomic_count_t v; 75 82 76 83 asm volatile ( … … 82 89 " beqz %0, 1b\n" 83 90 "2:\n" 84 : "=&r" (tmp), "+m" (val->count), "=&r" (v) 91 : "=&r" (tmp), 92 "+m" (val->count), 93 "=&r" (v) 85 94 : "i" (1) 86 95 ); … … 89 98 } 90 99 91 static inline void atomic_lock_arch(atomic_t *val) { 100 static inline void atomic_lock_arch(atomic_t *val) 101 { 92 102 do { 93 while (val->count) 94 ; 103 while (val->count); 95 104 } while (test_and_set(val)); 96 105 } -
kernel/arch/mips32/include/types.h
r2e99277 raa85487 27 27 */ 28 28 29 /** @addtogroup mips32 29 /** @addtogroup mips32 30 30 * @{ 31 31 */ … … 55 55 typedef uint32_t unative_t; 56 56 typedef int32_t native_t; 57 typedef uint32_t atomic_count_t; 57 58 58 59 typedef struct { -
kernel/arch/mips32/src/cpu/cpu.c
r2e99277 raa85487 40 40 41 41 struct data_t { 42 c har *vendor;43 c har *model;42 const char *vendor; 43 const char *model; 44 44 }; 45 45 -
kernel/arch/mips32/src/debugger.c
r2e99277 raa85487 253 253 { 254 254 unsigned int i; 255 char *symbol;256 255 257 256 printf("# Count Address INPROG ONESHOT FUNCCALL In symbol\n"); 258 257 printf("-- ----- ---------- ------ ------- -------- ---------\n"); 259 258 260 for (i = 0; i < BKPOINTS_MAX; i++) 259 for (i = 0; i < BKPOINTS_MAX; i++) { 261 260 if (breakpoints[i].address) { 262 symbol = symtab_fmt_name_lookup(261 const char *symbol = symtab_fmt_name_lookup( 263 262 breakpoints[i].address); 264 263 265 264 printf("%-2u %-5d %#10zx %-6s %-7s %-8s %s\n", i, 266 265 breakpoints[i].counter, breakpoints[i].address, … … 270 269 BKPOINT_FUNCCALL) ? "true" : "false"), symbol); 271 270 } 271 } 272 272 273 return 1; 273 274 } -
kernel/arch/mips32/src/exception.c
r2e99277 raa85487 49 49 #include <symtab.h> 50 50 51 static c har *exctable[] = {51 static const char *exctable[] = { 52 52 "Interrupt", 53 53 "TLB Modified", … … 74 74 static void print_regdump(istate_t *istate) 75 75 { 76 char *pcsymbol, *rasymbol; 77 78 pcsymbol = symtab_fmt_name_lookup(istate->epc); 79 rasymbol = symtab_fmt_name_lookup(istate->ra); 80 76 const char *pcsymbol = symtab_fmt_name_lookup(istate->epc); 77 const char *rasymbol = symtab_fmt_name_lookup(istate->ra); 78 81 79 printf("PC: %#x(%s) RA: %#x(%s), SP(%p)\n", istate->epc, pcsymbol, 82 80 istate->ra, rasymbol, istate->sp); … … 93 91 static void reserved_instr_exception(int n, istate_t *istate) 94 92 { 95 if (*((uint32_t *) istate->epc) == 0x7c03e83b) {93 if (*((uint32_t *) istate->epc) == 0x7c03e83b) { 96 94 ASSERT(THREAD); 97 95 istate->epc += 4; 98 96 istate->v1 = istate->k1; 99 } else 97 } else 100 98 unhandled_exception(n, istate); 101 99 } -
kernel/arch/mips32/src/mm/tlb.c
r2e99277 raa85487 321 321 void tlb_refill_fail(istate_t *istate) 322 322 { 323 char *symbol, *sym2; 324 325 symbol = symtab_fmt_name_lookup(istate->epc); 326 sym2 = symtab_fmt_name_lookup(istate->ra); 323 const char *symbol = symtab_fmt_name_lookup(istate->epc); 324 const char *sym2 = symtab_fmt_name_lookup(istate->ra); 327 325 328 326 fault_if_from_uspace(istate, "TLB Refill Exception on %p.", … … 335 333 void tlb_invalid_fail(istate_t *istate) 336 334 { 337 char *symbol; 338 339 symbol = symtab_fmt_name_lookup(istate->epc); 340 335 const char *symbol = symtab_fmt_name_lookup(istate->epc); 336 341 337 fault_if_from_uspace(istate, "TLB Invalid Exception on %p.", 342 338 cp0_badvaddr_read()); … … 347 343 void tlb_modified_fail(istate_t *istate) 348 344 { 349 char *symbol; 350 351 symbol = symtab_fmt_name_lookup(istate->epc); 352 345 const char *symbol = symtab_fmt_name_lookup(istate->epc); 346 353 347 fault_if_from_uspace(istate, "TLB Modified Exception on %p.", 354 348 cp0_badvaddr_read()); -
kernel/arch/ppc32/include/atomic.h
r2e99277 raa85487 27 27 */ 28 28 29 /** @addtogroup ppc32 29 /** @addtogroup ppc32 30 30 * @{ 31 31 */ … … 38 38 static inline void atomic_inc(atomic_t *val) 39 39 { 40 longtmp;41 40 atomic_count_t tmp; 41 42 42 asm volatile ( 43 43 "1:\n" … … 46 46 "stwcx. %0, 0, %2\n" 47 47 "bne- 1b" 48 : "=&r" (tmp), "=m" (val->count) 49 : "r" (&val->count), "m" (val->count) 48 : "=&r" (tmp), 49 "=m" (val->count) 50 : "r" (&val->count), 51 "m" (val->count) 50 52 : "cc" 51 53 ); … … 54 56 static inline void atomic_dec(atomic_t *val) 55 57 { 56 longtmp;57 58 atomic_count_t tmp; 59 58 60 asm volatile ( 59 61 "1:\n" 60 62 "lwarx %0, 0, %2\n" 61 63 "addic %0, %0, -1\n" 62 "stwcx. %0, 0, %2\n"64 "stwcx. %0, 0, %2\n" 63 65 "bne- 1b" 64 : "=&r" (tmp), "=m" (val->count) 65 : "r" (&val->count), "m" (val->count) 66 : "=&r" (tmp), 67 "=m" (val->count) 68 : "r" (&val->count), 69 "m" (val->count) 66 70 : "cc" 67 71 ); 68 72 } 69 73 70 static inline longatomic_postinc(atomic_t *val)74 static inline atomic_count_t atomic_postinc(atomic_t *val) 71 75 { 72 76 atomic_inc(val); … … 74 78 } 75 79 76 static inline longatomic_postdec(atomic_t *val)80 static inline atomic_count_t atomic_postdec(atomic_t *val) 77 81 { 78 82 atomic_dec(val); … … 80 84 } 81 85 82 static inline longatomic_preinc(atomic_t *val)86 static inline atomic_count_t atomic_preinc(atomic_t *val) 83 87 { 84 88 atomic_inc(val); … … 86 90 } 87 91 88 static inline longatomic_predec(atomic_t *val)92 static inline atomic_count_t atomic_predec(atomic_t *val) 89 93 { 90 94 atomic_dec(val); -
kernel/arch/ppc32/include/types.h
r2e99277 raa85487 27 27 */ 28 28 29 /** @addtogroup ppc32 29 /** @addtogroup ppc32 30 30 * @{ 31 31 */ … … 55 55 typedef uint32_t unative_t; 56 56 typedef int32_t native_t; 57 typedef uint32_t atomic_count_t; 57 58 58 59 typedef struct { -
kernel/arch/ppc32/src/cpu/cpu.c
r2e99277 raa85487 54 54 void cpu_print_report(cpu_t *m) 55 55 { 56 c har *name;56 const char *name; 57 57 58 58 switch (m->arch.version) { -
kernel/arch/ppc32/src/mm/tlb.c
r2e99277 raa85487 114 114 static void pht_refill_fail(uintptr_t badvaddr, istate_t *istate) 115 115 { 116 char *symbol; 117 char *sym2; 118 119 symbol = symtab_fmt_name_lookup(istate->pc); 120 sym2 = symtab_fmt_name_lookup(istate->lr); 121 116 const char *symbol = symtab_fmt_name_lookup(istate->pc); 117 const char *sym2 = symtab_fmt_name_lookup(istate->lr); 118 122 119 fault_if_from_uspace(istate, 123 120 "PHT Refill Exception on %p.", badvaddr); -
kernel/arch/sparc64/Makefile.inc
r2e99277 raa85487 75 75 arch/$(KARCH)/src/dummy.s \ 76 76 arch/$(KARCH)/src/mm/$(USARCH)/as.c \ 77 arch/$(KARCH)/src/mm/cache.S \78 77 arch/$(KARCH)/src/mm/$(USARCH)/frame.c \ 79 78 arch/$(KARCH)/src/mm/page.c \ … … 93 92 arch/$(KARCH)/src/drivers/sgcn.c \ 94 93 arch/$(KARCH)/src/drivers/pci.c \ 95 arch/$(KARCH)/src/drivers/fhc.c 94 arch/$(KARCH)/src/drivers/fhc.c \ 95 arch/$(KARCH)/src/trap/$(USARCH)/interrupt.c 96 97 ifeq ($(USARCH),sun4u) 98 ARCH_SOURCES += \ 99 arch/$(KARCH)/src/mm/cache.S 100 endif 96 101 97 102 ifeq ($(USARCH),sun4v) … … 108 113 ifeq ($(CONFIG_SMP),y) 109 114 ARCH_SOURCES += \ 110 arch/$(KARCH)/src/smp/ ipi.c \111 arch/$(KARCH)/src/smp/ smp.c115 arch/$(KARCH)/src/smp/$(USARCH)/smp.c \ 116 arch/$(KARCH)/src/smp/$(USARCH)/ipi.c 112 117 endif 113 118 -
kernel/arch/sparc64/include/atomic.h
r2e99277 raa85487 27 27 */ 28 28 29 /** @addtogroup sparc64 29 /** @addtogroup sparc64 30 30 * @{ 31 31 */ … … 45 45 * 46 46 * @param val Atomic variable. 47 * @param i Signed value to be added.47 * @param i Signed value to be added. 48 48 * 49 49 * @return Value of the atomic variable as it existed before addition. 50 * 50 51 */ 51 static inline long atomic_add(atomic_t *val, int i)52 static inline atomic_count_t atomic_add(atomic_t *val, atomic_count_t i) 52 53 { 53 uint64_t a, b; 54 54 atomic_count_t a; 55 atomic_count_t b; 56 55 57 do { 56 volatile uintptr_t x = (uint64_t) &val->count;57 58 a = *(( uint64_t *) x);58 volatile uintptr_t ptr = (uintptr_t) &val->count; 59 60 a = *((atomic_count_t *) ptr); 59 61 b = a + i; 60 asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *)x)), 61 "+r" (b) : "r" (a)); 62 63 asm volatile ( 64 "casx %0, %2, %1\n" 65 : "+m" (*((atomic_count_t *) ptr)), 66 "+r" (b) 67 : "r" (a) 68 ); 62 69 } while (a != b); 63 70 64 71 return a; 65 72 } 66 73 67 static inline longatomic_preinc(atomic_t *val)74 static inline atomic_count_t atomic_preinc(atomic_t *val) 68 75 { 69 76 return atomic_add(val, 1) + 1; 70 77 } 71 78 72 static inline longatomic_postinc(atomic_t *val)79 static inline atomic_count_t atomic_postinc(atomic_t *val) 73 80 { 74 81 return atomic_add(val, 1); 75 82 } 76 83 77 static inline longatomic_predec(atomic_t *val)84 static inline atomic_count_t atomic_predec(atomic_t *val) 78 85 { 79 86 return atomic_add(val, -1) - 1; 80 87 } 81 88 82 static inline longatomic_postdec(atomic_t *val)89 static inline atomic_count_t atomic_postdec(atomic_t *val) 83 90 { 84 91 return atomic_add(val, -1); … … 95 102 } 96 103 97 static inline longtest_and_set(atomic_t *val)104 static inline atomic_count_t test_and_set(atomic_t *val) 98 105 { 99 uint64_t v = 1; 100 volatile uintptr_t x = (uint64_t) &val->count; 101 102 asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *) x)), 103 "+r" (v) : "r" (0)); 104 106 atomic_count_t v = 1; 107 volatile 
uintptr_t ptr = (uintptr_t) &val->count; 108 109 asm volatile ( 110 "casx %0, %2, %1\n" 111 : "+m" (*((atomic_count_t *) ptr)), 112 "+r" (v) 113 : "r" (0) 114 ); 115 105 116 return v; 106 117 } … … 108 119 static inline void atomic_lock_arch(atomic_t *val) 109 120 { 110 uint64_t tmp1 = 1;111 uint64_t tmp2 = 0;112 113 volatile uintptr_t x = (uint64_t) &val->count;114 121 atomic_count_t tmp1 = 1; 122 atomic_count_t tmp2 = 0; 123 124 volatile uintptr_t ptr = (uintptr_t) &val->count; 125 115 126 preemption_disable(); 116 127 117 128 asm volatile ( 118 "0:\n" 119 "casx %0, %3, %1\n" 120 "brz %1, 2f\n" 121 "nop\n" 122 "1:\n" 123 "ldx %0, %2\n" 124 "brz %2, 0b\n" 125 "nop\n" 126 "ba %%xcc, 1b\n" 127 "nop\n" 128 "2:\n" 129 : "+m" (*((uint64_t *) x)), "+r" (tmp1), "+r" (tmp2) : "r" (0) 129 "0:\n" 130 "casx %0, %3, %1\n" 131 "brz %1, 2f\n" 132 "nop\n" 133 "1:\n" 134 "ldx %0, %2\n" 135 "brz %2, 0b\n" 136 "nop\n" 137 "ba %%xcc, 1b\n" 138 "nop\n" 139 "2:\n" 140 : "+m" (*((atomic_count_t *) ptr)), 141 "+r" (tmp1), 142 "+r" (tmp2) 143 : "r" (0) 130 144 ); 131 145 -
kernel/arch/sparc64/include/mm/as.h
r2e99277 raa85487 36 36 #define KERN_sparc64_AS_H_ 37 37 38 #include <arch/mm/tte.h> 39 40 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH 1 41 42 #define KERNEL_ADDRESS_SPACE_START_ARCH (unsigned long) 0x0000000000000000 43 #define KERNEL_ADDRESS_SPACE_END_ARCH (unsigned long) 0xffffffffffffffff 44 #define USER_ADDRESS_SPACE_START_ARCH (unsigned long) 0x0000000000000000 45 #define USER_ADDRESS_SPACE_END_ARCH (unsigned long) 0xffffffffffffffff 46 47 #define USTACK_ADDRESS_ARCH (0xffffffffffffffffULL - (PAGE_SIZE - 1)) 48 49 #ifdef CONFIG_TSB 50 51 /** TSB Tag Target register. */ 52 typedef union tsb_tag_target { 53 uint64_t value; 54 struct { 55 unsigned invalid : 1; /**< Invalidated by software. */ 56 unsigned : 2; 57 unsigned context : 13; /**< Software ASID. */ 58 unsigned : 6; 59 uint64_t va_tag : 42; /**< Virtual address bits <63:22>. */ 60 } __attribute__ ((packed)); 61 } tsb_tag_target_t; 62 63 /** TSB entry. */ 64 typedef struct tsb_entry { 65 tsb_tag_target_t tag; 66 tte_data_t data; 67 } __attribute__ ((packed)) tsb_entry_t; 68 69 typedef struct { 70 tsb_entry_t *itsb; 71 tsb_entry_t *dtsb; 72 } as_arch_t; 73 74 #else 75 76 typedef struct { 77 } as_arch_t; 78 79 #endif /* CONFIG_TSB */ 80 81 #include <genarch/mm/as_ht.h> 82 83 #ifdef CONFIG_TSB 84 #include <arch/mm/tsb.h> 85 #define as_invalidate_translation_cache(as, page, cnt) \ 86 tsb_invalidate((as), (page), (cnt)) 87 #else 88 #define as_invalidate_translation_cache(as, page, cnt) 38 #if defined (SUN4U) 39 #include <arch/mm/sun4u/as.h> 40 #elif defined (SUN4V) 41 #include <arch/mm/sun4v/as.h> 89 42 #endif 90 91 extern void as_arch_init(void);92 43 93 44 #endif -
kernel/arch/sparc64/include/mm/sun4u/tlb.h
r2e99277 raa85487 684 684 685 685 extern void dump_sfsr_and_sfar(void); 686 extern void describe_dmmu_fault(void); 686 687 687 688 #endif /* !def __ASM__ */ -
kernel/arch/sparc64/include/mm/sun4v/frame.h
r2e99277 raa85487 33 33 */ 34 34 35 #ifndef KERN_sparc64_ SUN4V_FRAME_H_36 #define KERN_sparc64_ SUN4V_FRAME_H_35 #ifndef KERN_sparc64_sun4v_FRAME_H_ 36 #define KERN_sparc64_sun4v_FRAME_H_ 37 37 38 /*39 * Page size supported by the MMU.40 * For 8K there is the nasty illegal virtual aliasing problem.41 * Therefore, the kernel uses 8K only internally on the TLB and TSB levels.42 */43 38 #define MMU_FRAME_WIDTH 13 /* 8K */ 44 39 #define MMU_FRAME_SIZE (1 << MMU_FRAME_WIDTH) … … 52 47 #include <arch/types.h> 53 48 54 union frame_address {55 uintptr_t address;56 struct {57 #if defined (US)58 unsigned : 23;59 uint64_t pfn : 28; /**< Physical Frame Number. */60 #elif defined (US3)61 unsigned : 21;62 uint64_t pfn : 30; /**< Physical Frame Number. */63 #endif64 unsigned offset : 13; /**< Offset. */65 } __attribute__ ((packed));66 };67 68 typedef union frame_address frame_address_t;69 70 49 extern uintptr_t last_frame; 71 //MH72 //extern uintptr_t end_of_identity;73 74 50 extern void frame_arch_init(void); 75 51 #define physmem_print() -
kernel/arch/sparc64/include/mm/sun4v/mmu.h
r2e99277 raa85487 28 28 */ 29 29 30 /** @addtogroup sparc64mm 30 /** @addtogroup sparc64mm 31 31 * @{ 32 32 */ … … 45 45 #define ASI_SECONDARY_CONTEXT_REG 0x21 /**< secondary context register ASI. */ 46 46 47 48 49 50 51 52 53 54 55 56 57 /* I-MMU ASIs. */58 #define ASI_IMMU 0x5059 #define ASI_IMMU_TSB_8KB_PTR_REG 0x5160 #define ASI_IMMU_TSB_64KB_PTR_REG 0x5261 #define ASI_ITLB_DATA_IN_REG 0x5462 #define ASI_ITLB_DATA_ACCESS_REG 0x5563 #define ASI_ITLB_TAG_READ_REG 0x5664 #define ASI_IMMU_DEMAP 0x5765 66 /* Virtual Addresses within ASI_IMMU. */67 #define VA_IMMU_TSB_TAG_TARGET 0x0 /**< IMMU TSB tag target register. */68 #define VA_IMMU_SFSR 0x18 /**< IMMU sync fault status register. */69 #define VA_IMMU_TSB_BASE 0x28 /**< IMMU TSB base register. */70 #define VA_IMMU_TAG_ACCESS 0x30 /**< IMMU TLB tag access register. */71 #if defined (US3)72 #define VA_IMMU_PRIMARY_EXTENSION 0x48 /**< IMMU TSB primary extension register */73 #define VA_IMMU_NUCLEUS_EXTENSION 0x58 /**< IMMU TSB nucleus extension register */74 #endif75 76 77 /* D-MMU ASIs. */78 #define ASI_DMMU 0x5879 #define ASI_DMMU_TSB_8KB_PTR_REG 0x5980 #define ASI_DMMU_TSB_64KB_PTR_REG 0x5a81 #define ASI_DMMU_TSB_DIRECT_PTR_REG 0x5b82 #define ASI_DTLB_DATA_IN_REG 0x5c83 #define ASI_DTLB_DATA_ACCESS_REG 0x5d84 #define ASI_DTLB_TAG_READ_REG 0x5e85 #define ASI_DMMU_DEMAP 0x5f86 87 /* Virtual Addresses within ASI_DMMU. */88 #define VA_DMMU_TSB_TAG_TARGET 0x0 /**< DMMU TSB tag target register. */89 #define VA_PRIMARY_CONTEXT_REG 0x8 /**< DMMU primary context register. */90 #define VA_SECONDARY_CONTEXT_REG 0x10 /**< DMMU secondary context register. */91 #define VA_DMMU_SFSR 0x18 /**< DMMU sync fault status register. */92 #define VA_DMMU_SFAR 0x20 /**< DMMU sync fault address register. */93 #define VA_DMMU_TSB_BASE 0x28 /**< DMMU TSB base register. */94 #define VA_DMMU_TAG_ACCESS 0x30 /**< DMMU TLB tag access register. */95 #define VA_DMMU_VA_WATCHPOINT_REG 0x38 /**< DMMU VA data watchpoint register. 
*/96 #define VA_DMMU_PA_WATCHPOINT_REG 0x40 /**< DMMU PA data watchpoint register. */97 #if defined (US3)98 #define VA_DMMU_PRIMARY_EXTENSION 0x48 /**< DMMU TSB primary extension register */99 #define VA_DMMU_SECONDARY_EXTENSION 0x50 /**< DMMU TSB secondary extension register */100 #define VA_DMMU_NUCLEUS_EXTENSION 0x58 /**< DMMU TSB nucleus extension register */101 #endif102 103 #ifndef __ASM__104 105 #include <arch/asm.h>106 #include <arch/barrier.h>107 #include <arch/types.h>108 109 #if defined(US)110 /** LSU Control Register. */111 typedef union {112 uint64_t value;113 struct {114 unsigned : 23;115 unsigned pm : 8;116 unsigned vm : 8;117 unsigned pr : 1;118 unsigned pw : 1;119 unsigned vr : 1;120 unsigned vw : 1;121 unsigned : 1;122 unsigned fm : 16;123 unsigned dm : 1; /**< D-MMU enable. */124 unsigned im : 1; /**< I-MMU enable. */125 unsigned dc : 1; /**< D-Cache enable. */126 unsigned ic : 1; /**< I-Cache enable. */127 128 } __attribute__ ((packed));129 } lsu_cr_reg_t;130 #endif /* US */131 132 #endif /* !def __ASM__ */133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 47 #endif 150 48 -
kernel/arch/sparc64/include/mm/sun4v/tlb.h
r2e99277 raa85487 28 28 */ 29 29 30 /** @addtogroup sparc64mm 30 /** @addtogroup sparc64mm 31 31 * @{ 32 32 */ -
kernel/arch/sparc64/include/mm/sun4v/tsb.h
r2e99277 raa85487 28 28 */ 29 29 30 /** @addtogroup sparc64mm 30 /** @addtogroup sparc64mm 31 31 * @{ 32 32 */ … … 71 71 struct pte; 72 72 73 extern void tsb_invalidate(struct as *as, uintptr_t page, count_t pages);73 extern void tsb_invalidate(struct as *as, uintptr_t page, uint64_t pages); 74 74 extern void itsb_pte_copy(struct pte *t); 75 75 extern void dtsb_pte_copy(struct pte *t, bool ro); -
kernel/arch/sparc64/include/mm/sun4v/tte.h
r2e99277 raa85487 27 27 */ 28 28 29 /** @addtogroup sparc64mm 29 /** @addtogroup sparc64mm 30 30 * @{ 31 31 */ -
kernel/arch/sparc64/include/mm/tlb.h
r2e99277 raa85487 36 36 #define KERN_sparc64_TLB_H_ 37 37 38 38 39 #if defined (SUN4U) 39 40 #include <arch/mm/sun4u/tlb.h> -
kernel/arch/sparc64/include/mm/tsb.h
r2e99277 raa85487 36 36 #define KERN_sparc64_TSB_H_ 37 37 38 /* 39 * ITSB abd DTSB will claim 64K of memory, which 40 * is a nice number considered that it is one of 41 * the page sizes supported by hardware, which, 42 * again, is nice because TSBs need to be locked 43 * in TLBs - only one TLB entry will do. 44 */ 45 #define TSB_SIZE 2 /* when changing this, change 46 * as.c as well */ 47 #define ITSB_ENTRY_COUNT (512 * (1 << TSB_SIZE)) 48 #define DTSB_ENTRY_COUNT (512 * (1 << TSB_SIZE)) 49 50 #define TSB_TAG_TARGET_CONTEXT_SHIFT 48 51 52 #ifndef __ASM__ 53 54 #include <arch/mm/tte.h> 55 #include <arch/mm/mmu.h> 56 #include <arch/types.h> 57 58 /** TSB Base register. */ 59 typedef union tsb_base_reg { 60 uint64_t value; 61 struct { 62 uint64_t base : 51; /**< TSB base address, bits 63:13. */ 63 unsigned split : 1; /**< Split vs. common TSB for 8K and 64K 64 * pages. HelenOS uses only 8K pages 65 * for user mappings, so we always set 66 * this to 0. 67 */ 68 unsigned : 9; 69 unsigned size : 3; /**< TSB size. Number of entries is 70 * 512 * 2^size. */ 71 } __attribute__ ((packed)); 72 } tsb_base_reg_t; 73 74 /** Read ITSB Base register. 75 * 76 * @return Content of the ITSB Base register. 77 */ 78 static inline uint64_t itsb_base_read(void) 79 { 80 return asi_u64_read(ASI_IMMU, VA_IMMU_TSB_BASE); 81 } 82 83 /** Read DTSB Base register. 84 * 85 * @return Content of the DTSB Base register. 86 */ 87 static inline uint64_t dtsb_base_read(void) 88 { 89 return asi_u64_read(ASI_DMMU, VA_DMMU_TSB_BASE); 90 } 91 92 /** Write ITSB Base register. 93 * 94 * @param v New content of the ITSB Base register. 95 */ 96 static inline void itsb_base_write(uint64_t v) 97 { 98 asi_u64_write(ASI_IMMU, VA_IMMU_TSB_BASE, v); 99 } 100 101 /** Write DTSB Base register. 102 * 103 * @param v New content of the DTSB Base register. 
104 */ 105 static inline void dtsb_base_write(uint64_t v) 106 { 107 asi_u64_write(ASI_DMMU, VA_DMMU_TSB_BASE, v); 108 } 109 110 #if defined (US3) 111 112 /** Write DTSB Primary Extension register. 113 * 114 * @param v New content of the DTSB Primary Extension register. 115 */ 116 static inline void dtsb_primary_extension_write(uint64_t v) 117 { 118 asi_u64_write(ASI_DMMU, VA_DMMU_PRIMARY_EXTENSION, v); 119 } 120 121 /** Write DTSB Secondary Extension register. 122 * 123 * @param v New content of the DTSB Secondary Extension register. 124 */ 125 static inline void dtsb_secondary_extension_write(uint64_t v) 126 { 127 asi_u64_write(ASI_DMMU, VA_DMMU_SECONDARY_EXTENSION, v); 128 } 129 130 /** Write DTSB Nucleus Extension register. 131 * 132 * @param v New content of the DTSB Nucleus Extension register. 133 */ 134 static inline void dtsb_nucleus_extension_write(uint64_t v) 135 { 136 asi_u64_write(ASI_DMMU, VA_DMMU_NUCLEUS_EXTENSION, v); 137 } 138 139 /** Write ITSB Primary Extension register. 140 * 141 * @param v New content of the ITSB Primary Extension register. 142 */ 143 static inline void itsb_primary_extension_write(uint64_t v) 144 { 145 asi_u64_write(ASI_IMMU, VA_IMMU_PRIMARY_EXTENSION, v); 146 } 147 148 /** Write ITSB Nucleus Extension register. 149 * 150 * @param v New content of the ITSB Nucleus Extension register. 151 */ 152 static inline void itsb_nucleus_extension_write(uint64_t v) 153 { 154 asi_u64_write(ASI_IMMU, VA_IMMU_NUCLEUS_EXTENSION, v); 155 } 156 38 #if defined (SUN4U) 39 #include <arch/mm/sun4u/tsb.h> 40 #elif defined (SUN4V) 41 #include <arch/mm/sun4v/tsb.h> 157 42 #endif 158 159 /* Forward declarations. */160 struct as;161 struct pte;162 163 extern void tsb_invalidate(struct as *as, uintptr_t page, size_t pages);164 extern void itsb_pte_copy(struct pte *t, size_t index);165 extern void dtsb_pte_copy(struct pte *t, size_t index, bool ro);166 167 #endif /* !def __ASM__ */168 43 169 44 #endif -
kernel/arch/sparc64/include/sun4v/arch.h
r2e99277 raa85487 58 58 #define SCRATCHPAD_WBUF 0x18 59 59 60 //MH - remove when cpu.h is forked61 #define ASI_NUCLEUS_QUAD_LDD 0x24 /** ASI for 16-byte atomic loads. */62 #define ASI_DCACHE_TAG 0x47 /** ASI D-Cache Tag. */63 #define ASI_ICBUS_CONFIG 0x4a /** ASI of the UPA_CONFIG/FIREPLANE_CONFIG register. */64 65 60 #endif 66 61 -
kernel/arch/sparc64/include/sun4v/cpu.h
r2e99277 raa85487 44 44 #ifndef __ASM__ 45 45 46 #include <atomic.h> 47 #include <synch/spinlock.h> 48 46 49 struct cpu; 47 50 48 /*49 51 typedef struct { 50 52 uint64_t exec_unit_id; … … 55 57 SPINLOCK_DECLARE(proposed_nrdy_lock); 56 58 } exec_unit_t; 57 */58 59 59 60 typedef struct cpu_arch { … … 63 64 generated when the TICK register 64 65 matches this value. */ 65 //exec_unit_t *exec_unit; /**< Physical core. */66 //unsigned long proposed_nrdy; /**< Proposed No. of ready threads67 //so that cores are equally balanced. */66 exec_unit_t *exec_unit; /**< Physical core. */ 67 unsigned long proposed_nrdy; /**< Proposed No. of ready threads 68 so that cores are equally balanced. */ 68 69 } cpu_arch_t; 69 70 -
kernel/arch/sparc64/include/sun4v/hypercall.h
r2e99277 raa85487 75 75 76 76 /* return codes */ 77 #define EOK0 /**< Successful return */78 #define ENOCPU 1 /**< Invalid CPU id */79 #define ENORADDR2 /**< Invalid real address */80 #define ENOINTR 3 /**< Invalid interrupt id */81 #define EBADPGSZ4 /**< Invalid pagesize encoding */82 #define EBADTSB 5 /**< Invalid TSB description */83 #define EINVAL 6 /**< Invalid argument */84 #define EBADTRAP7 /**< Invalid function number */85 #define EBADALIGN8 /**< Invalid address alignment */86 #define EWOULDBLOCK9 /**< Cannot complete operation without blocking */87 #define ENOACCESS10 /**< No access to specified resource */88 #define EIO11 /**< I/O Error */89 #define ECPUERROR12 /**< CPU is in error state */90 #define ENOTSUPPORTED 13 /**< Function not supported */91 #define ENOMAP 14 /**< No mapping found */92 #define ETOOMANY15 /**< Too many items specified / limit reached */93 #define ECHANNEL16 /**< Invalid LDC channel */94 #define EBUSY 17 /**< Operation failed as resource is otherwise busy */77 #define HV_EOK 0 /**< Successful return */ 78 #define HV_ENOCPU 1 /**< Invalid CPU id */ 79 #define HV_ENORADDR 2 /**< Invalid real address */ 80 #define HV_ENOINTR 3 /**< Invalid interrupt id */ 81 #define HV_EBADPGSZ 4 /**< Invalid pagesize encoding */ 82 #define HV_EBADTSB 5 /**< Invalid TSB description */ 83 #define HV_EINVAL 6 /**< Invalid argument */ 84 #define HV_EBADTRAP 7 /**< Invalid function number */ 85 #define HV_EBADALIGN 8 /**< Invalid address alignment */ 86 #define HV_EWOULDBLOCK 9 /**< Cannot complete operation without blocking */ 87 #define HV_ENOACCESS 10 /**< No access to specified resource */ 88 #define HV_EIO 11 /**< I/O Error */ 89 #define HV_ECPUERROR 12 /**< CPU is in error state */ 90 #define HV_ENOTSUPPORTED 13 /**< Function not supported */ 91 #define HV_ENOMAP 14 /**< No mapping found */ 92 #define HV_ETOOMANY 15 /**< Too many items specified / limit reached */ 93 #define HV_ECHANNEL 16 /**< Invalid LDC channel */ 94 #define HV_EBUSY 17 /**< 
Operation failed as resource is otherwise busy */ 95 95 96 96 … … 190 190 __hypercall_fast_ret1(const uint64_t p1, const uint64_t p2, const uint64_t p3, 191 191 const uint64_t p4, const uint64_t p5, const uint64_t function_number, 192 uint64_t * constret1)192 uint64_t *ret1) 193 193 { 194 uint64_t errno = __hypercall_fast(p1, p2, p3, p4, p5, function_number); 195 if (ret1 != NULL) { 196 asm volatile ("mov %%o1, %0\n" : "=r" (*ret1)); 197 } 198 return errno; 194 register uint64_t a6 asm("o5") = function_number; 195 register uint64_t a1 asm("o0") = p1; 196 register uint64_t a2 asm("o1") = p2; 197 register uint64_t a3 asm("o2") = p3; 198 register uint64_t a4 asm("o3") = p4; 199 register uint64_t a5 asm("o4") = p5; 200 201 asm volatile ( 202 "ta %8\n" 203 : "=r" (a1), "=r" (a2) 204 : "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), 205 "i" (FAST_TRAP) 206 : "memory" 207 ); 208 209 if (ret1) 210 *ret1 = a2; 211 212 return a1; 199 213 } 200 214 -
kernel/arch/sparc64/include/sun4v/ipi.h
r2e99277 raa85487 38 38 #define KERN_sparc64_sun4v_IPI_H_ 39 39 40 uint64_t ipi_brodcast_to(void (*func)(void), uint16_t cpu_list[MAX_NUM_STRANDS], 41 uint64_t list_size); 42 uint64_t ipi_unicast_to(void (*func)(void), uint16_t cpu_id); 40 #include <arch/types.h> 41 42 extern uint64_t ipi_brodcast_to(void (*)(void), uint16_t cpu_list[], uint64_t); 43 extern uint64_t ipi_unicast_to(void (*)(void), uint16_t); 43 44 44 45 #endif -
kernel/arch/sparc64/include/trap/exception.h
r2e99277 raa85487 38 38 39 39 #define TT_INSTRUCTION_ACCESS_EXCEPTION 0x08 40 #define TT_INSTRUCTION_ACCESS_MMU_MISS 0x09 40 41 #define TT_INSTRUCTION_ACCESS_ERROR 0x0a 42 #define TT_IAE_UNAUTH_ACCESS 0x0b 43 #define TT_IAE_NFO_PAGE 0x0c 41 44 #define TT_ILLEGAL_INSTRUCTION 0x10 42 45 #define TT_PRIVILEGED_OPCODE 0x11 43 46 #define TT_UNIMPLEMENTED_LDD 0x12 44 47 #define TT_UNIMPLEMENTED_STD 0x13 48 #define TT_DAE_INVALID_ASI 0x14 49 #define TT_DAE_PRIVILEGE_VIOLATION 0x15 50 #define TT_DAE_NC_PAGE 0x16 51 #define TT_DAE_NFO_PAGE 0x17 45 52 #define TT_FP_DISABLED 0x20 46 53 #define TT_FP_EXCEPTION_IEEE_754 0x21 … … 49 56 #define TT_DIVISION_BY_ZERO 0x28 50 57 #define TT_DATA_ACCESS_EXCEPTION 0x30 58 #define TT_DATA_ACCESS_MMU_MISS 0x31 51 59 #define TT_DATA_ACCESS_ERROR 0x32 52 60 #define TT_MEM_ADDRESS_NOT_ALIGNED 0x34 -
kernel/arch/sparc64/include/trap/interrupt.h
r2e99277 raa85487 32 32 /** 33 33 * @file 34 * @brief This file contains interrupt vector trap handler. 34 * @brief This file contains level N interrupt and inter-processor interrupt 35 * trap handler. 35 36 */ 36 37 #ifndef KERN_sparc64_TRAP_INTERRUPT_H_ 38 #define KERN_sparc64_TRAP_INTERRUPT_H_ 39 40 #include <arch/trap/trap_table.h> 41 #include <arch/stack.h> 42 43 /* IMAP register bits */ 44 #define IGN_MASK 0x7c0 45 #define INO_MASK 0x1f 46 #define IMAP_V_MASK (1ULL << 31) 47 48 #define IGN_SHIFT 6 49 50 51 /* Interrupt ASI registers. */ 52 #define ASI_INTR_W 0x77 53 #define ASI_INTR_DISPATCH_STATUS 0x48 54 #define ASI_INTR_R 0x7f 55 #define ASI_INTR_RECEIVE 0x49 56 57 /* VA's used with ASI_INTR_W register. */ 58 #if defined (US) 59 #define ASI_UDB_INTR_W_DATA_0 0x40 60 #define ASI_UDB_INTR_W_DATA_1 0x50 61 #define ASI_UDB_INTR_W_DATA_2 0x60 62 #elif defined (US3) 63 #define VA_INTR_W_DATA_0 0x40 64 #define VA_INTR_W_DATA_1 0x48 65 #define VA_INTR_W_DATA_2 0x50 66 #define VA_INTR_W_DATA_3 0x58 67 #define VA_INTR_W_DATA_4 0x60 68 #define VA_INTR_W_DATA_5 0x68 69 #define VA_INTR_W_DATA_6 0x80 70 #define VA_INTR_W_DATA_7 0x88 71 #endif 72 #define VA_INTR_W_DISPATCH 0x70 73 74 /* VA's used with ASI_INTR_R register. */ 75 #if defined(US) 76 #define ASI_UDB_INTR_R_DATA_0 0x40 77 #define ASI_UDB_INTR_R_DATA_1 0x50 78 #define ASI_UDB_INTR_R_DATA_2 0x60 79 #elif defined (US3) 80 #define VA_INTR_R_DATA_0 0x40 81 #define VA_INTR_R_DATA_1 0x48 82 #define VA_INTR_R_DATA_2 0x50 83 #define VA_INTR_R_DATA_3 0x58 84 #define VA_INTR_R_DATA_4 0x60 85 #define VA_INTR_R_DATA_5 0x68 86 #define VA_INTR_R_DATA_6 0x80 87 #define VA_INTR_R_DATA_7 0x88 88 #endif 89 90 /* Shifts in the Interrupt Vector Dispatch virtual address. */ 91 #define INTR_VEC_DISPATCH_MID_SHIFT 14 92 93 /* Bits in the Interrupt Dispatch Status register. 
*/ 94 #define INTR_DISPATCH_STATUS_NACK 0x2 95 #define INTR_DISPATCH_STATUS_BUSY 0x1 37 #ifndef KERN_sparc64_INTERRUPT_TRAP_H_ 38 #define KERN_sparc64_INTERRUPT_TRAP_H_ 96 39 97 40 #define TT_INTERRUPT_LEVEL_1 0x41 … … 111 54 #define TT_INTERRUPT_LEVEL_15 0x4f 112 55 113 #define TT_INTERRUPT_VECTOR_TRAP 0x6056 #define INTERRUPT_LEVEL_N_HANDLER_SIZE TRAP_TABLE_ENTRY_SIZE 114 57 115 #define INTERRUPT_LEVEL_N_HANDLER_SIZE TRAP_TABLE_ENTRY_SIZE 116 #define INTERRUPT_VECTOR_TRAP_HANDLER_SIZE TRAP_TABLE_ENTRY_SIZE 58 /* IMAP register bits */ 59 #define IGN_MASK 0x7c0 60 #define INO_MASK 0x1f 61 #define IMAP_V_MASK (1ULL << 31) 62 63 #define IGN_SHIFT 6 64 117 65 118 66 #ifdef __ASM__ … … 121 69 PREEMPTIBLE_HANDLER exc_dispatch 122 70 .endm 123 124 .macro INTERRUPT_VECTOR_TRAP_HANDLER 125 PREEMPTIBLE_HANDLER interrupt 126 .endm 127 #endif /* __ASM__ */ 71 #endif 128 72 129 73 #ifndef __ASM__ … … 134 78 #endif /* !def __ASM__ */ 135 79 80 81 #if defined (SUN4U) 82 #include <arch/trap/sun4u/interrupt.h> 83 #elif defined (SUN4V) 84 #include <arch/trap/sun4v/interrupt.h> 85 #endif 86 136 87 #endif 137 88 -
kernel/arch/sparc64/include/trap/regwin.h
r2e99277 raa85487 183 183 add %l0, 1, %l0 184 184 wrpr %l0, 0, %cleanwin 185 #if defined(SUN4U) 185 186 mov %r0, %l0 186 187 mov %r0, %l1 … … 199 200 mov %r0, %o6 200 201 mov %r0, %o7 202 #endif 201 203 retry 202 204 .endm 203 205 #endif /* __ASM__ */ 204 206 205 #if defined (SUN4U)207 #if defined(SUN4U) 206 208 #include <arch/trap/sun4u/regwin.h> 207 #elif defined (SUN4V)209 #elif defined(SUN4V) 208 210 #include <arch/trap/sun4v/regwin.h> 209 211 #endif -
kernel/arch/sparc64/include/trap/sun4v/mmu.h
r2e99277 raa85487 36 36 */ 37 37 38 #ifndef KERN_sparc64_ SUN4V_MMU_TRAP_H_39 #define KERN_sparc64_ SUN4V_MMU_TRAP_H_38 #ifndef KERN_sparc64_sun4v_MMU_TRAP_H_ 39 #define KERN_sparc64_sun4v_MMU_TRAP_H_ 40 40 41 41 #include <arch/stack.h> … … 121 121 * but this time its handler accesse memory which IS mapped. 122 122 */ 123 0: 124 .if (\tl > 0) 125 wrpr %g0, 1, %tl 126 .endif 123 .if (\tl > 0) 124 wrpr %g0, 1, %tl 125 .endif 127 126 128 127 /* -
kernel/arch/sparc64/include/trap/trap_table.h
r2e99277 raa85487 101 101 .macro PREEMPTIBLE_HANDLER f 102 102 sethi %hi(\f), %g1 103 b a %xcc,preemptible_handler103 b preemptible_handler 104 104 or %g1, %lo(\f), %g1 105 105 .endm -
kernel/arch/sparc64/include/types.h
r2e99277 raa85487 27 27 */ 28 28 29 /** @addtogroup sparc64 29 /** @addtogroup sparc64 30 30 * @{ 31 31 */ … … 55 55 typedef uint64_t unative_t; 56 56 typedef int64_t native_t; 57 typedef uint64_t atomic_count_t; 57 58 58 59 typedef struct { -
kernel/arch/sparc64/src/cpu/sun4u/cpu.c
r2e99277 raa85487 129 129 void cpu_print_report(cpu_t *m) 130 130 { 131 char *manuf, *impl; 131 const char *manuf; 132 const char *impl; 132 133 133 134 switch (m->arch.ver.manuf) { -
kernel/arch/sparc64/src/cpu/sun4v/cpu.c
r2e99277 raa85487 1 1 /* 2 2 * Copyright (c) 2005 Jakub Jermar 3 * Copyright (c) 2009 Pavel Rimsky 3 4 * All rights reserved. 4 5 * … … 40 41 #include <arch/sun4v/md.h> 41 42 #include <arch/sun4v/hypercall.h> 42 43 //#include <arch/trap/sun4v/interrupt.h> 43 #include <arch/trap/sun4v/interrupt.h> 44 44 45 45 /** Perform sparc64 specific initialization of the processor structure for the … … 51 51 __hypercall_fast_ret1(0, 0, 0, 0, 0, CPU_MYID, &myid); 52 52 53 //MH 54 //CPU->arch.id = myid; 53 CPU->arch.id = myid; 55 54 56 55 md_node_t node = md_get_root(); … … 74 73 75 74 tick_init(); 76 //MH - uncomment later 77 //sun4v_ipi_init();75 76 sun4v_ipi_init(); 78 77 } 79 78 … … 96 95 { 97 96 printf("cpu%d: Niagara (%d MHz)\n", m->id, 98 m->arch.clock_frequency / 1000000);97 m->arch.clock_frequency / 1000000); 99 98 } 100 99 -
kernel/arch/sparc64/src/drivers/niagara.c
r2e99277 raa85487 104 104 static inline void do_putchar(const char c) { 105 105 /* repeat until the buffer is non-full */ 106 while (__hypercall_fast1(CONS_PUTCHAR, c) == EWOULDBLOCK)106 while (__hypercall_fast1(CONS_PUTCHAR, c) == HV_EWOULDBLOCK) 107 107 ; 108 108 } … … 111 111 static void niagara_putchar(outdev_t *dev, const wchar_t ch, bool silent) 112 112 { 113 if (silent) 114 return; 115 113 116 do_putchar(ch); 114 117 if (ch == '\n') … … 136 139 137 140 /* read character from keyboard, send it to upper layers of HelenOS */ 138 if (__hypercall_fast_ret1(0, 0, 0, 0, 0, CONS_GETCHAR, &c) == EOK) {141 if (__hypercall_fast_ret1(0, 0, 0, 0, 0, CONS_GETCHAR, &c) == HV_EOK) { 139 142 if (!silent) { 140 143 /* kconsole active, send the character to kernel */ -
kernel/arch/sparc64/src/mm/sun4u/tlb.c
r2e99277 raa85487 64 64 tlb_tag_access_reg_t, const char *); 65 65 66 c har *context_encoding[] = {66 const char *context_encoding[] = { 67 67 "Primary", 68 68 "Secondary", … … 476 476 } 477 477 478 void d ump_sfsr_and_sfar(void)478 void describe_dmmu_fault(void) 479 479 { 480 480 tlb_sfsr_reg_t sfsr; … … 499 499 } 500 500 501 void dump_sfsr_and_sfar(void) 502 { 503 tlb_sfsr_reg_t sfsr; 504 uintptr_t sfar; 505 506 sfsr.value = dtlb_sfsr_read(); 507 sfar = dtlb_sfar_read(); 508 509 #if defined (US) 510 printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, " 511 "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, 512 sfsr.ow, sfsr.fv); 513 #elif defined (US3) 514 printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, " 515 "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft, 516 sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv); 517 #endif 518 519 printf("DTLB SFAR: address=%p\n", sfar); 520 521 dtlb_sfsr_write(0); 522 } 523 501 524 #if defined (US) 502 525 /** Invalidate all unlocked ITLB and DTLB entries. */ -
kernel/arch/sparc64/src/mm/sun4v/as.c
r2e99277 raa85487 1 1 /* 2 2 * Copyright (c) 2006 Jakub Jermar 3 * Copyright (c) 2009 Pavel Rimsky 3 4 * All rights reserved. 4 5 * … … 34 35 35 36 #include <arch/mm/as.h> 37 #include <arch/mm/pagesize.h> 36 38 #include <arch/mm/tlb.h> 37 39 #include <genarch/mm/page_ht.h> … … 39 41 #include <debug.h> 40 42 #include <config.h> 43 #include <arch/sun4v/hypercall.h> 41 44 42 45 #ifdef CONFIG_TSB … … 86 89 { 87 90 #ifdef CONFIG_TSB 88 count_t cnt = (TSB_ENTRY_COUNT * sizeof(tsb_entry_t)) >> FRAME_WIDTH;91 size_t cnt = (TSB_ENTRY_COUNT * sizeof(tsb_entry_t)) >> FRAME_WIDTH; 89 92 frame_free((uintptr_t) as->arch.tsb_description.tsb_base); 90 93 return cnt; … … 112 115 { 113 116 mmu_secondary_context_write(as->asid); 117 #ifdef CONFIG_TSB 118 uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH); 119 120 ASSERT(as->arch.tsb_description.tsb_base); 121 uintptr_t tsb = PA2KA(as->arch.tsb_description.tsb_base); 122 123 if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) { 124 /* 125 * TSBs were allocated from memory not covered 126 * by the locked 4M kernel DTLB entry. We need 127 * to map both TSBs explicitly. 128 */ 129 mmu_demap_page(tsb, 0, MMU_FLAG_DTLB); 130 dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true); 131 } 132 133 __hypercall_fast2(MMU_TSB_CTXNON0, 1, KA2PA(&(as->arch.tsb_description))); 134 135 #endif 114 136 } 115 137 … … 134 156 uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH); 135 157 136 ASSERT(as->arch. itsb && as->arch.dtsb);158 ASSERT(as->arch.tsb_description.tsb_base); 137 159 138 uintptr_t tsb = (uintptr_t) as->arch.itsb;160 uintptr_t tsb = PA2KA(as->arch.tsb_description.tsb_base); 139 161 140 162 if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) { … … 144 166 * to demap the entry installed by as_install_arch(). 145 167 */ 146 dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);168 __hypercall_fast3(MMU_UNMAP_PERM_ADDR, tsb, 0, MMU_FLAG_DTLB); 147 169 } 148 170 #endif -
kernel/arch/sparc64/src/mm/sun4v/frame.c
r2e99277 raa85487 41 41 #include <macros.h> 42 42 43 uintptr_t last_frame = NULL;44 45 43 /** Create memory zones according to information stored in bootinfo. 46 44 * … … 68 66 SIZE2FRAMES(ALIGN_DOWN(size, FRAME_SIZE)), 69 67 confdata, 0); 70 last_frame = max(last_frame, start + ALIGN_UP(size,71 FRAME_SIZE));72 68 } 73 69 … … 80 76 frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1); 81 77 } 82 83 //MH84 // end_of_identity = PA2KA(last_frame);85 78 } 86 79 -
kernel/arch/sparc64/src/mm/sun4v/tlb.c
r2e99277 raa85487 89 89 * field of the MMU fault status area is i. 90 90 */ 91 char *fault_types[] = {91 static const char *fault_types[] = { 92 92 "unknown", 93 93 "fast miss", … … 107 107 "invalid page size" 108 108 }; 109 110 109 111 110 /** Array of MMU fault status areas. */ … … 406 405 uint64_t errno = __hypercall_fast3(MMU_DEMAP_ALL, 0, 0, 407 406 MMU_FLAG_DTLB | MMU_FLAG_ITLB); 408 if (errno != EOK) {407 if (errno != HV_EOK) { 409 408 panic("Error code = %d.\n", errno); 410 409 } -
kernel/arch/sparc64/src/mm/sun4v/tsb.c
r2e99277 raa85487 1 1 /* 2 2 * Copyright (c) 2006 Jakub Jermar 3 * Copyright (c) 2009 Pavel Rimsky 3 4 * All rights reserved. 4 5 * … … 34 35 35 36 #include <arch/mm/tsb.h> 37 #include <arch/mm/pagesize.h> 36 38 #include <arch/mm/tlb.h> 37 39 #include <arch/mm/page.h> … … 49 51 * portions of both TSBs are invalidated at a time. 50 52 * 51 * @param as Address space.52 * @param page First page to invalidate in TSB.53 * @param pages Number of pages to invalidate. Value of ( size_t) -1 means the54 * whole TSB.53 * @param as Address space. 54 * @param page First page to invalidate in TSB. 55 * @param pages Number of pages to invalidate. Value of (count_t) -1 means the 56 * whole TSB. 55 57 */ 56 58 void tsb_invalidate(as_t *as, uintptr_t page, size_t pages) 57 59 { 58 size_t i0; 59 size_t i; 60 size_t i0, i; 60 61 size_t cnt; 61 62 62 ASSERT(as->arch. itsb && as->arch.dtsb);63 ASSERT(as->arch.tsb_description.tsb_base); 63 64 64 65 i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK; 65 ASSERT(i0 < ITSB_ENTRY_COUNT && i0 < DTSB_ENTRY_COUNT);66 ASSERT(i0 < TSB_ENTRY_COUNT); 66 67 67 if (pages == (size_t) - 1 || (pages * 2) > ITSB_ENTRY_COUNT)68 cnt = ITSB_ENTRY_COUNT;68 if (pages == (size_t) - 1 || (pages) > TSB_ENTRY_COUNT) 69 cnt = TSB_ENTRY_COUNT; 69 70 else 70 cnt = pages * 2;71 cnt = pages; 71 72 72 73 for (i = 0; i < cnt; i++) { 73 as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT - 1)].tag.invalid = 74 true; 75 as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT - 1)].tag.invalid = 76 true; 74 ((tsb_entry_t *) PA2KA(as->arch.tsb_description.tsb_base))[ 75 (i0 + i) & (TSB_ENTRY_COUNT - 1)].data.v = false; 77 76 } 78 77 } … … 81 80 * 82 81 * @param t Software PTE. 
83 * @param index Zero if lower 8K-subpage, one if higher 8K subpage.84 82 */ 85 void itsb_pte_copy(pte_t *t , size_t index)83 void itsb_pte_copy(pte_t *t) 86 84 { 87 #if 088 85 as_t *as; 89 86 tsb_entry_t *tsb; 90 87 size_t entry; 91 88 92 ASSERT(index <= 1);93 94 89 as = t->as; 95 entry = ( (t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;96 ASSERT(entry < ITSB_ENTRY_COUNT);97 tsb = & as->arch.itsb[entry];90 entry = (t->page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK; 91 ASSERT(entry < TSB_ENTRY_COUNT); 92 tsb = &((tsb_entry_t *) PA2KA(as->arch.tsb_description.tsb_base))[entry]; 98 93 99 94 /* … … 103 98 */ 104 99 105 tsb->tag.invalid = true; /* invalidate the entry 106 * (tag target has this 107 * set to 0) */ 100 tsb->data.v = false; 108 101 109 102 write_barrier(); 110 103 111 tsb->tag.context = as->asid;112 /* the shift is bigger than PAGE_WIDTH, do not bother with index */113 104 tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT; 105 114 106 tsb->data.value = 0; 107 tsb->data.nfo = false; 108 tsb->data.ra = t->frame >> MMU_FRAME_WIDTH; 109 tsb->data.ie = false; 110 tsb->data.e = false; 111 tsb->data.cp = t->c; /* cp as cache in phys.-idxed, c as cacheable */ 112 tsb->data.cv = false; 113 tsb->data.p = t->k; /* p as privileged, k as kernel */ 114 tsb->data.x = true; 115 tsb->data.w = false; 115 116 tsb->data.size = PAGESIZE_8K; 116 tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;117 tsb->data.cp = t->c; /* cp as cache in phys.-idxed, c as cacheable */118 tsb->data.p = t->k; /* p as privileged, k as kernel */119 tsb->data.v = t->p; /* v as valid, p as present */120 117 121 118 write_barrier(); 122 119 123 tsb->tag.invalid = false; /* mark the entry as valid */ 124 #endif 120 tsb->data.v = t->p; /* v as valid, p as present */ 125 121 } 126 122 … … 128 124 * 129 125 * @param t Software PTE. 130 * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.131 126 * @param ro If true, the mapping is copied read-only. 
132 127 */ 133 void dtsb_pte_copy(pte_t *t, size_t index,bool ro)128 void dtsb_pte_copy(pte_t *t, bool ro) 134 129 { 135 #if 0136 130 as_t *as; 137 131 tsb_entry_t *tsb; 138 132 size_t entry; 139 140 ASSERT(index <= 1);141 133 142 134 as = t->as; 143 entry = ( (t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;144 ASSERT(entry < DTSB_ENTRY_COUNT);145 tsb = & as->arch.dtsb[entry];135 entry = (t->page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK; 136 ASSERT(entry < TSB_ENTRY_COUNT); 137 tsb = &((tsb_entry_t *) PA2KA(as->arch.tsb_description.tsb_base))[entry]; 146 138 147 139 /* … … 151 143 */ 152 144 153 tsb->tag.invalid = true; /* invalidate the entry 154 * (tag target has this 155 * set to 0) */ 145 tsb->data.v = false; 156 146 157 147 write_barrier(); 158 148 159 tsb->tag.context = as->asid;160 /* the shift is bigger than PAGE_WIDTH, do not bother with index */161 149 tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT; 150 162 151 tsb->data.value = 0; 163 tsb->data.size = PAGESIZE_8K; 164 tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index; 165 tsb->data.cp = t->c; 152 tsb->data.nfo = false; 153 tsb->data.ra = t->frame >> MMU_FRAME_WIDTH; 154 tsb->data.ie = false; 155 tsb->data.e = false; 156 tsb->data.cp = t->c; /* cp as cache in phys.-idxed, c as cacheable */ 166 157 #ifdef CONFIG_VIRT_IDX_DCACHE 167 158 tsb->data.cv = t->c; 168 159 #endif /* CONFIG_VIRT_IDX_DCACHE */ 169 tsb->data.p = t->k; /* p as privileged */ 160 tsb->data.p = t->k; /* p as privileged, k as kernel */ 161 tsb->data.x = true; 170 162 tsb->data.w = ro ? false : t->w; 171 tsb->data. v = t->p;163 tsb->data.size = PAGESIZE_8K; 172 164 173 165 write_barrier(); 174 166 175 tsb->tag.invalid = false; /* mark the entry as valid */ 176 #endif 167 tsb->data.v = t->p; /* v as valid, p as present */ 177 168 } 178 169 179 170 /** @} 180 171 */ 181 -
kernel/arch/sparc64/src/smp/sun4u/ipi.c
r2e99277 raa85487 99 99 status = asi_u64_read(ASI_INTR_DISPATCH_STATUS, 0); 100 100 if (status & INTR_DISPATCH_STATUS_BUSY) 101 panic("Interrupt Dispatch Status busy bit set .");101 panic("Interrupt Dispatch Status busy bit set\n"); 102 102 103 103 ASSERT(!(pstate_read() & PSTATE_IE_BIT)); … … 152 152 break; 153 153 default: 154 panic("Unknown IPI (%d). ", ipi);154 panic("Unknown IPI (%d).\n", ipi); 155 155 break; 156 156 } -
kernel/arch/sparc64/src/smp/sun4u/smp.c
r2e99277 raa85487 62 62 { 63 63 ofw_tree_node_t *node; 64 size_t cnt = 0;64 unsigned int cnt = 0; 65 65 66 66 if (is_us() || is_us_iii()) { -
kernel/arch/sparc64/src/sun4v/md.c
r2e99277 raa85487 104 104 static md_element_t *get_element(element_idx_t idx) 105 105 { 106 return (md_element_t *) ( 107 mach_desc +sizeof(md_header_t) + idx * sizeof(md_element_t));106 return (md_element_t *) (mach_desc + 107 sizeof(md_header_t) + idx * sizeof(md_element_t)); 108 108 } 109 109 … … 114 114 uintptr_t name_offset = get_element(idx)->name_offset; 115 115 return (char *) mach_desc + sizeof(md_header_t) + 116 md_header->node_blk_sz + name_offset;116 md_header->node_blk_sz + name_offset; 117 117 } 118 118 … … 137 137 md_element_t *element = get_element(idx); 138 138 if (element->tag == PROP_VAL && 139 str_cmp(key, get_element_name(idx)) == 0) {139 str_cmp(key, get_element_name(idx)) == 0) { 140 140 *result = element->d.val; 141 141 return true; … … 161 161 md_element_t *element = get_element(idx); 162 162 if (element->tag == PROP_DATA && 163 str_cmp(key, get_element_name(idx)) == 0) {163 str_cmp(key, get_element_name(idx)) == 0) { 164 164 *result = (char *) mach_desc + sizeof(md_header_t) + 165 md_header->node_blk_sz + md_header->name_blk_sz +166 element->d.y.data_offset;165 md_header->node_blk_sz + md_header->name_blk_sz + 166 element->d.y.data_offset; 167 167 return true; 168 168 } … … 186 186 md_element_t *element = get_element(*it); 187 187 if (element->tag == PROP_ARC && 188 str_cmp("fwd", get_element_name(*it)) == 0) {188 str_cmp("fwd", get_element_name(*it)) == 0) { 189 189 return true; 190 190 } … … 289 289 290 290 if (element->tag == NODE && 291 str_cmp(name, get_element_name(*node)) == 0) {291 str_cmp(name, get_element_name(*node)) == 0) { 292 292 return true; 293 293 } … … 306 306 { 307 307 uint64_t retval = __hypercall_fast2(MACH_DESC, KA2PA(mach_desc), 308 MD_MAX_SIZE);308 MD_MAX_SIZE); 309 309 310 310 retval = retval; 311 if (retval != EOK) {311 if (retval != HV_EOK) { 312 312 printf("Could not retrieve machine description, error = %d.\n", 313 retval);313 retval); 314 314 } 315 315 } -
kernel/arch/sparc64/src/sun4v/sparc64.c
r2e99277 raa85487 50 50 #include <ddi/irq.h> 51 51 #include <string.h> 52 53 //MH54 52 #include <arch/drivers/niagara.h> 55 53 -
kernel/arch/sparc64/src/trap/exception.c
r2e99277 raa85487 46 46 void dump_istate(istate_t *istate) 47 47 { 48 char *tpcs, *tnpcs; 49 50 tpcs = symtab_fmt_name_lookup(istate->tpc); 51 tnpcs = symtab_fmt_name_lookup(istate->tnpc); 52 48 const char *tpcs = symtab_fmt_name_lookup(istate->tpc); 49 const char *tnpcs = symtab_fmt_name_lookup(istate->tnpc); 50 53 51 printf("TSTATE=%#" PRIx64 "\n", istate->tstate); 54 52 printf("TPC=%#" PRIx64 " (%s)\n", istate->tpc, tpcs); … … 162 160 fault_if_from_uspace(istate, "%s.", __func__); 163 161 dump_istate(istate); 164 //MH 165 // dump_sfsr_and_sfar(); 162 describe_dmmu_fault(); 166 163 panic("%s.", __func__); 167 164 } -
kernel/arch/sparc64/src/trap/interrupt.c
r2e99277 raa85487 1 1 /* 2 2 * Copyright (c) 2005 Jakub Jermar 3 * Copyright (c) 2009 Pavel Rimsky 3 4 * All rights reserved. 4 5 * … … 34 35 35 36 #include <arch/interrupt.h> 37 #include <arch/trap/interrupt.h> 36 38 #include <arch/sparc64.h> 37 #include <arch/trap/interrupt.h>38 39 #include <interrupt.h> 39 40 #include <ddi/irq.h> … … 60 61 exc_register(n - 1, name, f); 61 62 } 62 63 /** Process hardware interrupt.64 *65 * @param n Ignored.66 * @param istate Ignored.67 */68 void interrupt(int n, istate_t *istate)69 {70 uint64_t status;71 uint64_t intrcv;72 uint64_t data0;73 status = asi_u64_read(ASI_INTR_DISPATCH_STATUS, 0);74 if (status & (!INTR_DISPATCH_STATUS_BUSY))75 panic("Interrupt Dispatch Status busy bit not set.");76 77 intrcv = asi_u64_read(ASI_INTR_RECEIVE, 0);78 #if defined (US)79 data0 = asi_u64_read(ASI_INTR_R, ASI_UDB_INTR_R_DATA_0);80 #elif defined (US3)81 data0 = asi_u64_read(ASI_INTR_R, VA_INTR_R_DATA_0);82 #endif83 84 irq_t *irq = irq_dispatch_and_lock(data0);85 if (irq) {86 /*87 * The IRQ handler was found.88 */89 irq->handler(irq);90 /*91 * See if there is a clear-interrupt-routine and call it.92 */93 if (irq->cir) {94 irq->cir(irq->cir_arg, irq->inr);95 }96 spinlock_unlock(&irq->lock);97 } else if (data0 > config.base) {98 /*99 * This is a cross-call.100 * data0 contains address of the kernel function.101 * We call the function only after we verify102 * it is one of the supported ones.103 */104 #ifdef CONFIG_SMP105 if (data0 == (uintptr_t) tlb_shootdown_ipi_recv) {106 tlb_shootdown_ipi_recv();107 }108 #endif109 } else {110 /*111 * Spurious interrupt.112 */113 #ifdef CONFIG_DEBUG114 printf("cpu%u: spurious interrupt (intrcv=%#" PRIx64115 ", data0=%#" PRIx64 ")\n", CPU->id, intrcv, data0);116 #endif117 }118 119 membar();120 asi_u64_write(ASI_INTR_RECEIVE, 0, 0);121 }122 123 63 /** @} 124 64 */ -
kernel/arch/sparc64/src/trap/sun4v/trap_table.S
r2e99277 raa85487 48 48 #include <arch/stack.h> 49 49 #include <arch/sun4v/regdef.h> 50 #include <arch/sun4v/arch.h> 51 #include <arch/sun4v/cpu.h> 50 52 51 53 #define TABLE_SIZE TRAP_TABLE_SIZE … … 60 62 61 63 /* TT = 0x08, TL = 0, instruction_access_exception */ 64 /* TT = 0x08, TL = 0, IAE_privilege_violation on UltraSPARC T2 */ 62 65 .org trap_table + TT_INSTRUCTION_ACCESS_EXCEPTION*ENTRY_SIZE 63 66 .global instruction_access_exception_tl0 64 67 instruction_access_exception_tl0: 65 /*wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate 66 PREEMPTIBLE_HANDLER instruction_access_exception*/ 68 PREEMPTIBLE_HANDLER instruction_access_exception 69 70 /* TT = 0x09, TL = 0, instruction_access_mmu_miss */ 71 .org trap_table + TT_INSTRUCTION_ACCESS_MMU_MISS*ENTRY_SIZE 72 .global instruction_access_mmu_miss_handler_tl0 73 ba fast_instruction_access_mmu_miss_handler_tl0 74 nop 67 75 68 76 /* TT = 0x0a, TL = 0, instruction_access_error */ … … 72 80 PREEMPTIBLE_HANDLER instruction_access_error 73 81 82 /* TT = 0x0b, TL = 0, IAE_unauth_access */ 83 .org trap_table + TT_IAE_UNAUTH_ACCESS*ENTRY_SIZE 84 .global iae_unauth_access_tl0 85 iae_unauth_access_tl0: 86 PREEMPTIBLE_HANDLER instruction_access_exception 87 88 /* TT = 0x0c, TL = 0, IAE_nfo_page */ 89 .org trap_table + TT_IAE_NFO_PAGE*ENTRY_SIZE 90 .global iae_nfo_page_tl0 91 iae_nfo_page_tl0: 92 PREEMPTIBLE_HANDLER instruction_access_exception 93 74 94 /* TT = 0x10, TL = 0, illegal_instruction */ 75 95 .org trap_table + TT_ILLEGAL_INSTRUCTION*ENTRY_SIZE … … 96 116 PREEMPTIBLE_HANDLER unimplemented_STD 97 117 118 /* TT = 0x14, TL = 0, DAE_invalid_asi */ 119 .org trap_table + TT_DAE_INVALID_ASI*ENTRY_SIZE 120 .global dae_invalid_asi_tl0 121 dae_invalid_asi_tl0: 122 PREEMPTIBLE_HANDLER data_access_exception 123 124 /* TT = 0x15, TL = 0, DAE_privilege_violation */ 125 .org trap_table + TT_DAE_PRIVILEGE_VIOLATION*ENTRY_SIZE 126 .global dae_privilege_violation_tl0 127 dae_privilege_violation_tl0: 128 PREEMPTIBLE_HANDLER 
data_access_exception 129 130 /* TT = 0x16, TL = 0, DAE_nc_page */ 131 .org trap_table + TT_DAE_NC_PAGE*ENTRY_SIZE 132 .global dae_nc_page_tl0 133 dae_nc_page_tl0: 134 PREEMPTIBLE_HANDLER data_access_exception 135 136 /* TT = 0x17, TL = 0, DAE_nfo_page */ 137 .org trap_table + TT_DAE_NFO_PAGE*ENTRY_SIZE 138 .global dae_nfo_page_tl0 139 dae_nfo_page_tl0: 140 PREEMPTIBLE_HANDLER data_access_exception 141 98 142 /* TT = 0x20, TL = 0, fb_disabled handler */ 99 143 .org trap_table + TT_FP_DISABLED*ENTRY_SIZE … … 133 177 134 178 /* TT = 0x30, TL = 0, data_access_exception */ 179 /* TT = 0x30, TL = 0, DAE_side_effect_page for UltraPSARC T2 */ 135 180 .org trap_table + TT_DATA_ACCESS_EXCEPTION*ENTRY_SIZE 136 181 .global data_access_exception_tl0 137 182 data_access_exception_tl0: 138 wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate139 183 PREEMPTIBLE_HANDLER data_access_exception 184 185 /* TT = 0x31, TL = 0, data_access_mmu_miss */ 186 .org trap_table + TT_DATA_ACCESS_MMU_MISS*ENTRY_SIZE 187 .global data_access_mmu_miss_tl0 188 data_access_mmu_miss_tl0: 189 ba fast_data_access_mmu_miss_handler_tl0 190 nop 140 191 141 192 /* TT = 0x32, TL = 0, data_access_error */ … … 271 322 INTERRUPT_LEVEL_N_HANDLER 15 272 323 273 /* TT = 0x60, TL = 0, interrupt_vector_trap handler */274 .org trap_table + TT_INTERRUPT_VECTOR_TRAP*ENTRY_SIZE275 .global interrupt_vector_trap_handler_tl0276 interrupt_vector_trap_handler_tl0:277 INTERRUPT_VECTOR_TRAP_HANDLER278 279 324 /* TT = 0x64, TL = 0, fast_instruction_access_MMU_miss */ 280 325 .org trap_table + TT_FAST_INSTRUCTION_ACCESS_MMU_MISS*ENTRY_SIZE … … 294 339 fast_data_access_protection_handler_tl0: 295 340 FAST_DATA_ACCESS_PROTECTION_HANDLER 0 341 342 /* TT = 0x7c, TL = 0, cpu_mondo */ 343 .org trap_table + TT_CPU_MONDO*ENTRY_SIZE 344 .global cpu_mondo_handler_tl0 345 cpu_mondo_handler_tl0: 346 PREEMPTIBLE_HANDLER cpu_mondo 296 347 297 348 /* TT = 0x80, TL = 0, spill_0_normal handler */ … … 352 403 353 404 /* TT = 0x08, TL > 0, 
instruction_access_exception */ 405 /* TT = 0x08, TL > 0, IAE_privilege_violation on UltraSPARC T2 */ 354 406 .org trap_table + (TT_INSTRUCTION_ACCESS_EXCEPTION+512)*ENTRY_SIZE 355 407 .global instruction_access_exception_tl1 356 408 instruction_access_exception_tl1: 357 409 wrpr %g0, 1, %tl 358 wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate359 410 PREEMPTIBLE_HANDLER instruction_access_exception 411 412 /* TT = 0x09, TL > 0, instruction_access_mmu_miss */ 413 .org trap_table + (TT_INSTRUCTION_ACCESS_MMU_MISS+512)*ENTRY_SIZE 414 .global instruction_access_mmu_miss_handler_tl1 415 wrpr %g0, 1, %tl 416 ba fast_instruction_access_mmu_miss_handler_tl0 417 nop 360 418 361 419 /* TT = 0x0a, TL > 0, instruction_access_error */ … … 366 424 PREEMPTIBLE_HANDLER instruction_access_error 367 425 426 /* TT = 0x0b, TL > 0, IAE_unauth_access */ 427 .org trap_table + (TT_IAE_UNAUTH_ACCESS+512)*ENTRY_SIZE 428 .global iae_unauth_access_tl1 429 iae_unauth_access_tl1: 430 wrpr %g0, 1, %tl 431 PREEMPTIBLE_HANDLER instruction_access_exception 432 433 /* TT = 0x0c, TL > 0, IAE_nfo_page */ 434 .org trap_table + (TT_IAE_NFO_PAGE+512)*ENTRY_SIZE 435 .global iae_nfo_page_tl1 436 iae_nfo_page_tl1: 437 wrpr %g0, 1, %tl 438 PREEMPTIBLE_HANDLER instruction_access_exception 439 368 440 /* TT = 0x10, TL > 0, illegal_instruction */ 369 441 .org trap_table + (TT_ILLEGAL_INSTRUCTION+512)*ENTRY_SIZE … … 372 444 wrpr %g0, 1, %tl 373 445 PREEMPTIBLE_HANDLER illegal_instruction 446 447 /* TT = 0x14, TL > 0, DAE_invalid_asi */ 448 .org trap_table + (TT_DAE_INVALID_ASI+512)*ENTRY_SIZE 449 .global dae_invalid_asi_tl1 450 dae_invalid_asi_tl1: 451 wrpr %g0, 1, %tl 452 PREEMPTIBLE_HANDLER data_access_exception 453 454 /* TT = 0x15, TL > 0, DAE_privilege_violation */ 455 .org trap_table + (TT_DAE_PRIVILEGE_VIOLATION+512)*ENTRY_SIZE 456 .global dae_privilege_violation_tl1 457 dae_privilege_violation_tl1: 458 wrpr %g0, 1, %tl 459 PREEMPTIBLE_HANDLER data_access_exception 460 461 /* TT = 0x16, TL > 0, 
DAE_nc_page */ 462 .org trap_table + (TT_DAE_NC_PAGE+512)*ENTRY_SIZE 463 .global dae_nc_page_tl1 464 dae_nc_page_tl1: 465 wrpr %g0, 1, %tl 466 PREEMPTIBLE_HANDLER data_access_exception 467 468 /* TT = 0x17, TL > 0, DAE_nfo_page */ 469 .org trap_table + (TT_DAE_NFO_PAGE+512)*ENTRY_SIZE 470 .global dae_nfo_page_tl1 471 dae_nfo_page_tl1: 472 wrpr %g0, 1, %tl 473 PREEMPTIBLE_HANDLER data_access_exception 374 474 375 475 /* TT = 0x24, TL > 0, clean_window handler */ … … 390 490 .global data_access_exception_tl1 391 491 data_access_exception_tl1: 392 wrpr %g0, 1, %tl492 /*wrpr %g0, 1, %tl 393 493 wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate 394 PREEMPTIBLE_HANDLER data_access_exception 494 PREEMPTIBLE_HANDLER data_access_exception*/ 495 496 /* TT = 0x31, TL > 0, data_access_mmu_miss */ 497 .org trap_table + (TT_DATA_ACCESS_MMU_MISS+512)*ENTRY_SIZE 498 .global data_access_mmu_miss_tl1 499 data_access_mmu_miss_tl1: 500 ba fast_data_access_mmu_miss_handler_tl1 501 nop 502 395 503 396 504 /* TT = 0x32, TL > 0, data_access_error */ … … 419 527 fast_data_access_protection_handler_tl1: 420 528 FAST_DATA_ACCESS_PROTECTION_HANDLER 1 529 530 /* TT = 0x7c, TL > 0, cpu_mondo */ 531 .org trap_table + (TT_CPU_MONDO+512)*ENTRY_SIZE 532 .global cpu_mondo_handler_tl1 533 cpu_mondo_handler_tl1: 534 wrpr %g0, %tl 535 PREEMPTIBLE_HANDLER cpu_mondo 421 536 422 537 /* TT = 0x80, TL > 0, spill_0_normal handler */ … … 660 775 .endm 661 776 662 663 #if 0664 777 /* 665 778 * Preemptible trap handler for handling traps from kernel. … … 677 790 nop ! it will be easy to find 678 791 679 /* prevent unnecessary CLEANWIN exceptions */680 wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(0), %wstate681 1:682 /*683 * Prevent SAVE instruction from causing a spill exception. 
If the684 * CANSAVE register is zero, explicitly spill register window685 * at CWP + 2.686 */687 688 rdpr %cansave, %g3689 brnz %g3, 2f690 nop691 INLINE_SPILL %g3, %g4692 693 2:694 /* ask for new register window */695 save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp696 697 /* copy higher level routine's address and its argument */698 mov %g1, %l0699 mov %g2, %o0700 701 /*702 * Save TSTATE, TPC and TNPC aside.703 */704 rdpr %tstate, %g1705 rdpr %tpc, %g2706 rdpr %tnpc, %g3707 708 stx %g1, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TSTATE]709 stx %g2, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TPC]710 stx %g3, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC]711 712 /*713 * Save the Y register.714 * This register is deprecated according to SPARC V9 specification715 * and is only present for backward compatibility with previous716 * versions of the SPARC architecture.717 * Surprisingly, gcc makes use of this register without a notice.718 */719 rd %y, %g4720 stx %g4, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_Y]721 722 /* switch to TL = 0, explicitly enable FPU */723 wrpr %g0, 0, %tl724 wrpr %g0, 0, %gl725 wrpr %g0, PSTATE_PRIV_BIT | PSTATE_PEF_BIT, %pstate726 727 /* g1 -> l1, ..., g7 -> l7 */728 SAVE_GLOBALS729 730 /* call higher-level service routine, pass istate as its 2nd parameter */731 call %l0732 add %sp, PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC, %o1733 734 /* l1 -> g1, ..., l7 -> g7 */735 RESTORE_GLOBALS736 737 /* we must prserve the PEF bit */738 rdpr %pstate, %l1739 740 /* TL := 1, GL := 1 */741 wrpr %g0, PSTATE_PRIV_BIT, %pstate742 wrpr %g0, 1, %tl743 wrpr %g0, 1, %gl744 745 /* Read TSTATE, TPC and TNPC from saved copy. 
*/746 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TSTATE], %g1747 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TPC], %g2748 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC], %g3749 750 /* Copy PSTATE.PEF to the in-register copy of TSTATE. */751 and %l1, PSTATE_PEF_BIT, %l1752 sllx %l1, TSTATE_PSTATE_SHIFT, %l1753 sethi %hi(TSTATE_PEF_BIT), %g4 ! reset the PEF bit to 0 ...754 andn %g1, %g4, %g1755 or %g1, %l1, %g1 ! ... "or" it with saved PEF756 757 /* Restore TSTATE, TPC and TNPC from saved copies. */758 wrpr %g1, 0, %tstate759 wrpr %g2, 0, %tpc760 wrpr %g3, 0, %tnpc761 762 /* Restore Y. */763 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_Y], %g4764 wr %g4, %y765 766 /* If TSTATE.CWP + 1 == CWP, then we do not have to fix CWP. */767 and %g1, TSTATE_CWP_MASK, %l0768 inc %l0769 and %l0, NWINDOWS - 1, %l0 ! %l0 mod NWINDOWS770 rdpr %cwp, %l1771 cmp %l0, %l1772 bz 4f ! CWP is ok773 nop774 775 3:776 /*777 * Fix CWP.778 * In order to recapitulate, the input registers in the current779 * window are the output registers of the window to which we want780 * to restore. 
Because the fill trap fills only input and local781 * registers of a window, we need to preserve those output782 * registers manually.783 */784 mov %sp, %g2785 stx %i0, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I0]786 stx %i1, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I1]787 stx %i2, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I2]788 stx %i3, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I3]789 stx %i4, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I4]790 stx %i5, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I5]791 stx %i6, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I6]792 stx %i7, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I7]793 wrpr %l0, 0, %cwp794 mov %g2, %sp795 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I0], %i0796 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I1], %i1797 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I2], %i2798 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I3], %i3799 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I4], %i4800 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I5], %i5801 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I6], %i6802 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I7], %i7803 804 4:805 /*806 * Prevent RESTORE instruction from causing a fill exception. 
If the807 * CANRESTORE register is zero, explicitly fill register window808 * at CWP - 1.809 */810 rdpr %canrestore, %g1811 brnz %g1, 5f812 nop813 INLINE_FILL %g3, %g4814 815 5:816 restore817 818 retry819 .endm820 821 #endif822 823 /*824 * Preemptible trap handler for handling traps from kernel.825 */826 .macro PREEMPTIBLE_HANDLER_KERNEL827 828 /*829 * ASSERT(%tl == 1)830 */831 rdpr %tl, %g3832 cmp %g3, 1833 be 1f834 nop835 0: ba 0b ! this is for debugging, if we ever get here836 nop ! it will be easy to find837 838 792 1: 839 793 /* prevent unnecessary CLEANWIN exceptions */ … … 872 826 retry 873 827 .endm 874 875 876 828 877 829 /* … … 1092 1044 and %g1, NWINDOWS - 1, %g1 1093 1045 wrpr %g1, 0, %cwp ! CWP-- 1094 1046 1095 1047 .if \is_syscall 1096 1048 done … … 1100 1052 1101 1053 .endm 1102 1103 1104 1054 1105 1055 /* Preemptible trap handler for TL=1. … … 1132 1082 trap_instruction_handler: 1133 1083 PREEMPTIBLE_HANDLER_TEMPLATE 1 1134
Note:
See TracChangeset
for help on using the changeset viewer.
