Changeset f47fd19 in mainline for kernel/arch/sparc64/src
- Timestamp:
- 2006-08-21T13:36:34Z (19 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- a7961271
- Parents:
- ee289cf0
- Location:
- kernel/arch/sparc64/src
- Files:
- 2 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/sparc64/src/mm/tlb.c
ree289cf0 rf47fd19 35 35 #include <arch/mm/tlb.h> 36 36 #include <mm/tlb.h> 37 #include <mm/as.h> 38 #include <mm/asid.h> 37 39 #include <arch/mm/frame.h> 38 40 #include <arch/mm/page.h> 39 41 #include <arch/mm/mmu.h> 40 #include <mm/asid.h> 42 #include <arch/interrupt.h> 43 #include <arch.h> 41 44 #include <print.h> 42 45 #include <arch/types.h> … … 47 50 #include <arch/asm.h> 48 51 #include <symtab.h> 52 53 static void dtlb_pte_copy(pte_t *t); 54 static void do_fast_data_access_mmu_miss_fault(istate_t *istate, const char *str); 49 55 50 56 char *context_encoding[] = { … … 100 106 } 101 107 108 void dtlb_pte_copy(pte_t *t) 109 { 110 } 111 102 112 /** ITLB miss handler. */ 103 void fast_instruction_access_mmu_miss( void)113 void fast_instruction_access_mmu_miss(int n, istate_t *istate) 104 114 { 105 115 panic("%s\n", __FUNCTION__); 106 116 } 107 117 108 /** DTLB miss handler. */ 109 void fast_data_access_mmu_miss(void) 118 /** DTLB miss handler. 119 * 120 * Note that some faults (e.g. kernel faults) were already resolved 121 * by the low-level, assembly language part of the fast_data_access_mmu_miss 122 * handler. 123 */ 124 void fast_data_access_mmu_miss(int n, istate_t *istate) 110 125 { 111 126 tlb_tag_access_reg_t tag; 112 uintptr_t tpc;113 char *tpc_str;127 uintptr_t va; 128 pte_t *t; 114 129 115 130 tag.value = dtlb_tag_access_read(); 116 if (tag.context != ASID_KERNEL || tag.vpn == 0) { 117 tpc = tpc_read(); 118 tpc_str = get_symtab_entry(tpc); 119 120 printf("Faulting page: %p, ASID=%d\n", tag.vpn * PAGE_SIZE, tag.context); 121 printf("TPC=%p, (%s)\n", tpc, tpc_str ? tpc_str : "?"); 122 panic("%s\n", __FUNCTION__); 123 } 124 125 /* 126 * Identity map piece of faulting kernel address space. 
127 */ 128 dtlb_insert_mapping(tag.vpn * PAGE_SIZE, tag.vpn * FRAME_SIZE, PAGESIZE_8K, false, true); 131 va = tag.vpn * PAGE_SIZE; 132 if (tag.context == ASID_KERNEL) { 133 if (!tag.vpn) { 134 /* NULL access in kernel */ 135 do_fast_data_access_mmu_miss_fault(istate, __FUNCTION__); 136 } 137 do_fast_data_access_mmu_miss_fault(istate, "Unexpected kernel page fault."); 138 } 139 140 page_table_lock(AS, true); 141 t = page_mapping_find(AS, va); 142 if (t) { 143 /* 144 * The mapping was found in the software page hash table. 145 * Insert it into DTLB. 146 */ 147 dtlb_pte_copy(t); 148 page_table_unlock(AS, true); 149 } else { 150 /* 151 * Forward the page fault to the address space page fault handler. 152 */ 153 page_table_unlock(AS, true); 154 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { 155 do_fast_data_access_mmu_miss_fault(istate, __FUNCTION__); 156 } 157 } 129 158 } 130 159 131 160 /** DTLB protection fault handler. */ 132 void fast_data_access_protection( void)161 void fast_data_access_protection(int n, istate_t *istate) 133 162 { 134 163 panic("%s\n", __FUNCTION__); … … 162 191 } 163 192 193 void do_fast_data_access_mmu_miss_fault(istate_t *istate, const char *str) 194 { 195 tlb_tag_access_reg_t tag; 196 uintptr_t va; 197 char *tpc_str = get_symtab_entry(istate->tpc); 198 199 tag.value = dtlb_tag_access_read(); 200 va = tag.vpn * PAGE_SIZE; 201 202 printf("Faulting page: %p, ASID=%d\n", va, tag.context); 203 printf("TPC=%p, (%s)\n", istate->tpc, tpc_str); 204 panic("%s\n", str); 205 } 206 164 207 /** Invalidate all unlocked ITLB and DTLB entries. */ 165 208 void tlb_invalidate_all(void) -
kernel/arch/sparc64/src/trap/trap_table.S
ree289cf0 rf47fd19 44 44 #include <arch/trap/mmu.h> 45 45 #include <arch/stack.h> 46 #include <arch/regdef.h> 46 47 47 48 #define TABLE_SIZE TRAP_TABLE_SIZE … … 276 277 277 278 278 /* Preemptible trap handler. 279 * 280 * This trap handler makes arrangements to 281 * make calling scheduler() possible. 282 * 283 * The caller is responsible for doing save 284 * and allocating PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE 285 * bytes on stack. 279 /* Preemptible trap handler for TL=1. 280 * 281 * This trap handler makes arrangements to make calling of scheduler() from 282 * within a trap context possible. It is guaranteed to function only when traps 283 * are not nested (i.e. for TL=1). 284 * 285 * Every trap handler on TL=1 that makes a call to the scheduler needs to 286 * be based on this function. The reason behind it is that the nested 287 * trap levels and the automatic saving of the interrupted context by hardware 288 * does not work well together with scheduling (i.e. a thread cannot be rescheduled 289 * with TL>0). Therefore it is necessary to eliminate the effect of trap levels 290 * by software and save the necessary state on the kernel stack. 291 * 292 * Note that for traps with TL>1, more state needs to be saved. This function 293 * is therefore not going to work when TL>1. 294 * 295 * The caller is responsible for doing SAVE and allocating 296 * PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE bytes on the stack. 286 297 * 287 298 * Input registers: … … 300 311 rdpr %pstate, %g4 301 312 313 /* 314 * The following memory accesses will not fault 315 * because special provisions are made to have 316 * the kernel stack of THREAD locked in DTLB. 317 */ 302 318 stx %g1, [%fp + STACK_BIAS + SAVED_TSTATE] 303 319 stx %g2, [%fp + STACK_BIAS + SAVED_TPC] … … 314 330 * - switch to normal globals. 315 331 */ 316 and %g4, ~ 1, %g4 ! 
mask alternate globals332 and %g4, ~(PSTATE_AG_BIT|PSTATE_IG_BIT|PSTATE_MG_BIT), %g4 317 333 wrpr %g4, 0, %pstate 318 334 … … 325 341 * Call the higher-level handler. 326 342 */ 343 mov %fp, %o1 ! calculate istate address 327 344 call %l0 328 nop329 330 /* 331 * Restore the normal global register set.345 add %o1, STACK_BIAS + SAVED_PSTATE, %o1 ! calculate istate address 346 347 /* 348 * Restore the normal global register set. 332 349 */ 333 350 RESTORE_GLOBALS … … 335 352 /* 336 353 * Restore PSTATE from saved copy. 337 * Alternate globals become active.354 * Alternate/Interrupt/MM globals become active. 338 355 */ 339 356 ldx [%fp + STACK_BIAS + SAVED_PSTATE], %l4 … … 358 375 359 376 /* 360 * On execution of retry instruction, CWP will be restored from TSTATE register.361 * However, because of scheduling, it is possible that CWP in saved TSTATE362 * is different from current CWP. The following chunk of code fixes CWP363 * in the saved copy of TSTATE.377 * On execution of the RETRY instruction, CWP will be restored from the TSTATE 378 * register. However, because of scheduling, it is possible that CWP in the saved 379 * TSTATE is different from the current CWP. The following chunk of code fixes 380 * CWP in the saved copy of TSTATE. 364 381 */ 365 382 rdpr %cwp, %g4 ! read current CWP
Note:
See TracChangeset
for help on using the changeset viewer.