Changeset ba50a34 in mainline for kernel/arch/sparc64/src
- Timestamp: 2009-12-06T18:29:57Z (16 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 5e53e02
- Parents: eb79d60
- Location: kernel/arch/sparc64/src
- Files: 4 edited
  - mm/sun4v/as.c (modified) (3 diffs)
  - mm/sun4v/tlb.c (modified) (14 diffs)
  - trap/exception.c (modified) (1 diff)
  - trap/sun4v/trap_table.S (modified) (1 diff)
kernel/arch/sparc64/src/mm/sun4v/as.c (eb79d60 → ba50a34)

 {
 #ifdef CONFIG_TSB
-    /*
-     * The order must be calculated with respect to the emulated
-     * 16K page size.
-     */
-    int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
-        sizeof(tsb_entry_t)) >> FRAME_WIDTH);
+    int order = fnzb32(
+        (TSB_ENTRY_COUNT * sizeof(tsb_entry_t)) >> FRAME_WIDTH);
 
-    uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);
+    uintptr_t tsb = (uintptr_t) frame_alloc(order, flags);
 
     if (!tsb)
         return -1;
 
-    as->arch.itsb = (tsb_entry_t *) tsb;
-    as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
-        sizeof(tsb_entry_t));
+    as->arch.tsb_description.page_size = PAGESIZE_8K;
+    as->arch.tsb_description.associativity = 1;
+    as->arch.tsb_description.num_ttes = TSB_ENTRY_COUNT;
+    as->arch.tsb_description.pgsize_mask = 1 << PAGESIZE_8K;
+    as->arch.tsb_description.tsb_base = tsb;
+    as->arch.tsb_description.reserved = 0;
+    as->arch.tsb_description.context = 0;
 
-    memsetb(as->arch.itsb,
-        (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0);
+    memsetb((void *) PA2KA(as->arch.tsb_description.tsb_base),
+        TSB_ENTRY_COUNT * sizeof(tsb_entry_t), 0);
 #endif
     return 0;
…
 {
 #ifdef CONFIG_TSB
-    /*
-     * The count must be calculated with respect to the emulated 16K page
-     * size.
-     */
-    size_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
-        sizeof(tsb_entry_t)) >> FRAME_WIDTH;
-    frame_free(KA2PA((uintptr_t) as->arch.itsb));
+    count_t cnt = (TSB_ENTRY_COUNT * sizeof(tsb_entry_t)) >> FRAME_WIDTH;
+    frame_free((uintptr_t) as->arch.tsb_description.tsb_base);
     return cnt;
 #else
…
 void as_install_arch(as_t *as)
 {
-#if 0
-    tlb_context_reg_t ctx;
-
-    /*
-     * Note that we don't and may not lock the address space. That's ok
-     * since we only read members that are currently read-only.
-     *
-     * Moreover, the as->asid is protected by asidlock, which is being held.
-     */
-
-    /*
-     * Write ASID to secondary context register. The primary context
-     * register has to be set from TL>0 so it will be filled from the
-     * secondary context register from the TL=1 code just before switch to
-     * userspace.
-     */
-    ctx.v = 0;
-    ctx.context = as->asid;
-    mmu_secondary_context_write(ctx.v);
-
-#ifdef CONFIG_TSB
-    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
-
-    ASSERT(as->arch.itsb && as->arch.dtsb);
-
-    uintptr_t tsb = (uintptr_t) as->arch.itsb;
-
-    if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
-        /*
-         * TSBs were allocated from memory not covered
-         * by the locked 4M kernel DTLB entry. We need
-         * to map both TSBs explicitly.
-         */
-        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
-        dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
-    }
-
-    /*
-     * Setup TSB Base registers.
-     */
-    tsb_base_reg_t tsb_base;
-
-    tsb_base.value = 0;
-    tsb_base.size = TSB_SIZE;
-    tsb_base.split = 0;
-
-    tsb_base.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH;
-    itsb_base_write(tsb_base.value);
-    tsb_base.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH;
-    dtsb_base_write(tsb_base.value);
-
-#if defined (US3)
-    /*
-     * Clear the extension registers.
-     * In HelenOS, primary and secondary context registers contain
-     * equal values and kernel misses (context 0, i.e. the nucleus context)
-     * are excluded from the TSB miss handler, so it makes no sense
-     * to have separate TSBs for primary, secondary and nucleus contexts.
-     * Clearing the extension registers will ensure that the value of the
-     * TSB Base register will be used as an address of TSB, making the code
-     * compatible with the US port.
-     */
-    itsb_primary_extension_write(0);
-    itsb_nucleus_extension_write(0);
-    dtsb_primary_extension_write(0);
-    dtsb_secondary_extension_write(0);
-    dtsb_nucleus_extension_write(0);
-#endif
-#endif
-#endif
+    mmu_secondary_context_write(as->asid);
 }
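A note on the as.c hunks: instead of keeping separate ITSB/DTSB buffers and writing the TSB base registers directly (the sun4u approach), the address space now fills in a single TSB description that is handed over to the hypervisor. Judging only from the fields assigned above, the structure presumably looks something like the sketch below; the type name tsb_descr_t, the field order, and the field widths are assumptions modeled on the sun4v hypervisor's TSB-description format, not part of this changeset:

    #include <stdint.h>

    /* Hypothetical layout of the TSB description filled in by the new
     * as_create_arch() code. The hypervisor walks this TSB on behalf of
     * the guest, so only a description needs to be registered. */
    typedef struct tsb_descr {
            uint16_t page_size;     /* smallest page size indexed (PAGESIZE_8K) */
            uint16_t associativity; /* 1 = direct-mapped TSB */
            uint32_t num_ttes;      /* capacity of the TSB in TTEs */
            uint32_t context;       /* 0 = take context from the context register */
            uint32_t pgsize_mask;   /* bitmask of page sizes cached in this TSB */
            uint64_t tsb_base;      /* real (physical) base address of the TSB */
            uint64_t reserved;
    } __attribute__((packed)) tsb_descr_t;

Consistent with this, tsb_base now holds a real address (frame_alloc() is called without FRAME_KA), which is why the new memsetb() call converts it with PA2KA() before zeroing.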
kernel/arch/sparc64/src/mm/sun4v/tlb.c (eb79d60 → ba50a34)

 #endif
 
-static void dtlb_pte_copy(pte_t *, size_t, bool);
-static void itlb_pte_copy(pte_t *, size_t);
+//static void dtlb_pte_copy(pte_t *, size_t, bool);
+static void itlb_pte_copy(pte_t *);
 static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
-static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
-    const char *);
-static void do_fast_data_access_protection_fault(istate_t *,
-    tlb_tag_access_reg_t, const char *);
+//static void do_fast_data_access_mmu_miss_fault(istate_t *, uint64_t,
+//    const char *);
+//static void do_fast_data_access_protection_fault(istate_t *,
+//    uint64_t, const char *);
 
 char *context_encoding[] = {
…
 }
 
+#if 0
 /** Copy PTE to TLB.
  *
…
 void dtlb_pte_copy(pte_t *t, size_t index, bool ro)
 {
-#if 0
     tlb_tag_access_reg_t tag;
     tlb_data_t data;
…
 
     dtlb_data_in_write(data.value);
-#endif
-}
+}
+#endif
 
 /** Copy PTE to ITLB.
  *
  * @param t Page Table Entry to be copied.
- * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
- */
-void itlb_pte_copy(pte_t *t, size_t index)
-{
-#if 0
-    tlb_tag_access_reg_t tag;
-    tlb_data_t data;
-    page_address_t pg;
-    frame_address_t fr;
-
-    pg.address = t->page + (index << MMU_PAGE_WIDTH);
-    fr.address = t->frame + (index << MMU_PAGE_WIDTH);
-
-    tag.value = 0;
-    tag.context = t->as->asid;
-    tag.vpn = pg.vpn;
-
-    itlb_tag_access_write(tag.value);
+ */
+void itlb_pte_copy(pte_t *t)
+{
+    tte_data_t data;
 
     data.value = 0;
     data.v = true;
+    data.nfo = false;
+    data.ra = (t->frame) >> FRAME_WIDTH;
+    data.ie = false;
+    data.e = false;
+    data.cp = t->c;
+    data.cv = false;
+    data.p = t->k;
+    data.x = true;
+    data.w = false;
     data.size = PAGESIZE_8K;
-    data.pfn = fr.pfn;
-    data.l = false;
-    data.cp = t->c;
-    data.p = t->k;    /* p like privileged */
-    data.w = false;
-    data.g = t->g;
 
-    itlb_data_in_write(data.value);
-#endif
+    __hypercall_hyperfast(
+        t->page, t->as->asid, data.value, MMU_FLAG_ITLB, 0, MMU_MAP_ADDR);
 }
…
 void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
 {
-    asm volatile ("sethi 0x41906, %g0");
-    uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
-    size_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
+    uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
     pte_t *t;
 
     page_table_lock(AS, true);
-    t = page_mapping_find(AS, page_16k);
+    t = page_mapping_find(AS, va);
+
     if (t && PTE_EXECUTABLE(t)) {
…
          */
         t->a = true;
-        itlb_pte_copy(t, index);
+        itlb_pte_copy(t);
 #ifdef CONFIG_TSB
-        itsb_pte_copy(t, index);
+        itsb_pte_copy(t);
 #endif
         page_table_unlock(AS, true);
…
          */
         page_table_unlock(AS, true);
-        if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) ==
-            AS_PF_FAULT) {
+        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
             do_fast_instruction_access_mmu_miss_fault(istate,
                 __func__);
…
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
+ *
+ * @param page_and_ctx A 64-bit value describing the fault. The most
+ *                     significant 51 bits of the value contain the virtual
+ *                     address which caused the fault truncated to the page
+ *                     boundary. The least significant 13 bits of the value
+ *                     contain the number of the context in which the fault
+ *                     occurred.
+ * @param istate       Interrupted state saved on the stack.
+ */
+void fast_data_access_mmu_miss(uint64_t page_and_ctx, istate_t *istate)
+{
+#if 0
+    pte_t *t;
+    uintptr_t va = DMISS_ADDRESS(page_and_ctx);
+    uint16_t ctx = DMISS_CONTEXT(page_and_ctx);
+
+    if (ctx == ASID_KERNEL) {
+        if (va == 0) {
+            /* NULL access in kernel */
+            do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
+                __func__);
+        }
+        do_fast_data_access_mmu_miss_fault(istate, page_and_ctx, "Unexpected "
+            "kernel page fault.");
+    }
+
+    page_table_lock(AS, true);
+    t = page_mapping_find(AS, va);
+    if (t) {
+        /*
+         * The mapping was found in the software page hash table.
+         * Insert it into DTLB.
+         */
+        t->a = true;
+        dtlb_pte_copy(t, true);
+#ifdef CONFIG_TSB
+        dtsb_pte_copy(t, true);
+#endif
+        page_table_unlock(AS, true);
+    } else {
+        /*
+         * Forward the page fault to the address space page fault
+         * handler.
+         */
+        page_table_unlock(AS, true);
+        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
+            do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
+                __func__);
+        }
+    }
+#endif
+}
+
+/** DTLB protection fault handler.
 *
 * @param tag Content of the TLB Tag Access register as it existed
…
 * @param istate Interrupted state saved on the stack.
 */
-void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
-{
-    uintptr_t page_8k;
-    uintptr_t page_16k;
-    size_t index;
+void fast_data_access_protection(uint64_t page_and_ctx, istate_t *istate)
+{
+#if 0
     pte_t *t;
 
-    page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
-    page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
-    index = tag.vpn % MMU_PAGES_PER_PAGE;
-
-    if (tag.context == ASID_KERNEL) {
-        if (!tag.vpn) {
-            /* NULL access in kernel */
-            do_fast_data_access_mmu_miss_fault(istate, tag,
-                __func__);
-        //MH
-        } else {
-        // } else if (page_8k >= end_of_identity) {
-            /*
-             * The kernel is accessing the I/O space.
-             * We still do identity mapping for I/O,
-             * but without caching.
-             */
-            dtlb_insert_mapping(page_8k, KA2PA(page_8k),
-                PAGESIZE_8K, false, false);
-            return;
-        }
-        do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
-            "kernel page fault.");
-    }
+    uintptr_t va = DMISS_ADDRESS(page_and_ctx);
+    uint16_t ctx = DMISS_CONTEXT(page_and_ctx);
 
     page_table_lock(AS, true);
-    t = page_mapping_find(AS, page_16k);
-    if (t) {
-        /*
-         * The mapping was found in the software page hash table.
-         * Insert it into DTLB.
-         */
-        t->a = true;
-        dtlb_pte_copy(t, index, true);
-#ifdef CONFIG_TSB
-        dtsb_pte_copy(t, index, true);
-#endif
-        page_table_unlock(AS, true);
-    } else {
-        /*
-         * Forward the page fault to the address space page fault
-         * handler.
-         */
-        page_table_unlock(AS, true);
-        if (as_page_fault(page_16k, PF_ACCESS_READ, istate) ==
-            AS_PF_FAULT) {
-            do_fast_data_access_mmu_miss_fault(istate, tag,
-                __func__);
-        }
-    }
-}
-
-/** DTLB protection fault handler.
- *
- * @param tag Content of the TLB Tag Access register as it existed
- *            when the trap happened. This is to prevent confusion
- *            created by clobbered Tag Access register during a nested
- *            DTLB miss.
- * @param istate Interrupted state saved on the stack.
- */
-void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
-{
-    uintptr_t page_16k;
-    size_t index;
-    pte_t *t;
-
-    page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
-    index = tag.vpn % MMU_PAGES_PER_PAGE;    /* 16K-page emulation */
-
-    page_table_lock(AS, true);
-    t = page_mapping_find(AS, page_16k);
+    t = page_mapping_find(AS, va);
     if (t && PTE_WRITABLE(t)) {
…
         t->a = true;
         t->d = true;
-        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
-            page_16k + index * MMU_PAGE_SIZE);
-        dtlb_pte_copy(t, index, false);
+        mmu_demap_page(va, ctx, MMU_FLAG_DTLB);
+        dtlb_pte_copy(t, false);
 #ifdef CONFIG_TSB
-        dtsb_pte_copy(t, index, false);
+        dtsb_pte_copy(t, false);
 #endif
         page_table_unlock(AS, true);
…
     }
     }
+#endif
 }
…
 * @param d TLB entry data
 */
-static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
-{
-#if 0
-    printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
-        "ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, "
-        "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
-        t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
-        d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
-#endif
-}
-
-#if defined (US)
-
-/** Print contents of both TLBs. */
 void tlb_print(void)
 {
-    int i;
-    tlb_data_t d;
-    tlb_tag_read_reg_t t;
-
-    printf("I-TLB contents:\n");
-    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
-        d.value = itlb_data_access_read(i);
-        t.value = itlb_tag_read_read(i);
-        print_tlb_entry(i, t, d);
-    }
-
-    printf("D-TLB contents:\n");
-    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
-        d.value = dtlb_data_access_read(i);
-        t.value = dtlb_tag_read_read(i);
-        print_tlb_entry(i, t, d);
-    }
-}
-
-#elif defined (US3)
-
-/** Print contents of all TLBs. */
-void tlb_print(void)
-{
-    int i;
-    tlb_data_t d;
-    tlb_tag_read_reg_t t;
-
-    printf("TLB_ISMALL contents:\n");
-    for (i = 0; i < tlb_ismall_size(); i++) {
-        d.value = dtlb_data_access_read(TLB_ISMALL, i);
-        t.value = dtlb_tag_read_read(TLB_ISMALL, i);
-        print_tlb_entry(i, t, d);
-    }
-
-    printf("TLB_IBIG contents:\n");
-    for (i = 0; i < tlb_ibig_size(); i++) {
-        d.value = dtlb_data_access_read(TLB_IBIG, i);
-        t.value = dtlb_tag_read_read(TLB_IBIG, i);
-        print_tlb_entry(i, t, d);
-    }
-
-    printf("TLB_DSMALL contents:\n");
-    for (i = 0; i < tlb_dsmall_size(); i++) {
-        d.value = dtlb_data_access_read(TLB_DSMALL, i);
-        t.value = dtlb_tag_read_read(TLB_DSMALL, i);
-        print_tlb_entry(i, t, d);
-    }
-
-    printf("TLB_DBIG_1 contents:\n");
-    for (i = 0; i < tlb_dbig_size(); i++) {
-        d.value = dtlb_data_access_read(TLB_DBIG_0, i);
-        t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
-        print_tlb_entry(i, t, d);
-    }
-
-    printf("TLB_DBIG_2 contents:\n");
-    for (i = 0; i < tlb_dbig_size(); i++) {
-        d.value = dtlb_data_access_read(TLB_DBIG_1, i);
-        t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
-        print_tlb_entry(i, t, d);
-    }
-}
-
-#endif
+    printf("Operation not possible on Niagara.\n");
+}
 
 void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
…
 }
 
+#if 0
 void do_fast_data_access_mmu_miss_fault(istate_t *istate,
-    tlb_tag_access_reg_t tag, const char *str)
-{
-    uintptr_t va;
-
-    va = tag.vpn << MMU_PAGE_WIDTH;
-    if (tag.context) {
-        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va,
-            tag.context);
+    uint64_t page_and_ctx, const char *str)
+{
+    if (DMISS_CONTEXT(page_and_ctx)) {
+        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, DMISS_ADDRESS(page_and_ctx),
+            DMISS_CONTEXT(page_and_ctx));
     }
     dump_istate(istate);
…
     panic("%s.", str);
 }
-
+#endif
+
+#if 0
 void do_fast_data_access_protection_fault(istate_t *istate,
-    tlb_tag_access_reg_t tag, const char *str)
-{
-    uintptr_t va;
-
-    va = tag.vpn << MMU_PAGE_WIDTH;
-
-    if (tag.context) {
-        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va,
-            tag.context);
-    }
-    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
+    uint64_t page_and_ctx, const char *str)
+{
+    if (DMISS_CONTEXT(page_and_ctx)) {
+        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, DMISS_ADDRESS(page_and_ctx),
+            DMISS_CONTEXT(page_and_ctx));
+    }
+    printf("Faulting page: %p, ASID=%d\n", DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
     dump_istate(istate);
     panic("%s.", str);
 }
-
-void dump_sfsr_and_sfar(void)
-{
-    tlb_sfsr_reg_t sfsr;
-    uintptr_t sfar;
-
-    sfsr.value = dtlb_sfsr_read();
-    sfar = dtlb_sfar_read();
-
-#if defined (US)
-    printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
-        "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
-        sfsr.ow, sfsr.fv);
-#elif defined (US3)
-    printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
-        "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
-        sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
-#endif
-
-    printf("DTLB SFAR: address=%p\n", sfar);
-
-    dtlb_sfsr_write(0);
+#endif
+
+/**
+ * Describes the exact condition which caused the last DMMU fault.
+ */
+void describe_dmmu_fault(void)
+{
+#if 0
+    uint64_t myid;
+    __hypercall_fast_ret1(0, 0, 0, 0, 0, CPU_MYID, &myid);
+
+    ASSERT(mmu_fsas[myid].dft < 16);
+
+    printf("condition which caused the fault: %s\n",
+        fault_types[mmu_fsas[myid].dft]);
+}
+
+/** Invalidate all unlocked ITLB and DTLB entries. */
+void tlb_invalidate_all(void)
+{
+    uint64_t errno = __hypercall_fast3(MMU_DEMAP_ALL, 0, 0,
+        MMU_FLAG_DTLB | MMU_FLAG_ITLB);
+    if (errno != EOK) {
+        panic("Error code = %d.\n", errno);
+    }
+#endif
 }
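A note on the tlb.c hunks: the new handlers receive the whole fault description as a single uint64_t, page_and_ctx, whose layout the fast_data_access_mmu_miss() comment spells out (upper 51 bits carry the faulting virtual address truncated to the page boundary, lower 13 bits the context number). The DMISS_ADDRESS() and DMISS_CONTEXT() accessors used throughout the new code are defined in a header that is not part of this changeset; a minimal sketch consistent with the documented 51/13-bit split would be:

    #include <stdint.h>

    /* Sketch only: assumes the bit layout documented in the handler comment.
     * With 8K pages the page offset is 13 bits, so clearing the low 13 bits
     * yields the page-aligned virtual address, while keeping only those bits
     * yields the context number. */
    #define DMISS_ADDRESS(page_and_ctx)  ((page_and_ctx) & ~UINT64_C(0x1fff))
    #define DMISS_CONTEXT(page_and_ctx)  ((page_and_ctx) & UINT64_C(0x1fff))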
kernel/arch/sparc64/src/trap/exception.c (eb79d60 → ba50a34)

     fault_if_from_uspace(istate, "%s.", __func__);
     dump_istate(istate);
-    dump_sfsr_and_sfar();
+    //MH
+    // dump_sfsr_and_sfar();
     panic("%s.", __func__);
 }
kernel/arch/sparc64/src/trap/sun4v/trap_table.S (eb79d60 → ba50a34)

     and %g1, NWINDOWS - 1, %g1
     wrpr %g1, 0, %cwp        ! CWP--
-    
+
     .if \is_syscall
     done