Changeset 965dc18 in mainline for kernel/arch/sparc64/src/mm/tlb.c
- Timestamp:
- 2008-12-05T19:59:03Z (17 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 49093a4
- Parents:
- 0258e67
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/sparc64/src/mm/tlb.c
r0258e67 r965dc18 55 55 #endif 56 56 57 static void dtlb_pte_copy(pte_t *t, index_t index, bool ro); 58 static void itlb_pte_copy(pte_t *t, index_t index); 59 static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, 60 const char *str); 61 static void do_fast_data_access_mmu_miss_fault(istate_t *istate, 62 tlb_tag_access_reg_t tag, const char *str); 63 static void do_fast_data_access_protection_fault(istate_t *istate, 64 tlb_tag_access_reg_t tag, const char *str); 57 static void dtlb_pte_copy(pte_t *, index_t, bool); 58 static void itlb_pte_copy(pte_t *, index_t); 59 static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *); 60 static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t, 61 const char *); 62 static void do_fast_data_access_protection_fault(istate_t *, 63 tlb_tag_access_reg_t, const char *); 65 64 66 65 char *context_encoding[] = { … … 87 86 /** Insert privileged mapping into DMMU TLB. 88 87 * 89 * @param page 90 * @param frame 91 * @param pagesize 92 * @param locked 93 * @param cacheable 88 * @param page Virtual page address. 89 * @param frame Physical frame address. 90 * @param pagesize Page size. 91 * @param locked True for permanent mappings, false otherwise. 92 * @param cacheable True if the mapping is cacheable, false otherwise. 94 93 */ 95 94 void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, … … 104 103 fr.address = frame; 105 104 106 tag. value= ASID_KERNEL;105 tag.context = ASID_KERNEL; 107 106 tag.vpn = pg.vpn; 108 107 … … 127 126 /** Copy PTE to TLB. 128 127 * 129 * @param t Page Table Entry to be copied.130 * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.131 * @param ro If true, the entry will be created read-only, regardless of its132 * w field.128 * @param t Page Table Entry to be copied. 129 * @param index Zero if lower 8K-subpage, one if higher 8K-subpage. 
130 * @param ro If true, the entry will be created read-only, regardless 131 * of its w field. 133 132 */ 134 133 void dtlb_pte_copy(pte_t *t, index_t index, bool ro) … … 166 165 /** Copy PTE to ITLB. 167 166 * 168 * @param t 169 * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.167 * @param t Page Table Entry to be copied. 168 * @param index Zero if lower 8K-subpage, one if higher 8K-subpage. 170 169 */ 171 170 void itlb_pte_copy(pte_t *t, index_t index) … … 236 235 * low-level, assembly language part of the fast_data_access_mmu_miss handler. 237 236 * 238 * @param tag Content of the TLB Tag Access register as it existed when the 239 * trap happened. This is to prevent confusion created by clobbered 240 * Tag Access register during a nested DTLB miss. 241 * @param istate Interrupted state saved on the stack. 237 * @param tag Content of the TLB Tag Access register as it existed 238 * when the trap happened. This is to prevent confusion 239 * created by clobbered Tag Access register during a nested 240 * DTLB miss. 241 * @param istate Interrupted state saved on the stack. 242 242 */ 243 243 void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate) … … 288 288 /** DTLB protection fault handler. 289 289 * 290 * @param tag Content of the TLB Tag Access register as it existed when the 291 * trap happened. This is to prevent confusion created by clobbered 292 * Tag Access register during a nested DTLB miss. 293 * @param istate Interrupted state saved on the stack. 290 * @param tag Content of the TLB Tag Access register as it existed 291 * when the trap happened. This is to prevent confusion 292 * created by clobbered Tag Access register during a nested 293 * DTLB miss. 294 * @param istate Interrupted state saved on the stack. 294 295 */ 295 296 void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate) … … 332 333 } 333 334 335 /** Print TLB entry (for debugging purposes). 
336 * 337 * The diag field has been left out in order to make this function more generic 338 * (there is no diag field in US3 architecture). 339 * 340 * @param i TLB entry number 341 * @param t TLB entry tag 342 * @param d TLB entry data 343 */ 344 static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d) 345 { 346 printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, " 347 "ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, " 348 "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn, 349 t.context, d.v, d.size, d.nfo, d.ie, d.soft2, 350 d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g); 351 } 352 353 #if defined (US) 354 334 355 /** Print contents of both TLBs. */ 335 356 void tlb_print(void) … … 343 364 d.value = itlb_data_access_read(i); 344 365 t.value = itlb_tag_read_read(i); 345 346 printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, " 347 "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, " 348 "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn, 349 t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, 350 d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g); 366 print_tlb_entry(i, t, d); 351 367 } 352 368 … … 355 371 d.value = dtlb_data_access_read(i); 356 372 t.value = dtlb_tag_read_read(i); 357 358 printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, " 359 "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, " 360 "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn, 361 t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, 362 d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g); 363 } 364 365 } 373 print_tlb_entry(i, t, d); 374 } 375 } 376 377 #elif defined (US3) 378 379 /** Print contents of all TLBs. 
*/ 380 void tlb_print(void) 381 { 382 int i; 383 tlb_data_t d; 384 tlb_tag_read_reg_t t; 385 386 printf("TLB_ISMALL contents:\n"); 387 for (i = 0; i < tlb_ismall_size(); i++) { 388 d.value = dtlb_data_access_read(TLB_ISMALL, i); 389 t.value = dtlb_tag_read_read(TLB_ISMALL, i); 390 print_tlb_entry(i, t, d); 391 } 392 393 printf("TLB_IBIG contents:\n"); 394 for (i = 0; i < tlb_ibig_size(); i++) { 395 d.value = dtlb_data_access_read(TLB_IBIG, i); 396 t.value = dtlb_tag_read_read(TLB_IBIG, i); 397 print_tlb_entry(i, t, d); 398 } 399 400 printf("TLB_DSMALL contents:\n"); 401 for (i = 0; i < tlb_dsmall_size(); i++) { 402 d.value = dtlb_data_access_read(TLB_DSMALL, i); 403 t.value = dtlb_tag_read_read(TLB_DSMALL, i); 404 print_tlb_entry(i, t, d); 405 } 406 407 printf("TLB_DBIG_1 contents:\n"); 408 for (i = 0; i < tlb_dbig_size(); i++) { 409 d.value = dtlb_data_access_read(TLB_DBIG_0, i); 410 t.value = dtlb_tag_read_read(TLB_DBIG_0, i); 411 print_tlb_entry(i, t, d); 412 } 413 414 printf("TLB_DBIG_2 contents:\n"); 415 for (i = 0; i < tlb_dbig_size(); i++) { 416 d.value = dtlb_data_access_read(TLB_DBIG_1, i); 417 t.value = dtlb_tag_read_read(TLB_DBIG_1, i); 418 print_tlb_entry(i, t, d); 419 } 420 } 421 422 #endif 366 423 367 424 void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, … … 412 469 sfar = dtlb_sfar_read(); 413 470 471 #if defined (US) 414 472 printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, " 415 473 "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, 416 474 sfsr.ow, sfsr.fv); 475 #elif defined (US3) 476 printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, " 477 "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft, 478 sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv); 479 #endif 480 417 481 printf("DTLB SFAR: address=%p\n", sfar); 418 482 419 483 dtlb_sfsr_write(0); 420 484 } 485 486 #if defined (US3) 487 /** Invalidates given TLB entry if and only if it is non-locked or global. 
488 * 489 * @param tlb TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1, 490 * TLB_ISMALL, TLB_IBIG). 491 * @param entry Entry index within the given TLB. 492 */ 493 static void tlb_invalidate_entry(int tlb, index_t entry) 494 { 495 tlb_data_t d; 496 tlb_tag_read_reg_t t; 497 498 if (tlb == TLB_DSMALL || tlb == TLB_DBIG_0 || tlb == TLB_DBIG_1) { 499 d.value = dtlb_data_access_read(tlb, entry); 500 if (!d.l || d.g) { 501 t.value = dtlb_tag_read_read(tlb, entry); 502 d.v = false; 503 dtlb_tag_access_write(t.value); 504 dtlb_data_access_write(tlb, entry, d.value); 505 } 506 } else if (tlb == TLB_ISMALL || tlb == TLB_IBIG) { 507 d.value = itlb_data_access_read(tlb, entry); 508 if (!d.l || d.g) { 509 t.value = itlb_tag_read_read(tlb, entry); 510 d.v = false; 511 itlb_tag_access_write(t.value); 512 itlb_data_access_write(tlb, entry, d.value); 513 } 514 } 515 } 516 #endif 421 517 422 518 /** Invalidate all unlocked ITLB and DTLB entries. */ … … 424 520 { 425 521 int i; 426 tlb_data_t d; 427 tlb_tag_read_reg_t t; 428 522 429 523 /* 430 524 * Walk all ITLB and DTLB entries and remove all unlocked mappings. 431 525 * 432 526 * The kernel doesn't use global mappings so any locked global mappings 433 * found 527 * found must have been created by someone else. Their only purpose now 434 528 * is to collide with proper mappings. Invalidate immediately. It should 435 529 * be safe to invalidate them as late as now. 
436 530 */ 531 532 #if defined (US) 533 tlb_data_t d; 534 tlb_tag_read_reg_t t; 437 535 438 536 for (i = 0; i < ITLB_ENTRY_COUNT; i++) { … … 445 543 } 446 544 } 447 545 448 546 for (i = 0; i < DTLB_ENTRY_COUNT; i++) { 449 547 d.value = dtlb_data_access_read(i); … … 455 553 } 456 554 } 457 555 556 #elif defined (US3) 557 558 for (i = 0; i < tlb_ismall_size(); i++) 559 tlb_invalidate_entry(TLB_ISMALL, i); 560 for (i = 0; i < tlb_ibig_size(); i++) 561 tlb_invalidate_entry(TLB_IBIG, i); 562 for (i = 0; i < tlb_dsmall_size(); i++) 563 tlb_invalidate_entry(TLB_DSMALL, i); 564 for (i = 0; i < tlb_dbig_size(); i++) 565 tlb_invalidate_entry(TLB_DBIG_0, i); 566 for (i = 0; i < tlb_dbig_size(); i++) 567 tlb_invalidate_entry(TLB_DBIG_1, i); 568 #endif 569 458 570 } 459 571 … … 485 597 * address space. 486 598 * 487 * @param asid 488 * @param page 489 * @param cnt 599 * @param asid Address Space ID. 600 * @param page First page which to sweep out from ITLB and DTLB. 601 * @param cnt Number of ITLB and DTLB entries to invalidate. 490 602 */ 491 603 void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
Note:
See TracChangeset
for help on using the changeset viewer.