Changeset ad4b32c in mainline for kernel/arch/ia64/src
- Timestamp: 2009-09-04T21:50:59Z (16 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 309ede1
- Parents: 7e266ff (diff), 40240b1 (diff)
- Location: kernel/arch/ia64/src
- Files: 5 edited

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
kernel/arch/ia64/src/mm/as.c
(r7e266ff → rad4b32c)

@@ -55 +55 @@
 void as_install_arch(as_t *as)
 {
-    region_register rr;
+    region_register_t rr;
     int i;
 
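The whole changeset renames the ia64 register types to the `_t` suffix convention. For orientation, a minimal sketch of what the renamed type might look like, assuming the usual HelenOS union-of-bitfields layout behind the `rr.word` and `rr.map.rid` accesses seen throughout these diffs (field widths follow the architectural region register format; the actual definition lives in the kernel/arch/ia64 headers and may differ):

    #include <stdint.h>

    /* Sketch only: assumed layout of region_register_t. */
    typedef union {
        uint64_t word;
        struct {
            unsigned int ve : 1;         /* VHPT walker enable */
            unsigned int reserved0 : 1;
            unsigned int ps : 6;         /* preferred page size (log2) */
            unsigned int rid : 24;       /* region identifier */
            unsigned int reserved1 : 32;
        } map;
    } region_register_t;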
kernel/arch/ia64/src/mm/page.c
(r7e266ff → rad4b32c)

@@ -63 +63 @@
 void set_environment(void)
 {
-    region_register rr;
-    pta_register pta;
+    region_register_t rr;
+    pta_register_t pta;
     int i;
 
 #ifdef CONFIG_VHPT
@@ -131 +131 @@
 vhpt_entry_t *vhpt_hash(uintptr_t page, asid_t asid)
 {
-    region_register rr_save, rr;
+    region_register_t rr_save, rr;
     size_t vrn;
     rid_t rid;
@@ -176 +176 @@
 bool vhpt_compare(uintptr_t page, asid_t asid, vhpt_entry_t *v)
 {
-    region_register rr_save, rr;
+    region_register_t rr_save, rr;
     size_t vrn;
     rid_t rid;
@@ -223 +223 @@
     int flags)
 {
-    region_register rr_save, rr;
+    region_register_t rr_save, rr;
     size_t vrn;
     rid_t rid;
@@ -257 +257 @@
     v->present.ma = (flags & PAGE_CACHEABLE) ?
         MA_WRITEBACK : MA_UNCACHEABLE;
-    v->present.a = false;
-    v->present.d = false;
+    v->present.a = false;    /* not accessed */
+    v->present.d = false;    /* not dirty */
     v->present.pl = (flags & PAGE_USER) ? PL_USER : PL_KERNEL;
     v->present.ar = (flags & PAGE_WRITE) ? AR_WRITE : AR_READ;
     v->present.ar |= (flags & PAGE_EXEC) ? AR_EXECUTE : 0;
     v->present.ppn = frame >> PPN_SHIFT;
-    v->present.ed = false;
+    v->present.ed = false;    /* exception not deffered */
     v->present.ps = PAGE_WIDTH;
     v->present.key = 0;
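The vhpt_hash() and vhpt_compare() signatures touched above split a VHPT lookup into two steps: hash (page, asid) to a candidate entry, then verify its tag. A hedged sketch of how a caller might combine the two; vhpt_probe is a hypothetical helper for illustration, not part of this changeset (vhpt_entry_t, asid_t and the two primitives are assumed from the kernel headers):

    /* Hypothetical helper combining the two primitives above. */
    static vhpt_entry_t *vhpt_probe(uintptr_t page, asid_t asid)
    {
        vhpt_entry_t *v = vhpt_hash(page, asid);   /* candidate slot */

        if (vhpt_compare(page, asid, v))           /* tag matches? */
            return v;

        return NULL;    /* miss: fall back to the software page tables */
    }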
kernel/arch/ia64/src/mm/tlb.c
(r7e266ff → rad4b32c)

@@ -27 +27 @@
  */
 
-/** @addtogroup ia64mm 
+/** @addtogroup ia64mm
  * @{
  */
@@ -53 +53 @@
 #include <interrupt.h>
 
+#define IO_FRAME_BASE    0xFFFFC000000
+
 /** Invalidate all TLB entries. */
 void tlb_invalidate_all(void)
@@ -74 +76 @@
             asm volatile (
-                "ptc.e %0 ;;"
-                :
-                : "r" (adr)
+                "ptc.e %[adr] ;;"
+                :: [adr] "r" (adr)
             );
             adr += stride2;
@@ -86 +87 @@
     srlz_d();
     srlz_i();
+
 #ifdef CONFIG_VHPT
     vhpt_invalidate_all();
-#endif 
+#endif
 }
@@ -93 +95 @@
 /** Invalidate entries belonging to an address space.
  *
  * @param asid Address space identifier.
+ *
  */
 void tlb_invalidate_asid(asid_t asid)
@@ -103 +106 @@
 void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
 {
-    region_register rr;
+    region_register_t rr;
     bool restore_rr = false;
     int b = 0;
     int c = cnt;
 
     uintptr_t va;
     va = page;
 
     rr.word = rr_read(VA2VRN(va));
     if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
@@ -117 +120 @@
          * Save the old content of the register and replace the RID.
          */
-        region_register rr0;
+        region_register_t rr0;
 
         rr0 = rr;
         rr0.map.rid = ASID2RID(asid, VA2VRN(va));
@@ -126 +129 @@
     }
 
-    while (c >>= 1) 
+    while (c >>= 1)
         b++;
     b >>= 1;
@@ -169 +172 @@
         break;
     }
-    for(; va < (page + cnt * PAGE_SIZE); va += (1 << ps))
-        asm volatile ("ptc.l %0, %1;;" :: "r" (va), "r" (ps << 2));
+
+    for (; va < (page + cnt * PAGE_SIZE); va += (1 << ps))
+        asm volatile (
+            "ptc.l %[va], %[ps] ;;"
+            :: [va] "r" (va),
+               [ps] "r" (ps << 2)
+        );
+
     srlz_d();
     srlz_i();
@@ -183 +192 @@
 /** Insert data into data translation cache.
  *
  * @param va    Virtual page address.
  * @param asid  Address space identifier.
  * @param entry The rest of TLB entry as required by TLB insertion
  *              format.
+ *
  */
 void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
@@ -195 +205 @@
 /** Insert data into instruction translation cache.
  *
- * @param va
- * @param asid
- * @param entry
- *
+ * @param va    Virtual page address.
+ * @param asid  Address space identifier.
+ * @param entry The rest of TLB entry as required by TLB insertion
+ *              format.
  */
 void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
@@ -207 +217 @@
 /** Insert data into instruction or data translation cache.
  *
  * @param va    Virtual page address.
  * @param asid  Address space identifier.
  * @param entry The rest of TLB entry as required by TLB insertion
  *              format.
  * @param dtc   If true, insert into data translation cache, use
  *              instruction translation cache otherwise.
+ *
  */
 void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
 {
-    region_register rr;
+    region_register_t rr;
     bool restore_rr = false;
 
     rr.word = rr_read(VA2VRN(va));
     if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
@@ -225 +236 @@
          * Save the old content of the register and replace the RID.
          */
-        region_register rr0;
+        region_register_t rr0;
 
         rr0 = rr;
         rr0.map.rid = ASID2RID(asid, VA2VRN(va));
@@ -236 +247 @@
     asm volatile (
-        "mov r8 = psr;;\n"
-        "rsm %0;;\n"                       /* PSR_IC_MASK */
-        "srlz.d;;\n"
-        "srlz.i;;\n"
-        "mov cr.ifa = %1\n"                /* va */
-        "mov cr.itir = %2;;\n"             /* entry.word[1] */
-        "cmp.eq p6,p7 = %4,r0;;\n"         /* decide between itc and dtc */
-        "(p6) itc.i %3;;\n"
-        "(p7) itc.d %3;;\n"
-        "mov psr.l = r8;;\n"
-        "srlz.d;;\n"
-        :
-        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]),
-          "r" (entry.word[0]), "r" (dtc)
+        "mov r8 = psr ;;\n"
+        "rsm %[mask] ;;\n"                 /* PSR_IC_MASK */
+        "srlz.d ;;\n"
+        "srlz.i ;;\n"
+        "mov cr.ifa = %[va]\n"             /* va */
+        "mov cr.itir = %[word1] ;;\n"      /* entry.word[1] */
+        "cmp.eq p6, p7 = %[dtc], r0 ;;\n"  /* decide between itc and dtc */
+        "(p6) itc.i %[word0] ;;\n"
+        "(p7) itc.d %[word0] ;;\n"
+        "mov psr.l = r8 ;;\n"
+        "srlz.d ;;\n"
+        :: [mask] "i" (PSR_IC_MASK),
+           [va] "r" (va),
+           [word0] "r" (entry.word[0]),
+           [word1] "r" (entry.word[1]),
+           [dtc] "r" (dtc)
         : "p6", "p7", "r8"
     );
@@ -261 +274 @@
 /** Insert data into instruction translation register.
  *
- * @param va
- * @param asid
- * @param entry
- *
- * @param tr
+ * @param va    Virtual page address.
+ * @param asid  Address space identifier.
+ * @param entry The rest of TLB entry as required by TLB insertion
+ *              format.
+ * @param tr    Translation register.
+ *
  */
-void
-itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
+void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
 {
     tr_mapping_insert(va, asid, entry, false, tr);
@@ -275 +288 @@
 /** Insert data into data translation register.
  *
- * @param va
- * @param asid
- * @param entry
- *
- * @param tr
+ * @param va    Virtual page address.
+ * @param asid  Address space identifier.
+ * @param entry The rest of TLB entry as required by TLB insertion
+ *              format.
+ * @param tr    Translation register.
+ *
  */
-void
-dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
+void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
 {
     tr_mapping_insert(va, asid, entry, true, tr);
@@ -289 +302 @@
 /** Insert data into instruction or data translation register.
  *
- * @param va
- * @param asid
- * @param entry
- *
- * @param dtr
- *
- * @param tr
+ * @param va    Virtual page address.
+ * @param asid  Address space identifier.
+ * @param entry The rest of TLB entry as required by TLB insertion
+ *              format.
+ * @param dtr   If true, insert into data translation register, use
+ *              instruction translation register otherwise.
+ * @param tr    Translation register.
+ *
  */
-void
-tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr,
+void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr,
     size_t tr)
 {
-    region_register rr;
+    region_register_t rr;
     bool restore_rr = false;
 
     rr.word = rr_read(VA2VRN(va));
     if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
@@ -310 +323 @@
          * Save the old content of the register and replace the RID.
          */
-        region_register rr0;
+        region_register_t rr0;
 
         rr0 = rr;
         rr0.map.rid = ASID2RID(asid, VA2VRN(va));
@@ -318 +331 @@
         srlz_i();
     }
 
     asm volatile (
-        "mov r8 = psr;;\n"
-        "rsm %0;;\n"                       /* PSR_IC_MASK */
-        "srlz.d;;\n"
-        "srlz.i;;\n"
-        "mov cr.ifa = %1\n"                /* va */
-        "mov cr.itir = %2;;\n"             /* entry.word[1] */
-        "cmp.eq p6,p7 = %5,r0;;\n"         /* decide between itr and dtr */
-        "(p6) itr.i itr[%4] = %3;;\n"
-        "(p7) itr.d dtr[%4] = %3;;\n"
-        "mov psr.l = r8;;\n"
-        "srlz.d;;\n"
-        :
-        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]),
-          "r" (entry.word[0]), "r" (tr), "r" (dtr)
+        "mov r8 = psr ;;\n"
+        "rsm %[mask] ;;\n"                 /* PSR_IC_MASK */
+        "srlz.d ;;\n"
+        "srlz.i ;;\n"
+        "mov cr.ifa = %[va]\n"             /* va */
+        "mov cr.itir = %[word1] ;;\n"      /* entry.word[1] */
+        "cmp.eq p6, p7 = %[dtr], r0 ;;\n"  /* decide between itr and dtr */
+        "(p6) itr.i itr[%[tr]] = %[word0] ;;\n"
+        "(p7) itr.d dtr[%[tr]] = %[word0] ;;\n"
+        "mov psr.l = r8 ;;\n"
+        "srlz.d ;;\n"
+        :: [mask] "i" (PSR_IC_MASK),
+           [va] "r" (va),
+           [word1] "r" (entry.word[1]),
+           [word0] "r" (entry.word[0]),
+           [tr] "r" (tr),
+           [dtr] "r" (dtr)
         : "p6", "p7", "r8"
     );
@@ -346 +362 @@
 /** Insert data into DTLB.
  *
- * @param page
- * @param frame
- * @param dtr
- *
- * @param tr
+ * @param page  Virtual page address including VRN bits.
+ * @param frame Physical frame address.
+ * @param dtr   If true, insert into data translation register, use data
+ *              translation cache otherwise.
+ * @param tr    Translation register if dtr is true, ignored otherwise.
+ *
  */
-void
-dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr,
+void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr,
     size_t tr)
@@ -361 +377 @@
     entry.word[1] = 0;
 
-    entry.p = true;
+    entry.p = true;            /* present */
     entry.ma = MA_WRITEBACK;
-    entry.a = true;
-    entry.d = true;
+    entry.a = true;            /* already accessed */
+    entry.d = true;            /* already dirty */
     entry.pl = PL_KERNEL;
     entry.ar = AR_READ | AR_WRITE;
@@ -380 +396 @@
  * Purge DTR entries used by the kernel.
  *
  * @param page  Virtual page address including VRN bits.
  * @param width Width of the purge in bits.
+ *
  */
 void dtr_purge(uintptr_t page, size_t width)
 {
-    asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width << 2));
+    asm volatile (
+        "ptr.d %[page], %[width]\n"
+        :: [page] "r" (page),
+           [width] "r" (width << 2)
+    );
 }
@@ -391 +412 @@
 /** Copy content of PTE into data translation cache.
  *
  * @param t PTE.
+ *
  */
 void dtc_pte_copy(pte_t *t)
 {
     tlb_entry_t entry;
 
     entry.word[0] = 0;
     entry.word[1] = 0;
@@ -410 +432 @@
 
     dtc_mapping_insert(t->page, t->as->asid, entry);
+
 #ifdef CONFIG_VHPT
     vhpt_mapping_insert(t->page, t->as->asid, entry);
-#endif 
+#endif
 }
 
 /** Copy content of PTE into instruction translation cache.
  *
  * @param t PTE.
+ *
  */
 void itc_pte_copy(pte_t *t)
 {
     tlb_entry_t entry;
 
     entry.word[0] = 0;
     entry.word[1] = 0;
@@ -437 +461 @@
 
     itc_mapping_insert(t->page, t->as->asid, entry);
+
 #ifdef CONFIG_VHPT
     vhpt_mapping_insert(t->page, t->as->asid, entry);
-#endif 
+#endif
 }
@@ -444 +469 @@
 /** Instruction TLB fault handler for faults with VHPT turned off.
  *
  * @param vector Interruption vector.
  * @param istate Structure with saved interruption state.
+ *
  */
 void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate)
 {
-    region_register rr;
+    region_register_t rr;
     rid_t rid;
     uintptr_t va;
     pte_t *t;
 
-    va = istate->cr_ifa;
+    va = istate->cr_ifa;    /* faulting address */
     rr.word = rr_read(VA2VRN(va));
     rid = rr.map.rid;
 
     page_table_lock(AS, true);
     t = page_mapping_find(AS, va);
@@ -473 +499 @@
         page_table_unlock(AS, true);
         if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
-            fault_if_from_uspace(istate, "Page fault at %p.",va);
+            fault_if_from_uspace(istate, "Page fault at %p.", va);
             panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid,
                 istate->cr_iip);
@@ -488 +514 @@
 }
 
-#define IO_FRAME_BASE 0xFFFFC000000
-
 /**
  * There is special handling of memory mapped legacy io, because of 4KB sized
  * access for userspace.
  *
  * @param va     Virtual address of page fault.
  * @param istate Structure with saved interruption state.
  *
  * @return One on success, zero on failure.
+ *
  */
 static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
@@ -516 +541 @@
         tlb_entry_t entry;
 
         entry.word[0] = 0;
         entry.word[1] = 0;
 
-        entry.p = true;
+        entry.p = true;            /* present */
         entry.ma = MA_UNCACHEABLE;
-        entry.a = true;
-        entry.d = true;
+        entry.a = true;            /* already accessed */
+        entry.d = true;            /* already dirty */
         entry.pl = PL_USER;
         entry.ar = AR_READ | AR_WRITE;
         entry.ppn = frame >> PPN_SHIFT;
         entry.ps = USPACE_IO_PAGE_WIDTH;
 
         dtc_mapping_insert(page, TASK->as->asid, entry);
         return 1;
@@ -542 +567 @@
 /** Data TLB fault handler for faults with VHPT turned off.
  *
  * @param vector Interruption vector.
  * @param istate Structure with saved interruption state.
+ *
  */
 void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
 {
-    region_register rr;
-    rid_t rid;
-    uintptr_t va;
-    pte_t *t;
-
-    va = istate->cr_ifa;    /* faulting address */
-    rr.word = rr_read(VA2VRN(va));
-    rid = rr.map.rid;
+    if (istate->cr_isr.sp) {
+        /* Speculative load. Deffer the exception
+           until a more clever approach can be used.
+
+           Currently if we try to find the mapping
+           for the speculative load while in the kernel,
+           we might introduce a livelock because of
+           the possibly invalid values of the address. */
+        istate->cr_ipsr.ed = true;
+        return;
+    }
+
+    uintptr_t va = istate->cr_ifa;    /* faulting address */
+
+    region_register_t rr;
+    rr.word = rr_read(VA2VRN(va));
+    rid_t rid = rr.map.rid;
     if (RID2ASID(rid) == ASID_KERNEL) {
         if (VA2VRN(va) == VRN_KERNEL) {
@@ -565 +600 @@
         }
     }
 
+
     page_table_lock(AS, true);
-    t = page_mapping_find(AS, va);
-    if (t) {
+    pte_t *entry = page_mapping_find(AS, va);
+    if (entry) {
         /*
          * The mapping was found in the software page hash table.
          * Insert it into data translation cache.
          */
-        dtc_pte_copy(t);
+        dtc_pte_copy(entry);
         page_table_unlock(AS, true);
     } else {
@@ -579 +615 @@
         if (try_memmap_io_insertion(va, istate))
             return;
-        /*
-         * Forward the page fault to the address space page fault
+
+        /*
+         * Forward the page fault to the address space page fault
          * handler.
          */
         if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
-            fault_if_from_uspace(istate, "Page fault at %p.",va);
+            fault_if_from_uspace(istate, "Page fault at %p.", va);
             panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid,
                 istate->cr_iip);
@@ -595 +632 @@
  * This fault should not occur.
  *
  * @param vector Interruption vector.
  * @param istate Structure with saved interruption state.
+ *
  */
 void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
 {
-    panic("%s.", __func__);
+    ASSERT(false);
 }
@@ -605 +643 @@
 /** Data Dirty bit fault handler.
  *
  * @param vector Interruption vector.
  * @param istate Structure with saved interruption state.
+ *
  */
 void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
 {
-    region_register rr;
+    region_register_t rr;
     rid_t rid;
     uintptr_t va;
     pte_t *t;
 
-    va = istate->cr_ifa;
+    va = istate->cr_ifa;    /* faulting address */
     rr.word = rr_read(VA2VRN(va));
     rid = rr.map.rid;
 
     page_table_lock(AS, true);
     t = page_mapping_find(AS, va);
-    ASSERT(t && t->p);
-    if (t && t->p && t->w) {
+    ASSERT((t) && (t->p));
+    if ((t) && (t->p) && (t->w)) {
         /*
          * Update the Dirty bit in page tables and reinsert
@@ -631 +670 @@
     } else {
         if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
-            fault_if_from_uspace(istate, "Page fault at %p.",va);
+            fault_if_from_uspace(istate, "Page fault at %p.", va);
             panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid,
                 istate->cr_iip);
@@ -641 +680 @@
 /** Instruction access bit fault handler.
  *
  * @param vector Interruption vector.
  * @param istate Structure with saved interruption state.
+ *
  */
 void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
 {
-    region_register rr;
+    region_register_t rr;
     rid_t rid;
     uintptr_t va;
     pte_t *t;
 
-    va = istate->cr_ifa;
+    va = istate->cr_ifa;    /* faulting address */
     rr.word = rr_read(VA2VRN(va));
     rid = rr.map.rid;
 
     page_table_lock(AS, true);
     t = page_mapping_find(AS, va);
-    ASSERT(t && t->p);
-    if (t && t->p && t->x) {
+    ASSERT((t) && (t->p));
+    if ((t) && (t->p) && (t->x)) {
         /*
          * Update the Accessed bit in page tables and reinsert
@@ -679 +719 @@
  * @param vector Interruption vector.
  * @param istate Structure with saved interruption state.
+ *
  */
 void data_access_bit_fault(uint64_t vector, istate_t *istate)
 {
-    region_register rr;
+    region_register_t rr;
     rid_t rid;
     uintptr_t va;
     pte_t *t;
 
-    va = istate->cr_ifa;
+    va = istate->cr_ifa;    /* faulting address */
     rr.word = rr_read(VA2VRN(va));
     rid = rr.map.rid;
 
     page_table_lock(AS, true);
     t = page_mapping_find(AS, va);
-    ASSERT(t && t->p);
-    if (t && t->p) {
+    ASSERT((t) && (t->p));
+    if ((t) && (t->p)) {
         /*
          * Update the Accessed bit in page tables and reinsert
@@ -715 +756 @@
  * @param vector Interruption vector.
  * @param istate Structure with saved interruption state.
+ *
  */
 void data_access_rights_fault(uint64_t vector, istate_t *istate)
 {
-    region_register rr;
+    region_register_t rr;
     rid_t rid;
     uintptr_t va;
     pte_t *t;
 
-    va = istate->cr_ifa;
+    va = istate->cr_ifa;    /* faulting address */
     rr.word = rr_read(VA2VRN(va));
     rid = rr.map.rid;
 
     /*
      * Assume a write to a read-only page.
@@ -732 +774 @@
     page_table_lock(AS, true);
     t = page_mapping_find(AS, va);
-    ASSERT(t && t->p);
+    ASSERT((t) && (t->p));
     ASSERT(!t->w);
     if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
@@ -746 +788 @@
  * @param vector Interruption vector.
  * @param istate Structure with saved interruption state.
+ *
  */
 void page_not_present(uint64_t vector, istate_t *istate)
 {
-    region_register rr;
+    region_register_t rr;
     rid_t rid;
     uintptr_t va;
     pte_t *t;
 
-    va = istate->cr_ifa;
+    va = istate->cr_ifa;    /* faulting address */
     rr.word = rr_read(VA2VRN(va));
     rid = rr.map.rid;
 
     page_table_lock(AS, true);
     t = page_mapping_find(AS, va);
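Most of the tlb.c churn converts GCC extended asm from positional operands (%0, %1, ...) to symbolic names (%[name]), which is what keeps the long itc/itr sequences readable. An illustrative stand-alone ia64 snippet of the two styles; add_positional and add_symbolic are made-up examples, not from the changeset:

    #include <stdint.h>

    /* Positional operands (old style): indices must be counted by hand. */
    static inline uint64_t add_positional(uint64_t a, uint64_t b)
    {
        uint64_t res;
        asm volatile (
            "add %0 = %1, %2 ;;\n"
            : "=r" (res)
            : "r" (a), "r" (b)
        );
        return res;
    }

    /* Symbolic operands (new style), as adopted throughout this changeset:
     * each constraint is labeled and referenced by name in the template. */
    static inline uint64_t add_symbolic(uint64_t a, uint64_t b)
    {
        uint64_t res;
        asm volatile (
            "add %[res] = %[lhs], %[rhs] ;;\n"
            : [res] "=r" (res)
            : [lhs] "r" (a), [rhs] "r" (b)
        );
        return res;
    }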
kernel/arch/ia64/src/mm/vhpt.c
(r7e266ff → rad4b32c)

@@ -27 +27 @@
  */
 
-/** @addtogroup ia64mm 
+/** @addtogroup ia64mm
  * @{
  */
@@ -44 +44 @@
     vhpt_base = frame_alloc(VHPT_WIDTH - FRAME_WIDTH,
         FRAME_KA | FRAME_ATOMIC);
-    if (!vhpt_base) 
+    if (!vhpt_base)
         panic("Kernel configured with VHPT but no memory for table.");
     vhpt_invalidate_all();
@@ -53 +53 @@
 void vhpt_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
 {
-    region_register rr_save, rr;
+    region_register_t rr_save, rr;
     size_t vrn;
     rid_t rid;
     uint64_t tag;
 
     vhpt_entry_t *ventry;
 
-
     vrn = va >> VRN_SHIFT;
     rid = ASID2RID(asid, vrn);
 
     rr_save.word = rr_read(vrn);
     rr.word = rr_save.word;
@@ -75 +74 @@
     srlz_i();
     srlz_d();
 
     ventry->word[0] = entry.word[0];
     ventry->word[1] = entry.word[1];
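The rr_save/rr pair in vhpt_mapping_insert is the same pattern that recurs throughout tlb.c: to operate on a virtual region whose RID does not belong to the currently installed address space, the code temporarily installs the right RID and restores the old register afterwards. A condensed sketch of the pattern, using only names visible in the diffs above (VA-to-VRN shifting, ASID2RID, rr_read, rr_write and the serialization helpers are assumed from the ia64 headers):

    /* Sketch of the recurring save/replace/restore RID pattern. */
    static void with_region_rid(uintptr_t va, asid_t asid)
    {
        region_register_t rr_save, rr;
        size_t vrn = va >> VRN_SHIFT;       /* region number from the VA */

        rr_save.word = rr_read(vrn);        /* remember the old register */
        rr.word = rr_save.word;
        rr.map.rid = ASID2RID(asid, vrn);   /* install the temporary RID */
        rr_write(vrn, rr.word);
        srlz_i();
        srlz_d();

        /* ... operate on the region here (hash, probe, insert) ... */

        rr_write(vrn, rr_save.word);        /* restore the original RID */
        srlz_i();
        srlz_d();
    }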
kernel/arch/ia64/src/start.S
(r7e266ff → rad4b32c)

@@ -32 +32 @@
 #include <mm/asid.h>
 
-#define RR_MASK (0xFFFFFFFF00000002)
-#define RID_SHIFT 8
-#define PS_SHIFT 2
-
-#define KERNEL_TRANSLATION_I 0x0010000000000661
-#define KERNEL_TRANSLATION_D 0x0010000000000661
-#define KERNEL_TRANSLATION_VIO 0x0010000000000671
-#define KERNEL_TRANSLATION_IO 0x00100FFFFC000671
-#define KERNEL_TRANSLATION_FW 0x00100000F0000671
+#define RR_MASK                 (0xFFFFFFFF00000002)
+#define RID_SHIFT               8
+#define PS_SHIFT                2
+
+#define KERNEL_TRANSLATION_I    0x0010000000000661
+#define KERNEL_TRANSLATION_D    0x0010000000000661
+#define KERNEL_TRANSLATION_VIO  0x0010000000000671
+#define KERNEL_TRANSLATION_IO   0x00100FFFFC000671
+#define KERNEL_TRANSLATION_FW   0x00100000F0000671
 
 .section K_TEXT_START, "ax"
@@ -111 +111 @@
     itr.d dtr[r7] = r10
 
-    # Setup mapping for fimware arrea (also SAPIC)
+    # Setup mapping for firmware area (also SAPIC)
 
     mov r11 = cr.itir
     movl r10 = ~0xfc
@@ -125 +125 @@
     movl r10 = (KERNEL_TRANSLATION_FW)
     itr.d dtr[r7] = r10
 
+    # Initialize DSR
+
+    movl r10 = (DCR_DP_MASK | DCR_DK_MASK | DCR_DX_MASK | DCR_DR_MASK | DCR_DA_MASK | DCR_DD_MASK | DCR_LC_MASK)
+    mov r9 = cr.dcr
+    or r10 = r10, r9
+    mov cr.dcr = r10
+
     # Initialize PSR
 
     movl r10 = (PSR_DT_MASK | PSR_RT_MASK | PSR_IT_MASK | PSR_IC_MASK) /* Enable paging */
     mov r9 = psr
@@ -164 +170 @@
     cmp.eq p3, p2 = r20, r0 ;;
 #else
-    cmp.eq p3, p2 = r0, r0 ;;
-#endif
+    cmp.eq p3, p2 = r0, r0 ;;    /* you are BSP */
+#endif    /* CONFIG_SMP */
 
     # Initialize register stack
@@ -175 +181 @@
     # Initialize memory stack to some sane value
     movl r12 = stack0 ;;
-    add r12 = -16, r12
+    add r12 = -16, r12    /* allocate a scratch area on the stack */
 
     # Initialize gp (Global Pointer) register
     movl r20 = (VRN_KERNEL << VRN_SHIFT) ;;
-    or r20 = r20, r1;;
+    or r20 = r20, r1 ;;
     movl r1 = _hardcoded_load_address
@@ -209 +215 @@
     # Mark that BSP is on
+
     mov r20 = 1 ;;
     movl r21 = bsp_started ;;
     st8 [r21] = r20 ;;
@@ -244 +251 @@
     movl r11 = kernel_image_ap_start_loop
     and r11 = r11, r12
     mov b1 = r11
 
-    ld1 r20 = [r8] ;;
-    movl r21 = 3 ;;
-    cmp.eq p2, p3 = r20, r21 ;;
+    ld1 r20 = [r8]
+    movl r21 = 3
+    cmp.eq p2, p3 = r20, r21
 (p3)    br.call.sptk.many b0 = b1
@@ -266 +273 @@
 cpu_by_id_eid_list:
     .space 65536
 
-#endif
+#endif /* CONFIG_SMP */
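The KERNEL_TRANSLATION_* constants that this hunk realigns are pre-cooked TLB insertion words; for instance, the IO constant embeds the 0xFFFFC000000 frame base that tlb.c now names IO_FRAME_BASE. A hedged host-side decoder, assuming the standard ia64 insertion-format bit layout (bit 0 p, bits 2-4 ma, bit 5 a, bit 6 d, bits 7-8 pl, bits 9-11 ar, bits 12-49 ppn, bit 52 ed); this program is illustrative and not part of the changeset:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical decoder for the KERNEL_TRANSLATION_* words above. */
    static void decode_translation(const char *name, uint64_t w)
    {
        printf("%s: p=%" PRIu64 " ma=%" PRIu64 " a=%" PRIu64 " d=%" PRIu64
            " pl=%" PRIu64 " ar=%" PRIu64 " frame=0x%" PRIx64 " ed=%" PRIu64 "\n",
            name,
            w & 1,                                /* present */
            (w >> 2) & 0x7,                       /* memory attribute: 0 WB, 4 UC */
            (w >> 5) & 1,                         /* accessed */
            (w >> 6) & 1,                         /* dirty */
            (w >> 7) & 0x3,                       /* privilege level */
            (w >> 9) & 0x7,                       /* access rights */
            ((w >> 12) & 0x3FFFFFFFFFULL) << 12,  /* physical frame base */
            (w >> 52) & 1);                       /* exception deferral */
    }

    int main(void)
    {
        decode_translation("I/D", 0x0010000000000661);  /* write-back kernel mapping */
        decode_translation("IO ", 0x00100FFFFC000671);  /* uncacheable, frame = IO_FRAME_BASE */
        decode_translation("FW ", 0x00100000F0000671);  /* uncacheable firmware window */
        return 0;
    }

Under that assumed layout, the 0x...0661 word decodes as a present, accessed, dirty, kernel-only, write-back mapping, while the 0x...0671 variants differ only in the uncacheable memory attribute.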