Changeset c520034 in mainline for kernel/arch/ia64/src
- Timestamp:
- 2011-12-31T18:19:35Z (14 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 295f658, 77c2b02, 96cd5b4
- Parents:
- 852052d (diff), 22f0561 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - Location:
- kernel/arch/ia64/src
- Files:
-
- 1 added
- 5 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/ia64/src/ia64.c
r852052d rc520034 45 45 #include <arch/drivers/it.h> 46 46 #include <arch/drivers/kbd.h> 47 #include <arch/legacyio.h> 47 48 #include <genarch/drivers/ega/ega.h> 48 49 #include <genarch/drivers/i8042/i8042.h> … … 51 52 #include <genarch/kbrd/kbrd.h> 52 53 #include <genarch/srln/srln.h> 54 #include <mm/page.h> 55 56 #ifdef MACHINE_ski 57 #include <arch/drivers/ski.h> 58 #endif 53 59 54 60 /* NS16550 as a COM 1 */ … … 58 64 59 65 static uint64_t iosapic_base = 0xfec00000; 66 uintptr_t legacyio_virt_base = 0; 60 67 61 68 /** Performs ia64-specific initialization before main_bsp() is called. */ … … 80 87 static void iosapic_init(void) 81 88 { 82 uint 64_t IOSAPIC = PA2KA((sysarg_t)(iosapic_base)) | FW_OFFSET;89 uintptr_t IOSAPIC = hw_map(iosapic_base, PAGE_SIZE); 83 90 int i; 84 91 … … 107 114 { 108 115 if (config.cpu_active == 1) { 116 /* Map the page with legacy I/O. */ 117 legacyio_virt_base = hw_map(LEGACYIO_PHYS_BASE, LEGACYIO_SIZE); 118 109 119 iosapic_init(); 110 120 irq_init(INR_COUNT, INR_COUNT); … … 113 123 } 114 124 115 void arch_post_cpu_init(void) 116 { 125 void arch_post_cpu_init(void){ 117 126 } 118 127 … … 202 211 sysinfo_set_item_val("ia64_iospace", NULL, true); 203 212 sysinfo_set_item_val("ia64_iospace.address", NULL, true); 204 sysinfo_set_item_val("ia64_iospace.address.virtual", NULL, IO_OFFSET);213 sysinfo_set_item_val("ia64_iospace.address.virtual", NULL, LEGACYIO_USER_BASE); 205 214 } 206 215 -
kernel/arch/ia64/src/mm/frame.c
r852052d rc520034 51 51 #define MINCONF 1 52 52 53 uintptr_t last_frame = 0;53 uintptr_t end_of_identity = -1ULL; 54 54 55 void frame_arch_init(void)55 static void frame_common_arch_init(bool low) 56 56 { 57 if (config.cpu_active == 1) { 58 unsigned int i; 59 for (i = 0; i < bootinfo->memmap_items; i++) { 60 if (bootinfo->memmap[i].type == MEMMAP_FREE_MEM) { 61 uint64_t base = bootinfo->memmap[i].base; 62 uint64_t size = bootinfo->memmap[i].size; 63 uint64_t abase = ALIGN_UP(base, FRAME_SIZE); 57 unsigned int i; 64 58 65 if (size > FRAME_SIZE) 66 size -= abase - base; 59 for (i = 0; i < bootinfo->memmap_items; i++) { 60 if (bootinfo->memmap[i].type != MEMMAP_FREE_MEM) 61 continue; 67 62 68 if (size > MIN_ZONE_SIZE) { 69 zone_create(abase >> FRAME_WIDTH, 70 size >> FRAME_WIDTH, 71 max(MINCONF, abase >> FRAME_WIDTH), 72 0); 73 } 74 if (abase + size > last_frame) 75 last_frame = abase + size; 63 uintptr_t base = bootinfo->memmap[i].base; 64 size_t size = bootinfo->memmap[i].size; 65 uintptr_t abase = ALIGN_UP(base, FRAME_SIZE); 66 67 if (size > FRAME_SIZE) 68 size -= abase - base; 69 70 if (!frame_adjust_zone_bounds(low, &abase, &size)) 71 continue; 72 73 if (size > MIN_ZONE_SIZE) { 74 pfn_t pfn = ADDR2PFN(abase); 75 size_t count = SIZE2FRAMES(size); 76 77 if (low) { 78 zone_create(pfn, count, max(MINCONF, pfn), 79 ZONE_AVAILABLE | ZONE_LOWMEM); 80 } else { 81 pfn_t conf; 82 83 conf = zone_external_conf_alloc(count); 84 zone_create(pfn, count, conf, 85 ZONE_AVAILABLE | ZONE_HIGHMEM); 76 86 } 77 87 } 78 79 /* 80 * Blacklist ROM regions. 81 */ 82 frame_mark_unavailable(ADDR2PFN(ROM_BASE), 83 SIZE2FRAMES(ROM_SIZE)); 88 } 89 } 84 90 85 frame_mark_unavailable(ADDR2PFN(KERNEL_RESERVED_AREA_BASE), 86 SIZE2FRAMES(KERNEL_RESERVED_AREA_SIZE)); 87 } 91 void frame_low_arch_init(void) 92 { 93 if (config.cpu_active > 1) 94 return; 95 96 frame_common_arch_init(true); 97 98 /* 99 * Blacklist ROM regions. 
100 */ 101 frame_mark_unavailable(ADDR2PFN(ROM_BASE), 102 SIZE2FRAMES(ROM_SIZE)); 103 104 frame_mark_unavailable(ADDR2PFN(KERNEL_RESERVED_AREA_BASE), 105 SIZE2FRAMES(KERNEL_RESERVED_AREA_SIZE)); 106 107 /* PA2KA will work only on low-memory. */ 108 end_of_identity = PA2KA(config.physmem_end - FRAME_SIZE) + PAGE_SIZE; 109 } 110 111 void frame_high_arch_init(void) 112 { 113 if (config.cpu_active > 1) 114 return; 115 116 frame_common_arch_init(false); 88 117 } 89 118 -
kernel/arch/ia64/src/mm/page.c
r852052d rc520034 255 255 } 256 256 257 uintptr_t hw_map(uintptr_t physaddr, size_t size __attribute__ ((unused)))258 {259 /* THIS is a dirty hack. */260 return (uintptr_t)((uint64_t)(PA2KA(physaddr)) + VIO_OFFSET);261 }262 263 257 /** @} 264 258 */ -
kernel/arch/ia64/src/mm/tlb.c
r852052d rc520034 52 52 #include <arch.h> 53 53 #include <interrupt.h> 54 55 #define IO_FRAME_BASE 0xFFFFC000000 54 #include <arch/legacyio.h> 56 55 57 56 /** Invalidate all TLB entries. */ … … 467 466 } 468 467 468 static bool is_kernel_fault(uintptr_t va) 469 { 470 region_register_t rr; 471 472 rr.word = rr_read(VA2VRN(va)); 473 rid_t rid = rr.map.rid; 474 return (RID2ASID(rid) == ASID_KERNEL) && (VA2VRN(va) == VRN_KERNEL); 475 } 476 469 477 /** Instruction TLB fault handler for faults with VHPT turned off. 470 478 * … … 480 488 va = istate->cr_ifa; /* faulting address */ 481 489 482 page_table_lock(AS, true); 490 ASSERT(!is_kernel_fault(va)); 491 483 492 t = page_mapping_find(AS, va, true); 484 493 if (t) { … … 488 497 */ 489 498 itc_pte_copy(t); 490 page_table_unlock(AS, true);491 499 } else { 492 500 /* 493 501 * Forward the page fault to address space page fault handler. 494 502 */ 495 page_table_unlock(AS, true);496 503 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) { 497 504 fault_if_from_uspace(istate, "Page fault at %p.", … … 522 529 static int try_memmap_io_insertion(uintptr_t va, istate_t *istate) 523 530 { 524 if ((va >= IO_OFFSET ) && (va < IO_OFFSET + (1 <<IO_PAGE_WIDTH))) {531 if ((va >= LEGACYIO_USER_BASE) && (va < LEGACYIO_USER_BASE + (1 << LEGACYIO_PAGE_WIDTH))) { 525 532 if (TASK) { 526 uint64_t io_page = (va & ((1 << IO_PAGE_WIDTH) - 1)) >>527 USPACE_IO_PAGE_WIDTH;533 uint64_t io_page = (va & ((1 << LEGACYIO_PAGE_WIDTH) - 1)) >> 534 LEGACYIO_SINGLE_PAGE_WIDTH; 528 535 529 536 if (is_io_page_accessible(io_page)) { 530 537 uint64_t page, frame; 531 538 532 page = IO_OFFSET+533 (1 << USPACE_IO_PAGE_WIDTH) * io_page;534 frame = IO_FRAME_BASE +535 (1 << USPACE_IO_PAGE_WIDTH) * io_page;539 page = LEGACYIO_USER_BASE + 540 (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page; 541 frame = LEGACYIO_PHYS_BASE + 542 (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page; 536 543 537 544 tlb_entry_t entry; … … 547 554 entry.ar = AR_READ | AR_WRITE; 548 555 
entry.ppn = frame >> PPN_SHIFT; 549 entry.ps = USPACE_IO_PAGE_WIDTH;556 entry.ps = LEGACYIO_SINGLE_PAGE_WIDTH; 550 557 551 558 dtc_mapping_insert(page, TASK->as->asid, entry); … … 570 577 { 571 578 if (istate->cr_isr.sp) { 572 /* Speculative load. Deffer the exception573 until a more clever approach can be used.574 575 Currently if we try to find the mapping576 for the speculative load while in the kernel,577 we might introduce a livelock because of578 the possibly invalid values of the address.*/579 /* 580 * Speculative load. Deffer the exception until a more clever 581 * approach can be used. Currently if we try to find the 582 * mapping for the speculative load while in the kernel, we 583 * might introduce a livelock because of the possibly invalid 584 * values of the address. 585 */ 579 586 istate->cr_ipsr.ed = true; 580 587 return; … … 582 589 583 590 uintptr_t va = istate->cr_ifa; /* faulting address */ 584 585 region_register_t rr; 586 rr.word = rr_read(VA2VRN(va)); 587 rid_t rid = rr.map.rid; 588 if (RID2ASID(rid) == ASID_KERNEL) { 589 if (VA2VRN(va) == VRN_KERNEL) { 591 as_t *as = AS; 592 593 if (is_kernel_fault(va)) { 594 if (va < end_of_identity) { 590 595 /* 591 * Provide KA2PA(identity) mapping for faulting piece of 592 * kernel address space. 596 * Create kernel identity mapping for low memory. 
593 597 */ 594 598 dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0); 595 599 return; 600 } else { 601 as = AS_KERNEL; 596 602 } 597 603 } 598 604 599 605 600 page_table_lock(AS, true); 601 pte_t *entry = page_mapping_find(AS, va, true); 606 pte_t *entry = page_mapping_find(as, va, true); 602 607 if (entry) { 603 608 /* … … 606 611 */ 607 612 dtc_pte_copy(entry); 608 page_table_unlock(AS, true);609 613 } else { 610 page_table_unlock(AS, true);611 614 if (try_memmap_io_insertion(va, istate)) 612 615 return; … … 647 650 uintptr_t va; 648 651 pte_t *t; 652 as_t *as = AS; 649 653 650 654 va = istate->cr_ifa; /* faulting address */ 651 655 652 page_table_lock(AS, true); 653 t = page_mapping_find(AS, va, true); 656 if (is_kernel_fault(va)) 657 as = AS_KERNEL; 658 659 t = page_mapping_find(as, va, true); 654 660 ASSERT((t) && (t->p)); 655 661 if ((t) && (t->p) && (t->w)) { … … 667 673 } 668 674 } 669 page_table_unlock(AS, true);670 675 } 671 676 … … 682 687 683 688 va = istate->cr_ifa; /* faulting address */ 684 685 page_table_lock(AS, true); 689 690 ASSERT(!is_kernel_fault(va)); 691 686 692 t = page_mapping_find(AS, va, true); 687 693 ASSERT((t) && (t->p)); … … 700 706 } 701 707 } 702 page_table_unlock(AS, true);703 708 } 704 709 … … 713 718 uintptr_t va; 714 719 pte_t *t; 720 as_t *as = AS; 715 721 716 722 va = istate->cr_ifa; /* faulting address */ 717 723 718 page_table_lock(AS, true); 719 t = page_mapping_find(AS, va, true); 724 if (is_kernel_fault(va)) 725 as = AS_KERNEL; 726 727 t = page_mapping_find(as, va, true); 720 728 ASSERT((t) && (t->p)); 721 729 if ((t) && (t->p)) { … … 733 741 } 734 742 } 735 page_table_unlock(AS, true);736 743 } 737 744 … … 748 755 749 756 va = istate->cr_ifa; /* faulting address */ 757 758 ASSERT(!is_kernel_fault(va)); 750 759 751 760 /* 752 761 * Assume a write to a read-only page. 
753 762 */ 754 page_table_lock(AS, true);755 763 t = page_mapping_find(AS, va, true); 756 764 ASSERT((t) && (t->p)); … … 761 769 panic_memtrap(istate, PF_ACCESS_WRITE, va, NULL); 762 770 } 763 page_table_unlock(AS, true);764 771 } 765 772 … … 777 784 va = istate->cr_ifa; /* faulting address */ 778 785 779 page_table_lock(AS, true); 786 ASSERT(!is_kernel_fault(va)); 787 780 788 t = page_mapping_find(AS, va, true); 781 789 ASSERT(t); … … 790 798 else 791 799 dtc_pte_copy(t); 792 page_table_unlock(AS, true);793 800 } else { 794 page_table_unlock(AS, true);795 801 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { 796 802 fault_if_from_uspace(istate, "Page fault at %p.", -
kernel/arch/ia64/src/start.S
r852052d rc520034 38 38 #define KERNEL_TRANSLATION_I 0x0010000000000661 39 39 #define KERNEL_TRANSLATION_D 0x0010000000000661 40 #define KERNEL_TRANSLATION_VIO 0x001000000000067141 #define KERNEL_TRANSLATION_IO 0x00100FFFFC00067142 #define KERNEL_TRANSLATION_FW 0x00100000F000067143 40 44 41 .section K_TEXT_START, "ax" … … 88 85 itr.d dtr[r0] = r10 89 86 90 movl r7 = 1 91 movl r8 = (VRN_KERNEL << VRN_SHIFT) | VIO_OFFSET 92 mov cr.ifa = r8 93 movl r10 = (KERNEL_TRANSLATION_VIO) 94 itr.d dtr[r7] = r10 95 96 mov r11 = cr.itir 97 movl r10 = ~0xfc 98 and r10 = r10, r11 99 movl r11 = (IO_PAGE_WIDTH << PS_SHIFT) 100 or r10 = r10, r11 101 mov cr.itir = r10 102 103 movl r7 = 2 104 movl r8 = (VRN_KERNEL << VRN_SHIFT) | IO_OFFSET 105 mov cr.ifa = r8 106 movl r10 = (KERNEL_TRANSLATION_IO) 107 itr.d dtr[r7] = r10 108 109 # Setup mapping for firmware area (also SAPIC) 110 111 mov r11 = cr.itir 112 movl r10 = ~0xfc 113 and r10 = r10, r11 114 movl r11 = (FW_PAGE_WIDTH << PS_SHIFT) 115 or r10 = r10, r11 116 mov cr.itir = r10 117 118 movl r7 = 3 119 movl r8 = (VRN_KERNEL << VRN_SHIFT) | FW_OFFSET 120 mov cr.ifa = r8 121 movl r10 = (KERNEL_TRANSLATION_FW) 122 itr.d dtr[r7] = r10 123 124 # Initialize DSR 87 # Initialize DCR 125 88 126 89 movl r10 = (DCR_DP_MASK | DCR_DK_MASK | DCR_DX_MASK | DCR_DR_MASK | DCR_DA_MASK | DCR_DD_MASK | DCR_LC_MASK)
Note:
See TracChangeset for help on using the changeset viewer.