Changeset 00aece0 in mainline for kernel/arch/sparc64
- Timestamp:
- 2012-02-18T16:47:38Z (14 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 4449c6c
- Parents:
- bd5f3b7 (diff), f943dd3 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Location:
- kernel/arch/sparc64
- Files:
-
- 5 added
- 12 edited
-
Makefile.inc (modified) (1 diff)
-
include/mm/km.h (added)
-
include/mm/sun4u/frame.h (modified) (1 diff)
-
include/mm/sun4u/km.h (added)
-
include/mm/sun4v/frame.h (modified) (1 diff)
-
include/mm/sun4v/km.h (added)
-
src/drivers/kbd.c (modified) (3 diffs)
-
src/drivers/pci.c (modified) (3 diffs)
-
src/mm/page.c (modified) (1 diff)
-
src/mm/sun4u/frame.c (modified) (1 diff)
-
src/mm/sun4u/km.c (added)
-
src/mm/sun4u/tlb.c (modified) (9 diffs)
-
src/mm/sun4v/frame.c (modified) (1 diff)
-
src/mm/sun4v/km.c (added)
-
src/mm/sun4v/tlb.c (modified) (9 diffs)
-
src/sun4u/sparc64.c (modified) (1 diff)
-
src/sun4v/sparc64.c (modified) (1 diff)
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/sparc64/Makefile.inc
rbd5f3b7 r00aece0 68 68 arch/$(KARCH)/src/fpu_context.c \ 69 69 arch/$(KARCH)/src/dummy.s \ 70 arch/$(KARCH)/src/mm/$(USARCH)/km.c \ 70 71 arch/$(KARCH)/src/mm/$(USARCH)/as.c \ 71 72 arch/$(KARCH)/src/mm/$(USARCH)/frame.c \ -
kernel/arch/sparc64/include/mm/sun4u/frame.h
rbd5f3b7 r00aece0 72 72 typedef union frame_address frame_address_t; 73 73 74 extern uintptr_t last_frame;75 74 extern uintptr_t end_of_identity; 76 75 77 extern void frame_arch_init(void); 76 extern void frame_low_arch_init(void); 77 extern void frame_high_arch_init(void); 78 78 #define physmem_print() 79 79 -
kernel/arch/sparc64/include/mm/sun4v/frame.h
rbd5f3b7 r00aece0 46 46 #include <typedefs.h> 47 47 48 extern uintptr_t last_frame;49 extern void frame_ arch_init(void);48 extern void frame_low_arch_init(void); 49 extern void frame_high_arch_init(void); 50 50 #define physmem_print() 51 51 -
kernel/arch/sparc64/src/drivers/kbd.c
rbd5f3b7 r00aece0 40 40 #include <mm/page.h> 41 41 #include <arch/mm/page.h> 42 #include <mm/km.h> 42 43 #include <typedefs.h> 43 44 #include <align.h> … … 113 114 size_t offset = pa - aligned_addr; 114 115 115 ns16550_t *ns16550 = (ns16550_t *) 116 (hw_map(aligned_addr, offset + size) + offset);116 ns16550_t *ns16550 = (ns16550_t *) (km_map(aligned_addr, offset + size, 117 PAGE_WRITE | PAGE_NOT_CACHEABLE) + offset); 117 118 118 119 ns16550_instance_t *ns16550_instance = ns16550_init(ns16550, inr, cir, cir_arg); … … 132 133 sysinfo_set_item_val("kbd", NULL, true); 133 134 sysinfo_set_item_val("kbd.inr", NULL, inr); 134 sysinfo_set_item_val("kbd.address.kernel", NULL,135 (uintptr_t) ns16550);136 135 sysinfo_set_item_val("kbd.address.physical", NULL, pa); 137 136 sysinfo_set_item_val("kbd.type.ns16550", NULL, true); -
kernel/arch/sparc64/src/drivers/pci.c
rbd5f3b7 r00aece0 39 39 #include <genarch/ofw/upa.h> 40 40 #include <arch/trap/interrupt.h> 41 #include <mm/ page.h>41 #include <mm/km.h> 42 42 #include <mm/slab.h> 43 43 #include <typedefs.h> … … 109 109 pci->model = PCI_SABRE; 110 110 pci->op = &pci_sabre_ops; 111 pci->reg = (uint64_t *) hw_map(paddr, reg[SABRE_INTERNAL_REG].size); 111 pci->reg = (uint64_t *) km_map(paddr, reg[SABRE_INTERNAL_REG].size, 112 PAGE_WRITE | PAGE_NOT_CACHEABLE); 112 113 113 114 /* … … 156 157 pci->model = PCI_PSYCHO; 157 158 pci->op = &pci_psycho_ops; 158 pci->reg = (uint64_t *) hw_map(paddr, reg[PSYCHO_INTERNAL_REG].size); 159 pci->reg = (uint64_t *) km_map(paddr, reg[PSYCHO_INTERNAL_REG].size, 160 PAGE_WRITE | PAGE_NOT_CACHEABLE); 159 161 160 162 /* -
kernel/arch/sparc64/src/mm/page.c
rbd5f3b7 r00aece0 51 51 } 52 52 53 /** Map memory-mapped device into virtual memory.54 *55 * We are currently using identity mapping for mapping device registers.56 *57 * @param physaddr Physical address of the page where the device is58 * located.59 * @param size Size of the device's registers.60 *61 * @return Virtual address of the page where the device is mapped.62 *63 */64 uintptr_t hw_map(uintptr_t physaddr, size_t size)65 {66 return PA2KA(physaddr);67 }68 69 53 /** @} 70 54 */ -
kernel/arch/sparc64/src/mm/sun4u/frame.c
rbd5f3b7 r00aece0 41 41 #include <macros.h> 42 42 43 uintptr_t last_frame = (uintptr_t) NULL;44 45 43 /** Create memory zones according to information stored in memmap. 46 44 * 47 45 * Walk the memory map and create frame zones according to it. 48 46 */ 49 void frame_arch_init(void)47 static void frame_common_arch_init(bool low) 50 48 { 51 if (config.cpu_active == 1) { 52 unsigned int i; 49 unsigned int i; 50 51 for (i = 0; i < memmap.cnt; i++) { 52 uintptr_t base; 53 size_t size; 54 55 /* 56 * The memmap is created by HelenOS boot loader. 57 * It already contains no holes. 58 */ 59 60 /* To be safe, make the available zone possibly smaller */ 61 base = ALIGN_UP((uintptr_t) memmap.zones[i].start, FRAME_SIZE); 62 size = ALIGN_DOWN(memmap.zones[i].size - 63 (base - ((uintptr_t) memmap.zones[i].start)), FRAME_SIZE); 53 64 54 for (i = 0; i < memmap.cnt; i++) { 55 /* To be safe, make the available zone possibly smaller */ 56 uintptr_t new_start = ALIGN_UP((uintptr_t) memmap.zones[i].start, 57 FRAME_SIZE); 58 size_t new_size = ALIGN_DOWN(memmap.zones[i].size - 59 (new_start - ((uintptr_t) memmap.zones[i].start)), FRAME_SIZE); 60 61 /* 62 * The memmap is created by HelenOS boot loader. 63 * It already contains no holes. 
64 */ 65 66 pfn_t confdata = ADDR2PFN(new_start); 67 65 if (!frame_adjust_zone_bounds(low, &base, &size)) 66 continue; 67 68 pfn_t confdata; 69 pfn_t pfn = ADDR2PFN(base); 70 size_t count = SIZE2FRAMES(size); 71 72 if (low) { 73 confdata = pfn; 68 74 if (confdata == ADDR2PFN(KA2PA(PFN2ADDR(0)))) 69 75 confdata = ADDR2PFN(KA2PA(PFN2ADDR(2))); 70 76 71 zone_create(ADDR2PFN(new_start), SIZE2FRAMES(new_size), 72 confdata, 0); 73 74 last_frame = max(last_frame, new_start + new_size); 77 zone_create(pfn, count, confdata, 78 ZONE_AVAILABLE | ZONE_LOWMEM); 79 } else { 80 confdata = zone_external_conf_alloc(count); 81 if (confdata != 0) 82 zone_create(pfn, count, confdata, 83 ZONE_AVAILABLE | ZONE_HIGHMEM); 75 84 } 76 77 /*78 * On sparc64, physical memory can start on a non-zero address.79 * The generic frame_init() only marks PFN 0 as not free, so we80 * must mark the physically first frame not free explicitly81 * here, no matter what is its address.82 */83 frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);84 85 } 86 } 87 88 void frame_low_arch_init(void) 89 { 90 if (config.cpu_active > 1) 91 return; 85 92 86 end_of_identity = PA2KA(last_frame); 93 frame_common_arch_init(true); 94 95 /* 96 * On sparc64, physical memory can start on a non-zero address. 97 * The generic frame_init() only marks PFN 0 as not free, so we 98 * must mark the physically first frame not free explicitly 99 * here, no matter what is its address. 100 */ 101 frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1); 102 103 /* PA2KA will work only on low-memory. */ 104 end_of_identity = PA2KA(config.physmem_end - FRAME_SIZE) + PAGE_SIZE; 105 } 106 107 void frame_high_arch_init(void) 108 { 109 if (config.cpu_active > 1) 110 return; 111 112 frame_common_arch_init(false); 87 113 } 88 114 -
kernel/arch/sparc64/src/mm/sun4u/tlb.c
rbd5f3b7 r00aece0 206 206 pte_t *t; 207 207 208 page_table_lock(AS, true);209 208 t = page_mapping_find(AS, page_16k, true); 210 209 if (t && PTE_EXECUTABLE(t)) { … … 218 217 itsb_pte_copy(t, index); 219 218 #endif 220 page_table_unlock(AS, true);221 219 } else { 222 220 /* … … 224 222 * handler. 225 223 */ 226 page_table_unlock(AS, true);227 224 if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) == 228 225 AS_PF_FAULT) { … … 250 247 size_t index; 251 248 pte_t *t; 249 as_t *as = AS; 252 250 253 251 page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH; … … 261 259 "Dereferencing NULL pointer."); 262 260 } else if (page_8k >= end_of_identity) { 263 /* 264 * The kernel is accessing the I/O space. 265 * We still do identity mapping for I/O, 266 * but without caching. 267 */ 268 dtlb_insert_mapping(page_8k, KA2PA(page_8k), 269 PAGESIZE_8K, false, false); 270 return; 261 /* Kernel non-identity. */ 262 as = AS_KERNEL; 263 } else { 264 do_fast_data_access_mmu_miss_fault(istate, tag, 265 "Unexpected kernel page fault."); 271 266 } 272 do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected " 273 "kernel page fault."); 274 } 275 276 page_table_lock(AS, true); 277 t = page_mapping_find(AS, page_16k, true); 267 } 268 269 t = page_mapping_find(as, page_16k, true); 278 270 if (t) { 279 271 /* … … 286 278 dtsb_pte_copy(t, index, true); 287 279 #endif 288 page_table_unlock(AS, true);289 280 } else { 290 281 /* 291 282 * Forward the page fault to the address space page fault 292 283 * handler. 
293 */ 294 page_table_unlock(AS, true); 284 */ 295 285 if (as_page_fault(page_16k, PF_ACCESS_READ, istate) == 296 286 AS_PF_FAULT) { … … 314 304 size_t index; 315 305 pte_t *t; 306 as_t *as = AS; 316 307 317 308 page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE); 318 309 index = tag.vpn % MMU_PAGES_PER_PAGE; /* 16K-page emulation */ 319 310 320 page_table_lock(AS, true); 321 t = page_mapping_find(AS, page_16k, true); 311 if (tag.context == ASID_KERNEL) 312 as = AS_KERNEL; 313 314 t = page_mapping_find(as, page_16k, true); 322 315 if (t && PTE_WRITABLE(t)) { 323 316 /* … … 334 327 dtsb_pte_copy(t, index, false); 335 328 #endif 336 page_table_unlock(AS, true);337 329 } else { 338 330 /* … … 340 332 * handler. 341 333 */ 342 page_table_unlock(AS, true);343 334 if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) == 344 335 AS_PF_FAULT) { -
kernel/arch/sparc64/src/mm/sun4v/frame.c
rbd5f3b7 r00aece0 45 45 * Walk the memory map and create frame zones according to it. 46 46 */ 47 void frame_arch_init(void)47 static void frame_common_arch_init(bool low) 48 48 { 49 if (config.cpu_active == 1) { 50 unsigned int i; 49 unsigned int i; 51 50 52 for (i = 0; i < memmap.cnt; i++) { 53 /* To be safe, make the available zone possibly smaller */ 54 uintptr_t new_start = ALIGN_UP((uintptr_t) memmap.zones[i].start, 55 FRAME_SIZE); 56 size_t new_size = ALIGN_DOWN(memmap.zones[i].size - 57 (new_start - ((uintptr_t) memmap.zones[i].start)), FRAME_SIZE); 58 59 /* 60 * The memmap is created by HelenOS boot loader. 61 * It already contains no holes. 62 */ 63 64 pfn_t confdata = ADDR2PFN(new_start); 65 51 for (i = 0; i < memmap.cnt; i++) { 52 uintptr_t base; 53 size_t size; 54 55 /* 56 * The memmap is created by HelenOS boot loader. 57 * It already contains no holes. 58 */ 59 60 /* To be safe, make the available zone possibly smaller */ 61 base = ALIGN_UP((uintptr_t) memmap.zones[i].start, FRAME_SIZE); 62 size = ALIGN_DOWN(memmap.zones[i].size - 63 (base - ((uintptr_t) memmap.zones[i].start)), FRAME_SIZE); 64 65 if (!frame_adjust_zone_bounds(low, &base, &size)) 66 continue; 67 68 pfn_t confdata; 69 pfn_t pfn = ADDR2PFN(base); 70 size_t count = SIZE2FRAMES(size); 71 72 if (low) { 73 confdata = pfn; 66 74 if (confdata == ADDR2PFN(KA2PA(PFN2ADDR(0)))) 67 75 confdata = ADDR2PFN(KA2PA(PFN2ADDR(2))); 68 76 69 zone_create(ADDR2PFN(new_start), SIZE2FRAMES(new_size), 70 confdata, 0); 77 zone_create(pfn, count, confdata, 78 ZONE_AVAILABLE | ZONE_LOWMEM); 79 } else { 80 confdata = zone_external_conf_alloc(count); 81 if (confdata != 0) 82 zone_create(pfn, count, confdata, 83 ZONE_AVAILABLE | ZONE_HIGHMEM); 71 84 } 72 73 /*74 * On sparc64, physical memory can start on a non-zero address.75 * The generic frame_init() only marks PFN 0 as not free, so we76 * must mark the physically first frame not free explicitly77 * here, no matter what is its address.78 */79 
frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);80 85 } 86 } 87 88 89 void frame_low_arch_init(void) 90 { 91 if (config.cpu_active > 1) 92 return; 93 94 frame_common_arch_init(true); 95 96 /* 97 * On sparc64, physical memory can start on a non-zero address. 98 * The generic frame_init() only marks PFN 0 as not free, so we 99 * must mark the physically first frame not free explicitly 100 * here, no matter what is its address. 101 */ 102 frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1); 103 } 104 105 void frame_high_arch_init(void) 106 { 107 if (config.cpu_active > 1) 108 return; 109 110 frame_common_arch_init(false); 81 111 } 82 112 -
kernel/arch/sparc64/src/mm/sun4v/tlb.c
rbd5f3b7 r00aece0 218 218 pte_t *t; 219 219 220 page_table_lock(AS, true);221 220 t = page_mapping_find(AS, va, true); 222 221 … … 231 230 itsb_pte_copy(t); 232 231 #endif 233 page_table_unlock(AS, true);234 232 } else { 235 233 /* … … 237 235 * handler. 238 236 */ 239 page_table_unlock(AS, true);240 237 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) { 241 238 do_fast_instruction_access_mmu_miss_fault(istate, … … 274 271 } 275 272 276 page_table_lock(AS, true);277 273 t = page_mapping_find(AS, va, true); 278 274 if (t) { … … 286 282 dtsb_pte_copy(t, true); 287 283 #endif 288 page_table_unlock(AS, true);289 284 } else { 290 285 /* … … 292 287 * handler. 293 288 */ 294 page_table_unlock(AS, true);295 289 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { 296 290 do_fast_data_access_mmu_miss_fault(istate, page_and_ctx, … … 316 310 uint16_t ctx = DMISS_CONTEXT(page_and_ctx); 317 311 318 page_table_lock(AS, true);319 312 t = page_mapping_find(AS, va, true); 320 313 if (t && PTE_WRITABLE(t)) { … … 331 324 dtsb_pte_copy(t, false); 332 325 #endif 333 page_table_unlock(AS, true);334 326 } else { 335 327 /* … … 337 329 * handler. 338 330 */ 339 page_table_unlock(AS, true);340 331 if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) { 341 332 do_fast_data_access_protection_fault(istate, page_and_ctx, -
kernel/arch/sparc64/src/sun4u/sparc64.c
rbd5f3b7 r00aece0 62 62 size_t i; 63 63 for (i = 0; i < init.cnt; i++) { 64 init.tasks[i]. addr = (uintptr_t) bootinfo->taskmap.tasks[i].addr;64 init.tasks[i].paddr = KA2PA(bootinfo->taskmap.tasks[i].addr); 65 65 init.tasks[i].size = bootinfo->taskmap.tasks[i].size; 66 66 str_cpy(init.tasks[i].name, CONFIG_TASK_NAME_BUFLEN, -
kernel/arch/sparc64/src/sun4v/sparc64.c
rbd5f3b7 r00aece0 64 64 size_t i; 65 65 for (i = 0; i < init.cnt; i++) { 66 init.tasks[i]. addr = (uintptr_t) bootinfo->taskmap.tasks[i].addr;66 init.tasks[i].paddr = KA2PA(bootinfo->taskmap.tasks[i].addr); 67 67 init.tasks[i].size = bootinfo->taskmap.tasks[i].size; 68 68 str_cpy(init.tasks[i].name, CONFIG_TASK_NAME_BUFLEN,
Note:
See TracChangeset
for help on using the changeset viewer.
