Changeset 2bf4936 in mainline
- Timestamp: 2009-04-14T15:50:56Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 47a6708
- Parents: 577b531
- Location: kernel/arch/sparc64
- Files: 6 edited
Legend:
- Unmodified: prefixed with a space
- Added: prefixed with +
- Removed: prefixed with -
kernel/arch/sparc64/include/mm/frame.h
r577b531 → r2bf4936:

 
 extern uintptr_t last_frame;
+extern uintptr_t end_of_identity;
+
 extern void frame_arch_init(void);
 #define physmem_print()
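The only functional change here is the new end_of_identity declaration. A minimal, self-contained C sketch of what the variable expresses (the value and the helper below are made up for illustration; they are not part of the changeset):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Normally set in frame_arch_init(); this value is fabricated. */
    static uintptr_t end_of_identity = 0x40000000;

    /* A kernel virtual address is identity-mapped iff it lies below
     * end_of_identity. */
    static bool within_identity(uintptr_t va)
    {
        return va < end_of_identity;
    }

    int main(void)
    {
        printf("%d\n", within_identity(0x1000));      /* 1: RAM */
        printf("%d\n", within_identity(0x80000000));  /* 0: I/O space */
        return 0;
    }

In the kernel proper, the check lives in the DTLB miss fast path (see mmu.h below) rather than in C.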
kernel/arch/sparc64/include/trap/mmu.h
r577b531 → r2bf4936:

  */
 0:
-	mov VA_DMMU_TAG_ACCESS, %g1
-	ldxa [%g1] ASI_DMMU, %g1		! read the faulting Context and VPN
+	sethi %hi(fast_data_access_mmu_miss_data_hi), %g7
+	wr %g0, ASI_DMMU, %asi
+	ldxa [VA_DMMU_TAG_ACCESS] %asi, %g1	! read the faulting Context and VPN
 	set TLB_TAG_ACCESS_CONTEXT_MASK, %g2
 	andcc %g1, %g2, %g3			! get Context
-	bnz 0f					! Context is non-zero
+	bnz %xcc, 0f				! Context is non-zero
 	andncc %g1, %g2, %g3			! get page address into %g3
-	bz 0f					! page address is zero
+	bz %xcc, 0f				! page address is zero
+	ldx [%g7 + %lo(end_of_identity)], %g4
+	cmp %g3, %g4
+	bgeu %xcc, 0f
 
-	sethi %hi(kernel_8k_tlb_data_template), %g2
-	ldx [%g2 + %lo(kernel_8k_tlb_data_template)], %g2
+	ldx [%g7 + %lo(kernel_8k_tlb_data_template)], %g2
 	or %g3, %g2, %g2
 	stxa %g2, [%g0] ASI_DTLB_DATA_IN_REG	! identity map the kernel page
…
 	 * This is necessary to survive nested DTLB misses.
 	 */
-	mov VA_DMMU_TAG_ACCESS, %g2
-	ldxa [%g2] ASI_DMMU, %g2
+	ldxa [VA_DMMU_TAG_ACCESS] %asi, %g2
 
 	/*
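A hedged C rendering of the updated fast path above: read the tag, bail out to the slow path on a non-zero context, a zero page address, or a page at or beyond end_of_identity, and otherwise install an identity TLB entry from the template. All values and stubs below are invented for the sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Fabricated stand-ins for the handler's state. */
    static uintptr_t end_of_identity = 0x40000000;
    static uint64_t kernel_8k_tlb_data_template = 0x8000000000000076ULL;

    static void dtlb_data_in(uint64_t data)   /* stxa ... ASI_DTLB_DATA_IN_REG */
    {
        printf("install TLB entry %#llx\n", (unsigned long long) data);
    }

    /* C rendering of the hot path: one extra bounds check. */
    static void fast_dtlb_miss(uint64_t tag_access, uint64_t context_mask)
    {
        uint64_t context = tag_access & context_mask;
        uint64_t page = tag_access & ~context_mask;

        if (context != 0 || page == 0 || page >= end_of_identity) {
            puts("slow path");   /* branch to 0f in the real code */
            return;
        }
        /* Identity-map the kernel page using the preloaded template. */
        dtlb_data_in(page | kernel_8k_tlb_data_template);
    }

    int main(void)
    {
        fast_dtlb_miss(0x2000, 0x1fff);       /* kernel RAM: fast path */
        fast_dtlb_miss(0x80000000, 0x1fff);   /* beyond identity: slow path */
        return 0;
    }

The single sethi of fast_data_access_mmu_miss_data_hi into %g7 is what lets both later ldx instructions reach end_of_identity and kernel_8k_tlb_data_template via %lo() offsets (see start.S below).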
kernel/arch/sparc64/src/mm/frame.c
r577b531 → r2bf4936:

 		frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);
 	}
+
+	end_of_identity = PA2KA(last_frame);
 }
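The assignment runs once, after every physical memory zone has been registered and last_frame has reached its final value. A toy C model of that ordering (zone layout, addresses, and the registration helper are fabricated):

    #include <stdint.h>
    #include <stdio.h>

    /* On sparc64 the kernel is identity-mapped, so PA2KA is (conceptually)
     * a plain cast; this mirrors that assumption. */
    #define PA2KA(pa) ((uintptr_t) (pa))

    static uintptr_t last_frame = 0;
    static uintptr_t end_of_identity = 0;

    /* Hypothetical zone registration: advances last_frame past each zone. */
    static void register_zone(uintptr_t base, uintptr_t size)
    {
        if (base + size > last_frame)
            last_frame = base + size;
    }

    int main(void)
    {
        /* Two made-up physical memory zones. */
        register_zone(0x00000000, 0x10000000);
        register_zone(0x20000000, 0x10000000);

        /* Done exactly once, after all zones are known (the new line). */
        end_of_identity = PA2KA(last_frame);
        printf("end_of_identity = %#lx\n", (unsigned long) end_of_identity);
        return 0;
    }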
kernel/arch/sparc64/src/mm/page.c
r577b531 → r2bf4936:

 uintptr_t hw_map(uintptr_t physaddr, size_t size)
 {
-	if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
-		panic("Unable to map physical memory %p (%d bytes).", physaddr, size);
-
-	uintptr_t virtaddr = PA2KA(last_frame);
-	pfn_t i;
-	for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++) {
-		uintptr_t addr = PFN2ADDR(i);
-		page_mapping_insert(AS_KERNEL, virtaddr + addr, physaddr + addr, PAGE_NOT_CACHEABLE | PAGE_WRITE);
-	}
-
-	last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
-
-	return virtaddr;
+	return KA2PA(physaddr);
 }
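With physical memory and the I/O space identity-mapped, hw_map() no longer needs to allocate virtual pages or insert mappings; assuming KA2PA/PA2KA are plain casts on sparc64 (the kernel address space is identity-mapped), it reduces to returning its argument. A sketch of the resulting behavior, with a fabricated device address:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumption: KA2PA is a plain cast on sparc64, so hw_map() degenerates
     * to an identity translation, mirroring the new body above. */
    #define KA2PA(ka) ((uintptr_t) (ka))

    static uintptr_t hw_map(uintptr_t physaddr, size_t size)
    {
        (void) size;   /* nothing is allocated or mapped eagerly anymore */
        return KA2PA(physaddr);
    }

    int main(void)
    {
        /* Made-up device address, purely for illustration. */
        uintptr_t io_va = hw_map(0x80001000, 0x2000);
        printf("device visible at %#lx\n", (unsigned long) io_va);
        return 0;
    }

The actual TLB entry for such an address is created lazily, and uncached, by fast_data_access_mmu_miss() in tlb.c below.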
kernel/arch/sparc64/src/mm/tlb.c
r577b531 → r2bf4936:

 void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
 {
-	uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
+	uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
 	index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
 	pte_t *t;
 
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, va);
+	t = page_mapping_find(AS, page_16k);
 	if (t && PTE_EXECUTABLE(t)) {
…
 		 */
 		page_table_unlock(AS, true);
-		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
+		if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) ==
+		    AS_PF_FAULT) {
 			do_fast_instruction_access_mmu_miss_fault(istate,
 			    __func__);
…
 void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
 {
-	uintptr_t va;
+	uintptr_t page_8k;
+	uintptr_t page_16k;
 	index_t index;
 	pte_t *t;
 
-	va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
+	page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
+	page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
 	index = tag.vpn % MMU_PAGES_PER_PAGE;
…
 			do_fast_data_access_mmu_miss_fault(istate, tag,
 			    __func__);
+		} else if (page_8k >= end_of_identity) {
+			/*
+			 * The kernel is accessing the I/O space.
+			 * We still do identity mapping for I/O,
+			 * but without caching.
+			 */
+			dtlb_insert_mapping(page_8k, KA2PA(page_8k),
+			    PAGESIZE_8K, false, false);
+			return;
 		}
 		do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
…
 
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, va);
+	t = page_mapping_find(AS, page_16k);
 	if (t) {
…
 		 */
 		page_table_unlock(AS, true);
-		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
+		if (as_page_fault(page_16k, PF_ACCESS_READ, istate) ==
+		    AS_PF_FAULT) {
 			do_fast_data_access_mmu_miss_fault(istate, tag,
 			    __func__);
…
 void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
 {
-	uintptr_t va;
+	uintptr_t page_16k;
 	index_t index;
 	pte_t *t;
 
-	va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
+	page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
 	index = tag.vpn % MMU_PAGES_PER_PAGE;	/* 16K-page emulation */
 
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, va);
+	t = page_mapping_find(AS, page_16k);
 	if (t && PTE_WRITABLE(t)) {
…
 		t->d = true;
 		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
-		    va + index * MMU_PAGE_SIZE);
+		    page_16k + index * MMU_PAGE_SIZE);
 		dtlb_pte_copy(t, index, false);
 #ifdef CONFIG_TSB
…
 		 */
 		page_table_unlock(AS, true);
-		if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
+		if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) ==
+		    AS_PF_FAULT) {
 			do_fast_data_access_protection_fault(istate, tag,
 			    __func__);
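Beyond the va → page_8k/page_16k renames that make the 16K-page emulation explicit, the interesting new case is the else-if branch: a kernel access at or beyond end_of_identity is treated as I/O and given a lazy, uncached 8K identity mapping instead of a fault. A standalone C sketch of roughly that decision (values and stubs are invented; the real handler works on the tag access register):

    #include <stdint.h>
    #include <stdio.h>

    static uintptr_t end_of_identity = 0x40000000;   /* made-up value */

    /* Stand-in for dtlb_insert_mapping(..., PAGESIZE_8K, false, false). */
    static void insert_uncached_identity(uintptr_t page_8k)
    {
        printf("identity map %#lx, uncached\n", (unsigned long) page_8k);
    }

    /* Rough shape of the kernel-context branch of the updated handler. */
    static void kernel_dtlb_miss(uintptr_t page_8k)
    {
        if (page_8k == 0) {
            puts("fault: NULL access in kernel");
        } else if (page_8k >= end_of_identity) {
            /* Kernel touches the I/O space: identity, but uncached. */
            insert_uncached_identity(page_8k);
        } else {
            puts("fault: unexpected kernel page fault");
        }
    }

    int main(void)
    {
        kernel_dtlb_miss(0x80000000);   /* I/O: lazy uncached mapping */
        kernel_dtlb_miss(0x1000);       /* RAM should never miss here */
        return 0;
    }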
kernel/arch/sparc64/src/start.S
r577b531 r2bf4936 85 85 sllx %l5, 13 + (63 - (PHYSMEM_ADDR_SIZE - 1)), %l5 86 86 srlx %l5, 63 - (PHYSMEM_ADDR_SIZE - 1), %l5 87 87 88 88 /* 89 89 * Setup basic runtime environment. … … 334 334 ldx [%g2], %g3 335 335 cmp %g3, %g1 336 bne 2b336 bne %xcc, 2b 337 337 nop 338 338 … … 382 382 383 383 /* 384 * This variable is used by the fast_data_MMU_miss trap handler. In runtime, it 385 * is further modified to reflect the starting address of physical memory. 386 */ 384 * The fast_data_access_mmu_miss_data_hi label and the end_of_identity and 385 * kernel_8k_tlb_data_template variables are meant to stay together, 386 * aligned on 16B boundary. 387 */ 388 .global fast_data_access_mmu_miss_data_hi 389 .global end_of_identity 387 390 .global kernel_8k_tlb_data_template 391 392 .align 16 393 /* 394 * This label is used by the fast_data_access_MMU_miss trap handler. 395 */ 396 fast_data_access_mmu_miss_data_hi: 397 /* 398 * This variable is used by the fast_data_access_MMU_miss trap handler. 399 * In runtime, it is modified to contain the address of the end of physical 400 * memory. 401 */ 402 end_of_identity: 403 .quad -1 404 /* 405 * This variable is used by the fast_data_access_MMU_miss trap handler. 406 * In runtime, it is further modified to reflect the starting address of 407 * physical memory. 408 */ 388 409 kernel_8k_tlb_data_template: 389 410 #ifdef CONFIG_VIRT_IDX_DCACHE