Changeset 2bf4936 in mainline for kernel/arch/sparc64/src/mm/tlb.c
Timestamp: 2009-04-14T15:50:56Z
Branches:  lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:  47a6708
Parents:   577b531
File:      1 edited
kernel/arch/sparc64/src/mm/tlb.c
--- kernel/arch/sparc64/src/mm/tlb.c	(r577b531)
+++ kernel/arch/sparc64/src/mm/tlb.c	(r2bf4936)
@@ -200,10 +200,10 @@
 void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
 {
-	uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
+	uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
 	index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
 	pte_t *t;
 
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, va);
+	t = page_mapping_find(AS, page_16k);
 	if (t && PTE_EXECUTABLE(t)) {
 		/*
@@ -223,5 +223,6 @@
 		 */
 		page_table_unlock(AS, true);
-		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
+		if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) ==
+		    AS_PF_FAULT) {
 			do_fast_instruction_access_mmu_miss_fault(istate,
 			    __func__);
@@ -243,9 +244,11 @@
 void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
 {
-	uintptr_t va;
+	uintptr_t page_8k;
+	uintptr_t page_16k;
 	index_t index;
 	pte_t *t;
 
-	va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
+	page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
+	page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
 	index = tag.vpn % MMU_PAGES_PER_PAGE;
 
@@ -255,4 +258,13 @@
 			do_fast_data_access_mmu_miss_fault(istate, tag,
 			    __func__);
+		} else if (page_8k >= end_of_identity) {
+			/*
+			 * The kernel is accessing the I/O space.
+			 * We still do identity mapping for I/O,
+			 * but without caching.
+			 */
+			dtlb_insert_mapping(page_8k, KA2PA(page_8k),
+			    PAGESIZE_8K, false, false);
+			return;
 		}
 		do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
@@ -261,5 +273,5 @@
 
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, va);
+	t = page_mapping_find(AS, page_16k);
 	if (t) {
 		/*
@@ -279,5 +291,6 @@
 		 */
 		page_table_unlock(AS, true);
-		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
+		if (as_page_fault(page_16k, PF_ACCESS_READ, istate) ==
+		    AS_PF_FAULT) {
 			do_fast_data_access_mmu_miss_fault(istate, tag,
 			    __func__);
@@ -296,13 +309,13 @@
 void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
 {
-	uintptr_t va;
+	uintptr_t page_16k;
 	index_t index;
 	pte_t *t;
 
-	va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
+	page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
 	index = tag.vpn % MMU_PAGES_PER_PAGE;	/* 16K-page emulation */
 
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, va);
+	t = page_mapping_find(AS, page_16k);
 	if (t && PTE_WRITABLE(t)) {
 		/*
@@ -314,5 +327,5 @@
 		t->d = true;
 		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
-		    va + index * MMU_PAGE_SIZE);
+		    page_16k + index * MMU_PAGE_SIZE);
 		dtlb_pte_copy(t, index, false);
 #ifdef CONFIG_TSB
@@ -326,5 +339,6 @@
 		 */
 		page_table_unlock(AS, true);
-		if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
+		if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) ==
+		    AS_PF_FAULT) {
 			do_fast_data_access_protection_fault(istate, tag,
 			    __func__);
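Background note (not part of the changeset): the sparc64 MMU handles 8 KiB hardware pages while the kernel's page tables use 16 KiB pages, so the miss handlers above align the faulting address down to a 16 KiB boundary (page_16k) and compute which of the two 8 KiB sub-pages (index) actually faulted; the new page_8k value keeps the raw 8 KiB MMU page so that kernel accesses at or beyond end_of_identity can be identity-mapped uncached as I/O space. The user-space sketch below only illustrates that arithmetic; the constants, the sample address, and the end_of_identity value are assumptions chosen for the example, not values taken from the kernel.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed layout mirroring the sparc64 port: 8 KiB MMU pages, 16 KiB kernel pages. */
#define MMU_PAGE_WIDTH      13
#define MMU_PAGE_SIZE       (UINT64_C(1) << MMU_PAGE_WIDTH)
#define PAGE_WIDTH          14
#define PAGE_SIZE           (UINT64_C(1) << PAGE_WIDTH)
#define MMU_PAGES_PER_PAGE  (PAGE_SIZE / MMU_PAGE_SIZE)

#define ALIGN_DOWN(addr, align)  ((addr) & ~((uint64_t) (align) - 1))

int main(void)
{
	/* Hypothetical values, picked only to exercise the arithmetic. */
	uint64_t fault_va = UINT64_C(0x4000a345);        /* faulting virtual address */
	uint64_t end_of_identity = UINT64_C(0x80000000); /* assumed start of the uncached I/O window */

	/* 8 KiB MMU page of the fault (the handlers derive it from the TLB tag's VPN). */
	uint64_t page_8k = ALIGN_DOWN(fault_va, MMU_PAGE_SIZE);

	/* 16 KiB kernel page that the software page tables are keyed by. */
	uint64_t page_16k = ALIGN_DOWN(fault_va, PAGE_SIZE);

	/* Which of the two 8 KiB sub-pages inside the 16 KiB page faulted. */
	uint64_t index = (fault_va >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;

	/* The 8 KiB MMU page that must actually be demapped/inserted in the DTLB. */
	uint64_t mmu_page = page_16k + index * MMU_PAGE_SIZE;

	/* Kernel accesses past the identity-mapped RAM are treated as uncached I/O. */
	bool is_io = (page_8k >= end_of_identity);

	printf("page_8k=%#llx page_16k=%#llx index=%llu mmu_page=%#llx io=%d\n",
	    (unsigned long long) page_8k, (unsigned long long) page_16k,
	    (unsigned long long) index, (unsigned long long) mmu_page, is_io);
	return 0;
}

For the sample address 0x4000a345 the sketch prints page_8k=0x4000a000, page_16k=0x40008000, index=1 and mmu_page=0x4000a000, i.e. the fault lies in the second 8 KiB MMU page of its 16 KiB kernel page and is below the assumed I/O boundary.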