Changeset 2057572 in mainline for kernel/arch/sparc64/src
- Timestamp: 2007-03-27T23:40:25Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 399ece9
- Parents: 8d37a06
- Location: kernel/arch/sparc64/src
- Files: 1 deleted, 7 edited, 1 moved
Legend: unchanged (context) lines are shown with both their old and new line numbers; removed lines are marked "-" and carry the old line number; added lines are marked "+" and carry the new line number.
kernel/arch/sparc64/src/cpu/cpu.c (r8d37a06 → r2057572)

   52   52      CPU->arch.mid = upa_config.mid;
   53   53
-  54       #if (defined(CONFIG_SMP) && defined(CONFIG_VIRT_IDX_DCACHE))
-  55           CPU->arch.dcache_active = 1;
-  56           CPU->arch.dcache_message_count = 0;
-  57       #endif
-  58
   59   54      /*
   60   55       * Detect processor frequency.
kernel/arch/sparc64/src/mm/as.c (r8d37a06 → r2057572)

   63   63  #ifdef CONFIG_TSB
   64   64      int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
-  65               sizeof(tsb_entry_t)) >> FRAME_WIDTH);
+  65               sizeof(tsb_entry_t)) >> MMU_FRAME_WIDTH);
   66   66      uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);
   67   67
  …
   72   72      as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
   73   73          sizeof(tsb_entry_t));
-  74           memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT)
-  75               * sizeof(tsb_entry_t), 0);
+  74           memsetb((uintptr_t) as->arch.itsb,
+  75               (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0);
   76   76  #endif
   77   77      return 0;
  …
   82   82  #ifdef CONFIG_TSB
   83   83      count_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
-  84               sizeof(tsb_entry_t)) >> FRAME_WIDTH;
+  84               sizeof(tsb_entry_t)) >> MMU_FRAME_WIDTH;
   85   85      frame_free(KA2PA((uintptr_t) as->arch.itsb));
   86   86      return cnt;
  …
  140  140      uintptr_t tsb = (uintptr_t) as->arch.itsb;
  141  141
- 142           if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
+ 142           if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
  143  143          /*
  144  144           * TSBs were allocated from memory not covered
  …
  159  159      tsb_base.split = 0;
  160  160
- 161           tsb_base.base = ((uintptr_t) as->arch.itsb) >> PAGE_WIDTH;
+ 161           tsb_base.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH;
  162  162      itsb_base_write(tsb_base.value);
- 163           tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH;
+ 163           tsb_base.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH;
  164  164      dtsb_base_write(tsb_base.value);
  165  165  #endif
  …
  190  190      uintptr_t tsb = (uintptr_t) as->arch.itsb;
  191  191
- 192           if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
+ 192           if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
  193  193          /*
  194  194           * TSBs were allocated from memory not covered
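For orientation, the TSB sizing above works out as follows (assuming 512-entry ITSB and DTSB tables and a 16-byte tsb_entry_t, values that are not part of this diff): the two tables take 1024 * 16 = 16 KB, so with 8 KB MMU frames (MMU_FRAME_WIDTH = 13) order = fnzb32(16 KB >> 13) = fnzb32(2) = 1, and frame_alloc() hands back 2^1 = 2 contiguous 8 KB frames, which memsetb() then clears.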
kernel/arch/sparc64/src/mm/page.c (r8d37a06 → r2057572)

   74   74      for (i = 0; i < bsp_locked_dtlb_entries; i++) {
   75   75          dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page,
-  76                   bsp_locked_dtlb_entry[i].phys_page,
-  77                   bsp_locked_dtlb_entry[i].pagesize_code, true,
-  78                   false);
+  76                   bsp_locked_dtlb_entry[i].phys_page,
+  77                   bsp_locked_dtlb_entry[i].pagesize_code, true,
+  78                   false);
   79   79      }
   80   80  #endif
  …
  108  108      count_t count;
  109  109  } sizemap[] = {
- 110           { PAGESIZE_8K, 0, 1 },                      /* 8K */
- 111           { PAGESIZE_8K, PAGE_SIZE, 2 },              /* 16K */
- 112           { PAGESIZE_8K, PAGE_SIZE, 4 },              /* 32K */
- 113           { PAGESIZE_64K, 0, 1},                      /* 64K */
- 114           { PAGESIZE_64K, 8 * PAGE_SIZE, 2 },         /* 128K */
- 115           { PAGESIZE_64K, 8 * PAGE_SIZE, 4 },         /* 256K */
- 116           { PAGESIZE_512K, 0, 1 },                    /* 512K */
- 117           { PAGESIZE_512K, 64 * PAGE_SIZE, 2 },       /* 1M */
- 118           { PAGESIZE_512K, 64 * PAGE_SIZE, 4 },       /* 2M */
- 119           { PAGESIZE_4M, 0, 1 },                      /* 4M */
- 120           { PAGESIZE_4M, 512 * PAGE_SIZE, 2 }         /* 8M */
+ 110           { PAGESIZE_8K, 0, 1 },                      /* 8K */
+ 111           { PAGESIZE_8K, MMU_PAGE_SIZE, 2 },          /* 16K */
+ 112           { PAGESIZE_8K, MMU_PAGE_SIZE, 4 },          /* 32K */
+ 113           { PAGESIZE_64K, 0, 1},                      /* 64K */
+ 114           { PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 2 },     /* 128K */
+ 115           { PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 4 },     /* 256K */
+ 116           { PAGESIZE_512K, 0, 1 },                    /* 512K */
+ 117           { PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 2 },   /* 1M */
+ 118           { PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 4 },   /* 2M */
+ 119           { PAGESIZE_4M, 0, 1 },                      /* 4M */
+ 120           { PAGESIZE_4M, 512 * MMU_PAGE_SIZE, 2 }     /* 8M */
  121  121  };
  122  122
- 123       ASSERT(ALIGN_UP(physaddr, PAGE_SIZE) == physaddr);
+ 123       ASSERT(ALIGN_UP(physaddr, MMU_PAGE_SIZE) == physaddr);
  124  124      ASSERT(size <= 8 * 1024 * 1024);
  125  125
- 126       if (size <= FRAME_SIZE)
+ 126       if (size <= MMU_FRAME_SIZE)
  127  127          order = 0;
  128  128      else
- 129           order = (fnzb64(size - 1) + 1) - FRAME_WIDTH;
+ 129           order = (fnzb64(size - 1) + 1) - MMU_FRAME_WIDTH;
  130  130
  131  131      /*
  …
  135  135       */
  136  136      ASSERT(PA2KA(last_frame));
- 137       uintptr_t virtaddr = ALIGN_UP(PA2KA(last_frame), 1 << (order + FRAME_WIDTH));
- 138       last_frame = ALIGN_UP(KA2PA(virtaddr) + size, 1 << (order + FRAME_WIDTH));
+ 137       uintptr_t virtaddr = ALIGN_UP(PA2KA(last_frame),
+ 138           1 << (order + FRAME_WIDTH));
+ 139       last_frame = ALIGN_UP(KA2PA(virtaddr) + size,
+ 140           1 << (order + FRAME_WIDTH));
  139  141
  140  142      for (i = 0; i < sizemap[order].count; i++) {
  …
  143  145       */
  144  146      dtlb_insert_mapping(virtaddr + i * sizemap[order].increment,
- 145               physaddr + i * sizemap[order].increment,
- 146               sizemap[order].pagesize_code, true, false);
+ 147           physaddr + i * sizemap[order].increment,
+ 148           sizemap[order].pagesize_code, true, false);
  147  149
  148  150  #ifdef CONFIG_SMP
  …
  151  153       */
  152  154      bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page =
  153  155          virtaddr + i * sizemap[order].increment;
  154  156      bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page =
  155  157          physaddr + i * sizemap[order].increment;
  156  158      bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code =
  157  159          sizemap[order].pagesize_code;
  158  160      bsp_locked_dtlb_entries++;
  159  161  #endif
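As a worked example of the sizemap lookup above (assuming 8 KB MMU frames, i.e. MMU_FRAME_WIDTH = 13): pinning an 8 MB region gives order = (fnzb64(8 MB - 1) + 1) - 13 = 23 - 13 = 10, and sizemap[10] = { PAGESIZE_4M, 512 * MMU_PAGE_SIZE, 2 }, so the region is covered by two locked 4 MB DTLB entries whose virtual and physical addresses are 512 * 8 KB = 4 MB apart.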
kernel/arch/sparc64/src/mm/tlb.c (r8d37a06 → r2057572)

   55   55  #endif
   56   56
-  57       static void dtlb_pte_copy(pte_t *t, bool ro);
-  58       static void itlb_pte_copy(pte_t *t);
-  59       static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const
-  60           char *str);
+  57       static void dtlb_pte_copy(pte_t *t, index_t index, bool ro);
+  58       static void itlb_pte_copy(pte_t *t, index_t index);
+  59       static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
+  60           const char *str);
   61   61  static void do_fast_data_access_mmu_miss_fault(istate_t *istate,
   62   62      tlb_tag_access_reg_t tag, const char *str);
   63   63  static void do_fast_data_access_protection_fault(istate_t *istate,
   64   64      tlb_tag_access_reg_t tag, const char *str);
   65   65
   66   66  char *context_encoding[] = {
  …
   93   93   * @param cacheable True if the mapping is cacheable, false otherwise.
   94   94   */
-  95       void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool
-  96           locked, bool cacheable)
+  95       void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
+  96           bool locked, bool cacheable)
   97   97  {
   98   98      tlb_tag_access_reg_t tag;
  …
  127  127  /** Copy PTE to TLB.
  128  128   *
- 129        * @param t Page Table Entry to be copied.
- 130        * @param ro If true, the entry will be created read-only, regardless of its w
- 131        *      field.
- 132        */
- 133       void dtlb_pte_copy(pte_t *t, bool ro)
+ 129        * @param t Page Table Entry to be copied.
+ 130        * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
+ 131        * @param ro If true, the entry will be created read-only, regardless of its
+ 132        *      w field.
+ 133        */
+ 134       void dtlb_pte_copy(pte_t *t, index_t index, bool ro)
  134  135  {
  135  136      tlb_tag_access_reg_t tag;
  …
  138  139      frame_address_t fr;
  139  140
- 140           pg.address = t->page;
- 141           fr.address = t->frame;
+ 141           pg.address = t->page + (index << MMU_PAGE_WIDTH);
+ 142           fr.address = t->frame + (index << MMU_PAGE_WIDTH);
  142  143
  143  144      tag.value = 0;
  144  145      tag.context = t->as->asid;
  145  146      tag.vpn = pg.vpn;
  146  147
  147  148      dtlb_tag_access_write(tag.value);
  148  149
  149  150      data.value = 0;
  150  151      data.v = true;
  …
  159  160      data.w = ro ? false : t->w;
  160  161      data.g = t->g;
  161  162
  162  163      dtlb_data_in_write(data.value);
  163  164  }
  …
  165  166  /** Copy PTE to ITLB.
  166  167   *
- 167        * @param t Page Table Entry to be copied.
- 168        */
- 169       void itlb_pte_copy(pte_t *t)
+ 168        * @param t Page Table Entry to be copied.
+ 169        * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
+ 170        */
+ 171       void itlb_pte_copy(pte_t *t, index_t index)
  170  172  {
  171  173      tlb_tag_access_reg_t tag;
  …
  174  176      frame_address_t fr;
  175  177
- 176           pg.address = t->page;
- 177           fr.address = t->frame;
+ 178           pg.address = t->page + (index << MMU_PAGE_WIDTH);
+ 179           fr.address = t->frame + (index << MMU_PAGE_WIDTH);
  178  180
  179  181      tag.value = 0;
  …
  200  202  {
  201  203      uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
+ 204           index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
  202  205      pte_t *t;
  203  206
  …
  210  213       */
  211  214      t->a = true;
- 212           itlb_pte_copy(t);
+ 215           itlb_pte_copy(t, index);
  213  216  #ifdef CONFIG_TSB
- 214           itsb_pte_copy(t);
+ 217           itsb_pte_copy(t, index);
  215  218  #endif
  216  219      page_table_unlock(AS, true);
  …
  223  226      if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
  224  227          do_fast_instruction_access_mmu_miss_fault(istate,
  225  228              __FUNCTION__);
  226  229      }
  227  230  }
  …
  237  240      tlb_tag_access_reg_t tag;
  238  241      uintptr_t va;
+ 242           index_t index;
  239  243      pte_t *t;
  240  244
  241  245      tag.value = dtlb_tag_access_read();
- 242           va = tag.vpn << PAGE_WIDTH;
+ 246           va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
+ 247           index = tag.vpn % MMU_PAGES_PER_PAGE;
  243  248
  244  249      if (tag.context == ASID_KERNEL) {
  …
  246  251          /* NULL access in kernel */
  247  252          do_fast_data_access_mmu_miss_fault(istate, tag,
  248  253              __FUNCTION__);
  249  254      }
  250  255      do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
  251  256          "kernel page fault.");
  252  257  }
  253  258
  …
  260  265       */
  261  266      t->a = true;
- 262           dtlb_pte_copy(t, true);
+ 267           dtlb_pte_copy(t, index, true);
  263  268  #ifdef CONFIG_TSB
- 264           dtsb_pte_copy(t, true);
+ 269           dtsb_pte_copy(t, index, true);
  265  270  #endif
  266  271      page_table_unlock(AS, true);
  267  272  } else {
  268  273      /*
- 269            * Forward the page fault to the address space page fault handler.
+ 274            * Forward the page fault to the address space page fault
+ 275            * handler.
  270  276       */
  271  277      page_table_unlock(AS, true);
  272  278      if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
  273  279          do_fast_data_access_mmu_miss_fault(istate, tag,
  274  280              __FUNCTION__);
  275  281      }
  276  282  }
  …
  282  288      tlb_tag_access_reg_t tag;
  283  289      uintptr_t va;
+ 290           index_t index;
  284  291      pte_t *t;
  285  292
  286  293      tag.value = dtlb_tag_access_read();
- 287           va = tag.vpn << PAGE_WIDTH;
+ 294           va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
+ 295           index = tag.vpn % MMU_PAGES_PER_PAGE;    /* 16K-page emulation */
  288  296
  289  297      page_table_lock(AS, true);
  …
  297  305      t->a = true;
  298  306      t->d = true;
- 299           dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, va);
- 300           dtlb_pte_copy(t, false);
+ 307           dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
+ 308               va + index * MMU_PAGE_SIZE);
+ 309           dtlb_pte_copy(t, index, false);
  301  310  #ifdef CONFIG_TSB
- 302           dtsb_pte_copy(t, false);
+ 311           dtsb_pte_copy(t, index, false);
  303  312  #endif
  304  313      page_table_unlock(AS, true);
  …
  311  320      if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
  312  321          do_fast_data_access_protection_fault(istate, tag,
  313  322              __FUNCTION__);
  314  323      }
  315  324  }
  …
  329  338
  330  339      printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
- 331               "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
- 332               "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
- 333               t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
- 334               d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
+ 340               "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
+ 341               "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
+ 342               t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
+ 343               d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
  335  344  }
  336  345
  …
  341  350
  342  351      printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
- 343               "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
- 344               "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
- 345               t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
- 346               d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
- 347       }
- 348
- 349       }
- 350
- 351       void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char
- 352           *str)
+ 352               "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
+ 353               "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
+ 354               t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
+ 355               d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
+ 356       }
+ 357
+ 358       }
+ 359
+ 360       void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
+ 361           const char *str)
  353  362  {
  354  363      fault_if_from_uspace(istate, "%s\n", str);
  …
  357  366  }
  358  367
- 359       void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t
- 360           tag, const char *str)
+ 368       void do_fast_data_access_mmu_miss_fault(istate_t *istate,
+ 369           tlb_tag_access_reg_t tag, const char *str)
  361  370  {
  362  371      uintptr_t va;
  363  372
- 364           va = tag.vpn << PAGE_WIDTH;
+ 373           va = tag.vpn << MMU_PAGE_WIDTH;
  365  374
  366  375      fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
  367  376          tag.context);
  368  377      dump_istate(istate);
  369  378      printf("Faulting page: %p, ASID=%d\n", va, tag.context);
  …
  371  380  }
  372  381
- 373       void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t
- 374           tag, const char *str)
+ 382       void do_fast_data_access_protection_fault(istate_t *istate,
+ 383           tlb_tag_access_reg_t tag, const char *str)
  375  384  {
  376  385      uintptr_t va;
  377  386
- 378           va = tag.vpn << PAGE_WIDTH;
+ 387           va = tag.vpn << MMU_PAGE_WIDTH;
  379  388
  380  389      fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
  381  390          tag.context);
  382  391      printf("Faulting page: %p, ASID=%d\n", va, tag.context);
  383  392      dump_istate(istate);
  …
  394  403
  395  404      printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
- 396               "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
- 397               sfsr.ow, sfsr.fv);
+ 405               "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
+ 406               sfsr.ow, sfsr.fv);
  398  407      printf("DTLB SFAR: address=%p\n", sfar);
  399  408
  …
  482  491      mmu_primary_context_write(ctx.v);
  483  492
- 484           for (i = 0; i < cnt; i++) {
+ 493           for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
  485  494          itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
- 486                   page + i * PAGE_SIZE);
+ 495                   page + i * MMU_PAGE_SIZE);
  487  496          dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
- 488                   page + i * PAGE_SIZE);
+ 497                   page + i * MMU_PAGE_SIZE);
  489  498      }
  490  499
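The thread running through the tlb.c changes is 16 KB software pages emulated on top of the UltraSPARC's 8 KB MMU pages: each handler now works out which 8 KB subpage of the faulting 16 KB page it should map. A minimal sketch of that arithmetic, with illustrative constants (the real definitions live in the sparc64 mm headers, not in this diff):

    #include <stdint.h>

    /* Illustrative values only; assumed here, not taken from this changeset. */
    #define MMU_PAGE_WIDTH      13                    /* 8 KB hardware (MMU) pages */
    #define MMU_PAGE_SIZE       (1UL << MMU_PAGE_WIDTH)
    #define PAGE_WIDTH          (MMU_PAGE_WIDTH + 1)  /* 16 KB software pages */
    #define PAGE_SIZE           (1UL << PAGE_WIDTH)
    #define MMU_PAGES_PER_PAGE  (1UL << (PAGE_WIDTH - MMU_PAGE_WIDTH))  /* == 2 */

    /* Which 8 KB subpage of its 16 KB software page does va fall into? (0 or 1) */
    static unsigned subpage_index(uintptr_t va)
    {
            return (va >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
    }

    /* Base of the 8 KB MMU page that the inserted TLB/TSB entry will cover. */
    static uintptr_t mmu_page_base(uintptr_t va)
    {
            uintptr_t page = va & ~(PAGE_SIZE - 1);           /* 16 KB page base */
            return page + subpage_index(va) * MMU_PAGE_SIZE;  /* lower or upper half */
    }

This is the same computation the handlers perform on istate->tpc and on tag.vpn, and it is why tlb_invalidate_pages() now demaps cnt * MMU_PAGES_PER_PAGE MMU pages per request.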
kernel/arch/sparc64/src/mm/tsb.c (r8d37a06 → r2057572)

   35   35  #include <arch/mm/tsb.h>
   36   36  #include <arch/mm/tlb.h>
+  37       #include <arch/mm/page.h>
   37   38  #include <arch/barrier.h>
   38   39  #include <mm/as.h>
  …
   41   42  #include <debug.h>
   42   43
-  43       #define TSB_INDEX_MASK  ((1 << (21 + 1 + TSB_SIZE - PAGE_WIDTH)) - 1)
+  44       #define TSB_INDEX_MASK  ((1 << (21 + 1 + TSB_SIZE - MMU_PAGE_WIDTH)) - 1)
   44   45
   45   46  /** Invalidate portion of TSB.
  …
   60   61      ASSERT(as->arch.itsb && as->arch.dtsb);
   61   62
-  62           i0 = (page >> PAGE_WIDTH) & TSB_INDEX_MASK;
-  63           cnt = min(pages, ITSB_ENTRY_COUNT);
+  63           i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
+  64           cnt = min(pages * MMU_PAGES_PER_PAGE, ITSB_ENTRY_COUNT);
   64   65
   65   66      for (i = 0; i < cnt; i++) {
   66   67          as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT - 1)].tag.invalid =
   67   68              true;
   68   69          as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT - 1)].tag.invalid =
   69   70              true;
   70   71      }
   71   72  }
  …
   73   74  /** Copy software PTE to ITSB.
   74   75   *
-  75        * @param t Software PTE.
+  76        * @param t Software PTE.
+  77        * @param index Zero if lower 8K-subpage, one if higher 8K subpage.
   76   78   */
-  77       void itsb_pte_copy(pte_t *t)
+  79       void itsb_pte_copy(pte_t *t, index_t index)
   78   80  {
   79   81      as_t *as;
   80   82      tsb_entry_t *tsb;
+  83           index_t entry;
   81   84
   82   85      as = t->as;
-  83           tsb = &as->arch.itsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK];
+  86           entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
+  87           tsb = &as->arch.itsb[entry];
   84   88
   85   89      /*
  …
   96  100
   97  101      tsb->tag.context = as->asid;
-  98           tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
+ 102           tsb->tag.va_tag = (t->page + (index << MMU_PAGE_WIDTH)) >>
+ 103               VA_TAG_PAGE_SHIFT;
   99  104      tsb->data.value = 0;
  100  105      tsb->data.size = PAGESIZE_8K;
- 101           tsb->data.pfn = t->frame >> FRAME_WIDTH;
+ 106           tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
  102  107      tsb->data.cp = t->c;
  103  108      tsb->data.p = t->k;     /* p as privileged */
  …
  111  116  /** Copy software PTE to DTSB.
  112  117   *
- 113        * @param t Software PTE.
- 114        * @param ro If true, the mapping is copied read-only.
+ 118        * @param t Software PTE.
+ 119        * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
+ 120        * @param ro If true, the mapping is copied read-only.
  115  121   */
- 116       void dtsb_pte_copy(pte_t *t, bool ro)
+ 122       void dtsb_pte_copy(pte_t *t, index_t index, bool ro)
  117  123  {
  118  124      as_t *as;
  119  125      tsb_entry_t *tsb;
+ 126           index_t entry;
  120  127
  121  128      as = t->as;
- 122           tsb = &as->arch.dtsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK];
+ 129           entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
+ 130           tsb = &as->arch.dtsb[entry];
  123  131
  124  132      /*
  …
  135  143
  136  144      tsb->tag.context = as->asid;
- 137           tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
+ 145           tsb->tag.va_tag = (t->page + (index << MMU_PAGE_WIDTH)) >>
+ 146               VA_TAG_PAGE_SHIFT;
  138  147      tsb->data.value = 0;
  139  148      tsb->data.size = PAGESIZE_8K;
- 140           tsb->data.pfn = t->frame >> FRAME_WIDTH;
+ 149           tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
  141  150      tsb->data.cp = t->c;
  142  151  #ifdef CONFIG_VIRT_IDX_DCACHE
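In the same spirit, the TSB copy routines above place the two 8 KB subpages of a 16 KB page in adjacent TSB slots. A sketch of the slot computation, assuming TSB_SIZE = 0 and 512-entry tables (neither value appears in this diff):

    #include <stdint.h>

    /* Assumed values; the real ones come from arch/mm/tsb.h. */
    #define MMU_PAGE_WIDTH    13
    #define TSB_SIZE          0
    #define ITSB_ENTRY_COUNT  (512 << TSB_SIZE)
    #define TSB_INDEX_MASK    ((1 << (21 + 1 + TSB_SIZE - MMU_PAGE_WIDTH)) - 1)

    /* Slot filled by itsb_pte_copy()/dtsb_pte_copy() for a software page and
     * its 8 KB-subpage index (0 or 1): consecutive subpages land in
     * consecutive entries, and with TSB_SIZE = 0 the mask equals
     * ITSB_ENTRY_COUNT - 1, i.e. 511. */
    static unsigned tsb_slot(uintptr_t page, unsigned index)
    {
            return ((page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
    }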
kernel/arch/sparc64/src/smp/ipi.c (r8d37a06 → r2057572)

   40   40  #include <config.h>
   41   41  #include <mm/tlb.h>
-  42       #include <arch/mm/cache.h>
   43   42  #include <arch/interrupt.h>
   44   43  #include <arch/trap/interrupt.h>
  …
  126  125          func = tlb_shootdown_ipi_recv;
  127  126          break;
- 128       #if (defined(CONFIG_SMP) && (defined(CONFIG_VIRT_IDX_DCACHE)))
- 129           case IPI_DCACHE_SHOOTDOWN:
- 130               func = dcache_shootdown_ipi_recv;
- 131               break;
- 132       #endif
  133  127      default:
  134  128          panic("Unknown IPI (%d).\n", ipi);
kernel/arch/sparc64/src/trap/interrupt.c (r8d37a06 → r2057572)

   45   45  #include <arch.h>
   46   46  #include <mm/tlb.h>
-  47       #include <arch/mm/cache.h>
   48   47  #include <config.h>
   49   48  #include <synch/spinlock.h>
  …
   92   91      if (data0 == (uintptr_t) tlb_shootdown_ipi_recv) {
   93   92          tlb_shootdown_ipi_recv();
-  94       #ifdef CONFIG_VIRT_IDX_DCACHE
-  95           } else if (data0 == (uintptr_t) dcache_shootdown_ipi_recv) {
-  96               dcache_shootdown_ipi_recv();
-  97       #endif
   98   93      }
   99   94  #endif