source: mainline/kernel/arch/sparc64/src/mm/tlb.c@ 563c2dd

Branches containing this file: lfn, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Last change on this file since 563c2dd was 2057572, checked in by Jakub Jermar <jakub@…>, 18 years ago

The Ultimate Solution To Illegal Virtual Aliases.
It is better to avoid them completely than to fight them.
Switch the sparc64 port to 16K pages. The TLBs and TSBs
continue to operate with 8K pages only. Page tables and
other generic parts operate with 16K pages.

Because the MMU doesn't support 16K directly, each 16K
page is emulated by a pair of 8K pages. With 16K pages,
illegal aliases cannot be created in 16K D-cache.

  • Property mode set to 100644
File size: 12.7 KB
Line 
1/*
2 * Copyright (c) 2005 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup sparc64mm
30 * @{
31 */
32/** @file
33 */
34
35#include <arch/mm/tlb.h>
36#include <mm/tlb.h>
37#include <mm/as.h>
38#include <mm/asid.h>
39#include <arch/mm/frame.h>
40#include <arch/mm/page.h>
41#include <arch/mm/mmu.h>
42#include <arch/interrupt.h>
43#include <interrupt.h>
44#include <arch.h>
45#include <print.h>
46#include <arch/types.h>
47#include <config.h>
48#include <arch/trap/trap.h>
49#include <arch/trap/exception.h>
50#include <panic.h>
51#include <arch/asm.h>
52
53#ifdef CONFIG_TSB
54#include <arch/mm/tsb.h>
55#endif
56
57static void dtlb_pte_copy(pte_t *t, index_t index, bool ro);
58static void itlb_pte_copy(pte_t *t, index_t index);
59static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
60 const char *str);
61static void do_fast_data_access_mmu_miss_fault(istate_t *istate,
62 tlb_tag_access_reg_t tag, const char *str);
63static void do_fast_data_access_protection_fault(istate_t *istate,
64 tlb_tag_access_reg_t tag, const char *str);
65
/** Printable names of the MMU contexts; presumably indexed by the hardware
 * context encoding — no user of this table is visible in this file. */
char *context_encoding[] = {
	"Primary",
	"Secondary",
	"Nucleus",
	"Reserved"
};
72
/** Initialize the sparc64 TLB subsystem.
 *
 * Drops every non-locked translation from both TLBs and resets the
 * synchronous fault status of both MMUs.
 */
void tlb_arch_init(void)
{
	/* Remove all non-locked DTLB and ITLB entries. */
	tlb_invalidate_all();

	/* Reset both Synchronous Fault Status registers. */
	dtlb_sfsr_write(0);
	itlb_sfsr_write(0);
}
86
87/** Insert privileged mapping into DMMU TLB.
88 *
89 * @param page Virtual page address.
90 * @param frame Physical frame address.
91 * @param pagesize Page size.
92 * @param locked True for permanent mappings, false otherwise.
93 * @param cacheable True if the mapping is cacheable, false otherwise.
94 */
95void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
96 bool locked, bool cacheable)
97{
98 tlb_tag_access_reg_t tag;
99 tlb_data_t data;
100 page_address_t pg;
101 frame_address_t fr;
102
103 pg.address = page;
104 fr.address = frame;
105
106 tag.value = ASID_KERNEL;
107 tag.vpn = pg.vpn;
108
109 dtlb_tag_access_write(tag.value);
110
111 data.value = 0;
112 data.v = true;
113 data.size = pagesize;
114 data.pfn = fr.pfn;
115 data.l = locked;
116 data.cp = cacheable;
117#ifdef CONFIG_VIRT_IDX_DCACHE
118 data.cv = cacheable;
119#endif /* CONFIG_VIRT_IDX_DCACHE */
120 data.p = true;
121 data.w = true;
122 data.g = false;
123
124 dtlb_data_in_write(data.value);
125}
126
127/** Copy PTE to TLB.
128 *
129 * @param t Page Table Entry to be copied.
130 * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
131 * @param ro If true, the entry will be created read-only, regardless of its
132 * w field.
133 */
134void dtlb_pte_copy(pte_t *t, index_t index, bool ro)
135{
136 tlb_tag_access_reg_t tag;
137 tlb_data_t data;
138 page_address_t pg;
139 frame_address_t fr;
140
141 pg.address = t->page + (index << MMU_PAGE_WIDTH);
142 fr.address = t->frame + (index << MMU_PAGE_WIDTH);
143
144 tag.value = 0;
145 tag.context = t->as->asid;
146 tag.vpn = pg.vpn;
147
148 dtlb_tag_access_write(tag.value);
149
150 data.value = 0;
151 data.v = true;
152 data.size = PAGESIZE_8K;
153 data.pfn = fr.pfn;
154 data.l = false;
155 data.cp = t->c;
156#ifdef CONFIG_VIRT_IDX_DCACHE
157 data.cv = t->c;
158#endif /* CONFIG_VIRT_IDX_DCACHE */
159 data.p = t->k; /* p like privileged */
160 data.w = ro ? false : t->w;
161 data.g = t->g;
162
163 dtlb_data_in_write(data.value);
164}
165
166/** Copy PTE to ITLB.
167 *
168 * @param t Page Table Entry to be copied.
169 * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
170 */
171void itlb_pte_copy(pte_t *t, index_t index)
172{
173 tlb_tag_access_reg_t tag;
174 tlb_data_t data;
175 page_address_t pg;
176 frame_address_t fr;
177
178 pg.address = t->page + (index << MMU_PAGE_WIDTH);
179 fr.address = t->frame + (index << MMU_PAGE_WIDTH);
180
181 tag.value = 0;
182 tag.context = t->as->asid;
183 tag.vpn = pg.vpn;
184
185 itlb_tag_access_write(tag.value);
186
187 data.value = 0;
188 data.v = true;
189 data.size = PAGESIZE_8K;
190 data.pfn = fr.pfn;
191 data.l = false;
192 data.cp = t->c;
193 data.p = t->k; /* p like privileged */
194 data.w = false;
195 data.g = t->g;
196
197 itlb_data_in_write(data.value);
198}
199
/** ITLB miss handler.
 *
 * Resolves the miss from the software page hash table when possible;
 * otherwise forwards the fault to the generic address space handler.
 *
 * @param n Trap type number (unused here).
 * @param istate Interrupted state saved on the stack.
 */
void fast_instruction_access_mmu_miss(int n, istate_t *istate)
{
	/* Base of the 16K page containing the faulting instruction. */
	uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
	/* Which 8K half of the 16K page trapped (16K-page emulation). */
	index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
	pte_t *t;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t && PTE_EXECUTABLE(t)) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into ITLB.
		 */
		t->a = true;	/* record the access in the PTE */
		itlb_pte_copy(t, index);
#ifdef CONFIG_TSB
		itsb_pte_copy(t, index);
#endif
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler. The page table lock is dropped first because
		 * as_page_fault() is called outside of it.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
			do_fast_instruction_access_mmu_miss_fault(istate,
			    __FUNCTION__);
		}
	}
}
232
/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param n Trap type number (unused here).
 * @param istate Interrupted state saved on the stack.
 */
void fast_data_access_mmu_miss(int n, istate_t *istate)
{
	tlb_tag_access_reg_t tag;
	uintptr_t va;
	index_t index;
	pte_t *t;

	/* The DMMU records the faulting VPN and context in Tag Access. */
	tag.value = dtlb_tag_access_read();
	/* Base of the 16K page; the tag addresses an 8K MMU page. */
	va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
	/* Which 8K half of the 16K page faulted (16K-page emulation). */
	index = tag.vpn % MMU_PAGES_PER_PAGE;

	if (tag.context == ASID_KERNEL) {
		if (!tag.vpn) {
			/* NULL access in kernel */
			do_fast_data_access_mmu_miss_fault(istate, tag,
			    __FUNCTION__);
		}
		/*
		 * Reached only for a non-NULL kernel address; both fault
		 * helpers panic and therefore never return.
		 */
		do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
		    "kernel page fault.");
	}

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into DTLB.
		 */
		t->a = true;	/* record the access in the PTE */
		/*
		 * Insert read-only (ro = true); the first store will raise a
		 * protection fault, which sets the dirty bit and reinserts
		 * the entry writable (see fast_data_access_protection()).
		 */
		dtlb_pte_copy(t, index, true);
#ifdef CONFIG_TSB
		dtsb_pte_copy(t, index, true);
#endif
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			do_fast_data_access_mmu_miss_fault(istate, tag,
			    __FUNCTION__);
		}
	}
}
284
/** DTLB protection fault handler.
 *
 * Handles a store to a page whose DTLB entry is read-only. If the underlying
 * PTE is in fact writable, the entry was inserted read-only by the miss
 * handler and is upgraded to a writable one here.
 *
 * @param n Trap type number (unused here).
 * @param istate Interrupted state saved on the stack.
 */
void fast_data_access_protection(int n, istate_t *istate)
{
	tlb_tag_access_reg_t tag;
	uintptr_t va;
	index_t index;
	pte_t *t;

	/* The DMMU records the faulting VPN and context in Tag Access. */
	tag.value = dtlb_tag_access_read();
	/* Base of the 16K page and the faulting 8K half within it. */
	va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
	index = tag.vpn % MMU_PAGES_PER_PAGE;	/* 16K-page emulation */

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t && PTE_WRITABLE(t)) {
		/*
		 * The mapping was found in the software page hash table and is
		 * writable. Demap the old mapping and insert an updated mapping
		 * into DTLB.
		 */
		t->a = true;	/* accessed */
		t->d = true;	/* dirty */
		/*
		 * NOTE(review): the demap targets the secondary context here,
		 * unlike the rest of this file which operates on the primary
		 * context — verify this is intentional against the UltraSPARC
		 * demap-operation semantics.
		 */
		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
		    va + index * MMU_PAGE_SIZE);
		dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
		dtsb_pte_copy(t, index, false);
#endif
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
			do_fast_data_access_protection_fault(istate, tag,
			    __FUNCTION__);
		}
	}
}
326
327/** Print contents of both TLBs. */
328void tlb_print(void)
329{
330 int i;
331 tlb_data_t d;
332 tlb_tag_read_reg_t t;
333
334 printf("I-TLB contents:\n");
335 for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
336 d.value = itlb_data_access_read(i);
337 t.value = itlb_tag_read_read(i);
338
339 printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
340 "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
341 "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
342 t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
343 d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
344 }
345
346 printf("D-TLB contents:\n");
347 for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
348 d.value = dtlb_data_access_read(i);
349 t.value = dtlb_tag_read_read(i);
350
351 printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
352 "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
353 "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
354 t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
355 d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
356 }
357
358}
359
/** Report an unserviceable ITLB miss and panic.
 *
 * If the fault originated in userspace, fault_if_from_uspace() is presumably
 * expected to dispose of the task — confirm in its definition. Otherwise the
 * interrupted state is dumped and the kernel panics; this function does not
 * return.
 *
 * @param istate Interrupted state saved on the stack.
 * @param str Message identifying the fault origin.
 */
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str)
{
	fault_if_from_uspace(istate, "%s\n", str);
	dump_istate(istate);
	panic("%s\n", str);
}
367
368void do_fast_data_access_mmu_miss_fault(istate_t *istate,
369 tlb_tag_access_reg_t tag, const char *str)
370{
371 uintptr_t va;
372
373 va = tag.vpn << MMU_PAGE_WIDTH;
374
375 fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
376 tag.context);
377 dump_istate(istate);
378 printf("Faulting page: %p, ASID=%d\n", va, tag.context);
379 panic("%s\n", str);
380}
381
382void do_fast_data_access_protection_fault(istate_t *istate,
383 tlb_tag_access_reg_t tag, const char *str)
384{
385 uintptr_t va;
386
387 va = tag.vpn << MMU_PAGE_WIDTH;
388
389 fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
390 tag.context);
391 printf("Faulting page: %p, ASID=%d\n", va, tag.context);
392 dump_istate(istate);
393 panic("%s\n", str);
394}
395
396void dump_sfsr_and_sfar(void)
397{
398 tlb_sfsr_reg_t sfsr;
399 uintptr_t sfar;
400
401 sfsr.value = dtlb_sfsr_read();
402 sfar = dtlb_sfar_read();
403
404 printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
405 "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
406 sfsr.ow, sfsr.fv);
407 printf("DTLB SFAR: address=%p\n", sfar);
408
409 dtlb_sfsr_write(0);
410}
411
/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
	int i;
	tlb_data_t d;
	tlb_tag_read_reg_t t;

	/*
	 * Walk all ITLB and DTLB entries and remove all unlocked mappings.
	 *
	 * The kernel doesn't use global mappings so any locked global mappings
	 * found must have been created by someone else. Their only purpose now
	 * is to collide with proper mappings. Invalidate immediately. It should
	 * be safe to invalidate them as late as now.
	 */

	for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
		d.value = itlb_data_access_read(i);
		/* Entry is either unlocked, or a foreign global mapping. */
		if (!d.l || d.g) {
			t.value = itlb_tag_read_read(i);
			d.v = false;	/* only the valid bit is cleared */
			/*
			 * The tag is written back before the data word —
			 * presumably the tag access write selects the tag
			 * applied by the subsequent data access write; confirm
			 * against the UltraSPARC manual.
			 */
			itlb_tag_access_write(t.value);
			itlb_data_access_write(i, d.value);
		}
	}

	/* Same sweep for the DTLB. */
	for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
		d.value = dtlb_data_access_read(i);
		if (!d.l || d.g) {
			t.value = dtlb_tag_read_read(i);
			d.v = false;
			dtlb_tag_access_write(t.value);
			dtlb_data_access_write(i, d.value);
		}
	}

}
449
450/** Invalidate all ITLB and DTLB entries that belong to specified ASID
451 * (Context).
452 *
453 * @param asid Address Space ID.
454 */
455void tlb_invalidate_asid(asid_t asid)
456{
457 tlb_context_reg_t pc_save, ctx;
458
459 /* switch to nucleus because we are mapped by the primary context */
460 nucleus_enter();
461
462 ctx.v = pc_save.v = mmu_primary_context_read();
463 ctx.context = asid;
464 mmu_primary_context_write(ctx.v);
465
466 itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
467 dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
468
469 mmu_primary_context_write(pc_save.v);
470
471 nucleus_leave();
472}
473
474/** Invalidate all ITLB and DTLB entries for specified page range in specified
475 * address space.
476 *
477 * @param asid Address Space ID.
478 * @param page First page which to sweep out from ITLB and DTLB.
479 * @param cnt Number of ITLB and DTLB entries to invalidate.
480 */
481void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
482{
483 int i;
484 tlb_context_reg_t pc_save, ctx;
485
486 /* switch to nucleus because we are mapped by the primary context */
487 nucleus_enter();
488
489 ctx.v = pc_save.v = mmu_primary_context_read();
490 ctx.context = asid;
491 mmu_primary_context_write(ctx.v);
492
493 for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
494 itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
495 page + i * MMU_PAGE_SIZE);
496 dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
497 page + i * MMU_PAGE_SIZE);
498 }
499
500 mmu_primary_context_write(pc_save.v);
501
502 nucleus_leave();
503}
504
505/** @}
506 */
Note: See TracBrowser for help on using the repository browser.