source: mainline/kernel/arch/sparc64/src/mm/sun4u/tlb.c

Last change on this file was 8c5586c, checked in by Jakub Jermar <jakub@…>, 6 years ago

Demap using primary context for kernel faults

When a protection fault happens because of a kernel mapping, the demap
must not use the secondary context, as that belongs to the task in which
the fault happened, but the primary context or the nucleus, which contain
the kernel's context.

/*
 * Copyright (c) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_sparc64_mm
 * @{
 */
/** @file
 */

#include <arch/mm/tlb.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/mmu.h>
#include <arch/interrupt.h>
#include <assert.h>
#include <interrupt.h>
#include <arch.h>
#include <stdio.h>
#include <typedefs.h>
#include <config.h>
#include <arch/trap/trap.h>
#include <arch/trap/exception.h>
#include <panic.h>
#include <arch/asm.h>
#include <genarch/mm/page_ht.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif

static void dtlb_pte_copy(pte_t *, size_t, bool);
static void itlb_pte_copy(pte_t *, size_t);

const char *context_encoding[] = {
	"Primary",
	"Secondary",
	"Nucleus",
	"Reserved"
};

void tlb_arch_init(void)
{
	/*
	 * Invalidate all non-locked DTLB and ITLB entries.
	 */
	tlb_invalidate_all();

	/*
	 * Clear both SFSRs.
	 */
	dtlb_sfsr_write(0);
	itlb_sfsr_write(0);
}

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page      Virtual page address.
 * @param frame     Physical frame address.
 * @param pagesize  Page size.
 * @param locked    True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	pg.address = page;
	fr.address = frame;

	tag.context = ASID_KERNEL;
	tag.vpn = pg.vpn;

	dtlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;
	data.size = pagesize;
	data.pfn = fr.pfn;
	data.l = locked;
	data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
	data.cv = cacheable;
#endif /* CONFIG_VIRT_IDX_DCACHE */
	data.p = true;
	data.w = true;
	data.g = false;

	dtlb_data_in_write(data.value);
}
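
/*
 * The generic kernel page size (PAGE_SIZE, 16K) is emulated with pairs of
 * adjacent 8K MMU pages (MMU_PAGE_SIZE), i.e. MMU_PAGES_PER_PAGE == 2.
 * The 'index' argument of the PTE-copy routines below selects which 8K
 * half of the emulated 16K page is being installed.
 */
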
/** Copy PTE to TLB.
 *
 * @param t     Page Table Entry to be copied.
 * @param index Zero for the lower 8K subpage, one for the upper 8K subpage.
 * @param ro    If true, the entry will be created read-only, regardless of
 *              its w field.
 */
void dtlb_pte_copy(pte_t *t, size_t index, bool ro)
{
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	pg.address = t->page + (index << MMU_PAGE_WIDTH);
	fr.address = t->frame + (index << MMU_PAGE_WIDTH);

	tag.value = 0;
	tag.context = t->as->asid;
	tag.vpn = pg.vpn;

	dtlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;
	data.size = PAGESIZE_8K;
	data.pfn = fr.pfn;
	data.l = false;
	data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
	data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_DCACHE */
	data.p = t->k;	/* p as in privileged */
	data.w = ro ? false : t->w;
	data.g = t->g;

	dtlb_data_in_write(data.value);
}

/** Copy PTE to ITLB.
 *
 * @param t     Page Table Entry to be copied.
 * @param index Zero for the lower 8K subpage, one for the upper 8K subpage.
 */
void itlb_pte_copy(pte_t *t, size_t index)
{
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	pg.address = t->page + (index << MMU_PAGE_WIDTH);
	fr.address = t->frame + (index << MMU_PAGE_WIDTH);

	tag.value = 0;
	tag.context = t->as->asid;
	tag.vpn = pg.vpn;

	itlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;
	data.size = PAGESIZE_8K;
	data.pfn = fr.pfn;
	data.l = false;
	data.cp = t->c;
	data.p = t->k;	/* p as in privileged */
	data.w = false;
	data.g = t->g;

	itlb_data_in_write(data.value);
}

/** ITLB miss handler.
 *
 * @param tt     Trap type.
 * @param istate Interrupted state saved on the stack.
 */
void fast_instruction_access_mmu_miss(unsigned int tt, istate_t *istate)
{
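	/*
	 * For an ITLB miss, the faulting virtual address is the trap PC
	 * itself, so the 8K subpage index is derived directly from it.
	 */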
	size_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
	pte_t t;

	bool found = page_mapping_find(AS, istate->tpc, true, &t);
	if (found && PTE_EXECUTABLE(&t)) {
		assert(t.p);

		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into ITLB.
		 */
		t.a = true;
		itlb_pte_copy(&t, index);
#ifdef CONFIG_TSB
		itsb_pte_copy(&t, index);
#endif
		page_mapping_update(AS, istate->tpc, true, &t);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		as_page_fault(istate->tpc, PF_ACCESS_EXEC, istate);
	}
}

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param tt     Trap type.
 * @param istate Interrupted state saved on the stack.
 */
void fast_data_access_mmu_miss(unsigned int tt, istate_t *istate)
{
	tlb_tag_access_reg_t tag;
	uintptr_t page_8k;
	uintptr_t page_16k;
	size_t index;
	pte_t t;
	as_t *as = AS;
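
	/*
	 * Reconstruct the faulting addresses from the TLB Tag Access
	 * register: the 8K MMU page, the 16K generic page containing it,
	 * and the index of the 8K subpage within the 16K page.
	 */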
	tag.value = istate->tlb_tag_access;
	page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
	page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
	index = tag.vpn % MMU_PAGES_PER_PAGE;

	if (tag.context == ASID_KERNEL) {
		if (!tag.vpn) {
			/* NULL access in kernel */
			panic("NULL pointer dereference.");
		} else if (page_8k >= end_of_identity) {
			/* Kernel non-identity. */
			as = AS_KERNEL;
		} else {
			panic("Unexpected kernel page fault.");
		}
	}

	bool found = page_mapping_find(as, page_16k, true, &t);
	if (found) {
		assert(t.p);

		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into DTLB.
		 */
		t.a = true;
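		/*
		 * The entry is inserted read-only; the first write will
		 * raise fast_data_access_protection(), which then sets the
		 * dirty bit and reinserts the entry as writable.
		 */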
		dtlb_pte_copy(&t, index, true);
#ifdef CONFIG_TSB
		dtsb_pte_copy(&t, index, true);
#endif
		page_mapping_update(as, page_16k, true, &t);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		as_page_fault(page_16k, PF_ACCESS_READ, istate);
	}
}

/** DTLB protection fault handler.
 *
 * @param tt     Trap type.
 * @param istate Interrupted state saved on the stack.
 */
void fast_data_access_protection(unsigned int tt, istate_t *istate)
{
	tlb_tag_access_reg_t tag;
	uintptr_t page_16k;
	size_t index;
	pte_t t;
	as_t *as = AS;

	tag.value = istate->tlb_tag_access;
	page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
	index = tag.vpn % MMU_PAGES_PER_PAGE;	/* 16K-page emulation */

	if (tag.context == ASID_KERNEL)
		as = AS_KERNEL;

	bool found = page_mapping_find(as, page_16k, true, &t);
	if (found && PTE_WRITABLE(&t)) {
		assert(t.p);

		/*
		 * The mapping was found in the software page hash table and is
		 * writable. Demap the old mapping and insert an updated mapping
		 * into DTLB.
		 */
		t.a = true;
		t.d = true;
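		/*
		 * For kernel faults the demap must use the primary context
		 * (which holds the kernel's context), not the secondary
		 * context, which belongs to the interrupted task.
		 */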
		dtlb_demap(TLB_DEMAP_PAGE,
		    (as == AS_KERNEL) ? TLB_DEMAP_PRIMARY : TLB_DEMAP_SECONDARY,
		    page_16k + index * MMU_PAGE_SIZE);
		dtlb_pte_copy(&t, index, false);
#ifdef CONFIG_TSB
		dtsb_pte_copy(&t, index, false);
#endif
		page_mapping_update(as, page_16k, true, &t);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		as_page_fault(page_16k, PF_ACCESS_WRITE, istate);
	}
}

/** Print TLB entry (for debugging purposes).
 *
 * The diag field has been left out in order to make this function more
 * generic (there is no diag field in the US3 architecture).
 *
 * @param i TLB entry number
 * @param t TLB entry tag
 * @param d TLB entry data
 */
static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
{
	printf("%u: vpn=%#" PRIx64 ", context=%u, v=%u, size=%u, nfo=%u, "
	    "ie=%u, soft2=%#x, pfn=%#x, soft=%#x, l=%u, "
	    "cp=%u, cv=%u, e=%u, p=%u, w=%u, g=%u\n", i, (uint64_t) t.vpn,
	    t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
	    d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}

#if defined (US)

/** Print contents of both TLBs. */
void tlb_print(void)
{
	int i;
	tlb_data_t d;
	tlb_tag_read_reg_t t;

	printf("I-TLB contents:\n");
	for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
		d.value = itlb_data_access_read(i);
		t.value = itlb_tag_read_read(i);
		print_tlb_entry(i, t, d);
	}

	printf("D-TLB contents:\n");
	for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
		d.value = dtlb_data_access_read(i);
		t.value = dtlb_tag_read_read(i);
		print_tlb_entry(i, t, d);
	}
}

#elif defined (US3)

/** Print contents of all TLBs. */
void tlb_print(void)
{
	int i;
	tlb_data_t d;
	tlb_tag_read_reg_t t;

	printf("TLB_ISMALL contents:\n");
	for (i = 0; i < tlb_ismall_size(); i++) {
		d.value = dtlb_data_access_read(TLB_ISMALL, i);
		t.value = dtlb_tag_read_read(TLB_ISMALL, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_IBIG contents:\n");
	for (i = 0; i < tlb_ibig_size(); i++) {
		d.value = dtlb_data_access_read(TLB_IBIG, i);
		t.value = dtlb_tag_read_read(TLB_IBIG, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_DSMALL contents:\n");
	for (i = 0; i < tlb_dsmall_size(); i++) {
		d.value = dtlb_data_access_read(TLB_DSMALL, i);
		t.value = dtlb_tag_read_read(TLB_DSMALL, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_DBIG_0 contents:\n");
	for (i = 0; i < tlb_dbig_size(); i++) {
		d.value = dtlb_data_access_read(TLB_DBIG_0, i);
		t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_DBIG_1 contents:\n");
	for (i = 0; i < tlb_dbig_size(); i++) {
		d.value = dtlb_data_access_read(TLB_DBIG_1, i);
		t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
		print_tlb_entry(i, t, d);
	}
}

#endif
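
/** Print and clear the DMMU Synchronous Fault Status and Address registers. */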
void describe_dmmu_fault(void)
{
	tlb_sfsr_reg_t sfsr;
	uintptr_t sfar;

	sfsr.value = dtlb_sfsr_read();
	sfar = dtlb_sfar_read();

#if defined (US)
	printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
	    "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
	    sfsr.ow, sfsr.fv);
#elif defined (US3)
	printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
	    "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
	    sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
#endif

	printf("DTLB SFAR: address=%p\n", (void *) sfar);

	dtlb_sfsr_write(0);
}
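
/** Print and clear the DMMU SFSR and SFAR (same behavior as
 * describe_dmmu_fault()).
 */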
void dump_sfsr_and_sfar(void)
{
	tlb_sfsr_reg_t sfsr;
	uintptr_t sfar;

	sfsr.value = dtlb_sfsr_read();
	sfar = dtlb_sfar_read();

#if defined (US)
	printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
	    "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
	    sfsr.ow, sfsr.fv);
#elif defined (US3)
	printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
	    "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
	    sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
#endif

	printf("DTLB SFAR: address=%p\n", (void *) sfar);

	dtlb_sfsr_write(0);
}

#if defined (US)
/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
	int i;

	/*
	 * Walk all ITLB and DTLB entries and remove all unlocked mappings.
	 *
	 * The kernel does not use global mappings, so any locked global
	 * mappings found must have been created by someone else. Their only
	 * purpose now is to collide with proper mappings. Invalidate them
	 * immediately; it is safe to do so even this late.
	 */

	tlb_data_t d;
	tlb_tag_read_reg_t t;
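
	/*
	 * Invalidation protocol: read each entry via the data access
	 * register, and if it is unlocked (or a stray global), write its
	 * tag to the tag access register and write the data back with the
	 * valid bit cleared.
	 */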
	for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
		d.value = itlb_data_access_read(i);
		if (!d.l || d.g) {
			t.value = itlb_tag_read_read(i);
			d.v = false;
			itlb_tag_access_write(t.value);
			itlb_data_access_write(i, d.value);
		}
	}

	for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
		d.value = dtlb_data_access_read(i);
		if (!d.l || d.g) {
			t.value = dtlb_tag_read_read(i);
			d.v = false;
			dtlb_tag_access_write(t.value);
			dtlb_data_access_write(i, d.value);
		}
	}
}

#elif defined (US3)

/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
	itlb_demap(TLB_DEMAP_ALL, 0, 0);
	dtlb_demap(TLB_DEMAP_ALL, 0, 0);
}

#endif

/** Invalidate all ITLB and DTLB entries that belong to the specified ASID
 * (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
	tlb_context_reg_t pc_save, ctx;

	/* switch to nucleus because we are mapped by the primary context */
	nucleus_enter();

	ctx.v = pc_save.v = mmu_primary_context_read();
	ctx.context = asid;
	mmu_primary_context_write(ctx.v);
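
	/*
	 * Demap-by-context operates on whatever context is currently in the
	 * primary context register, so the target ASID is temporarily
	 * installed there and restored afterwards.
	 */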
	itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
	dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);

	mmu_primary_context_write(pc_save.v);

	nucleus_leave();
}

/** Invalidate all ITLB and DTLB entries for the specified page range in the
 * specified address space.
 *
 * @param asid Address Space ID.
 * @param page First page to sweep out of the ITLB and DTLB.
 * @param cnt  Number of ITLB and DTLB entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
	unsigned int i;
	tlb_context_reg_t pc_save, ctx;

	/* switch to nucleus because we are mapped by the primary context */
	nucleus_enter();

	ctx.v = pc_save.v = mmu_primary_context_read();
	ctx.context = asid;
	mmu_primary_context_write(ctx.v);
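
	/*
	 * Each generic 16K page consists of MMU_PAGES_PER_PAGE (two) 8K MMU
	 * pages, each of which must be demapped separately.
	 */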
	for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
		itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
		    page + i * MMU_PAGE_SIZE);
		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
		    page + i * MMU_PAGE_SIZE);
	}

	mmu_primary_context_write(pc_save.v);

	nucleus_leave();
}

/** @}
 */