source: mainline/kernel/arch/sparc64/src/mm/sun4u/tlb.c@1b1be5f

Last change on this file since 1b1be5f was bab75df6, checked in by Jiri Svoboda <jiri@…>, 7 years ago

Let kernel code get printf via the standard stdio header. Clean up unused includes.

/*
 * Copyright (c) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_sparc64_mm
 * @{
 */
/** @file
 */

#include <arch/mm/tlb.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/mmu.h>
#include <arch/interrupt.h>
#include <assert.h>
#include <interrupt.h>
#include <arch.h>
#include <stdio.h>
#include <typedefs.h>
#include <config.h>
#include <arch/trap/trap.h>
#include <arch/trap/exception.h>
#include <panic.h>
#include <arch/asm.h>
#include <genarch/mm/page_ht.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif

static void dtlb_pte_copy(pte_t *, size_t, bool);
static void itlb_pte_copy(pte_t *, size_t);

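/* Human-readable names of the MMU context encodings, indexed by value. */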
const char *context_encoding[] = {
	"Primary",
	"Secondary",
	"Nucleus",
	"Reserved"
};

void tlb_arch_init(void)
{
	/*
	 * Invalidate all non-locked DTLB and ITLB entries.
	 */
	tlb_invalidate_all();

	/*
	 * Clear both SFSRs.
	 */
	dtlb_sfsr_write(0);
	itlb_sfsr_write(0);
}

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page      Virtual page address.
 * @param frame     Physical frame address.
 * @param pagesize  Page size.
 * @param locked    True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	pg.address = page;
	fr.address = frame;

	tag.context = ASID_KERNEL;
	tag.vpn = pg.vpn;

	dtlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;
	data.size = pagesize;
	data.pfn = fr.pfn;
	data.l = locked;
	data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
	data.cv = cacheable;
#endif /* CONFIG_VIRT_IDX_DCACHE */
	data.p = true;
	data.w = true;
	data.g = false;

	dtlb_data_in_write(data.value);
}

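/*
 * Usage sketch for dtlb_insert_mapping() (hypothetical addresses): a
 * permanent, cacheable kernel mapping of a single 8K page could be
 * installed as:
 *
 *     dtlb_insert_mapping(0x0000000080000000, 0x0000000000800000,
 *         PAGESIZE_8K, true, true);
 */
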
/** Copy PTE to DTLB.
 *
 * @param t     Page Table Entry to be copied.
 * @param index Zero for the lower 8K subpage, one for the higher 8K subpage.
 * @param ro    If true, the entry will be created read-only, regardless
 *              of its w field.
 */
void dtlb_pte_copy(pte_t *t, size_t index, bool ro)
{
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	pg.address = t->page + (index << MMU_PAGE_WIDTH);
	fr.address = t->frame + (index << MMU_PAGE_WIDTH);

	tag.value = 0;
	tag.context = t->as->asid;
	tag.vpn = pg.vpn;

	dtlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;
	data.size = PAGESIZE_8K;
	data.pfn = fr.pfn;
	data.l = false;
	data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
	data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_DCACHE */
	data.p = t->k;	/* p like privileged */
	data.w = ro ? false : t->w;
	data.g = t->g;

	dtlb_data_in_write(data.value);
}

/** Copy PTE to ITLB.
 *
 * @param t     Page Table Entry to be copied.
 * @param index Zero for the lower 8K subpage, one for the higher 8K subpage.
 */
void itlb_pte_copy(pte_t *t, size_t index)
{
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	pg.address = t->page + (index << MMU_PAGE_WIDTH);
	fr.address = t->frame + (index << MMU_PAGE_WIDTH);

	tag.value = 0;
	tag.context = t->as->asid;
	tag.vpn = pg.vpn;

	itlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;
	data.size = PAGESIZE_8K;
	data.pfn = fr.pfn;
	data.l = false;
	data.cp = t->c;
	data.p = t->k;	/* p like privileged */
	data.w = false;
	data.g = t->g;

	itlb_data_in_write(data.value);
}

/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(unsigned int tt, istate_t *istate)
{
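	/* Index of the 8K MMU subpage within the kernel page (16K-page emulation). */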
	size_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
	pte_t t;

	bool found = page_mapping_find(AS, istate->tpc, true, &t);
	if (found && PTE_EXECUTABLE(&t)) {
		assert(t.p);

		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into ITLB.
		 */
		t.a = true;
		itlb_pte_copy(&t, index);
#ifdef CONFIG_TSB
		itsb_pte_copy(&t, index);
#endif
		page_mapping_update(AS, istate->tpc, true, &t);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		as_page_fault(istate->tpc, PF_ACCESS_EXEC, istate);
	}
}

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param tt     Trap type.
 * @param istate Interrupted state saved on the stack.
 */
void fast_data_access_mmu_miss(unsigned int tt, istate_t *istate)
{
	tlb_tag_access_reg_t tag;
	uintptr_t page_8k;
	uintptr_t page_16k;
	size_t index;
	pte_t t;
	as_t *as = AS;

	tag.value = istate->tlb_tag_access;
	page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
	page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
	index = tag.vpn % MMU_PAGES_PER_PAGE;

	if (tag.context == ASID_KERNEL) {
		if (!tag.vpn) {
			/* NULL access in kernel */
			panic("NULL pointer dereference.");
		} else if (page_8k >= end_of_identity) {
			/* Kernel non-identity. */
			as = AS_KERNEL;
		} else {
			panic("Unexpected kernel page fault.");
		}
	}

	bool found = page_mapping_find(as, page_16k, true, &t);
	if (found) {
		assert(t.p);

		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into DTLB.
		 */
		t.a = true;
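		/*
		 * The entry is inserted read-only; the first write to the page
		 * will raise fast_data_access_protection(), which marks the
		 * PTE dirty and reinstalls the entry as writable.
		 */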
		dtlb_pte_copy(&t, index, true);
#ifdef CONFIG_TSB
		dtsb_pte_copy(&t, index, true);
#endif
		page_mapping_update(as, page_16k, true, &t);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		as_page_fault(page_16k, PF_ACCESS_READ, istate);
	}
}

/** DTLB protection fault handler.
 *
 * @param tt     Trap type.
 * @param istate Interrupted state saved on the stack.
 */
void fast_data_access_protection(unsigned int tt, istate_t *istate)
{
	tlb_tag_access_reg_t tag;
	uintptr_t page_16k;
	size_t index;
	pte_t t;
	as_t *as = AS;

	tag.value = istate->tlb_tag_access;
	page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
	index = tag.vpn % MMU_PAGES_PER_PAGE;	/* 16K-page emulation */

	if (tag.context == ASID_KERNEL)
		as = AS_KERNEL;

	bool found = page_mapping_find(as, page_16k, true, &t);
	if (found && PTE_WRITABLE(&t)) {
		assert(t.p);

		/*
		 * The mapping was found in the software page hash table and is
		 * writable. Demap the old mapping and insert an updated mapping
		 * into DTLB.
		 */
		t.a = true;
		t.d = true;
		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
		    page_16k + index * MMU_PAGE_SIZE);
		dtlb_pte_copy(&t, index, false);
#ifdef CONFIG_TSB
		dtsb_pte_copy(&t, index, false);
#endif
		page_mapping_update(as, page_16k, true, &t);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		as_page_fault(page_16k, PF_ACCESS_WRITE, istate);
	}
}

/** Print TLB entry (for debugging purposes).
 *
 * The diag field has been left out in order to make this function more generic
 * (there is no diag field in the US3 architecture).
 *
 * @param i TLB entry number
 * @param t TLB entry tag
 * @param d TLB entry data
 */
static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
{
	printf("%u: vpn=%#" PRIx64 ", context=%u, v=%u, size=%u, nfo=%u, "
	    "ie=%u, soft2=%#x, pfn=%#x, soft=%#x, l=%u, "
	    "cp=%u, cv=%u, e=%u, p=%u, w=%u, g=%u\n", i, (uint64_t) t.vpn,
	    t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
	    d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}

#if defined (US)

/** Print contents of both TLBs. */
void tlb_print(void)
{
	int i;
	tlb_data_t d;
	tlb_tag_read_reg_t t;

	printf("I-TLB contents:\n");
	for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
		d.value = itlb_data_access_read(i);
		t.value = itlb_tag_read_read(i);
		print_tlb_entry(i, t, d);
	}

	printf("D-TLB contents:\n");
	for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
		d.value = dtlb_data_access_read(i);
		t.value = dtlb_tag_read_read(i);
		print_tlb_entry(i, t, d);
	}
}

#elif defined (US3)

/** Print contents of all TLBs. */
void tlb_print(void)
{
	int i;
	tlb_data_t d;
	tlb_tag_read_reg_t t;

	printf("TLB_ISMALL contents:\n");
	for (i = 0; i < tlb_ismall_size(); i++) {
		d.value = dtlb_data_access_read(TLB_ISMALL, i);
		t.value = dtlb_tag_read_read(TLB_ISMALL, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_IBIG contents:\n");
	for (i = 0; i < tlb_ibig_size(); i++) {
		d.value = dtlb_data_access_read(TLB_IBIG, i);
		t.value = dtlb_tag_read_read(TLB_IBIG, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_DSMALL contents:\n");
	for (i = 0; i < tlb_dsmall_size(); i++) {
		d.value = dtlb_data_access_read(TLB_DSMALL, i);
		t.value = dtlb_tag_read_read(TLB_DSMALL, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_DBIG_1 contents:\n");
	for (i = 0; i < tlb_dbig_size(); i++) {
		d.value = dtlb_data_access_read(TLB_DBIG_0, i);
		t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_DBIG_2 contents:\n");
	for (i = 0; i < tlb_dbig_size(); i++) {
		d.value = dtlb_data_access_read(TLB_DBIG_1, i);
		t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
		print_tlb_entry(i, t, d);
	}
}

#endif

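/** Print the contents of the DMMU SFSR and SFAR registers and clear the SFSR. */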
void describe_dmmu_fault(void)
{
	tlb_sfsr_reg_t sfsr;
	uintptr_t sfar;

	sfsr.value = dtlb_sfsr_read();
	sfar = dtlb_sfar_read();

#if defined (US)
	printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
	    "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
	    sfsr.ow, sfsr.fv);
#elif defined (US3)
	printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
	    "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
	    sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
#endif

	printf("DTLB SFAR: address=%p\n", (void *) sfar);

	dtlb_sfsr_write(0);
}

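/** Dump the DMMU SFSR and SFAR registers and clear the SFSR. */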
void dump_sfsr_and_sfar(void)
{
	tlb_sfsr_reg_t sfsr;
	uintptr_t sfar;

	sfsr.value = dtlb_sfsr_read();
	sfar = dtlb_sfar_read();

#if defined (US)
	printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
	    "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
	    sfsr.ow, sfsr.fv);
#elif defined (US3)
	printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
	    "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
	    sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
#endif

	printf("DTLB SFAR: address=%p\n", (void *) sfar);

	dtlb_sfsr_write(0);
}

#if defined (US)
/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
	int i;

	/*
	 * Walk all ITLB and DTLB entries and remove all unlocked mappings.
	 *
	 * The kernel doesn't use global mappings so any locked global mappings
	 * found must have been created by someone else. Their only purpose now
	 * is to collide with proper mappings. Invalidate them immediately; it
	 * should be safe to do so even this late.
	 */

	tlb_data_t d;
	tlb_tag_read_reg_t t;

	for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
		d.value = itlb_data_access_read(i);
		if (!d.l || d.g) {
			t.value = itlb_tag_read_read(i);
			d.v = false;
			itlb_tag_access_write(t.value);
			itlb_data_access_write(i, d.value);
		}
	}

	for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
		d.value = dtlb_data_access_read(i);
		if (!d.l || d.g) {
			t.value = dtlb_tag_read_read(i);
			d.v = false;
			dtlb_tag_access_write(t.value);
			dtlb_data_access_write(i, d.value);
		}
	}
}

#elif defined (US3)

/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
	itlb_demap(TLB_DEMAP_ALL, 0, 0);
	dtlb_demap(TLB_DEMAP_ALL, 0, 0);
}

#endif

/** Invalidate all ITLB and DTLB entries that belong to the specified ASID
 * (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
	tlb_context_reg_t pc_save, ctx;

	/* switch to nucleus because we are mapped by the primary context */
	nucleus_enter();

	ctx.v = pc_save.v = mmu_primary_context_read();
	ctx.context = asid;
	mmu_primary_context_write(ctx.v);

	itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
	dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);

	mmu_primary_context_write(pc_save.v);

	nucleus_leave();
}

/** Invalidate all ITLB and DTLB entries for the specified page range in the
 * specified address space.
 *
 * @param asid Address Space ID.
 * @param page First page to sweep out of the ITLB and DTLB.
 * @param cnt  Number of ITLB and DTLB entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
	unsigned int i;
	tlb_context_reg_t pc_save, ctx;

	/* switch to nucleus because we are mapped by the primary context */
	nucleus_enter();

	ctx.v = pc_save.v = mmu_primary_context_read();
	ctx.context = asid;
	mmu_primary_context_write(ctx.v);

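	/*
	 * Each kernel page consists of MMU_PAGES_PER_PAGE 8K MMU pages,
	 * so demap every MMU page in the range.
	 */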
	for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
		itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
		    page + i * MMU_PAGE_SIZE);
		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
		    page + i * MMU_PAGE_SIZE);
	}

	mmu_primary_context_write(pc_save.v);

	nucleus_leave();
}

/** @}
 */