source: mainline/kernel/arch/sparc64/src/mm/sun4u/tlb.c@ 96b02eb9

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 96b02eb9 was 96b02eb9, checked in by Martin Decky <martin@…>, 15 years ago

more unification of basic types

  • use sysarg_t and native_t (unsigned and signed variant) in both kernel and uspace
  • remove ipcarg_t in favour of sysarg_t

(no change in functionality)

  • Property mode set to 100644
File size: 15.8 KB
Line 
1/*
2 * Copyright (c) 2005 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup sparc64mm
30 * @{
31 */
32/** @file
33 */
34
35#include <arch/mm/tlb.h>
36#include <mm/tlb.h>
37#include <mm/as.h>
38#include <mm/asid.h>
39#include <arch/mm/frame.h>
40#include <arch/mm/page.h>
41#include <arch/mm/mmu.h>
42#include <arch/interrupt.h>
43#include <interrupt.h>
44#include <arch.h>
45#include <print.h>
46#include <typedefs.h>
47#include <config.h>
48#include <arch/trap/trap.h>
49#include <arch/trap/exception.h>
50#include <panic.h>
51#include <arch/asm.h>
52#include <genarch/mm/page_ht.h>
53
54#ifdef CONFIG_TSB
55#include <arch/mm/tsb.h>
56#endif
57
58static void dtlb_pte_copy(pte_t *, size_t, bool);
59static void itlb_pte_copy(pte_t *, size_t);
60static void do_fast_instruction_access_mmu_miss_fault(istate_t *, uintptr_t,
61 const char *);
62static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
63 const char *);
64static void do_fast_data_access_protection_fault(istate_t *,
65 tlb_tag_access_reg_t, const char *);
66
/*
 * Symbolic names for the four MMU context selectors, indexed by the
 * hardware context encoding (presumably used by debug output elsewhere;
 * not referenced in this file).
 */
const char *context_encoding[] = {
	"Primary",
	"Secondary",
	"Nucleus",
	"Reserved"
};
73
/** Initialize the TLBs on the current CPU.
 *
 * Flushes all non-locked translations and clears both fault status
 * registers so that no stale fault information survives boot.
 */
void tlb_arch_init(void)
{
	/*
	 * Invalidate all non-locked DTLB and ITLB entries.
	 */
	tlb_invalidate_all();

	/*
	 * Clear both SFSRs (the DTLB and ITLB Synchronous Fault Status
	 * Registers, printed by describe_dmmu_fault() below).
	 */
	dtlb_sfsr_write(0);
	itlb_sfsr_write(0);
}
87
88/** Insert privileged mapping into DMMU TLB.
89 *
90 * @param page Virtual page address.
91 * @param frame Physical frame address.
92 * @param pagesize Page size.
93 * @param locked True for permanent mappings, false otherwise.
94 * @param cacheable True if the mapping is cacheable, false otherwise.
95 */
96void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
97 bool locked, bool cacheable)
98{
99 tlb_tag_access_reg_t tag;
100 tlb_data_t data;
101 page_address_t pg;
102 frame_address_t fr;
103
104 pg.address = page;
105 fr.address = frame;
106
107 tag.context = ASID_KERNEL;
108 tag.vpn = pg.vpn;
109
110 dtlb_tag_access_write(tag.value);
111
112 data.value = 0;
113 data.v = true;
114 data.size = pagesize;
115 data.pfn = fr.pfn;
116 data.l = locked;
117 data.cp = cacheable;
118#ifdef CONFIG_VIRT_IDX_DCACHE
119 data.cv = cacheable;
120#endif /* CONFIG_VIRT_IDX_DCACHE */
121 data.p = true;
122 data.w = true;
123 data.g = false;
124
125 dtlb_data_in_write(data.value);
126}
127
128/** Copy PTE to TLB.
129 *
130 * @param t Page Table Entry to be copied.
131 * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
132 * @param ro If true, the entry will be created read-only, regardless
133 * of its w field.
134 */
135void dtlb_pte_copy(pte_t *t, size_t index, bool ro)
136{
137 tlb_tag_access_reg_t tag;
138 tlb_data_t data;
139 page_address_t pg;
140 frame_address_t fr;
141
142 pg.address = t->page + (index << MMU_PAGE_WIDTH);
143 fr.address = t->frame + (index << MMU_PAGE_WIDTH);
144
145 tag.value = 0;
146 tag.context = t->as->asid;
147 tag.vpn = pg.vpn;
148
149 dtlb_tag_access_write(tag.value);
150
151 data.value = 0;
152 data.v = true;
153 data.size = PAGESIZE_8K;
154 data.pfn = fr.pfn;
155 data.l = false;
156 data.cp = t->c;
157#ifdef CONFIG_VIRT_IDX_DCACHE
158 data.cv = t->c;
159#endif /* CONFIG_VIRT_IDX_DCACHE */
160 data.p = t->k; /* p like privileged */
161 data.w = ro ? false : t->w;
162 data.g = t->g;
163
164 dtlb_data_in_write(data.value);
165}
166
167/** Copy PTE to ITLB.
168 *
169 * @param t Page Table Entry to be copied.
170 * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
171 */
172void itlb_pte_copy(pte_t *t, size_t index)
173{
174 tlb_tag_access_reg_t tag;
175 tlb_data_t data;
176 page_address_t pg;
177 frame_address_t fr;
178
179 pg.address = t->page + (index << MMU_PAGE_WIDTH);
180 fr.address = t->frame + (index << MMU_PAGE_WIDTH);
181
182 tag.value = 0;
183 tag.context = t->as->asid;
184 tag.vpn = pg.vpn;
185
186 itlb_tag_access_write(tag.value);
187
188 data.value = 0;
189 data.v = true;
190 data.size = PAGESIZE_8K;
191 data.pfn = fr.pfn;
192 data.l = false;
193 data.cp = t->c;
194 data.p = t->k; /* p like privileged */
195 data.w = false;
196 data.g = t->g;
197
198 itlb_data_in_write(data.value);
199}
200
/** ITLB miss handler.
 *
 * @param unused Trap argument, not used by this handler.
 * @param istate Interrupted state saved on the stack (tpc holds the
 *               faulting instruction address).
 */
void fast_instruction_access_mmu_miss(sysarg_t unused, istate_t *istate)
{
	/* Base of the faulting native page (PAGE_SIZE-aligned). */
	uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
	/* Which 8K MMU subpage within the native page faulted. */
	size_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
	pte_t *t;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, page_16k);
	if (t && PTE_EXECUTABLE(t)) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into ITLB.
		 */
		t->a = true;	/* record the access in the PTE */
		itlb_pte_copy(t, index);
#ifdef CONFIG_TSB
		itsb_pte_copy(t, index);
#endif
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler. The page table must be unlocked first.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) ==
		    AS_PF_FAULT) {
			do_fast_instruction_access_mmu_miss_fault(istate,
			    istate->tpc, __func__);
		}
	}
}
234
/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param tag Content of the TLB Tag Access register as it existed
 * when the trap happened. This is to prevent confusion
 * created by clobbered Tag Access register during a nested
 * DTLB miss.
 * @param istate Interrupted state saved on the stack.
 */
void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
{
	uintptr_t page_8k;
	uintptr_t page_16k;
	size_t index;
	pte_t *t;

	/* Faulting 8K MMU page, its enclosing native page and subpage index. */
	page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
	page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
	index = tag.vpn % MMU_PAGES_PER_PAGE;

	if (tag.context == ASID_KERNEL) {
		if (!tag.vpn) {
			/* NULL access in kernel */
			do_fast_data_access_mmu_miss_fault(istate, tag,
			    "Dereferencing NULL pointer.");
		} else if (page_8k >= end_of_identity) {
			/*
			 * The kernel is accessing the I/O space.
			 * We still do identity mapping for I/O,
			 * but without caching.
			 */
			dtlb_insert_mapping(page_8k, KA2PA(page_8k),
			    PAGESIZE_8K, false, false);
			return;
		}
		/* Any other kernel-context miss is fatal. */
		do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
		    "kernel page fault.");
	}

	page_table_lock(AS, true);
	t = page_mapping_find(AS, page_16k);
	if (t) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into DTLB. The entry is inserted read-only
		 * (third argument true) so that the first write traps to
		 * fast_data_access_protection(), which sets the dirty bit.
		 */
		t->a = true;	/* record the access in the PTE */
		dtlb_pte_copy(t, index, true);
#ifdef CONFIG_TSB
		dtsb_pte_copy(t, index, true);
#endif
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler. The page table must be unlocked first.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(page_16k, PF_ACCESS_READ, istate) ==
		    AS_PF_FAULT) {
			do_fast_data_access_mmu_miss_fault(istate, tag,
			    __func__);
		}
	}
}
302
/** DTLB protection fault handler.
 *
 * @param tag Content of the TLB Tag Access register as it existed
 * when the trap happened. This is to prevent confusion
 * created by clobbered Tag Access register during a nested
 * DTLB miss.
 * @param istate Interrupted state saved on the stack.
 */
void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
{
	uintptr_t page_16k;
	size_t index;
	pte_t *t;

	page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
	index = tag.vpn % MMU_PAGES_PER_PAGE; /* 16K-page emulation */

	page_table_lock(AS, true);
	t = page_mapping_find(AS, page_16k);
	if (t && PTE_WRITABLE(t)) {
		/*
		 * The mapping was found in the software page hash table and is
		 * writable. Demap the old mapping and insert an updated mapping
		 * into DTLB.
		 *
		 * NOTE(review): the demap targets TLB_DEMAP_SECONDARY while
		 * the miss path inserts via the primary context — verify this
		 * is intentional.
		 */
		t->a = true;	/* accessed */
		t->d = true;	/* dirty: the write is now allowed to proceed */
		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
		    page_16k + index * MMU_PAGE_SIZE);
		dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
		dtsb_pte_copy(t, index, false);
#endif
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler. The page table must be unlocked first.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) ==
		    AS_PF_FAULT) {
			do_fast_data_access_protection_fault(istate, tag,
			    __func__);
		}
	}
}
350
/** Print TLB entry (for debugging purposes).
 *
 * The diag field has been left out in order to make this function more generic
 * (there is no diag field in US3 architecture).
 *
 * @param i TLB entry number
 * @param t TLB entry tag
 * @param d TLB entry data
 */
static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
{
	printf("%u: vpn=%#" PRIx64 ", context=%u, v=%u, size=%u, nfo=%u, "
	    "ie=%u, soft2=%#x, pfn=%#x, soft=%#x, l=%u, "
	    "cp=%u, cv=%u, e=%u, p=%u, w=%u, g=%u\n", i, (uint64_t) t.vpn,
	    t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
	    d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}
368
369#if defined (US)
370
371/** Print contents of both TLBs. */
372void tlb_print(void)
373{
374 int i;
375 tlb_data_t d;
376 tlb_tag_read_reg_t t;
377
378 printf("I-TLB contents:\n");
379 for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
380 d.value = itlb_data_access_read(i);
381 t.value = itlb_tag_read_read(i);
382 print_tlb_entry(i, t, d);
383 }
384
385 printf("D-TLB contents:\n");
386 for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
387 d.value = dtlb_data_access_read(i);
388 t.value = dtlb_tag_read_read(i);
389 print_tlb_entry(i, t, d);
390 }
391}
392
393#elif defined (US3)
394
/** Print contents of all TLBs (UltraSPARC III).
 *
 * Note: the printed labels TLB_DBIG_1/TLB_DBIG_2 are 1-based while the
 * constants TLB_DBIG_0/TLB_DBIG_1 are 0-based; the pairing below is
 * intentional, not an off-by-one.
 */
void tlb_print(void)
{
	int i;
	tlb_data_t d;
	tlb_tag_read_reg_t t;

	printf("TLB_ISMALL contents:\n");
	for (i = 0; i < tlb_ismall_size(); i++) {
		d.value = dtlb_data_access_read(TLB_ISMALL, i);
		t.value = dtlb_tag_read_read(TLB_ISMALL, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_IBIG contents:\n");
	for (i = 0; i < tlb_ibig_size(); i++) {
		d.value = dtlb_data_access_read(TLB_IBIG, i);
		t.value = dtlb_tag_read_read(TLB_IBIG, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_DSMALL contents:\n");
	for (i = 0; i < tlb_dsmall_size(); i++) {
		d.value = dtlb_data_access_read(TLB_DSMALL, i);
		t.value = dtlb_tag_read_read(TLB_DSMALL, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_DBIG_1 contents:\n");
	for (i = 0; i < tlb_dbig_size(); i++) {
		d.value = dtlb_data_access_read(TLB_DBIG_0, i);
		t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_DBIG_2 contents:\n");
	for (i = 0; i < tlb_dbig_size(); i++) {
		d.value = dtlb_data_access_read(TLB_DBIG_1, i);
		t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
		print_tlb_entry(i, t, d);
	}
}
437
438#endif
439
/** Report an unresolvable ITLB miss.
 *
 * Kills the offending userspace task, or panics if the fault originated
 * in the kernel.
 *
 * @param istate Interrupted state saved on the stack.
 * @param va     Faulting virtual address.
 * @param str    Human-readable description of the fault origin.
 */
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    uintptr_t va, const char *str)
{
	fault_if_from_uspace(istate, "%s, address=%p.", str, (void *) va);
	panic_memtrap(istate, PF_ACCESS_EXEC, va, str);
}
446
447void do_fast_data_access_mmu_miss_fault(istate_t *istate,
448 tlb_tag_access_reg_t tag, const char *str)
449{
450 uintptr_t va;
451
452 va = tag.vpn << MMU_PAGE_WIDTH;
453 fault_if_from_uspace(istate, "%s, page=%p (asid=%u).", str,
454 (void *) va, tag.context);
455 panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, str);
456}
457
458void do_fast_data_access_protection_fault(istate_t *istate,
459 tlb_tag_access_reg_t tag, const char *str)
460{
461 uintptr_t va;
462
463 va = tag.vpn << MMU_PAGE_WIDTH;
464 fault_if_from_uspace(istate, "%s, page=%p (asid=%u).", str,
465 (void *) va, tag.context);
466 panic_memtrap(istate, PF_ACCESS_WRITE, va, str);
467}
468
/** Print and acknowledge the DMMU fault status.
 *
 * Reads the DTLB Synchronous Fault Status Register (SFSR) and Synchronous
 * Fault Address Register (SFAR), prints their fields and finally clears
 * the SFSR so that the next fault is recorded cleanly.
 */
void describe_dmmu_fault(void)
{
	tlb_sfsr_reg_t sfsr;
	uintptr_t sfar;

	sfsr.value = dtlb_sfsr_read();
	sfar = dtlb_sfar_read();

#if defined (US)
	printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
	    "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
	    sfsr.ow, sfsr.fv);
#elif defined (US3)
	/* US3 SFSR carries the additional nf and tm fields. */
	printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
	    "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
	    sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
#endif

	printf("DTLB SFAR: address=%p\n", (void *) sfar);

	/* Clear the SFSR to acknowledge the fault. */
	dtlb_sfsr_write(0);
}
491
/* Defined earlier in this file. */
void describe_dmmu_fault(void);

/** Dump and clear the DMMU SFSR and SFAR registers.
 *
 * The previous body was a byte-for-byte duplicate of describe_dmmu_fault();
 * delegate to it so the register decoding and format strings are maintained
 * in one place. Behavior is unchanged.
 */
void dump_sfsr_and_sfar(void)
{
	describe_dmmu_fault();
}
514
515#if defined (US)
/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
	int i;

	/*
	 * Walk all ITLB and DTLB entries and remove all unlocked mappings.
	 *
	 * The kernel doesn't use global mappings so any locked global mappings
	 * found must have been created by someone else. Their only purpose now
	 * is to collide with proper mappings. Invalidate immediately. It should
	 * be safe to invalidate them as late as now.
	 */

	tlb_data_t d;
	tlb_tag_read_reg_t t;

	for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
		d.value = itlb_data_access_read(i);
		/* Remove unlocked entries and any global ones (see above). */
		if (!d.l || d.g) {
			t.value = itlb_tag_read_read(i);
			d.v = false;	/* clearing the valid bit invalidates */
			/* The tag must be programmed before the data write. */
			itlb_tag_access_write(t.value);
			itlb_data_access_write(i, d.value);
		}
	}

	for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
		d.value = dtlb_data_access_read(i);
		/* Remove unlocked entries and any global ones (see above). */
		if (!d.l || d.g) {
			t.value = dtlb_tag_read_read(i);
			d.v = false;	/* clearing the valid bit invalidates */
			/* The tag must be programmed before the data write. */
			dtlb_tag_access_write(t.value);
			dtlb_data_access_write(i, d.value);
		}
	}

}
554
555#elif defined (US3)
556
/** Invalidate all unlocked ITLB and DTLB entries.
 *
 * On US3 a single demap-all operation per MMU suffices; no per-entry
 * walking is needed.
 */
void tlb_invalidate_all(void)
{
	itlb_demap(TLB_DEMAP_ALL, 0, 0);
	dtlb_demap(TLB_DEMAP_ALL, 0, 0);
}
563
564#endif
565
/** Invalidate all ITLB and DTLB entries that belong to specified ASID
 * (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
	tlb_context_reg_t pc_save, ctx;

	/* switch to nucleus because we are mapped by the primary context */
	nucleus_enter();

	/* Temporarily make the target ASID the primary context. */
	ctx.v = pc_save.v = mmu_primary_context_read();
	ctx.context = asid;
	mmu_primary_context_write(ctx.v);

	itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
	dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);

	/* Restore the original primary context. */
	mmu_primary_context_write(pc_save.v);

	nucleus_leave();
}
589
590/** Invalidate all ITLB and DTLB entries for specified page range in specified
591 * address space.
592 *
593 * @param asid Address Space ID.
594 * @param page First page which to sweep out from ITLB and DTLB.
595 * @param cnt Number of ITLB and DTLB entries to invalidate.
596 */
597void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
598{
599 unsigned int i;
600 tlb_context_reg_t pc_save, ctx;
601
602 /* switch to nucleus because we are mapped by the primary context */
603 nucleus_enter();
604
605 ctx.v = pc_save.v = mmu_primary_context_read();
606 ctx.context = asid;
607 mmu_primary_context_write(ctx.v);
608
609 for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
610 itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
611 page + i * MMU_PAGE_SIZE);
612 dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
613 page + i * MMU_PAGE_SIZE);
614 }
615
616 mmu_primary_context_write(pc_save.v);
617
618 nucleus_leave();
619}
620
621/** @}
622 */
Note: See TracBrowser for help on using the repository browser.