source: mainline/kernel/arch/sparc64/src/mm/sun4u/tlb.c@ db71e2a

Last change on this file since db71e2a was 59fb782, checked in by Jakub Jermar <jakub@…>, 13 years ago

Unify the use of virtual addresses and virtual page addresses in mm code.

  • as_page_fault() accepts faulting address (if available) and propagates the faulting page further along
  • backends' page_fault() handlers assume page fault address
  • page_mapping_create/destroy/find() accept addresses, but pass only page and frame addresses along
  • as_area_create(), as_area_resize() now test whether the address is page-aligned
  • renames of various variables to better fit their purpose (address vs. page)
  • no need to align the addresses in mips32 TLB exception handlers now
File size: 13.9 KB
/*
 * Copyright (c) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <arch/mm/tlb.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/mmu.h>
#include <arch/interrupt.h>
#include <interrupt.h>
#include <arch.h>
#include <print.h>
#include <typedefs.h>
#include <config.h>
#include <arch/trap/trap.h>
#include <arch/trap/exception.h>
#include <panic.h>
#include <arch/asm.h>
#include <genarch/mm/page_ht.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif

static void dtlb_pte_copy(pte_t *, size_t, bool);
static void itlb_pte_copy(pte_t *, size_t);

const char *context_encoding[] = {
    "Primary",
    "Secondary",
    "Nucleus",
    "Reserved"
};

void tlb_arch_init(void)
{
    /*
     * Invalidate all non-locked DTLB and ITLB entries.
     */
    tlb_invalidate_all();

    /*
     * Clear both SFSRs.
     */
    dtlb_sfsr_write(0);
    itlb_sfsr_write(0);
}

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page      Virtual page address.
 * @param frame     Physical frame address.
 * @param pagesize  Page size.
 * @param locked    True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = page;
    fr.address = frame;

    tag.context = ASID_KERNEL;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = pagesize;
    data.pfn = fr.pfn;
    data.l = locked;
    data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = cacheable;
#endif /* CONFIG_VIRT_IDX_DCACHE */
    data.p = true;
    data.w = true;
    data.g = false;

    dtlb_data_in_write(data.value);
}
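
/*
 * Example (a sketch with hypothetical values): a permanent, cacheable
 * identity mapping of one 4M kernel page could be created with:
 *
 *     dtlb_insert_mapping(0x40000000, 0x40000000, PAGESIZE_4M,
 *         true, true);
 *
 * PAGESIZE_4M is assumed here to be one of the page-size constants
 * defined alongside PAGESIZE_8K in the sun4u MMU headers.
 */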

/** Copy PTE to DTLB.
 *
 * @param t     Page Table Entry to be copied.
 * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
 * @param ro    If true, the entry will be created read-only, regardless
 *              of its w field.
 */
void dtlb_pte_copy(pte_t *t, size_t index, bool ro)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = t->page + (index << MMU_PAGE_WIDTH);
    fr.address = t->frame + (index << MMU_PAGE_WIDTH);

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_DCACHE */
    data.p = t->k;  /* p like privileged */
    data.w = ro ? false : t->w;
    data.g = t->g;

    dtlb_data_in_write(data.value);
}

/** Copy PTE to ITLB.
 *
 * @param t     Page Table Entry to be copied.
 * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
 */
void itlb_pte_copy(pte_t *t, size_t index)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = t->page + (index << MMU_PAGE_WIDTH);
    fr.address = t->frame + (index << MMU_PAGE_WIDTH);

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    itlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
    data.p = t->k;  /* p like privileged */
    data.w = false;
    data.g = t->g;

    itlb_data_in_write(data.value);
}
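
/*
 * Unlike dtlb_pte_copy(), itlb_pte_copy() never sets the writable bit:
 * the inserted entry only ever backs instruction fetches, so write
 * permission is irrelevant here.
 */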

/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(sysarg_t unused, istate_t *istate)
{
    size_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
    pte_t *t;

    t = page_mapping_find(AS, istate->tpc, true);
    if (t && PTE_EXECUTABLE(t)) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into ITLB.
         */
        t->a = true;
        itlb_pte_copy(t, index);
#ifdef CONFIG_TSB
        itsb_pte_copy(t, index);
#endif
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        as_page_fault(istate->tpc, PF_ACCESS_EXEC, istate);
    }
}

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param tag    Content of the TLB Tag Access register as it existed
 *               when the trap happened. This is to prevent confusion
 *               created by clobbered Tag Access register during a nested
 *               DTLB miss.
 * @param istate Interrupted state saved on the stack.
 */
void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
{
    uintptr_t page_8k;
    uintptr_t page_16k;
    size_t index;
    pte_t *t;
    as_t *as = AS;

    page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
    page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
    index = tag.vpn % MMU_PAGES_PER_PAGE;

    if (tag.context == ASID_KERNEL) {
        if (!tag.vpn) {
            /* NULL access in kernel */
            panic("NULL pointer dereference.");
        } else if (page_8k >= end_of_identity) {
            /* Kernel non-identity. */
            as = AS_KERNEL;
        } else {
            panic("Unexpected kernel page fault.");
        }
    }

    t = page_mapping_find(as, page_16k, true);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into DTLB.
         */
        t->a = true;
        dtlb_pte_copy(t, index, true);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, index, true);
#endif
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        as_page_fault(page_16k, PF_ACCESS_READ, istate);
    }
}
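
/*
 * Worked example of the 16K-page emulation above, assuming 8K MMU pages
 * (MMU_PAGE_WIDTH == 13) and 16K kernel pages (MMU_PAGES_PER_PAGE == 2):
 * for a miss at virtual address 0x4000a000, tag.vpn == 0x20005, so
 * page_8k == 0x4000a000, page_16k == 0x40008000 and index == 1, i.e. the
 * higher 8K-subpage of the surrounding 16K page.
 */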

/** DTLB protection fault handler.
 *
 * @param tag    Content of the TLB Tag Access register as it existed
 *               when the trap happened. This is to prevent confusion
 *               created by clobbered Tag Access register during a nested
 *               DTLB miss.
 * @param istate Interrupted state saved on the stack.
 */
void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
{
    uintptr_t page_16k;
    size_t index;
    pte_t *t;
    as_t *as = AS;

    page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
    index = tag.vpn % MMU_PAGES_PER_PAGE;  /* 16K-page emulation */

    if (tag.context == ASID_KERNEL)
        as = AS_KERNEL;

    t = page_mapping_find(as, page_16k, true);
    if (t && PTE_WRITABLE(t)) {
        /*
         * The mapping was found in the software page hash table and is
         * writable. Demap the old mapping and insert an updated mapping
         * into DTLB.
         */
        t->a = true;
        t->d = true;
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
            page_16k + index * MMU_PAGE_SIZE);
        dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, index, false);
#endif
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        as_page_fault(page_16k, PF_ACCESS_WRITE, istate);
    }
}
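
/*
 * The demap address above, page_16k + index * MMU_PAGE_SIZE, reconstructs
 * exactly the 8K MMU page that faulted, so only the stale subpage entry
 * is removed before the updated one is inserted.
 */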

/** Print TLB entry (for debugging purposes).
 *
 * The diag field has been left out in order to make this function more
 * generic (there is no diag field in the US3 architecture).
 *
 * @param i TLB entry number
 * @param t TLB entry tag
 * @param d TLB entry data
 */
static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
{
    printf("%u: vpn=%#" PRIx64 ", context=%u, v=%u, size=%u, nfo=%u, "
        "ie=%u, soft2=%#x, pfn=%#x, soft=%#x, l=%u, "
        "cp=%u, cv=%u, e=%u, p=%u, w=%u, g=%u\n", i, (uint64_t) t.vpn,
        t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
        d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}

#if defined (US)

/** Print contents of both TLBs. */
void tlb_print(void)
{
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    printf("I-TLB contents:\n");
    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        t.value = itlb_tag_read_read(i);
        print_tlb_entry(i, t, d);
    }

    printf("D-TLB contents:\n");
    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        t.value = dtlb_tag_read_read(i);
        print_tlb_entry(i, t, d);
    }
}

#elif defined (US3)

/** Print contents of all TLBs. */
void tlb_print(void)
{
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    printf("TLB_ISMALL contents:\n");
    for (i = 0; i < tlb_ismall_size(); i++) {
        d.value = dtlb_data_access_read(TLB_ISMALL, i);
        t.value = dtlb_tag_read_read(TLB_ISMALL, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_IBIG contents:\n");
    for (i = 0; i < tlb_ibig_size(); i++) {
        d.value = dtlb_data_access_read(TLB_IBIG, i);
        t.value = dtlb_tag_read_read(TLB_IBIG, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_DSMALL contents:\n");
    for (i = 0; i < tlb_dsmall_size(); i++) {
        d.value = dtlb_data_access_read(TLB_DSMALL, i);
        t.value = dtlb_tag_read_read(TLB_DSMALL, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_DBIG_1 contents:\n");
    for (i = 0; i < tlb_dbig_size(); i++) {
        d.value = dtlb_data_access_read(TLB_DBIG_0, i);
        t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_DBIG_2 contents:\n");
    for (i = 0; i < tlb_dbig_size(); i++) {
        d.value = dtlb_data_access_read(TLB_DBIG_1, i);
        t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
        print_tlb_entry(i, t, d);
    }
}

#endif

/** Decode and print the DMMU SFSR and SFAR registers, then clear the SFSR. */
void describe_dmmu_fault(void)
{
    tlb_sfsr_reg_t sfsr;
    uintptr_t sfar;

    sfsr.value = dtlb_sfsr_read();
    sfar = dtlb_sfar_read();

#if defined (US)
    printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
        "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
        sfsr.ow, sfsr.fv);
#elif defined (US3)
    printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
        "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
        sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
#endif

    printf("DTLB SFAR: address=%p\n", (void *) sfar);

    dtlb_sfsr_write(0);
}

/** Dump the DMMU SFSR and SFAR registers and clear the SFSR. */
void dump_sfsr_and_sfar(void)
{
    tlb_sfsr_reg_t sfsr;
    uintptr_t sfar;

    sfsr.value = dtlb_sfsr_read();
    sfar = dtlb_sfar_read();

#if defined (US)
    printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
        "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
        sfsr.ow, sfsr.fv);
#elif defined (US3)
    printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
        "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
        sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
#endif

    printf("DTLB SFAR: address=%p\n", (void *) sfar);

    dtlb_sfsr_write(0);
}

#if defined (US)
/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
    int i;

    /*
     * Walk all ITLB and DTLB entries and remove all unlocked mappings.
     *
     * The kernel doesn't use global mappings, so any locked global
     * mappings found here must have been created by someone else. Their
     * only purpose now is to collide with proper mappings, so it is both
     * safe and necessary to invalidate them at this point.
     */

    tlb_data_t d;
    tlb_tag_read_reg_t t;

    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        if (!d.l || d.g) {
            t.value = itlb_tag_read_read(i);
            d.v = false;
            itlb_tag_access_write(t.value);
            itlb_data_access_write(i, d.value);
        }
    }

    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        if (!d.l || d.g) {
            t.value = dtlb_tag_read_read(i);
            d.v = false;
            dtlb_tag_access_write(t.value);
            dtlb_data_access_write(i, d.value);
        }
    }
}

#elif defined (US3)

/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
    itlb_demap(TLB_DEMAP_ALL, 0, 0);
    dtlb_demap(TLB_DEMAP_ALL, 0, 0);
}

#endif

/** Invalidate all ITLB and DTLB entries that belong to specified ASID
 * (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

    itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
    dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);

    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
}
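
/*
 * Typical use (a sketch, not taken from this file): generic ASID
 * management code would be expected to call tlb_invalidate_asid(asid)
 * before an Address Space ID is recycled for another address space.
 */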

/** Invalidate all ITLB and DTLB entries for specified page range in specified
 * address space.
 *
 * @param asid Address Space ID.
 * @param page First page to sweep out of the ITLB and DTLB.
 * @param cnt  Number of ITLB and DTLB entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
    unsigned int i;
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

    for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
        itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * MMU_PAGE_SIZE);
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * MMU_PAGE_SIZE);
    }

    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
}
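
/*
 * Illustration, assuming 8K MMU pages and 16K kernel pages: a call such
 * as tlb_invalidate_pages(asid, 0x40000000, 2) demaps the four 8K MMU
 * pages at 0x40000000, 0x40002000, 0x40004000 and 0x40006000 from both
 * the ITLB and the DTLB.
 */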

/** @}
 */