source: mainline/kernel/arch/sparc64/src/mm/tlb.c @ 8e8c1a5

Last change on this file since 8e8c1a5 was b3f8fb7, checked in by Martin Decky <martin@…>, 19 years ago

huge type system cleanup
remove cyclical type dependencies across multiple header files
many minor coding style fixes

File size: 12.0 KB
/*
 * Copyright (c) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <arch/mm/tlb.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/mmu.h>
#include <arch/interrupt.h>
#include <interrupt.h>
#include <arch.h>
#include <print.h>
#include <arch/types.h>
#include <config.h>
#include <arch/trap/trap.h>
#include <arch/trap/exception.h>
#include <panic.h>
#include <arch/asm.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif

static void dtlb_pte_copy(pte_t *t, bool ro);
static void itlb_pte_copy(pte_t *t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str);
static void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str);
static void do_fast_data_access_protection_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str);

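/** Human-readable names of the MMU context encodings. */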
char *context_encoding[] = {
    "Primary",
    "Secondary",
    "Nucleus",
    "Reserved"
};

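/** Initialize ITLB and DTLB.
 *
 * Invalidate all non-locked entries and clear both Synchronous Fault Status
 * registers.
 */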
void tlb_arch_init(void)
{
    /*
     * Invalidate all non-locked DTLB and ITLB entries.
     */
    tlb_invalidate_all();

    /*
     * Clear both SFSRs.
     */
    dtlb_sfsr_write(0);
    itlb_sfsr_write(0);
}

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page Virtual page address.
 * @param frame Physical frame address.
 * @param pagesize Page size.
 * @param locked True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = page;
    fr.address = frame;

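    /*
     * Construct the TLB tag: the kernel context (ASID_KERNEL) plus the
     * virtual page number.
     */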
    tag.value = ASID_KERNEL;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = pagesize;
    data.pfn = fr.pfn;
    data.l = locked;
    data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = cacheable;
#endif /* CONFIG_VIRT_IDX_DCACHE */
    data.p = true;
    data.w = true;
    data.g = false;

    dtlb_data_in_write(data.value);
}

/** Copy PTE to DTLB.
 *
 * @param t Page Table Entry to be copied.
 * @param ro If true, the entry will be created read-only, regardless of its
 *     w field.
 */
void dtlb_pte_copy(pte_t *t, bool ro)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = t->page;
    fr.address = t->frame;

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_DCACHE */
    data.p = t->k;  /* p as in privileged */
    data.w = ro ? false : t->w;
    data.g = t->g;

    dtlb_data_in_write(data.value);
}

/** Copy PTE to ITLB.
 *
 * @param t Page Table Entry to be copied.
 */
void itlb_pte_copy(pte_t *t)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = t->page;
    fr.address = t->frame;

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    itlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
    data.p = t->k;  /* p as in privileged */
    data.w = false;
    data.g = t->g;

    itlb_data_in_write(data.value);
}

/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(int n, istate_t *istate)
{
    uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
    pte_t *t;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_EXECUTABLE(t)) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into ITLB.
         */
        t->a = true;
        itlb_pte_copy(t);
#ifdef CONFIG_TSB
        itsb_pte_copy(t);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            do_fast_instruction_access_mmu_miss_fault(istate,
                __FUNCTION__);
        }
    }
}

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 */
void fast_data_access_mmu_miss(int n, istate_t *istate)
{
    tlb_tag_access_reg_t tag;
    uintptr_t va;
    pte_t *t;

    tag.value = dtlb_tag_access_read();
    va = tag.vpn << PAGE_WIDTH;

    if (tag.context == ASID_KERNEL) {
        if (!tag.vpn) {
            /* NULL access in kernel */
            do_fast_data_access_mmu_miss_fault(istate, tag,
                __FUNCTION__);
        }
        do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
            "kernel page fault.");
    }

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into DTLB.
         */
        t->a = true;
        dtlb_pte_copy(t, true);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, true);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            do_fast_data_access_mmu_miss_fault(istate, tag,
                __FUNCTION__);
        }
    }
}

/** DTLB protection fault handler. */
void fast_data_access_protection(int n, istate_t *istate)
{
    tlb_tag_access_reg_t tag;
    uintptr_t va;
    pte_t *t;

    tag.value = dtlb_tag_access_read();
    va = tag.vpn << PAGE_WIDTH;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_WRITABLE(t)) {
        /*
         * The mapping was found in the software page hash table and is
         * writable. Demap the old mapping and insert an updated mapping
         * into DTLB.
         */
        t->a = true;
        t->d = true;
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, va);
        dtlb_pte_copy(t, false);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, false);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
            do_fast_data_access_protection_fault(istate, tag,
                __FUNCTION__);
        }
    }
}

/** Print contents of both TLBs. */
void tlb_print(void)
{
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    printf("I-TLB contents:\n");
    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        t.value = itlb_tag_read_read(i);

        printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
            "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
            "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
            t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
            d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
    }

    printf("D-TLB contents:\n");
    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        t.value = dtlb_tag_read_read(i);

        printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
            "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
            "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
            t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
            d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
    }
}

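/** Report an ITLB miss that could not be serviced. */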
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str)
{
    fault_if_from_uspace(istate, "%s\n", str);
    dump_istate(istate);
    panic("%s\n", str);
}

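/** Report a DTLB miss that could not be serviced, including the faulting
 * page and ASID.
 */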
void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
    uintptr_t va;

    va = tag.vpn << PAGE_WIDTH;

    fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
        tag.context);
    dump_istate(istate);
    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
    panic("%s\n", str);
}

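/** Report a DTLB protection fault that could not be serviced, including the
 * faulting page and ASID.
 */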
void do_fast_data_access_protection_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
    uintptr_t va;

    va = tag.vpn << PAGE_WIDTH;

    fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
        tag.context);
    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
    dump_istate(istate);
    panic("%s\n", str);
}

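/** Print the contents of the DTLB Synchronous Fault Status and Synchronous
 * Fault Address registers and clear the SFSR.
 */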
void dump_sfsr_and_sfar(void)
{
    tlb_sfsr_reg_t sfsr;
    uintptr_t sfar;

    sfsr.value = dtlb_sfsr_read();
    sfar = dtlb_sfar_read();

    printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
        "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
        sfsr.ow, sfsr.fv);
    printf("DTLB SFAR: address=%p\n", sfar);

    dtlb_sfsr_write(0);
}

/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    /*
     * Walk all ITLB and DTLB entries and remove all unlocked mappings.
     *
     * The kernel does not use global mappings, so any locked global
     * mappings found here must have been created by someone else. Their
     * only purpose now is to collide with proper mappings, so invalidate
     * them too; it is safe to do so even this late.
     */

    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        if (!d.l || d.g) {
            t.value = itlb_tag_read_read(i);
            d.v = false;
            itlb_tag_access_write(t.value);
            itlb_data_access_write(i, d.value);
        }
    }

    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        if (!d.l || d.g) {
            t.value = dtlb_tag_read_read(i);
            d.v = false;
            dtlb_tag_access_write(t.value);
            dtlb_data_access_write(i, d.value);
        }
    }
}

/** Invalidate all ITLB and DTLB entries that belong to specified ASID
 * (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

    itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
    dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);

    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
}

/** Invalidate all ITLB and DTLB entries for specified page range in specified
 * address space.
 *
 * @param asid Address Space ID.
 * @param page First page to sweep out from ITLB and DTLB.
 * @param cnt Number of ITLB and DTLB entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
    int i;
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

    for (i = 0; i < cnt; i++) {
        itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * PAGE_SIZE);
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * PAGE_SIZE);
    }

    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
}

/** @}
 */