source: mainline/kernel/arch/sparc64/src/mm/sun4v/tlb.c@ eb79d60

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since eb79d60 was eb79d60, checked in by Pavel Rimsky <pavel@…>, 16 years ago

Merged the preemptible trap handler for userspace.

  • Property mode set to 100644
File size: 13.9 KB
Line 
1/*
2 * Copyright (c) 2005 Jakub Jermar
3 * Copyright (c) 2008 Pavel Rimsky
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup sparc64mm
31 * @{
32 */
33/** @file
34 */
35
36#include <arch/mm/tlb.h>
37#include <mm/tlb.h>
38#include <mm/as.h>
39#include <mm/asid.h>
40#include <arch/sun4v/hypercall.h>
41#include <arch/mm/frame.h>
42#include <arch/mm/page.h>
43#include <arch/mm/tte.h>
44#include <arch/mm/tlb.h>
45#include <arch/interrupt.h>
46#include <interrupt.h>
47#include <arch.h>
48#include <print.h>
49#include <arch/types.h>
50#include <config.h>
51#include <arch/trap/trap.h>
52#include <arch/trap/exception.h>
53#include <panic.h>
54#include <arch/asm.h>
55#include <arch/cpu.h>
56#include <arch/mm/pagesize.h>
57
58#ifdef CONFIG_TSB
59#include <arch/mm/tsb.h>
60#endif
61
62static void dtlb_pte_copy(pte_t *, size_t, bool);
63static void itlb_pte_copy(pte_t *, size_t);
64static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
65static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
66 const char *);
67static void do_fast_data_access_protection_fault(istate_t *,
68 tlb_tag_access_reg_t, const char *);
69
/** Human-readable names of the four MMU context register selectors,
 *  indexed by context number (used when printing TLB state for debugging).
 */
char *context_encoding[] = {
	"Primary",
	"Secondary",
	"Nucleus",
	"Reserved"
};
76
77/** Invalidate all unlocked ITLB and DTLB entries. */
78void tlb_invalidate_all(void)
79{
80 uint64_t errno = __hypercall_fast3(MMU_DEMAP_ALL, 0, 0,
81 MMU_FLAG_DTLB | MMU_FLAG_ITLB);
82 if (errno != EOK) {
83 panic("Error code = %d.\n", errno);
84 }
85}
86
/** Architecture-specific TLB initialization: start from a clean slate by
 *  flushing all unlocked entries on the current CPU.
 */
void tlb_arch_init(void)
{
	tlb_invalidate_all();
}
91
/** Insert privileged mapping into DMMU TLB.
 *
 * @param page		Virtual page address.
 * @param frame		Physical frame address.
 * @param pagesize	Page size.
 * @param locked	True for permanent mappings, false otherwise.
 * @param cacheable	True if the mapping is cacheable, false otherwise.
 *
 * NOTE(review): the entire body is compiled out (#if 0), so on sun4v this
 * function is currently a no-op even though fast_data_access_mmu_miss()
 * still calls it for kernel I/O-space faults. The disabled code is the
 * sun4u-style direct MMU register sequence, presumably kept as a template
 * for a hypervisor-based implementation — confirm before relying on it.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
#if 0
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	pg.address = page;
	fr.address = frame;

	tag.context = ASID_KERNEL;
	tag.vpn = pg.vpn;

	dtlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;
	data.size = pagesize;
	data.pfn = fr.pfn;
	data.l = locked;
	data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
	data.cv = cacheable;
#endif /* CONFIG_VIRT_IDX_DCACHE */
	data.p = true;
	data.w = true;
	data.g = false;

	dtlb_data_in_write(data.value);
#endif
}
133
/** Copy PTE to TLB.
 *
 * @param t	Page Table Entry to be copied.
 * @param index	Zero if lower 8K-subpage, one if higher 8K-subpage.
 * @param ro	If true, the entry will be created read-only, regardless
 *		of its w field.
 *
 * NOTE(review): the body is compiled out (#if 0), so this is currently a
 * no-op on sun4v; the disabled code is the sun4u direct-register variant
 * kept for reference — confirm the intended hypervisor replacement.
 */
void dtlb_pte_copy(pte_t *t, size_t index, bool ro)
{
#if 0
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	pg.address = t->page + (index << MMU_PAGE_WIDTH);
	fr.address = t->frame + (index << MMU_PAGE_WIDTH);

	tag.value = 0;
	tag.context = t->as->asid;
	tag.vpn = pg.vpn;

	dtlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;
	data.size = PAGESIZE_8K;
	data.pfn = fr.pfn;
	data.l = false;
	data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
	data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_DCACHE */
	data.p = t->k;		/* p like privileged */
	data.w = ro ? false : t->w;
	data.g = t->g;

	dtlb_data_in_write(data.value);
#endif
}
174
/** Copy PTE to ITLB.
 *
 * @param t	Page Table Entry to be copied.
 * @param index	Zero if lower 8K-subpage, one if higher 8K-subpage.
 *
 * NOTE(review): the body is compiled out (#if 0), so this is currently a
 * no-op on sun4v; the disabled code is the sun4u direct-register variant
 * (note w is forced false — the ITLB never needs write permission).
 */
void itlb_pte_copy(pte_t *t, size_t index)
{
#if 0
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	pg.address = t->page + (index << MMU_PAGE_WIDTH);
	fr.address = t->frame + (index << MMU_PAGE_WIDTH);

	tag.value = 0;
	tag.context = t->as->asid;
	tag.vpn = pg.vpn;

	itlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;
	data.size = PAGESIZE_8K;
	data.pfn = fr.pfn;
	data.l = false;
	data.cp = t->c;
	data.p = t->k;		/* p like privileged */
	data.w = false;
	data.g = t->g;

	itlb_data_in_write(data.value);
#endif
}
210
/** ITLB miss handler.
 *
 * Looks the faulting program counter up in the software page tables and,
 * on success, loads the translation into the ITLB (and the TSB, when
 * configured); otherwise the fault is forwarded to the generic address
 * space page fault handler.
 *
 * @param unused	Unused (part of the common trap handler signature).
 * @param istate	Interrupted state saved on the stack.
 */
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
{
	/*
	 * NOTE(review): sethi to %g0 is an architectural no-op on SPARC;
	 * this looks like a magic marker instruction for a simulator or
	 * trace tool — confirm and consider removing before release.
	 */
	asm volatile ("sethi 0x41906, %g0");
	/* Base of the 16K page (two 8K MMU subpages) containing the PC. */
	uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
	/* Which 8K subpage within the 16K page faulted. */
	size_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
	pte_t *t;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, page_16k);
	if (t && PTE_EXECUTABLE(t)) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into ITLB.
		 */
		t->a = true;	/* mark the PTE as recently accessed */
		itlb_pte_copy(t, index);
#ifdef CONFIG_TSB
		itsb_pte_copy(t, index);
#endif
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler. The page table lock must be dropped first.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) ==
		    AS_PF_FAULT) {
			do_fast_instruction_access_mmu_miss_fault(istate,
			    __func__);
		}
	}
}
245
/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param tag		Content of the TLB Tag Access register as it existed
 *			when the trap happened. This is to prevent confusion
 *			created by clobbered Tag Access register during a nested
 *			DTLB miss.
 * @param istate	Interrupted state saved on the stack.
 */
void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
{
	uintptr_t page_8k;	/* exact 8K MMU page that faulted */
	uintptr_t page_16k;	/* enclosing 16K page used by the page tables */
	size_t index;		/* 8K subpage index within the 16K page */
	pte_t *t;

	page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
	page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
	index = tag.vpn % MMU_PAGES_PER_PAGE;

	if (tag.context == ASID_KERNEL) {
		if (!tag.vpn) {
			/* NULL access in kernel */
			do_fast_data_access_mmu_miss_fault(istate, tag,
			    __func__);
/*
 * NOTE(review, "MH" hack from the merge): the end_of_identity test is
 * commented out, so every non-NULL kernel miss is treated as an I/O-space
 * access and takes the else branch below — confirm this is intentional.
 */
		} else {
// } else if (page_8k >= end_of_identity) {
			/*
			 * The kernel is accessing the I/O space.
			 * We still do identity mapping for I/O,
			 * but without caching.
			 */
			dtlb_insert_mapping(page_8k, KA2PA(page_8k),
			    PAGESIZE_8K, false, false);
			return;
		}
		/*
		 * NOTE(review): reached only after the NULL-access branch,
		 * whose fault routine presumably does not return (it panics)
		 * — so this second call looks unreachable; confirm.
		 */
		do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
		    "kernel page fault.");
	}

	page_table_lock(AS, true);
	t = page_mapping_find(AS, page_16k);
	if (t) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into DTLB.
		 */
		t->a = true;	/* mark the PTE as recently accessed */
		dtlb_pte_copy(t, index, true);
#ifdef CONFIG_TSB
		dtsb_pte_copy(t, index, true);
#endif
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler. The page table lock must be dropped first.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(page_16k, PF_ACCESS_READ, istate) ==
		    AS_PF_FAULT) {
			do_fast_data_access_mmu_miss_fault(istate, tag,
			    __func__);
		}
	}
}
315
/** DTLB protection fault handler.
 *
 * Triggered on a write to a page whose DTLB entry was loaded read-only.
 * If the software PTE allows writing, the stale read-only entry is demapped
 * and a writable one is inserted; otherwise the fault is forwarded to the
 * generic page fault handler (e.g. for copy-on-write resolution).
 *
 * @param tag		Content of the TLB Tag Access register as it existed
 *			when the trap happened. This is to prevent confusion
 *			created by clobbered Tag Access register during a nested
 *			DTLB miss.
 * @param istate	Interrupted state saved on the stack.
 */
void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
{
	uintptr_t page_16k;	/* enclosing 16K page used by the page tables */
	size_t index;		/* 8K subpage index within the 16K page */
	pte_t *t;

	page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
	index = tag.vpn % MMU_PAGES_PER_PAGE;	/* 16K-page emulation */

	page_table_lock(AS, true);
	t = page_mapping_find(AS, page_16k);
	if (t && PTE_WRITABLE(t)) {
		/*
		 * The mapping was found in the software page hash table and is
		 * writable. Demap the old mapping and insert an updated mapping
		 * into DTLB.
		 */
		t->a = true;	/* accessed */
		t->d = true;	/* dirty — the page is about to be written */
		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
		    page_16k + index * MMU_PAGE_SIZE);
		dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
		dtsb_pte_copy(t, index, false);
#endif
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler. The page table lock must be dropped first.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) ==
		    AS_PF_FAULT) {
			do_fast_data_access_protection_fault(istate, tag,
			    __func__);
		}
	}
}
363
/** Print TLB entry (for debugging purposes).
 *
 * The diag field has been left out in order to make this function more generic
 * (there is no diag field in US3 architecture).
 *
 * @param i	TLB entry number
 * @param t	TLB entry tag
 * @param d	TLB entry data
 *
 * NOTE(review): the body is compiled out (#if 0), so on sun4v this prints
 * nothing — the tlb_print() variants below therefore only emit headers.
 */
static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
{
#if 0
	printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
	    "ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, "
	    "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
	    t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
	    d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
#endif
}
383
#if defined (US)

/** Print contents of both TLBs (UltraSPARC I/II variant). */
void tlb_print(void)
{
	int i;
	tlb_data_t d;
	tlb_tag_read_reg_t t;

	printf("I-TLB contents:\n");
	for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
		d.value = itlb_data_access_read(i);
		t.value = itlb_tag_read_read(i);
		print_tlb_entry(i, t, d);
	}

	printf("D-TLB contents:\n");
	for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
		d.value = dtlb_data_access_read(i);
		t.value = dtlb_tag_read_read(i);
		print_tlb_entry(i, t, d);
	}
}

#elif defined (US3)

/** Print contents of all TLBs (UltraSPARC III variant with split
 *  small/big TLBs).
 *
 * NOTE(review): the I-TLB loops below read through the dtlb_* accessors,
 * and the "TLB_DBIG_1"/"TLB_DBIG_2" labels correspond to the TLB_DBIG_0/
 * TLB_DBIG_1 constants — presumably 1-based human-friendly labels, but
 * both points deserve confirmation against the US3 accessor API.
 */
void tlb_print(void)
{
	int i;
	tlb_data_t d;
	tlb_tag_read_reg_t t;

	printf("TLB_ISMALL contents:\n");
	for (i = 0; i < tlb_ismall_size(); i++) {
		d.value = dtlb_data_access_read(TLB_ISMALL, i);
		t.value = dtlb_tag_read_read(TLB_ISMALL, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_IBIG contents:\n");
	for (i = 0; i < tlb_ibig_size(); i++) {
		d.value = dtlb_data_access_read(TLB_IBIG, i);
		t.value = dtlb_tag_read_read(TLB_IBIG, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_DSMALL contents:\n");
	for (i = 0; i < tlb_dsmall_size(); i++) {
		d.value = dtlb_data_access_read(TLB_DSMALL, i);
		t.value = dtlb_tag_read_read(TLB_DSMALL, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_DBIG_1 contents:\n");
	for (i = 0; i < tlb_dbig_size(); i++) {
		d.value = dtlb_data_access_read(TLB_DBIG_0, i);
		t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_DBIG_2 contents:\n");
	for (i = 0; i < tlb_dbig_size(); i++) {
		d.value = dtlb_data_access_read(TLB_DBIG_1, i);
		t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
		print_tlb_entry(i, t, d);
	}
}

#endif
454
/** Handle an unresolvable ITLB miss: kill the faulting userspace task or,
 *  for kernel faults, dump the interrupted state and panic.
 *
 * @param istate	Interrupted state saved on the stack.
 * @param str		Description of the fault (typically the handler name).
 */
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str)
{
	fault_if_from_uspace(istate, "%s.", str);
	/* Dump before panicking so the state is visible in the log. */
	dump_istate(istate);
	panic("%s.", str);
}
462
/** Handle an unresolvable DTLB miss: kill the faulting userspace task or,
 *  for kernel faults, dump diagnostics and panic.
 *
 * @param istate	Interrupted state saved on the stack.
 * @param tag		Faulting Tag Access register value (supplies the
 *			faulting virtual page number and context/ASID).
 * @param str		Description of the fault.
 */
void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
	uintptr_t va;

	va = tag.vpn << MMU_PAGE_WIDTH;
	/* Context 0 is the kernel; only non-zero contexts can be uspace. */
	if (tag.context) {
		fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va,
		    tag.context);
	}
	dump_istate(istate);
	printf("Faulting page: %p, ASID=%d.\n", va, tag.context);
	panic("%s.", str);
}
477
/** Handle an unresolvable DTLB protection fault: kill the faulting userspace
 *  task or, for kernel faults, dump diagnostics and panic.
 *
 * @param istate	Interrupted state saved on the stack.
 * @param tag		Faulting Tag Access register value (supplies the
 *			faulting virtual page number and context/ASID).
 * @param str		Description of the fault.
 */
void do_fast_data_access_protection_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
	uintptr_t va;

	va = tag.vpn << MMU_PAGE_WIDTH;

	/* Context 0 is the kernel; only non-zero contexts can be uspace. */
	if (tag.context) {
		fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va,
		    tag.context);
	}
	printf("Faulting page: %p, ASID=%d\n", va, tag.context);
	dump_istate(istate);
	panic("%s.", str);
}
493
/** Print the DMMU Synchronous Fault Status and Fault Address registers
 *  (for debugging), then clear the status register.
 */
void dump_sfsr_and_sfar(void)
{
	tlb_sfsr_reg_t sfsr;
	uintptr_t sfar;

	sfsr.value = dtlb_sfsr_read();
	sfar = dtlb_sfar_read();

#if defined (US)
	printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
	    "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
	    sfsr.ow, sfsr.fv);
#elif defined (US3)
	printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
	    "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
	    sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
#endif

	printf("DTLB SFAR: address=%p\n", sfar);

	/* Writing zero clears the fault status for the next fault. */
	dtlb_sfsr_write(0);
}
516
/** Invalidate all ITLB and DTLB entries that belong to specified ASID
 * (Context).
 *
 * @param asid	Address Space ID.
 *
 * NOTE(review): the hypercall return value is ignored here, unlike in
 * tlb_invalidate_all() — confirm whether a failed MMU_DEMAP_CTX should
 * also panic.
 */
void tlb_invalidate_asid(asid_t asid)
{
	/* switch to nucleus because we are mapped by the primary context */
	nucleus_enter();
	__hypercall_fast4(MMU_DEMAP_CTX, 0, 0, asid,
	    MMU_FLAG_ITLB | MMU_FLAG_DTLB);

	nucleus_leave();
}
531
532/** Invalidate all ITLB and DTLB entries for specified page range in specified
533 * address space.
534 *
535 * @param asid Address Space ID.
536 * @param page First page which to sweep out from ITLB and DTLB.
537 * @param cnt Number of ITLB and DTLB entries to invalidate.
538 */
539void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
540{
541 unsigned int i;
542
543 /* switch to nucleus because we are mapped by the primary context */
544 nucleus_enter();
545
546 for (i = 0; i < cnt; i++) {
547 __hypercall_fast5(MMU_DEMAP_PAGE, 0, 0, page, asid,
548 MMU_FLAG_DTLB | MMU_FLAG_ITLB);
549 }
550
551 nucleus_leave();
552}
553
554/** @}
555 */
Note: See TracBrowser for help on using the repository browser.