source: mainline/kernel/arch/sparc64/src/mm/sun4v/tlb.c@ 66e08d02

Last change on this file was 66e08d02, checked in by Pavel Rimsky <pavel@…>, 16 years ago:

Output in kernel now possible, init phase passes to cpu_init.

/*
 * Copyright (c) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <arch/mm/tlb.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/mmu.h>
#include <arch/interrupt.h>
#include <interrupt.h>
#include <arch.h>
#include <print.h>
#include <arch/types.h>
#include <config.h>
#include <arch/trap/trap.h>
#include <arch/trap/exception.h>
#include <panic.h>
#include <arch/asm.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif

static void dtlb_pte_copy(pte_t *, size_t, bool);
static void itlb_pte_copy(pte_t *, size_t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
    const char *);
static void do_fast_data_access_protection_fault(istate_t *,
    tlb_tag_access_reg_t, const char *);

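/*
 * Human-readable names for the four MMU context types, presumably for
 * decoding the ct (context type) field of the SFSR.
 */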
char *context_encoding[] = {
	"Primary",
	"Secondary",
	"Nucleus",
	"Reserved"
};

void tlb_arch_init(void)
{
	/*
	 * Invalidate all non-locked DTLB and ITLB entries.
	 *
	 * MH: temporarily disabled while the sun4v port is being brought up;
	 * this is a sun4u-style direct register access.
	 */
	// tlb_invalidate_all();

	/*
	 * Clear both SFSRs.
	 *
	 * MH: likewise disabled for now.
	 */
	// dtlb_sfsr_write(0);
	// itlb_sfsr_write(0);
}

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page       Virtual page address.
 * @param frame      Physical frame address.
 * @param pagesize   Page size.
 * @param locked     True for permanent mappings, false otherwise.
 * @param cacheable  True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	pg.address = page;
	fr.address = frame;

	tag.context = ASID_KERNEL;
	tag.vpn = pg.vpn;

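	/*
	 * On sun4u-style MMUs, TLB insertion is a two-step protocol: the
	 * virtual tag is first staged in the TLB Tag Access register, and
	 * the subsequent write to the Data In register latches tag and data
	 * into an entry chosen by the TLB's replacement policy.
	 */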
	dtlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;
	data.size = pagesize;
	data.pfn = fr.pfn;
	data.l = locked;
	data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
	data.cv = cacheable;
#endif /* CONFIG_VIRT_IDX_DCACHE */
	data.p = true;
	data.w = true;
	data.g = false;

	dtlb_data_in_write(data.value);
}

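/*
 * HelenOS uses 16K pages on sparc64 while this MMU operates on 8K pages,
 * so every page-table entry is backed by two 8K TLB entries; the index
 * argument (0 or 1) selects which 8K subpage is being installed.
 */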
/** Copy PTE to TLB.
 *
 * @param t      Page Table Entry to be copied.
 * @param index  Zero if lower 8K-subpage, one if higher 8K-subpage.
 * @param ro     If true, the entry will be created read-only, regardless
 *               of its w field.
 */
void dtlb_pte_copy(pte_t *t, size_t index, bool ro)
{
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	pg.address = t->page + (index << MMU_PAGE_WIDTH);
	fr.address = t->frame + (index << MMU_PAGE_WIDTH);

	tag.value = 0;
	tag.context = t->as->asid;
	tag.vpn = pg.vpn;

	dtlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;
	data.size = PAGESIZE_8K;
	data.pfn = fr.pfn;
	data.l = false;
	data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
	data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_DCACHE */
	data.p = t->k;	/* p like privileged */
	data.w = ro ? false : t->w;
	data.g = t->g;

	dtlb_data_in_write(data.value);
}

/** Copy PTE to ITLB.
 *
 * @param t      Page Table Entry to be copied.
 * @param index  Zero if lower 8K-subpage, one if higher 8K-subpage.
 */
void itlb_pte_copy(pte_t *t, size_t index)
{
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	pg.address = t->page + (index << MMU_PAGE_WIDTH);
	fr.address = t->frame + (index << MMU_PAGE_WIDTH);

	tag.value = 0;
	tag.context = t->as->asid;
	tag.vpn = pg.vpn;

	itlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;
	data.size = PAGESIZE_8K;
	data.pfn = fr.pfn;
	data.l = false;
	data.cp = t->c;
	data.p = t->k;	/* p like privileged */
	data.w = false;
	data.g = t->g;

	itlb_data_in_write(data.value);
}

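/*
 * The MMU trap handlers below share the same shape: first try to satisfy
 * the miss from the software page hash table (the fast path), and only
 * fall back to the generic as_page_fault() logic when no usable mapping
 * is found.
 */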
/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
{
	uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
	size_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
	pte_t *t;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, page_16k);
	if (t && PTE_EXECUTABLE(t)) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into ITLB.
		 */
		t->a = true;
		itlb_pte_copy(t, index);
#ifdef CONFIG_TSB
		itsb_pte_copy(t, index);
#endif
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) ==
		    AS_PF_FAULT) {
			do_fast_instruction_access_mmu_miss_fault(istate,
			    __func__);
		}
	}
}

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param tag     Content of the TLB Tag Access register as it existed
 *                when the trap happened. This is to prevent confusion
 *                created by clobbered Tag Access register during a nested
 *                DTLB miss.
 * @param istate  Interrupted state saved on the stack.
 */
void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
{
	uintptr_t page_8k;
	uintptr_t page_16k;
	size_t index;
	pte_t *t;

	page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
	page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
	index = tag.vpn % MMU_PAGES_PER_PAGE;

	if (tag.context == ASID_KERNEL) {
		if (!tag.vpn) {
			/* NULL access in kernel */
			do_fast_data_access_mmu_miss_fault(istate, tag,
			    __func__);
		/*
		 * MH: the end_of_identity check is disabled for now, so every
		 * other kernel miss takes the identity-mapping path below.
		 */
		} else {
		// } else if (page_8k >= end_of_identity) {
			/*
			 * The kernel is accessing the I/O space.
			 * We still do identity mapping for I/O,
			 * but without caching.
			 */
			dtlb_insert_mapping(page_8k, KA2PA(page_8k),
			    PAGESIZE_8K, false, false);
			return;
		}
		do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
		    "kernel page fault.");
	}

	page_table_lock(AS, true);
	t = page_mapping_find(AS, page_16k);
	if (t) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into DTLB.
		 */
		t->a = true;
		dtlb_pte_copy(t, index, true);
#ifdef CONFIG_TSB
		dtsb_pte_copy(t, index, true);
#endif
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(page_16k, PF_ACCESS_READ, istate) ==
		    AS_PF_FAULT) {
			do_fast_data_access_mmu_miss_fault(istate, tag,
			    __func__);
		}
	}
}

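/*
 * Note that the miss handler above installs mappings read-only (ro == true).
 * The first write to such a page therefore raises a protection fault, which
 * lets the handler below set the dirty bit before reinstalling the mapping
 * as writable.
 */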
/** DTLB protection fault handler.
 *
 * @param tag     Content of the TLB Tag Access register as it existed
 *                when the trap happened. This is to prevent confusion
 *                created by clobbered Tag Access register during a nested
 *                DTLB miss.
 * @param istate  Interrupted state saved on the stack.
 */
void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
{
	uintptr_t page_16k;
	size_t index;
	pte_t *t;

	page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
	index = tag.vpn % MMU_PAGES_PER_PAGE;	/* 16K-page emulation */

	page_table_lock(AS, true);
	t = page_mapping_find(AS, page_16k);
	if (t && PTE_WRITABLE(t)) {
		/*
		 * The mapping was found in the software page hash table and is
		 * writable. Demap the old mapping and insert an updated mapping
		 * into DTLB.
		 */
		t->a = true;
		t->d = true;
		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
		    page_16k + index * MMU_PAGE_SIZE);
		dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
		dtsb_pte_copy(t, index, false);
#endif
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) ==
		    AS_PF_FAULT) {
			do_fast_data_access_protection_fault(istate, tag,
			    __func__);
		}
	}
}

/** Print TLB entry (for debugging purposes).
 *
 * The diag field has been left out in order to make this function more generic
 * (there is no diag field in the US3 architecture).
 *
 * @param i  TLB entry number
 * @param t  TLB entry tag
 * @param d  TLB entry data
 */
static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
{
	printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
	    "ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, "
	    "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
	    t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
	    d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}

#if defined (US)

/** Print contents of both TLBs. */
void tlb_print(void)
{
	int i;
	tlb_data_t d;
	tlb_tag_read_reg_t t;

	printf("I-TLB contents:\n");
	for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
		d.value = itlb_data_access_read(i);
		t.value = itlb_tag_read_read(i);
		print_tlb_entry(i, t, d);
	}

	printf("D-TLB contents:\n");
	for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
		d.value = dtlb_data_access_read(i);
		t.value = dtlb_tag_read_read(i);
		print_tlb_entry(i, t, d);
	}
}

#elif defined (US3)

/** Print contents of all TLBs. */
void tlb_print(void)
{
	int i;
	tlb_data_t d;
	tlb_tag_read_reg_t t;

	printf("TLB_ISMALL contents:\n");
	for (i = 0; i < tlb_ismall_size(); i++) {
		d.value = dtlb_data_access_read(TLB_ISMALL, i);
		t.value = dtlb_tag_read_read(TLB_ISMALL, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_IBIG contents:\n");
	for (i = 0; i < tlb_ibig_size(); i++) {
		d.value = dtlb_data_access_read(TLB_IBIG, i);
		t.value = dtlb_tag_read_read(TLB_IBIG, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_DSMALL contents:\n");
	for (i = 0; i < tlb_dsmall_size(); i++) {
		d.value = dtlb_data_access_read(TLB_DSMALL, i);
		t.value = dtlb_tag_read_read(TLB_DSMALL, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_DBIG_0 contents:\n");
	for (i = 0; i < tlb_dbig_size(); i++) {
		d.value = dtlb_data_access_read(TLB_DBIG_0, i);
		t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_DBIG_1 contents:\n");
	for (i = 0; i < tlb_dbig_size(); i++) {
		d.value = dtlb_data_access_read(TLB_DBIG_1, i);
		t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
		print_tlb_entry(i, t, d);
	}
}

#endif

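/*
 * Fault reporting helpers: fault_if_from_uspace() disposes of faults that
 * originate in userspace; a fault that reaches the code below from kernel
 * context ends in panic().
 */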
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str)
{
	fault_if_from_uspace(istate, "%s.", str);
	dump_istate(istate);
	panic("%s.", str);
}

void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
	uintptr_t va;

	va = tag.vpn << MMU_PAGE_WIDTH;
	if (tag.context) {
		fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va,
		    tag.context);
	}
	dump_istate(istate);
	printf("Faulting page: %p, ASID=%d.\n", va, tag.context);
	panic("%s.", str);
}

void do_fast_data_access_protection_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
	uintptr_t va;

	va = tag.vpn << MMU_PAGE_WIDTH;

	if (tag.context) {
		fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va,
		    tag.context);
	}
	printf("Faulting page: %p, ASID=%d\n", va, tag.context);
	dump_istate(istate);
	panic("%s.", str);
}

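/*
 * The SFSR/SFAR dump below also clears the SFSR; clearing the register
 * (in particular its fault-valid bit) rearms it so that the next fault is
 * recorded rather than flagged as an overwrite.
 */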
void dump_sfsr_and_sfar(void)
{
	tlb_sfsr_reg_t sfsr;
	uintptr_t sfar;

	sfsr.value = dtlb_sfsr_read();
	sfar = dtlb_sfar_read();

#if defined (US)
	printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
	    "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
	    sfsr.ow, sfsr.fv);
#elif defined (US3)
	printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
	    "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
	    sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
#endif

	printf("DTLB SFAR: address=%p\n", sfar);

	dtlb_sfsr_write(0);
}

#if defined (US)
/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
	int i;

	/*
	 * Walk all ITLB and DTLB entries and remove all unlocked mappings.
	 *
	 * The kernel doesn't use global mappings, so any locked global
	 * mappings found must have been created by someone else. Their only
	 * purpose now is to collide with proper mappings; invalidate them
	 * immediately, which is safe to do as late as this.
	 */

	tlb_data_t d;
	tlb_tag_read_reg_t t;

	for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
		d.value = itlb_data_access_read(i);
		if (!d.l || d.g) {
			t.value = itlb_tag_read_read(i);
			d.v = false;
			itlb_tag_access_write(t.value);
			itlb_data_access_write(i, d.value);
		}
	}

	for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
		d.value = dtlb_data_access_read(i);
		if (!d.l || d.g) {
			t.value = dtlb_tag_read_read(i);
			d.v = false;
			dtlb_tag_access_write(t.value);
			dtlb_data_access_write(i, d.value);
		}
	}
}

#elif defined (US3)

/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
	itlb_demap(TLB_DEMAP_ALL, 0, 0);
	dtlb_demap(TLB_DEMAP_ALL, 0, 0);
}

#endif

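/*
 * The per-ASID and per-page invalidations below are implemented with demap
 * operations. Because the kernel itself is mapped through the primary
 * context, both routines first switch to the nucleus context before
 * temporarily repointing the primary context register at the target ASID.
 */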
/** Invalidate all ITLB and DTLB entries that belong to specified ASID
 * (Context).
 *
 * @param asid  Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
	tlb_context_reg_t pc_save, ctx;

	/* switch to nucleus because we are mapped by the primary context */
	nucleus_enter();

	ctx.v = pc_save.v = mmu_primary_context_read();
	ctx.context = asid;
	mmu_primary_context_write(ctx.v);

	itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
	dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);

	mmu_primary_context_write(pc_save.v);

	nucleus_leave();
}

/** Invalidate all ITLB and DTLB entries for specified page range in specified
 * address space.
 *
 * @param asid  Address Space ID.
 * @param page  First page to sweep out of the ITLB and DTLB.
 * @param cnt   Number of ITLB and DTLB entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
	unsigned int i;
	tlb_context_reg_t pc_save, ctx;

	/* switch to nucleus because we are mapped by the primary context */
	nucleus_enter();

	ctx.v = pc_save.v = mmu_primary_context_read();
	ctx.context = asid;
	mmu_primary_context_write(ctx.v);

	for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
		itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
		    page + i * MMU_PAGE_SIZE);
		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
		    page + i * MMU_PAGE_SIZE);
	}

	mmu_primary_context_write(pc_save.v);

	nucleus_leave();
}

/** @}
 */