source: mainline/kernel/arch/sparc64/src/mm/sun4v/tlb.c@ 3da11f37

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 3da11f37 was 3da11f37, checked in by Pavel Rimsky <pavel@…>, 16 years ago

Merged changes essential for the code to reach 'uinit'.

  • Property mode set to 100644
File size: 13.9 KB
Line 
1/*
2 * Copyright (c) 2005 Jakub Jermar
3 * Copyright (c) 2008 Pavel Rimsky
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup sparc64mm
31 * @{
32 */
33/** @file
34 */
35
36#include <arch/mm/tlb.h>
37#include <mm/tlb.h>
38#include <mm/as.h>
39#include <mm/asid.h>
40#include <arch/sun4v/hypercall.h>
41#include <arch/mm/frame.h>
42#include <arch/mm/page.h>
43#include <arch/mm/tte.h>
44#include <arch/mm/tlb.h>
45#include <arch/interrupt.h>
46#include <interrupt.h>
47#include <arch.h>
48#include <print.h>
49#include <arch/types.h>
50#include <config.h>
51#include <arch/trap/trap.h>
52#include <arch/trap/exception.h>
53#include <panic.h>
54#include <arch/asm.h>
55#include <arch/cpu.h>
56#include <arch/mm/pagesize.h>
57
58#ifdef CONFIG_TSB
59#include <arch/mm/tsb.h>
60#endif
61
62static void dtlb_pte_copy(pte_t *, size_t, bool);
63static void itlb_pte_copy(pte_t *, size_t);
64static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
65static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
66 const char *);
67static void do_fast_data_access_protection_fault(istate_t *,
68 tlb_tag_access_reg_t, const char *);
69
/* Human-readable names of the MMU context selectors, indexed by context
 * number, for use in diagnostic/debug output. */
char *context_encoding[] = {
	"Primary",
	"Secondary",
	"Nucleus",
	"Reserved"
};
76
77/** Invalidate all unlocked ITLB and DTLB entries. */
78void tlb_invalidate_all(void)
79{
80 uint64_t errno = __hypercall_fast3(MMU_DEMAP_ALL, 0, 0,
81 MMU_FLAG_DTLB | MMU_FLAG_ITLB);
82 if (errno != EOK) {
83 panic("Error code = %d.\n", errno);
84 }
85}
86
/** Architecture-specific TLB initialization: start with clean TLBs.
 *
 * On sun4v this just asks the hypervisor to demap everything.
 */
void tlb_arch_init(void)
{
	tlb_invalidate_all();
}
91
/** Insert privileged mapping into DMMU TLB.
 *
 * NOTE(review): the entire body is disabled via #if 0, so on sun4v this
 * function is currently a no-op even though fast_data_access_mmu_miss()
 * still calls it for kernel identity mappings -- presumably a hypercall
 * based implementation is still to be merged; confirm against mainline.
 *
 * @param page		Virtual page address.
 * @param frame		Physical frame address.
 * @param pagesize	Page size.
 * @param locked	True for permanent mappings, false otherwise.
 * @param cacheable	True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
#if 0
	/* Dead sun4u-style implementation via the TLB Tag Access register. */
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	pg.address = page;
	fr.address = frame;

	tag.context = ASID_KERNEL;
	tag.vpn = pg.vpn;

	dtlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;
	data.size = pagesize;
	data.pfn = fr.pfn;
	data.l = locked;
	data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
	data.cv = cacheable;
#endif /* CONFIG_VIRT_IDX_DCACHE */
	data.p = true;
	data.w = true;
	data.g = false;

	dtlb_data_in_write(data.value);
#endif
}
133
/** Copy PTE to TLB.
 *
 * NOTE(review): the body is disabled via #if 0 -- on sun4v this is
 * currently a no-op (the miss handlers that call it rely solely on the
 * TSB path / page fault forwarding); confirm against mainline.
 *
 * @param t	Page Table Entry to be copied.
 * @param index	Zero if lower 8K-subpage, one if higher 8K-subpage.
 * @param ro	If true, the entry will be created read-only, regardless
 * 		of its w field.
 */
void dtlb_pte_copy(pte_t *t, size_t index, bool ro)
{
#if 0
	/* Dead sun4u-style implementation via the TLB Tag Access register. */
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	pg.address = t->page + (index << MMU_PAGE_WIDTH);
	fr.address = t->frame + (index << MMU_PAGE_WIDTH);

	tag.value = 0;
	tag.context = t->as->asid;
	tag.vpn = pg.vpn;

	dtlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;
	data.size = PAGESIZE_8K;
	data.pfn = fr.pfn;
	data.l = false;
	data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
	data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_DCACHE */
	data.p = t->k;		/* p like privileged */
	data.w = ro ? false : t->w;
	data.g = t->g;

	dtlb_data_in_write(data.value);
#endif
}
174
/** Copy PTE to ITLB.
 *
 * NOTE(review): the body is disabled via #if 0 -- on sun4v this is
 * currently a no-op; confirm against mainline.
 *
 * @param t	Page Table Entry to be copied.
 * @param index	Zero if lower 8K-subpage, one if higher 8K-subpage.
 */
void itlb_pte_copy(pte_t *t, size_t index)
{
#if 0
	/* Dead sun4u-style implementation via the TLB Tag Access register. */
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	pg.address = t->page + (index << MMU_PAGE_WIDTH);
	fr.address = t->frame + (index << MMU_PAGE_WIDTH);

	tag.value = 0;
	tag.context = t->as->asid;
	tag.vpn = pg.vpn;

	itlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;
	data.size = PAGESIZE_8K;
	data.pfn = fr.pfn;
	data.l = false;
	data.cp = t->c;
	data.p = t->k;		/* p like privileged */
	data.w = false;		/* ITLB entries are never writable */
	data.g = t->g;

	itlb_data_in_write(data.value);
#endif
}
210
/** ITLB miss handler.
 *
 * Looks the faulting page up in the software page hash table; on success
 * marks the PTE accessed and loads the translation into the ITLB (and the
 * TSB when configured), otherwise forwards the fault to the generic
 * address space page fault handler.
 *
 * @param unused	Unused first trap argument.
 * @param istate	Interrupted state saved on the stack.
 */
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
{
	/* Base of the enclosing page (PAGE_SIZE) containing the fault PC. */
	uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
	/* Which 8K MMU subpage within that page faulted. */
	size_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
	pte_t *t;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, page_16k);
	if (t && PTE_EXECUTABLE(t)) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into ITLB.
		 */
		t->a = true;	/* mark the PTE as accessed */
		itlb_pte_copy(t, index);
#ifdef CONFIG_TSB
		itsb_pte_copy(t, index);
#endif
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) ==
		    AS_PF_FAULT) {
			do_fast_instruction_access_mmu_miss_fault(istate,
			    __func__);
		}
	}
}
244
/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param tag	Content of the TLB Tag Access register as it existed
 * 		when the trap happened. This is to prevent confusion
 * 		created by clobbered Tag Access register during a nested
 * 		DTLB miss.
 * @param istate	Interrupted state saved on the stack.
 */
void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
{
	uintptr_t page_8k;
	uintptr_t page_16k;
	size_t index;
	pte_t *t;

	/* Faulting virtual address, reconstructed from the tag's VPN. */
	page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
	/* Base of the enclosing PAGE_SIZE page (8K subpages emulate it). */
	page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
	/* Which 8K subpage within that page faulted. */
	index = tag.vpn % MMU_PAGES_PER_PAGE;

	if (tag.context == ASID_KERNEL) {
		if (!tag.vpn) {
			/* NULL access in kernel */
			do_fast_data_access_mmu_miss_fault(istate, tag,
			    __func__);
/*
 * NOTE(review): merge artifact (was marked "//MH") -- the commented-out
 * condition below suggests this branch should only cover addresses beyond
 * the identity-mapped area; as written it handles every non-NULL kernel
 * miss. Confirm against mainline before relying on this path.
 */
		} else {
//		} else if (page_8k >= end_of_identity) {
			/*
			 * The kernel is accessing the I/O space.
			 * We still do identity mapping for I/O,
			 * but without caching.
			 */
			dtlb_insert_mapping(page_8k, KA2PA(page_8k),
			    PAGESIZE_8K, false, false);
			return;
		}
		/*
		 * Reached only if the NULL-access fault call above ever
		 * returns (it normally panics).
		 */
		do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
		    "kernel page fault.");
	}

	page_table_lock(AS, true);
	t = page_mapping_find(AS, page_16k);
	if (t) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into DTLB.
		 */
		t->a = true;	/* mark the PTE as accessed */
		dtlb_pte_copy(t, index, true);
#ifdef CONFIG_TSB
		dtsb_pte_copy(t, index, true);
#endif
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(page_16k, PF_ACCESS_READ, istate) ==
		    AS_PF_FAULT) {
			do_fast_data_access_mmu_miss_fault(istate, tag,
			    __func__);
		}
	}
}
314
/** DTLB protection fault handler.
 *
 * Triggered on a write to a page whose DTLB entry is read-only. If the
 * underlying PTE is writable, the stale read-only entry is demapped and a
 * writable one inserted; otherwise the fault is forwarded to the generic
 * page fault handler (e.g. for copy-on-write handling).
 *
 * @param tag	Content of the TLB Tag Access register as it existed
 * 		when the trap happened. This is to prevent confusion
 * 		created by clobbered Tag Access register during a nested
 * 		DTLB miss.
 * @param istate	Interrupted state saved on the stack.
 */
void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
{
	uintptr_t page_16k;
	size_t index;
	pte_t *t;

	page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
	index = tag.vpn % MMU_PAGES_PER_PAGE;	/* 16K-page emulation */

	page_table_lock(AS, true);
	t = page_mapping_find(AS, page_16k);
	if (t && PTE_WRITABLE(t)) {
		/*
		 * The mapping was found in the software page hash table and is
		 * writable. Demap the old mapping and insert an updated mapping
		 * into DTLB.
		 */
		t->a = true;	/* accessed */
		t->d = true;	/* dirty */
		/*
		 * NOTE(review): sun4u-style direct demap; on sun4v demapping
		 * normally goes through hypercalls -- confirm dtlb_demap is
		 * appropriate here.
		 */
		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
		    page_16k + index * MMU_PAGE_SIZE);
		dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
		dtsb_pte_copy(t, index, false);
#endif
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) ==
		    AS_PF_FAULT) {
			do_fast_data_access_protection_fault(istate, tag,
			    __func__);
		}
	}
}
362
/** Print TLB entry (for debugging purposes).
 *
 * The diag field has been left out in order to make this function more generic
 * (there is no diag field in US3 architeture).
 *
 * NOTE(review): the body is disabled via #if 0, so this currently prints
 * nothing -- the tlb_print() variants below will produce only headers.
 *
 * @param i	TLB entry number
 * @param t	TLB entry tag
 * @param d	TLB entry data
 */
static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
{
#if 0
	printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
	    "ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, "
	    "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
	    t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
	    d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
#endif
}
382
383#if defined (US)
384
385/** Print contents of both TLBs. */
386void tlb_print(void)
387{
388 int i;
389 tlb_data_t d;
390 tlb_tag_read_reg_t t;
391
392 printf("I-TLB contents:\n");
393 for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
394 d.value = itlb_data_access_read(i);
395 t.value = itlb_tag_read_read(i);
396 print_tlb_entry(i, t, d);
397 }
398
399 printf("D-TLB contents:\n");
400 for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
401 d.value = dtlb_data_access_read(i);
402 t.value = dtlb_tag_read_read(i);
403 print_tlb_entry(i, t, d);
404 }
405}
406
407#elif defined (US3)
408
409/** Print contents of all TLBs. */
410void tlb_print(void)
411{
412 int i;
413 tlb_data_t d;
414 tlb_tag_read_reg_t t;
415
416 printf("TLB_ISMALL contents:\n");
417 for (i = 0; i < tlb_ismall_size(); i++) {
418 d.value = dtlb_data_access_read(TLB_ISMALL, i);
419 t.value = dtlb_tag_read_read(TLB_ISMALL, i);
420 print_tlb_entry(i, t, d);
421 }
422
423 printf("TLB_IBIG contents:\n");
424 for (i = 0; i < tlb_ibig_size(); i++) {
425 d.value = dtlb_data_access_read(TLB_IBIG, i);
426 t.value = dtlb_tag_read_read(TLB_IBIG, i);
427 print_tlb_entry(i, t, d);
428 }
429
430 printf("TLB_DSMALL contents:\n");
431 for (i = 0; i < tlb_dsmall_size(); i++) {
432 d.value = dtlb_data_access_read(TLB_DSMALL, i);
433 t.value = dtlb_tag_read_read(TLB_DSMALL, i);
434 print_tlb_entry(i, t, d);
435 }
436
437 printf("TLB_DBIG_1 contents:\n");
438 for (i = 0; i < tlb_dbig_size(); i++) {
439 d.value = dtlb_data_access_read(TLB_DBIG_0, i);
440 t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
441 print_tlb_entry(i, t, d);
442 }
443
444 printf("TLB_DBIG_2 contents:\n");
445 for (i = 0; i < tlb_dbig_size(); i++) {
446 d.value = dtlb_data_access_read(TLB_DBIG_1, i);
447 t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
448 print_tlb_entry(i, t, d);
449 }
450}
451
452#endif
453
/** Report an unserviceable ITLB miss: kill the faulting userspace task or
 * panic on a kernel fault.
 *
 * @param istate	Interrupted state saved on the stack.
 * @param str		Fault description (typically __func__ of the caller).
 */
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str)
{
	fault_if_from_uspace(istate, "%s.", str);
	dump_istate(istate);
	panic("%s.", str);
}
461
462void do_fast_data_access_mmu_miss_fault(istate_t *istate,
463 tlb_tag_access_reg_t tag, const char *str)
464{
465 uintptr_t va;
466
467 va = tag.vpn << MMU_PAGE_WIDTH;
468 if (tag.context) {
469 fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va,
470 tag.context);
471 }
472 dump_istate(istate);
473 printf("Faulting page: %p, ASID=%d.\n", va, tag.context);
474 panic("%s.", str);
475}
476
477void do_fast_data_access_protection_fault(istate_t *istate,
478 tlb_tag_access_reg_t tag, const char *str)
479{
480 uintptr_t va;
481
482 va = tag.vpn << MMU_PAGE_WIDTH;
483
484 if (tag.context) {
485 fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va,
486 tag.context);
487 }
488 printf("Faulting page: %p, ASID=%d\n", va, tag.context);
489 dump_istate(istate);
490 panic("%s.", str);
491}
492
493void dump_sfsr_and_sfar(void)
494{
495 tlb_sfsr_reg_t sfsr;
496 uintptr_t sfar;
497
498 sfsr.value = dtlb_sfsr_read();
499 sfar = dtlb_sfar_read();
500
501#if defined (US)
502 printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
503 "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
504 sfsr.ow, sfsr.fv);
505#elif defined (US3)
506 printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
507 "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
508 sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
509#endif
510
511 printf("DTLB SFAR: address=%p\n", sfar);
512
513 dtlb_sfsr_write(0);
514}
515
/** Invalidate all ITLB and DTLB entries that belong to specified ASID
 * (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
	/* switch to nucleus because we are mapped by the primary context */
	nucleus_enter();
	/* Demap the whole context in both TLBs with a single hypercall. */
	__hypercall_fast4(MMU_DEMAP_CTX, 0, 0, asid,
	    MMU_FLAG_ITLB | MMU_FLAG_DTLB);

	nucleus_leave();
}
530
531/** Invalidate all ITLB and DTLB entries for specified page range in specified
532 * address space.
533 *
534 * @param asid Address Space ID.
535 * @param page First page which to sweep out from ITLB and DTLB.
536 * @param cnt Number of ITLB and DTLB entries to invalidate.
537 */
538void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
539{
540 unsigned int i;
541
542 /* switch to nucleus because we are mapped by the primary context */
543 nucleus_enter();
544
545 for (i = 0; i < cnt; i++) {
546 __hypercall_fast5(MMU_DEMAP_PAGE, 0, 0, page, asid,
547 MMU_FLAG_DTLB | MMU_FLAG_ITLB);
548 }
549
550 nucleus_leave();
551}
552
553/** @}
554 */
Note: See TracBrowser for help on using the repository browser.