source: mainline/kernel/arch/sparc64/src/mm/sun4v/tlb.c@ 3da11f37

Last change on this file was 3da11f37, checked in by Pavel Rimsky <pavel@…>, 16 years ago

Merged changes essential for the code to reach 'uinit'.

/*
 * Copyright (c) 2005 Jakub Jermar
 * Copyright (c) 2008 Pavel Rimsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <arch/mm/tlb.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/sun4v/hypercall.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/tte.h>
#include <arch/mm/tlb.h>
#include <arch/interrupt.h>
#include <interrupt.h>
#include <arch.h>
#include <print.h>
#include <arch/types.h>
#include <config.h>
#include <arch/trap/trap.h>
#include <arch/trap/exception.h>
#include <panic.h>
#include <arch/asm.h>
#include <arch/cpu.h>
#include <arch/mm/pagesize.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif

static void dtlb_pte_copy(pte_t *, size_t, bool);
static void itlb_pte_copy(pte_t *, size_t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
    const char *);
static void do_fast_data_access_protection_fault(istate_t *,
    tlb_tag_access_reg_t, const char *);

char *context_encoding[] = {
        "Primary",
        "Secondary",
        "Nucleus",
        "Reserved"
};

/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
        uint64_t errno = __hypercall_fast3(MMU_DEMAP_ALL, 0, 0,
            MMU_FLAG_DTLB | MMU_FLAG_ITLB);
        if (errno != EOK) {
                panic("Error code = %d.\n", errno);
        }
}

void tlb_arch_init(void)
{
        tlb_invalidate_all();
}

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page      Virtual page address.
 * @param frame     Physical frame address.
 * @param pagesize  Page size.
 * @param locked    True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
#if 0
        tlb_tag_access_reg_t tag;
        tlb_data_t data;
        page_address_t pg;
        frame_address_t fr;

        pg.address = page;
        fr.address = frame;

        tag.context = ASID_KERNEL;
        tag.vpn = pg.vpn;

        dtlb_tag_access_write(tag.value);

        data.value = 0;
        data.v = true;
        data.size = pagesize;
        data.pfn = fr.pfn;
        data.l = locked;
        data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
        data.cv = cacheable;
#endif /* CONFIG_VIRT_IDX_DCACHE */
        data.p = true;
        data.w = true;
        data.g = false;

        dtlb_data_in_write(data.value);
#endif
}
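
/*
 * The body above is the sun4u-style register interface and is disabled on
 * sun4v, where the mapping would instead be handed to the hypervisor. A
 * minimal sketch of that path follows for orientation only: MMU_MAP_ADDR,
 * __hypercall_hyperfast and the sun4v tte_data_t fields are assumptions about
 * this tree (none of them are referenced elsewhere in this file), so this is
 * an illustration, not the actual implementation:
 *
 *      tte_data_t data;
 *
 *      data.value = 0;
 *      data.v = true;
 *      data.ra = frame >> FRAME_WIDTH;
 *      data.size = pagesize;
 *      data.p = true;
 *      data.w = true;
 *      data.cp = cacheable;
 *      __hypercall_hyperfast(page, ASID_KERNEL, data.value, MMU_FLAG_DTLB,
 *          0, MMU_MAP_ADDR);
 */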

/** Copy PTE to TLB.
 *
 * @param t     Page Table Entry to be copied.
 * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
 * @param ro    If true, the entry will be created read-only, regardless
 *              of its w field.
 */
void dtlb_pte_copy(pte_t *t, size_t index, bool ro)
{
#if 0
        tlb_tag_access_reg_t tag;
        tlb_data_t data;
        page_address_t pg;
        frame_address_t fr;

        pg.address = t->page + (index << MMU_PAGE_WIDTH);
        fr.address = t->frame + (index << MMU_PAGE_WIDTH);

        tag.value = 0;
        tag.context = t->as->asid;
        tag.vpn = pg.vpn;

        dtlb_tag_access_write(tag.value);

        data.value = 0;
        data.v = true;
        data.size = PAGESIZE_8K;
        data.pfn = fr.pfn;
        data.l = false;
        data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
        data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_DCACHE */
        data.p = t->k;          /* p like privileged */
        data.w = ro ? false : t->w;
        data.g = t->g;

        dtlb_data_in_write(data.value);
#endif
}

/** Copy PTE to ITLB.
 *
 * @param t     Page Table Entry to be copied.
 * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
 */
void itlb_pte_copy(pte_t *t, size_t index)
{
#if 0
        tlb_tag_access_reg_t tag;
        tlb_data_t data;
        page_address_t pg;
        frame_address_t fr;

        pg.address = t->page + (index << MMU_PAGE_WIDTH);
        fr.address = t->frame + (index << MMU_PAGE_WIDTH);

        tag.value = 0;
        tag.context = t->as->asid;
        tag.vpn = pg.vpn;

        itlb_tag_access_write(tag.value);

        data.value = 0;
        data.v = true;
        data.size = PAGESIZE_8K;
        data.pfn = fr.pfn;
        data.l = false;
        data.cp = t->c;
        data.p = t->k;          /* p like privileged */
        data.w = false;
        data.g = t->g;

        itlb_data_in_write(data.value);
#endif
}

/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
{
        uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
        size_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
        pte_t *t;

        page_table_lock(AS, true);
        t = page_mapping_find(AS, page_16k);
        if (t && PTE_EXECUTABLE(t)) {
                /*
                 * The mapping was found in the software page hash table.
                 * Insert it into ITLB.
                 */
                t->a = true;
                itlb_pte_copy(t, index);
#ifdef CONFIG_TSB
                itsb_pte_copy(t, index);
#endif
                page_table_unlock(AS, true);
        } else {
                /*
                 * Forward the page fault to the address space page fault
                 * handler.
                 */
                page_table_unlock(AS, true);
                if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) ==
                    AS_PF_FAULT) {
                        do_fast_instruction_access_mmu_miss_fault(istate,
                            __func__);
                }
        }
}
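
/*
 * Worked example of the 8K/16K arithmetic used by the miss handlers (the
 * concrete numbers are illustrative only): with MMU_PAGE_WIDTH == 13
 * (8K MMU pages) and PAGE_SIZE == 16K, a miss at virtual address 0x40a000
 * gives
 *
 *      page_16k = ALIGN_DOWN(0x40a000, 16K)  = 0x408000
 *      index    = (0x40a000 >> 13) % 2       = 1
 *
 * so the PTE looked up for the 16K page at 0x408000 is copied into the TLB
 * entry that covers only its upper 8K half.
 */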

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param tag    Content of the TLB Tag Access register as it existed
 *               when the trap happened. This is to prevent confusion
 *               created by clobbered Tag Access register during a nested
 *               DTLB miss.
 * @param istate Interrupted state saved on the stack.
 */
void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
{
        uintptr_t page_8k;
        uintptr_t page_16k;
        size_t index;
        pte_t *t;

        page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
        page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
        index = tag.vpn % MMU_PAGES_PER_PAGE;

        if (tag.context == ASID_KERNEL) {
                if (!tag.vpn) {
                        /* NULL access in kernel */
                        do_fast_data_access_mmu_miss_fault(istate, tag,
                            __func__);
//MH
                } else {
//              } else if (page_8k >= end_of_identity) {
                        /*
                         * The kernel is accessing the I/O space.
                         * We still do identity mapping for I/O,
                         * but without caching.
                         */
                        dtlb_insert_mapping(page_8k, KA2PA(page_8k),
                            PAGESIZE_8K, false, false);
                        return;
                }
                do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
                    "kernel page fault.");
        }

        page_table_lock(AS, true);
        t = page_mapping_find(AS, page_16k);
        if (t) {
                /*
                 * The mapping was found in the software page hash table.
                 * Insert it into DTLB.
                 */
                t->a = true;
                dtlb_pte_copy(t, index, true);
#ifdef CONFIG_TSB
                dtsb_pte_copy(t, index, true);
#endif
                page_table_unlock(AS, true);
        } else {
                /*
                 * Forward the page fault to the address space page fault
                 * handler.
                 */
                page_table_unlock(AS, true);
                if (as_page_fault(page_16k, PF_ACCESS_READ, istate) ==
                    AS_PF_FAULT) {
                        do_fast_data_access_mmu_miss_fault(istate, tag,
                            __func__);
                }
        }
}

/** DTLB protection fault handler.
 *
 * @param tag    Content of the TLB Tag Access register as it existed
 *               when the trap happened. This is to prevent confusion
 *               created by clobbered Tag Access register during a nested
 *               DTLB miss.
 * @param istate Interrupted state saved on the stack.
 */
void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
{
        uintptr_t page_16k;
        size_t index;
        pte_t *t;

        page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
        index = tag.vpn % MMU_PAGES_PER_PAGE;   /* 16K-page emulation */

        page_table_lock(AS, true);
        t = page_mapping_find(AS, page_16k);
        if (t && PTE_WRITABLE(t)) {
                /*
                 * The mapping was found in the software page hash table and is
                 * writable. Demap the old mapping and insert an updated mapping
                 * into DTLB.
                 */
                t->a = true;
                t->d = true;
                dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
                    page_16k + index * MMU_PAGE_SIZE);
                dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
                dtsb_pte_copy(t, index, false);
#endif
                page_table_unlock(AS, true);
        } else {
                /*
                 * Forward the page fault to the address space page fault
                 * handler.
                 */
                page_table_unlock(AS, true);
                if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) ==
                    AS_PF_FAULT) {
                        do_fast_data_access_protection_fault(istate, tag,
                            __func__);
                }
        }
}

/** Print TLB entry (for debugging purposes).
 *
 * The diag field has been left out in order to make this function more generic
 * (there is no diag field in the US3 architecture).
 *
 * @param i TLB entry number
 * @param t TLB entry tag
 * @param d TLB entry data
 */
static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
{
#if 0
        printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
            "ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, "
            "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
            t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
            d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
#endif
}

#if defined (US)

/** Print contents of both TLBs. */
void tlb_print(void)
{
        int i;
        tlb_data_t d;
        tlb_tag_read_reg_t t;

        printf("I-TLB contents:\n");
        for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
                d.value = itlb_data_access_read(i);
                t.value = itlb_tag_read_read(i);
                print_tlb_entry(i, t, d);
        }

        printf("D-TLB contents:\n");
        for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
                d.value = dtlb_data_access_read(i);
                t.value = dtlb_tag_read_read(i);
                print_tlb_entry(i, t, d);
        }
}

#elif defined (US3)

/** Print contents of all TLBs. */
void tlb_print(void)
{
        int i;
        tlb_data_t d;
        tlb_tag_read_reg_t t;

        printf("TLB_ISMALL contents:\n");
        for (i = 0; i < tlb_ismall_size(); i++) {
                d.value = dtlb_data_access_read(TLB_ISMALL, i);
                t.value = dtlb_tag_read_read(TLB_ISMALL, i);
                print_tlb_entry(i, t, d);
        }

        printf("TLB_IBIG contents:\n");
        for (i = 0; i < tlb_ibig_size(); i++) {
                d.value = dtlb_data_access_read(TLB_IBIG, i);
                t.value = dtlb_tag_read_read(TLB_IBIG, i);
                print_tlb_entry(i, t, d);
        }

        printf("TLB_DSMALL contents:\n");
        for (i = 0; i < tlb_dsmall_size(); i++) {
                d.value = dtlb_data_access_read(TLB_DSMALL, i);
                t.value = dtlb_tag_read_read(TLB_DSMALL, i);
                print_tlb_entry(i, t, d);
        }

        printf("TLB_DBIG_1 contents:\n");
        for (i = 0; i < tlb_dbig_size(); i++) {
                d.value = dtlb_data_access_read(TLB_DBIG_0, i);
                t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
                print_tlb_entry(i, t, d);
        }

        printf("TLB_DBIG_2 contents:\n");
        for (i = 0; i < tlb_dbig_size(); i++) {
                d.value = dtlb_data_access_read(TLB_DBIG_1, i);
                t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
                print_tlb_entry(i, t, d);
        }
}

#endif

void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str)
{
        fault_if_from_uspace(istate, "%s.", str);
        dump_istate(istate);
        panic("%s.", str);
}

void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
        uintptr_t va;

        va = tag.vpn << MMU_PAGE_WIDTH;
        if (tag.context) {
                fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va,
                    tag.context);
        }
        dump_istate(istate);
        printf("Faulting page: %p, ASID=%d.\n", va, tag.context);
        panic("%s.", str);
}

void do_fast_data_access_protection_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
        uintptr_t va;

        va = tag.vpn << MMU_PAGE_WIDTH;

        if (tag.context) {
                fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va,
                    tag.context);
        }
        printf("Faulting page: %p, ASID=%d\n", va, tag.context);
        dump_istate(istate);
        panic("%s.", str);
}

void dump_sfsr_and_sfar(void)
{
        tlb_sfsr_reg_t sfsr;
        uintptr_t sfar;

        sfsr.value = dtlb_sfsr_read();
        sfar = dtlb_sfar_read();

#if defined (US)
        printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
            "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
            sfsr.ow, sfsr.fv);
#elif defined (US3)
        printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
            "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
            sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
#endif

        printf("DTLB SFAR: address=%p\n", sfar);

        dtlb_sfsr_write(0);
}

/** Invalidate all ITLB and DTLB entries that belong to specified ASID
 * (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
        /* switch to nucleus because we are mapped by the primary context */
        nucleus_enter();
        __hypercall_fast4(MMU_DEMAP_CTX, 0, 0, asid,
            MMU_FLAG_ITLB | MMU_FLAG_DTLB);

        nucleus_leave();
}

/** Invalidate all ITLB and DTLB entries for specified page range in specified
 * address space.
 *
 * @param asid Address Space ID.
 * @param page First page to sweep out of the ITLB and DTLB.
 * @param cnt  Number of pages to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
        unsigned int i;

        /* switch to nucleus because we are mapped by the primary context */
        nucleus_enter();

        for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
                __hypercall_fast5(MMU_DEMAP_PAGE, 0, 0,
                    page + i * MMU_PAGE_SIZE, asid,
                    MMU_FLAG_DTLB | MMU_FLAG_ITLB);
        }

        nucleus_leave();
}
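
/*
 * Illustrative use of the routine above (hypothetical values): invalidating a
 * single 16K page, e.g. tlb_invalidate_pages(asid, 0x408000, 1), issues one
 * MMU_DEMAP_PAGE hypercall per 8K MMU subpage (0x408000 and 0x40a000), so
 * both halves of the emulated 16K page leave the ITLB and DTLB.
 */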

/** @}
 */