source: mainline/kernel/arch/sparc64/src/mm/sun4u/tlb.c@ 0e78394

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 0e78394 was 0e78394, checked in by Jakub Jermar <jakub@…>, 14 years ago

sparc64: Make sure to use AS_KERNEL when handling kernel non-identity.

  • Property mode set to 100644
File size: 15.5 KB
RevLine 
[0d04024]1/*
[df4ed85]2 * Copyright (c) 2005 Jakub Jermar
[0d04024]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[7008097]29/** @addtogroup sparc64mm
[b45c443]30 * @{
31 */
32/** @file
33 */
34
[0d04024]35#include <arch/mm/tlb.h>
36#include <mm/tlb.h>
[f47fd19]37#include <mm/as.h>
38#include <mm/asid.h>
[0cfc4d38]39#include <arch/mm/frame.h>
40#include <arch/mm/page.h>
41#include <arch/mm/mmu.h>
[f47fd19]42#include <arch/interrupt.h>
[e2bf639]43#include <interrupt.h>
[f47fd19]44#include <arch.h>
[0d04024]45#include <print.h>
[d99c1d2]46#include <typedefs.h>
[0cfc4d38]47#include <config.h>
[49b6d32]48#include <arch/trap/trap.h>
[7bb6b06]49#include <arch/trap/exception.h>
[008029d]50#include <panic.h>
[b6fba84]51#include <arch/asm.h>
[387416b]52#include <genarch/mm/page_ht.h>
[02f441c0]53
[29b2bbf]54#ifdef CONFIG_TSB
55#include <arch/mm/tsb.h>
56#endif
57
[98000fb]58static void dtlb_pte_copy(pte_t *, size_t, bool);
59static void itlb_pte_copy(pte_t *, size_t);
[7008097]60static void do_fast_instruction_access_mmu_miss_fault(istate_t *, uintptr_t,
61 const char *);
[965dc18]62static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
63 const char *);
64static void do_fast_data_access_protection_fault(istate_t *,
65 tlb_tag_access_reg_t, const char *);
[f47fd19]66
[a000878c]67const char *context_encoding[] = {
[b6fba84]68 "Primary",
69 "Secondary",
70 "Nucleus",
71 "Reserved"
72};
[0d04024]73
/** Initialize the MMU TLBs: drop stale entries and reset fault status. */
void tlb_arch_init(void)
{
	/* Drop every non-locked entry from both the ITLB and the DTLB. */
	tlb_invalidate_all();

	/* Reset the Synchronous Fault Status registers of both MMUs. */
	dtlb_sfsr_write(0);
	itlb_sfsr_write(0);
}
[b6fba84]87
/** Insert privileged mapping into DMMU TLB.
 *
 * @param page		Virtual page address.
 * @param frame		Physical frame address.
 * @param pagesize	Page size.
 * @param locked	True for permanent mappings, false otherwise.
 * @param cacheable	True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	pg.address = page;
	fr.address = frame;

	/* Privileged mappings are installed under the kernel ASID. */
	tag.context = ASID_KERNEL;
	tag.vpn = pg.vpn;

	/* The Tag Access register must be set up before Data In is written. */
	dtlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;
	data.size = pagesize;
	data.pfn = fr.pfn;
	data.l = locked;
	data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
	data.cv = cacheable;	/* cacheable in the virtually-indexed D-cache too */
#endif /* CONFIG_VIRT_IDX_DCACHE */
	data.p = true;		/* privileged */
	data.w = true;		/* writable */
	data.g = false;		/* not global */

	/* Writing Data In inserts the entry into the DTLB. */
	dtlb_data_in_write(data.value);
}
127
/** Copy PTE to DTLB.
 *
 * @param t	Page Table Entry to be copied.
 * @param index	Zero if lower 8K-subpage, one if higher 8K-subpage.
 * @param ro	If true, the entry will be created read-only, regardless
 *		of its w field.
 */
void dtlb_pte_copy(pte_t *t, size_t index, bool ro)
{
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	/* 16K software pages are emulated by pairs of adjacent 8K MMU pages. */
	pg.address = t->page + (index << MMU_PAGE_WIDTH);
	fr.address = t->frame + (index << MMU_PAGE_WIDTH);

	tag.value = 0;
	tag.context = t->as->asid;
	tag.vpn = pg.vpn;

	/* The Tag Access register must be valid before writing Data In. */
	dtlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;
	data.size = PAGESIZE_8K;
	data.pfn = fr.pfn;
	data.l = false;		/* never locked */
	data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
	data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_DCACHE */
	data.p = t->k;	/* p like privileged */
	data.w = ro ? false : t->w;
	data.g = t->g;

	/* Writing Data In inserts the entry into the DTLB. */
	dtlb_data_in_write(data.value);
}
166
[29b2bbf]167/** Copy PTE to ITLB.
168 *
[965dc18]169 * @param t Page Table Entry to be copied.
170 * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
[29b2bbf]171 */
[98000fb]172void itlb_pte_copy(pte_t *t, size_t index)
[f47fd19]173{
[a7961271]174 tlb_tag_access_reg_t tag;
175 tlb_data_t data;
176 page_address_t pg;
177 frame_address_t fr;
178
[2057572]179 pg.address = t->page + (index << MMU_PAGE_WIDTH);
180 fr.address = t->frame + (index << MMU_PAGE_WIDTH);
[a7961271]181
182 tag.value = 0;
183 tag.context = t->as->asid;
184 tag.vpn = pg.vpn;
185
186 itlb_tag_access_write(tag.value);
187
188 data.value = 0;
189 data.v = true;
190 data.size = PAGESIZE_8K;
191 data.pfn = fr.pfn;
192 data.l = false;
193 data.cp = t->c;
[cfa70add]194 data.p = t->k; /* p like privileged */
[a7961271]195 data.w = false;
196 data.g = t->g;
197
198 itlb_data_in_write(data.value);
[f47fd19]199}
200
[008029d]201/** ITLB miss handler. */
[96b02eb9]202void fast_instruction_access_mmu_miss(sysarg_t unused, istate_t *istate)
[008029d]203{
[2bf4936]204 uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
[98000fb]205 size_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
[a7961271]206 pte_t *t;
207
[0ff03f3]208 t = page_mapping_find(AS, page_16k, true);
[a7961271]209 if (t && PTE_EXECUTABLE(t)) {
210 /*
211 * The mapping was found in the software page hash table.
212 * Insert it into ITLB.
213 */
214 t->a = true;
[2057572]215 itlb_pte_copy(t, index);
[29b2bbf]216#ifdef CONFIG_TSB
[2057572]217 itsb_pte_copy(t, index);
[29b2bbf]218#endif
[a7961271]219 } else {
220 /*
[771cd22]221 * Forward the page fault to the address space page fault
222 * handler.
[7008097]223 */
[2bf4936]224 if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) ==
225 AS_PF_FAULT) {
[771cd22]226 do_fast_instruction_access_mmu_miss_fault(istate,
[7008097]227 istate->tpc, __func__);
[a7961271]228 }
229 }
[008029d]230}
231
/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param tag	Content of the TLB Tag Access register as it existed
 *		when the trap happened. This is to prevent confusion
 *		created by clobbered Tag Access register during a nested
 *		DTLB miss.
 * @param istate	Interrupted state saved on the stack.
 */
void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
{
	uintptr_t page_8k;
	uintptr_t page_16k;
	size_t index;
	pte_t *t;
	as_t *as = AS;

	page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
	page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
	index = tag.vpn % MMU_PAGES_PER_PAGE;	/* 16K-page emulation */

	if (tag.context == ASID_KERNEL) {
		if (!tag.vpn) {
			/* NULL access in kernel */
			do_fast_data_access_mmu_miss_fault(istate, tag,
			    "Dereferencing NULL pointer.");
		} else if (page_8k >= end_of_identity) {
			/* Kernel non-identity. */
			as = AS_KERNEL;
		} else {
			/* Identity-mapped kernel range should never miss here. */
			do_fast_data_access_mmu_miss_fault(istate, tag,
			    "Unexpected kernel page fault.");
		}
	}

	t = page_mapping_find(as, page_16k, true);
	if (t) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into DTLB. It is inserted read-only (ro == true),
		 * so a subsequent write raises the protection trap handled by
		 * fast_data_access_protection().
		 */
		t->a = true;
		dtlb_pte_copy(t, index, true);
#ifdef CONFIG_TSB
		dtsb_pte_copy(t, index, true);
#endif
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		if (as_page_fault(page_16k, PF_ACCESS_READ, istate) ==
		    AS_PF_FAULT) {
			do_fast_data_access_mmu_miss_fault(istate, tag,
			    __func__);
		}
	}
}
292
/** DTLB protection fault handler.
 *
 * @param tag	Content of the TLB Tag Access register as it existed
 *		when the trap happened. This is to prevent confusion
 *		created by clobbered Tag Access register during a nested
 *		DTLB miss.
 * @param istate	Interrupted state saved on the stack.
 */
void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
{
	uintptr_t page_16k;
	size_t index;
	pte_t *t;
	as_t *as = AS;

	page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
	index = tag.vpn % MMU_PAGES_PER_PAGE;	/* 16K-page emulation */

	/* Kernel faults are looked up in the kernel address space. */
	if (tag.context == ASID_KERNEL)
		as = AS_KERNEL;

	t = page_mapping_find(as, page_16k, true);
	if (t && PTE_WRITABLE(t)) {
		/*
		 * The mapping was found in the software page hash table and is
		 * writable. Demap the old mapping and insert an updated mapping
		 * into DTLB.
		 */
		t->a = true;
		t->d = true;
		/* NOTE(review): the demap targets the secondary context --
		 * confirm the stale read-only entry lives there at this
		 * trap level. */
		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
		    page_16k + index * MMU_PAGE_SIZE);
		dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
		dtsb_pte_copy(t, index, false);
#endif
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) ==
		    AS_PF_FAULT) {
			do_fast_data_access_protection_fault(istate, tag,
			    __func__);
		}
	}
}
341
[965dc18]342/** Print TLB entry (for debugging purposes).
343 *
344 * The diag field has been left out in order to make this function more generic
345 * (there is no diag field in US3 architeture).
346 *
347 * @param i TLB entry number
348 * @param t TLB entry tag
349 * @param d TLB entry data
350 */
351static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
352{
[7e752b2]353 printf("%u: vpn=%#" PRIx64 ", context=%u, v=%u, size=%u, nfo=%u, "
354 "ie=%u, soft2=%#x, pfn=%#x, soft=%#x, l=%u, "
355 "cp=%u, cv=%u, e=%u, p=%u, w=%u, g=%u\n", i, (uint64_t) t.vpn,
[965dc18]356 t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
357 d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
358}
359
360#if defined (US)
361
[0d04024]362/** Print contents of both TLBs. */
363void tlb_print(void)
364{
365 int i;
366 tlb_data_t d;
367 tlb_tag_read_reg_t t;
368
369 printf("I-TLB contents:\n");
370 for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
371 d.value = itlb_data_access_read(i);
[c52ed6b]372 t.value = itlb_tag_read_read(i);
[965dc18]373 print_tlb_entry(i, t, d);
[0d04024]374 }
375
376 printf("D-TLB contents:\n");
377 for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
378 d.value = dtlb_data_access_read(i);
[c52ed6b]379 t.value = dtlb_tag_read_read(i);
[965dc18]380 print_tlb_entry(i, t, d);
[0d04024]381 }
[965dc18]382}
383
384#elif defined (US3)
[0d04024]385
[965dc18]386/** Print contents of all TLBs. */
387void tlb_print(void)
388{
389 int i;
390 tlb_data_t d;
391 tlb_tag_read_reg_t t;
392
393 printf("TLB_ISMALL contents:\n");
394 for (i = 0; i < tlb_ismall_size(); i++) {
395 d.value = dtlb_data_access_read(TLB_ISMALL, i);
396 t.value = dtlb_tag_read_read(TLB_ISMALL, i);
397 print_tlb_entry(i, t, d);
398 }
399
400 printf("TLB_IBIG contents:\n");
401 for (i = 0; i < tlb_ibig_size(); i++) {
402 d.value = dtlb_data_access_read(TLB_IBIG, i);
403 t.value = dtlb_tag_read_read(TLB_IBIG, i);
404 print_tlb_entry(i, t, d);
405 }
406
407 printf("TLB_DSMALL contents:\n");
408 for (i = 0; i < tlb_dsmall_size(); i++) {
409 d.value = dtlb_data_access_read(TLB_DSMALL, i);
410 t.value = dtlb_tag_read_read(TLB_DSMALL, i);
411 print_tlb_entry(i, t, d);
412 }
413
414 printf("TLB_DBIG_1 contents:\n");
415 for (i = 0; i < tlb_dbig_size(); i++) {
416 d.value = dtlb_data_access_read(TLB_DBIG_0, i);
417 t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
418 print_tlb_entry(i, t, d);
419 }
420
421 printf("TLB_DBIG_2 contents:\n");
422 for (i = 0; i < tlb_dbig_size(); i++) {
423 d.value = dtlb_data_access_read(TLB_DBIG_1, i);
424 t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
425 print_tlb_entry(i, t, d);
426 }
[0d04024]427}
[dbb6886]428
[965dc18]429#endif
430
/** Report an unresolvable ITLB miss and panic.
 *
 * @param istate	Interrupted state saved on the stack.
 * @param va	Faulting virtual address.
 * @param str	Human-readable description of the fault.
 */
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    uintptr_t va, const char *str)
{
	/* Faults from userspace are handled there; kernel faults panic below. */
	fault_if_from_uspace(istate, "%s, address=%p.", str, (void *) va);
	panic_memtrap(istate, PF_ACCESS_EXEC, va, str);
}
437
[2057572]438void do_fast_data_access_mmu_miss_fault(istate_t *istate,
439 tlb_tag_access_reg_t tag, const char *str)
[f47fd19]440{
441 uintptr_t va;
442
[2057572]443 va = tag.vpn << MMU_PAGE_WIDTH;
[7e752b2]444 fault_if_from_uspace(istate, "%s, page=%p (asid=%u).", str,
445 (void *) va, tag.context);
[c15b374]446 panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, str);
[f47fd19]447}
448
[2057572]449void do_fast_data_access_protection_fault(istate_t *istate,
450 tlb_tag_access_reg_t tag, const char *str)
[e0b241f]451{
452 uintptr_t va;
453
[2057572]454 va = tag.vpn << MMU_PAGE_WIDTH;
[7e752b2]455 fault_if_from_uspace(istate, "%s, page=%p (asid=%u).", str,
456 (void *) va, tag.context);
[c15b374]457 panic_memtrap(istate, PF_ACCESS_WRITE, va, str);
[e0b241f]458}
459
[8c2214e]460void describe_dmmu_fault(void)
461{
462 tlb_sfsr_reg_t sfsr;
463 uintptr_t sfar;
464
465 sfsr.value = dtlb_sfsr_read();
466 sfar = dtlb_sfar_read();
467
468#if defined (US)
469 printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
470 "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
471 sfsr.ow, sfsr.fv);
472#elif defined (US3)
473 printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
474 "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
475 sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
476#endif
[7e752b2]477
478 printf("DTLB SFAR: address=%p\n", (void *) sfar);
[8c2214e]479
480 dtlb_sfsr_write(0);
481}
482
/** Dump and clear the DTLB Synchronous Fault Status/Address registers.
 *
 * The body was an exact duplicate of describe_dmmu_fault(); delegate to it
 * so the two cannot drift apart. Behavior is unchanged: print SFSR and SFAR,
 * then clear SFSR.
 */
void dump_sfsr_and_sfar(void)
{
	describe_dmmu_fault();
}
505
[687246b]506#if defined (US)
[965dc18]507/** Invalidate all unlocked ITLB and DTLB entries. */
508void tlb_invalidate_all(void)
509{
510 int i;
511
[8dbc18c]512 /*
513 * Walk all ITLB and DTLB entries and remove all unlocked mappings.
514 *
515 * The kernel doesn't use global mappings so any locked global mappings
[965dc18]516 * found must have been created by someone else. Their only purpose now
[8dbc18c]517 * is to collide with proper mappings. Invalidate immediately. It should
518 * be safe to invalidate them as late as now.
519 */
520
[965dc18]521 tlb_data_t d;
522 tlb_tag_read_reg_t t;
523
[dbb6886]524 for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
525 d.value = itlb_data_access_read(i);
[8dbc18c]526 if (!d.l || d.g) {
[dbb6886]527 t.value = itlb_tag_read_read(i);
528 d.v = false;
529 itlb_tag_access_write(t.value);
530 itlb_data_access_write(i, d.value);
531 }
532 }
[965dc18]533
[dbb6886]534 for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
535 d.value = dtlb_data_access_read(i);
[8dbc18c]536 if (!d.l || d.g) {
[dbb6886]537 t.value = dtlb_tag_read_read(i);
538 d.v = false;
539 dtlb_tag_access_write(t.value);
540 dtlb_data_access_write(i, d.value);
541 }
542 }
[965dc18]543
[687246b]544}
[965dc18]545
[687246b]546#elif defined (US3)
[965dc18]547
/** Invalidate all unlocked ITLB and DTLB entries.
 *
 * On US3, a single demap-all operation per MMU suffices.
 */
void tlb_invalidate_all(void)
{
	itlb_demap(TLB_DEMAP_ALL, 0, 0);
	dtlb_demap(TLB_DEMAP_ALL, 0, 0);
}
554
[687246b]555#endif
556
[771cd22]557/** Invalidate all ITLB and DTLB entries that belong to specified ASID
558 * (Context).
[dbb6886]559 *
560 * @param asid Address Space ID.
561 */
562void tlb_invalidate_asid(asid_t asid)
563{
[fd85ae5]564 tlb_context_reg_t pc_save, ctx;
[ed166f7]565
[fd85ae5]566 /* switch to nucleus because we are mapped by the primary context */
567 nucleus_enter();
568
569 ctx.v = pc_save.v = mmu_primary_context_read();
[ed166f7]570 ctx.context = asid;
[fd85ae5]571 mmu_primary_context_write(ctx.v);
572
573 itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
574 dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
[ed166f7]575
[fd85ae5]576 mmu_primary_context_write(pc_save.v);
[ed166f7]577
[fd85ae5]578 nucleus_leave();
[dbb6886]579}
580
[771cd22]581/** Invalidate all ITLB and DTLB entries for specified page range in specified
582 * address space.
[dbb6886]583 *
[965dc18]584 * @param asid Address Space ID.
585 * @param page First page which to sweep out from ITLB and DTLB.
586 * @param cnt Number of ITLB and DTLB entries to invalidate.
[dbb6886]587 */
[98000fb]588void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
[dbb6886]589{
[6c441cf8]590 unsigned int i;
[fd85ae5]591 tlb_context_reg_t pc_save, ctx;
[ed166f7]592
[fd85ae5]593 /* switch to nucleus because we are mapped by the primary context */
594 nucleus_enter();
595
596 ctx.v = pc_save.v = mmu_primary_context_read();
[ed166f7]597 ctx.context = asid;
[fd85ae5]598 mmu_primary_context_write(ctx.v);
[4512d7e]599
[2057572]600 for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
[454f1da]601 itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
[2057572]602 page + i * MMU_PAGE_SIZE);
[454f1da]603 dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
[2057572]604 page + i * MMU_PAGE_SIZE);
[4512d7e]605 }
[ed166f7]606
[fd85ae5]607 mmu_primary_context_write(pc_save.v);
608
609 nucleus_leave();
[dbb6886]610}
[b45c443]611
[10b890b]612/** @}
[b45c443]613 */
Note: See TracBrowser for help on using the repository browser.