source: mainline/kernel/arch/ia64/src/mm/tlb.c@ 05882233

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 05882233 was 05882233, checked in by Jiří Zárevúcky <jiri.zarevucky@…>, 7 years ago

Unify various barrier includes into <barrier.h>

  • Property mode set to 100644
File size: 18.4 KB
RevLine 
[36b01bb2]1/*
[df4ed85]2 * Copyright (c) 2006 Jakub Jermar
[36b01bb2]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[5bda2f3e]29/** @addtogroup ia64mm
[b45c443]30 * @{
31 */
32/** @file
33 */
34
[36b01bb2]35/*
36 * TLB management.
37 */
38
39#include <mm/tlb.h>
[a0d74fd]40#include <mm/asid.h>
[9ad03fe]41#include <mm/page.h>
42#include <mm/as.h>
[bc78c75]43#include <arch/mm/tlb.h>
[a0d74fd]44#include <arch/mm/page.h>
[68091bd]45#include <arch/mm/vhpt.h>
[05882233]46#include <barrier.h>
[2c49fbbe]47#include <arch/interrupt.h>
[7c322bd]48#include <arch/pal/pal.h>
49#include <arch/asm.h>
[63e27ef]50#include <assert.h>
[2c49fbbe]51#include <panic.h>
[1065603e]52#include <print.h>
[9ad03fe]53#include <arch.h>
[a175a67]54#include <interrupt.h>
[22f0561]55#include <arch/legacyio.h>
[5bda2f3e]56
[ef67bab]57/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
	ipl_t ipl;
	uintptr_t adr;
	uint32_t count1, count2, stride1, stride2;

	unsigned int i, j;

	/*
	 * Query PAL for the parameters of the architected ptc.e loop:
	 * a starting address, two nested iteration counts and the two
	 * address strides to apply between iterations.
	 */
	adr = PAL_PTCE_INFO_BASE();
	count1 = PAL_PTCE_INFO_COUNT1();
	count2 = PAL_PTCE_INFO_COUNT2();
	stride1 = PAL_PTCE_INFO_STRIDE1();
	stride2 = PAL_PTCE_INFO_STRIDE2();

	/* The purge sequence must not be interleaved with interrupts. */
	ipl = interrupts_disable();

	for (i = 0; i < count1; i++) {
		for (j = 0; j < count2; j++) {
			/* Purge one translation cache entry (ptc.e). */
			asm volatile (
			    "ptc.e %[adr] ;;"
			    :: [adr] "r" (adr)
			);
			adr += stride2;
		}
		adr += stride1;
	}

	interrupts_restore(ipl);

	/* Serialize data and instruction streams after the purge. */
	srlz_d();
	srlz_i();

#ifdef CONFIG_VHPT
	/* Keep the virtual hash page table in sync with the TLB. */
	vhpt_invalidate_all();
#endif
}
94
95/** Invalidate entries belonging to an address space.
96 *
[5bda2f3e]97 * @param asid Address space identifier.
98 *
[36b01bb2]99 */
void tlb_invalidate_asid(asid_t asid)
{
	/*
	 * There is no efficient way to invalidate just the entries of a
	 * single address space here, so fall back to flushing everything.
	 */
	tlb_invalidate_all();
}
[bc78c75]104
[a82500ce]105
[98000fb]106void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
[a82500ce]107{
[5bda2f3e]108 region_register_t rr;
[d0cf9de]109 bool restore_rr = false;
[1065603e]110 int b = 0;
111 int c = cnt;
[a35b458]112
[7f1c620]113 uintptr_t va;
[1065603e]114 va = page;
[a35b458]115
[9043309c]116 rr.word = rr_read(VA2VRN(page));
117 if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(page))))) {
[d0cf9de]118 /*
119 * The selected region register does not contain required RID.
120 * Save the old content of the register and replace the RID.
121 */
[5bda2f3e]122 region_register_t rr0;
[a35b458]123
[d0cf9de]124 rr0 = rr;
[9043309c]125 rr0.map.rid = ASID2RID(asid, VA2VRN(page));
126 rr_write(VA2VRN(page), rr0.word);
[d0cf9de]127 srlz_d();
128 srlz_i();
129 }
[a35b458]130
[5bda2f3e]131 while (c >>= 1)
[1065603e]132 b++;
133 b >>= 1;
[7f1c620]134 uint64_t ps;
[a35b458]135
[1065603e]136 switch (b) {
[666773c]137 case 0: /* cnt 1 - 3 */
[ee289cf0]138 ps = PAGE_WIDTH;
139 break;
[666773c]140 case 1: /* cnt 4 - 15 */
141 ps = PAGE_WIDTH + 2;
[9043309c]142 va &= ~((1UL << ps) - 1);
[ee289cf0]143 break;
[666773c]144 case 2: /* cnt 16 - 63 */
145 ps = PAGE_WIDTH + 4;
[9043309c]146 va &= ~((1UL << ps) - 1);
[ee289cf0]147 break;
[666773c]148 case 3: /* cnt 64 - 255 */
149 ps = PAGE_WIDTH + 6;
[9043309c]150 va &= ~((1UL << ps) - 1);
[ee289cf0]151 break;
[666773c]152 case 4: /* cnt 256 - 1023 */
153 ps = PAGE_WIDTH + 8;
[9043309c]154 va &= ~((1UL << ps) - 1);
[ee289cf0]155 break;
[666773c]156 case 5: /* cnt 1024 - 4095 */
157 ps = PAGE_WIDTH + 10;
[9043309c]158 va &= ~((1UL << ps) - 1);
[ee289cf0]159 break;
[666773c]160 case 6: /* cnt 4096 - 16383 */
161 ps = PAGE_WIDTH + 12;
[9043309c]162 va &= ~((1UL << ps) - 1);
[ee289cf0]163 break;
[666773c]164 case 7: /* cnt 16384 - 65535 */
165 case 8: /* cnt 65536 - (256K - 1) */
166 ps = PAGE_WIDTH + 14;
[9043309c]167 va &= ~((1UL << ps) - 1);
[ee289cf0]168 break;
169 default:
[666773c]170 ps = PAGE_WIDTH + 18;
[9043309c]171 va &= ~((1UL << ps) - 1);
[ee289cf0]172 break;
[d0cf9de]173 }
[a35b458]174
[9043309c]175 for (; va < (page + cnt * PAGE_SIZE); va += (1UL << ps))
[5bda2f3e]176 asm volatile (
[1433ecda]177 "ptc.l %[va], %[ps] ;;"
178 :: [va] "r" (va),
179 [ps] "r" (ps << 2)
[5bda2f3e]180 );
[a35b458]181
[d0cf9de]182 srlz_d();
183 srlz_i();
[a35b458]184
[d0cf9de]185 if (restore_rr) {
[9043309c]186 rr_write(VA2VRN(page), rr.word);
[d0cf9de]187 srlz_d();
188 srlz_i();
189 }
[a82500ce]190}
191
[95042fd]192/** Insert data into data translation cache.
193 *
[5bda2f3e]194 * @param va Virtual page address.
195 * @param asid Address space identifier.
196 * @param entry The rest of TLB entry as required by TLB insertion
197 * format.
198 *
[95042fd]199 */
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
	/* dtc == true selects the data translation cache. */
	tc_mapping_insert(va, asid, entry, true);
}
[bc78c75]204
[95042fd]205/** Insert data into instruction translation cache.
206 *
[5bda2f3e]207 * @param va Virtual page address.
208 * @param asid Address space identifier.
209 * @param entry The rest of TLB entry as required by TLB insertion
210 * format.
[95042fd]211 */
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
	/* dtc == false selects the instruction translation cache. */
	tc_mapping_insert(va, asid, entry, false);
}
[bc78c75]216
[95042fd]217/** Insert data into instruction or data translation cache.
218 *
[5bda2f3e]219 * @param va Virtual page address.
220 * @param asid Address space identifier.
221 * @param entry The rest of TLB entry as required by TLB insertion
222 * format.
223 * @param dtc If true, insert into data translation cache, use
224 * instruction translation cache otherwise.
225 *
[95042fd]226 */
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register_t rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register_t rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	/*
	 * The insertion sequence runs with PSR.ic cleared (rsm), as required
	 * for writes to cr.ifa/cr.itir and the itc instruction; the saved
	 * PSR is restored from r8 afterwards.  The predicate pair p6/p7
	 * chooses itc.i vs. itc.d depending on the dtc flag.
	 */
	asm volatile (
	    "mov r8 = psr ;;\n"
	    "rsm %[mask] ;;\n"                 /* PSR_IC_MASK */
	    "srlz.d ;;\n"
	    "srlz.i ;;\n"
	    "mov cr.ifa = %[va]\n"             /* va */
	    "mov cr.itir = %[word1] ;;\n"      /* entry.word[1] */
	    "cmp.eq p6, p7 = %[dtc], r0 ;;\n"  /* decide between itc and dtc */
	    "(p6) itc.i %[word0] ;;\n"
	    "(p7) itc.d %[word0] ;;\n"
	    "mov psr.l = r8 ;;\n"
	    "srlz.d ;;\n"
	    :: [mask] "i" (PSR_IC_MASK),
	       [va] "r" (va),
	       [word0] "r" (entry.word[0]),
	       [word1] "r" (entry.word[1]),
	       [dtc] "r" (dtc)
	    : "p6", "p7", "r8"
	);

	if (restore_rr) {
		/* Restore the original region register contents. */
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
273
[95042fd]274/** Insert data into instruction translation register.
275 *
[5bda2f3e]276 * @param va Virtual page address.
277 * @param asid Address space identifier.
278 * @param entry The rest of TLB entry as required by TLB insertion
279 * format.
280 * @param tr Translation register.
281 *
[95042fd]282 */
void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
{
	/* dtr == false selects an instruction translation register. */
	tr_mapping_insert(va, asid, entry, false, tr);
}
287
[95042fd]288/** Insert data into data translation register.
289 *
[5bda2f3e]290 * @param va Virtual page address.
291 * @param asid Address space identifier.
292 * @param entry The rest of TLB entry as required by TLB insertion
293 * format.
294 * @param tr Translation register.
295 *
[95042fd]296 */
void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
{
	/* dtr == true selects a data translation register. */
	tr_mapping_insert(va, asid, entry, true, tr);
}
[bc78c75]301
[95042fd]302/** Insert data into instruction or data translation register.
303 *
[5bda2f3e]304 * @param va Virtual page address.
305 * @param asid Address space identifier.
306 * @param entry The rest of TLB entry as required by TLB insertion
307 * format.
308 * @param dtr If true, insert into data translation register, use
309 * instruction translation register otherwise.
310 * @param tr Translation register.
311 *
[95042fd]312 */
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr,
    size_t tr)
{
	region_register_t rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register_t rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	/*
	 * Same PSR.ic-off sequence as tc_mapping_insert(), except that the
	 * entry goes into a pinned translation register (itr.i/itr.d at
	 * index tr) instead of the translation cache.
	 */
	asm volatile (
	    "mov r8 = psr ;;\n"
	    "rsm %[mask] ;;\n"                 /* PSR_IC_MASK */
	    "srlz.d ;;\n"
	    "srlz.i ;;\n"
	    "mov cr.ifa = %[va]\n"             /* va */
	    "mov cr.itir = %[word1] ;;\n"      /* entry.word[1] */
	    "cmp.eq p6, p7 = %[dtr], r0 ;;\n"  /* decide between itr and dtr */
	    "(p6) itr.i itr[%[tr]] = %[word0] ;;\n"
	    "(p7) itr.d dtr[%[tr]] = %[word0] ;;\n"
	    "mov psr.l = r8 ;;\n"
	    "srlz.d ;;\n"
	    :: [mask] "i" (PSR_IC_MASK),
	       [va] "r" (va),
	       [word1] "r" (entry.word[1]),
	       [word0] "r" (entry.word[0]),
	       [tr] "r" (tr),
	       [dtr] "r" (dtr)
	    : "p6", "p7", "r8"
	);

	if (restore_rr) {
		/* Restore the original region register contents. */
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
361
[a0d74fd]362/** Insert data into DTLB.
363 *
[5bda2f3e]364 * @param page Virtual page address including VRN bits.
365 * @param frame Physical frame address.
366 * @param dtr If true, insert into data translation register, use data
367 * translation cache otherwise.
368 * @param tr Translation register if dtr is true, ignored otherwise.
369 *
[a0d74fd]370 */
[5bda2f3e]371void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr,
[98000fb]372 size_t tr)
[a0d74fd]373{
374 tlb_entry_t entry;
[a35b458]375
[a0d74fd]376 entry.word[0] = 0;
377 entry.word[1] = 0;
[a35b458]378
[5bda2f3e]379 entry.p = true; /* present */
[a0d74fd]380 entry.ma = MA_WRITEBACK;
[5bda2f3e]381 entry.a = true; /* already accessed */
382 entry.d = true; /* already dirty */
[a0d74fd]383 entry.pl = PL_KERNEL;
384 entry.ar = AR_READ | AR_WRITE;
385 entry.ppn = frame >> PPN_SHIFT;
386 entry.ps = PAGE_WIDTH;
[a35b458]387
[a0d74fd]388 if (dtr)
389 dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
390 else
391 dtc_mapping_insert(page, ASID_KERNEL, entry);
392}
393
[208259c]394/** Purge kernel entries from DTR.
395 *
396 * Purge DTR entries used by the kernel.
397 *
[5bda2f3e]398 * @param page Virtual page address including VRN bits.
399 * @param width Width of the purge in bits.
400 *
[208259c]401 */
void dtr_purge(uintptr_t page, size_t width)
{
	/* ptr.d expects the purge size encoded in bits 7:2, hence << 2. */
	asm volatile (
	    "ptr.d %[page], %[width]\n"
	    :: [page] "r" (page),
	       [width] "r" (width << 2)
	);
}
410
411
[9ad03fe]412/** Copy content of PTE into data translation cache.
413 *
[5bda2f3e]414 * @param t PTE.
415 *
[9ad03fe]416 */
417void dtc_pte_copy(pte_t *t)
418{
419 tlb_entry_t entry;
[a35b458]420
[9ad03fe]421 entry.word[0] = 0;
422 entry.word[1] = 0;
[a35b458]423
[9ad03fe]424 entry.p = t->p;
425 entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
426 entry.a = t->a;
427 entry.d = t->d;
428 entry.pl = t->k ? PL_KERNEL : PL_USER;
429 entry.ar = t->w ? AR_WRITE : AR_READ;
430 entry.ppn = t->frame >> PPN_SHIFT;
431 entry.ps = PAGE_WIDTH;
[a35b458]432
[9ad03fe]433 dtc_mapping_insert(t->page, t->as->asid, entry);
[a35b458]434
[68091bd]435#ifdef CONFIG_VHPT
436 vhpt_mapping_insert(t->page, t->as->asid, entry);
[5bda2f3e]437#endif
[9ad03fe]438}
439
440/** Copy content of PTE into instruction translation cache.
441 *
[5bda2f3e]442 * @param t PTE.
443 *
[9ad03fe]444 */
445void itc_pte_copy(pte_t *t)
446{
447 tlb_entry_t entry;
[a35b458]448
[9ad03fe]449 entry.word[0] = 0;
450 entry.word[1] = 0;
[a35b458]451
[63e27ef]452 assert(t->x);
[a35b458]453
[9ad03fe]454 entry.p = t->p;
455 entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
456 entry.a = t->a;
457 entry.pl = t->k ? PL_KERNEL : PL_USER;
458 entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
459 entry.ppn = t->frame >> PPN_SHIFT;
460 entry.ps = PAGE_WIDTH;
[a35b458]461
[9ad03fe]462 itc_mapping_insert(t->page, t->as->asid, entry);
[a35b458]463
[68091bd]464#ifdef CONFIG_VHPT
465 vhpt_mapping_insert(t->page, t->as->asid, entry);
[5bda2f3e]466#endif
[9ad03fe]467}
468
[0fd9b35]469static bool is_kernel_fault(uintptr_t va)
470{
471 region_register_t rr;
472
473 rr.word = rr_read(VA2VRN(va));
474 rid_t rid = rr.map.rid;
475 return (RID2ASID(rid) == ASID_KERNEL) && (VA2VRN(va) == VRN_KERNEL);
476}
477
[9ad03fe]478/** Instruction TLB fault handler for faults with VHPT turned off.
479 *
[9928240]480 * @param n Interruption vector.
[5bda2f3e]481 * @param istate Structure with saved interruption state.
482 *
[9ad03fe]483 */
[9928240]484void alternate_instruction_tlb_fault(unsigned int n, istate_t *istate)
[89298e3]485{
[7f1c620]486 uintptr_t va;
[38dc82d]487 pte_t t;
[a35b458]488
[5bda2f3e]489 va = istate->cr_ifa; /* faulting address */
[a35b458]490
[63e27ef]491 assert(!is_kernel_fault(va));
[0fd9b35]492
[38dc82d]493 bool found = page_mapping_find(AS, va, true, &t);
494 if (found) {
[63e27ef]495 assert(t.p);
[560b81c]496
[9ad03fe]497 /*
498 * The mapping was found in software page hash table.
499 * Insert it into data translation cache.
500 */
[38dc82d]501 itc_pte_copy(&t);
[9ad03fe]502 } else {
503 /*
504 * Forward the page fault to address space page fault handler.
505 */
[1dbc43f]506 as_page_fault(va, PF_ACCESS_EXEC, istate);
[9ad03fe]507 }
[95042fd]508}
[89298e3]509
[46321fb]510static int is_io_page_accessible(int page)
511{
[666773c]512 if (TASK->arch.iomap)
[38f6add]513 return bitmap_get(TASK->arch.iomap, page);
[666773c]514 else
515 return 0;
[46321fb]516}
517
[666773c]518/**
519 * There is special handling of memory mapped legacy io, because of 4KB sized
520 * access for userspace.
[46321fb]521 *
[5bda2f3e]522 * @param va Virtual address of page fault.
523 * @param istate Structure with saved interruption state.
524 *
525 * @return One on success, zero on failure.
[46321fb]526 *
527 */
528static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
529{
[22f0561]530 if ((va >= LEGACYIO_USER_BASE) && (va < LEGACYIO_USER_BASE + (1 << LEGACYIO_PAGE_WIDTH))) {
[666773c]531 if (TASK) {
[22f0561]532 uint64_t io_page = (va & ((1 << LEGACYIO_PAGE_WIDTH) - 1)) >>
533 LEGACYIO_SINGLE_PAGE_WIDTH;
[a35b458]534
[666773c]535 if (is_io_page_accessible(io_page)) {
536 uint64_t page, frame;
[a35b458]537
[22f0561]538 page = LEGACYIO_USER_BASE +
539 (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page;
540 frame = LEGACYIO_PHYS_BASE +
541 (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page;
[a35b458]542
[46321fb]543 tlb_entry_t entry;
[a35b458]544
[46321fb]545 entry.word[0] = 0;
546 entry.word[1] = 0;
[a35b458]547
[5bda2f3e]548 entry.p = true; /* present */
549 entry.ma = MA_UNCACHEABLE;
550 entry.a = true; /* already accessed */
551 entry.d = true; /* already dirty */
[46321fb]552 entry.pl = PL_USER;
553 entry.ar = AR_READ | AR_WRITE;
[ef5de6d]554 entry.ppn = frame >> PPN_SHIFT;
[22f0561]555 entry.ps = LEGACYIO_SINGLE_PAGE_WIDTH;
[a35b458]556
[ef5de6d]557 dtc_mapping_insert(page, TASK->as->asid, entry);
[46321fb]558 return 1;
[666773c]559 } else {
560 fault_if_from_uspace(istate,
[7e752b2]561 "IO access fault at %p.", (void *) va);
[666773c]562 }
563 }
564 }
[a35b458]565
[46321fb]566 return 0;
567}
568
[9ad03fe]569/** Data TLB fault handler for faults with VHPT turned off.
[a0d74fd]570 *
[9928240]571 * @param n Interruption vector.
[5bda2f3e]572 * @param istate Structure with saved interruption state.
573 *
[a0d74fd]574 */
void alternate_data_tlb_fault(unsigned int n, istate_t *istate)
{
	if (istate->cr_isr.sp) {
		/*
		 * Speculative load. Defer the exception until a more clever
		 * approach can be used. Currently if we try to find the
		 * mapping for the speculative load while in the kernel, we
		 * might introduce a livelock because of the possibly invalid
		 * values of the address.
		 */
		istate->cr_ipsr.ed = true;
		return;
	}

	uintptr_t va = istate->cr_ifa;  /* faulting address */
	as_t *as = AS;

	if (is_kernel_fault(va)) {
		if (va < end_of_identity) {
			/*
			 * Create kernel identity mapping for low memory.
			 */
			dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
			return;
		} else {
			/* Non-identity kernel address: use the kernel AS. */
			as = AS_KERNEL;
		}
	}

	pte_t t;
	bool found = page_mapping_find(as, va, true, &t);
	if (found) {
		assert(t.p);

		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into data translation cache.
		 */
		dtc_pte_copy(&t);
	} else {
		/* Memory-mapped legacy I/O gets special treatment. */
		if (try_memmap_io_insertion(va, istate))
			return;

		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		as_page_fault(va, PF_ACCESS_READ, istate);
	}
}
[89298e3]626
[9ad03fe]627/** Data nested TLB fault handler.
628 *
629 * This fault should not occur.
630 *
[9928240]631 * @param n Interruption vector.
[5bda2f3e]632 * @param istate Structure with saved interruption state.
633 *
[9ad03fe]634 */
void data_nested_tlb_fault(unsigned int n, istate_t *istate)
{
	/* Nested DTLB faults must never occur; treat one as a kernel bug. */
	assert(false);
}
[89298e3]639
[9ad03fe]640/** Data Dirty bit fault handler.
641 *
[9928240]642 * @param n Interruption vector.
[5bda2f3e]643 * @param istate Structure with saved interruption state.
644 *
[9ad03fe]645 */
void data_dirty_bit_fault(unsigned int n, istate_t *istate)
{
	uintptr_t va;
	pte_t t;
	as_t *as = AS;

	va = istate->cr_ifa;  /* faulting address */

	if (is_kernel_fault(va))
		as = AS_KERNEL;

	bool found = page_mapping_find(as, va, true, &t);

	assert(found);
	assert(t.p);

	/*
	 * NOTE(review): the asserts above already claim found && t.p, so in
	 * the runtime guard below only t.w adds information in debug builds;
	 * the found/t.p tests matter only when asserts compile out — confirm
	 * this is intentional.
	 */
	if (found && t.p && t.w) {
		/*
		 * Update the Dirty bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t.d = true;
		dtc_pte_copy(&t);
		page_mapping_update(as, va, true, &t);
	} else {
		/* No writable mapping: a write to a read-only page. */
		as_page_fault(va, PF_ACCESS_WRITE, istate);
	}
}
[89298e3]674
[9ad03fe]675/** Instruction access bit fault handler.
676 *
[9928240]677 * @param n Interruption vector.
[5bda2f3e]678 * @param istate Structure with saved interruption state.
679 *
[9ad03fe]680 */
void instruction_access_bit_fault(unsigned int n, istate_t *istate)
{
	uintptr_t va;
	pte_t t;

	va = istate->cr_ifa;  /* faulting address */

	assert(!is_kernel_fault(va));

	bool found = page_mapping_find(AS, va, true, &t);

	assert(found);
	assert(t.p);

	/*
	 * NOTE(review): as in data_dirty_bit_fault(), the runtime guard
	 * repeats conditions already asserted above; only t.x is new in
	 * debug builds.
	 */
	if (found && t.p && t.x) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into ITC.
		 */
		t.a = true;
		itc_pte_copy(&t);
		page_mapping_update(AS, va, true, &t);
	} else {
		/* No executable mapping: escalate as an execute fault. */
		as_page_fault(va, PF_ACCESS_EXEC, istate);
	}
}
[89298e3]707
[9ad03fe]708/** Data access bit fault handler.
709 *
[9928240]710 * @param n Interruption vector.
[25d7709]711 * @param istate Structure with saved interruption state.
[5bda2f3e]712 *
[9ad03fe]713 */
void data_access_bit_fault(unsigned int n, istate_t *istate)
{
	uintptr_t va;
	pte_t t;
	as_t *as = AS;

	va = istate->cr_ifa;  /* faulting address */

	if (is_kernel_fault(va))
		as = AS_KERNEL;

	bool found = page_mapping_find(as, va, true, &t);

	assert(found);
	assert(t.p);

	if (found && t.p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t.a = true;
		dtc_pte_copy(&t);
		page_mapping_update(as, va, true, &t);
	} else {
		/* No mapping: forward to the generic handler, panic on failure. */
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p.",
			    (void *) va);
			panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, NULL);
		}
	}
}
746
[925be4e]747/** Data access rights fault handler.
748 *
[9928240]749 * @param n Interruption vector.
[925be4e]750 * @param istate Structure with saved interruption state.
[5bda2f3e]751 *
[925be4e]752 */
[9928240]753void data_access_rights_fault(unsigned int n, istate_t *istate)
[925be4e]754{
755 uintptr_t va;
[38dc82d]756 pte_t t;
[a35b458]757
[5bda2f3e]758 va = istate->cr_ifa; /* faulting address */
[0fd9b35]759
[63e27ef]760 assert(!is_kernel_fault(va));
[a35b458]761
[925be4e]762 /*
763 * Assume a write to a read-only page.
764 */
[38dc82d]765 bool found = page_mapping_find(AS, va, true, &t);
766
[63e27ef]767 assert(found);
768 assert(t.p);
769 assert(!t.w);
[38dc82d]770
[1dbc43f]771 as_page_fault(va, PF_ACCESS_WRITE, istate);
[925be4e]772}
773
[9ad03fe]774/** Page not present fault handler.
775 *
[9928240]776 * @param n Interruption vector.
[25d7709]777 * @param istate Structure with saved interruption state.
[5bda2f3e]778 *
[9ad03fe]779 */
[9928240]780void page_not_present(unsigned int n, istate_t *istate)
[95042fd]781{
[7f1c620]782 uintptr_t va;
[38dc82d]783 pte_t t;
[a35b458]784
[5bda2f3e]785 va = istate->cr_ifa; /* faulting address */
[a35b458]786
[63e27ef]787 assert(!is_kernel_fault(va));
[0fd9b35]788
[38dc82d]789 bool found = page_mapping_find(AS, va, true, &t);
790
[63e27ef]791 assert(found);
[a35b458]792
[38dc82d]793 if (t.p) {
[9ad03fe]794 /*
795 * If the Present bit is set in page hash table, just copy it
796 * and update ITC/DTC.
797 */
[38dc82d]798 if (t.x)
799 itc_pte_copy(&t);
[9ad03fe]800 else
[38dc82d]801 dtc_pte_copy(&t);
[9ad03fe]802 } else {
[1dbc43f]803 as_page_fault(va, PF_ACCESS_READ, istate);
[9ad03fe]804 }
[95042fd]805}
[b45c443]806
void tlb_arch_init(void)
{
	/* Nothing to initialize on ia64. */
}
810
void tlb_print(void)
{
	/* TLB contents dumping is not implemented on ia64. */
}
814
[ee289cf0]815/** @}
[b45c443]816 */
Note: See TracBrowser for help on using the repository browser.