source: mainline/kernel/arch/ia64/src/mm/tlb.c

Last change on this file was 4484c16, checked in by Jakub Jermar <jakub@…>, 6 years ago

Consider kernel fault only if it came from kernel

  • Property mode set to 100644
File size: 18.5 KB
RevLine 
[36b01bb2]1/*
[df4ed85]2 * Copyright (c) 2006 Jakub Jermar
[36b01bb2]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[c5429fe]29/** @addtogroup kernel_ia64_mm
[b45c443]30 * @{
31 */
32/** @file
33 */
34
[36b01bb2]35/*
36 * TLB management.
37 */
38
39#include <mm/tlb.h>
[a0d74fd]40#include <mm/asid.h>
[9ad03fe]41#include <mm/page.h>
42#include <mm/as.h>
[bc78c75]43#include <arch/mm/tlb.h>
[a0d74fd]44#include <arch/mm/page.h>
[68091bd]45#include <arch/mm/vhpt.h>
[05882233]46#include <barrier.h>
[2c49fbbe]47#include <arch/interrupt.h>
[7c322bd]48#include <arch/pal/pal.h>
49#include <arch/asm.h>
[63e27ef]50#include <assert.h>
[2c49fbbe]51#include <panic.h>
[9ad03fe]52#include <arch.h>
[a175a67]53#include <interrupt.h>
[22f0561]54#include <arch/legacyio.h>
[5bda2f3e]55
/** Invalidate all TLB entries.
 *
 * Walks the ptc.e loop described by PAL (base address, two counts and
 * two strides) and issues one ptc.e per iteration; together these purge
 * the entire translation cache of the local processor.
 */
void tlb_invalidate_all(void)
{
	ipl_t ipl;
	uintptr_t adr;
	uint32_t count1, count2, stride1, stride2;

	unsigned int i, j;

	/* PAL-provided parameters of the ptc.e loop. */
	adr = PAL_PTCE_INFO_BASE();
	count1 = PAL_PTCE_INFO_COUNT1();
	count2 = PAL_PTCE_INFO_COUNT2();
	stride1 = PAL_PTCE_INFO_STRIDE1();
	stride2 = PAL_PTCE_INFO_STRIDE2();

	/* The loop must not be interrupted. */
	ipl = interrupts_disable();

	for (i = 0; i < count1; i++) {
		for (j = 0; j < count2; j++) {
			asm volatile (
			    "ptc.e %[adr] ;;"
			    :: [adr] "r" (adr)
			);
			adr += stride2;
		}
		adr += stride1;
	}

	interrupts_restore(ipl);

	/* Serialize the data and instruction streams after the purge. */
	srlz_d();
	srlz_i();

#ifdef CONFIG_VHPT
	vhpt_invalidate_all();
#endif
}
93
94/** Invalidate entries belonging to an address space.
95 *
[5bda2f3e]96 * @param asid Address space identifier.
97 *
[36b01bb2]98 */
99void tlb_invalidate_asid(asid_t asid)
100{
[a82500ce]101 tlb_invalidate_all();
[36b01bb2]102}
[bc78c75]103
/** Invalidate TLB entries for a range of pages.
 *
 * @param asid Address space identifier.
 * @param page Virtual address of the first page.
 * @param cnt  Number of consecutive pages to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
	region_register_t rr;
	bool restore_rr = false;
	int b = 0;
	int c = cnt;	/* NOTE(review): narrows size_t to int — presumably cnt is always small; confirm */

	uintptr_t va;
	va = page;

	rr.word = rr_read(VA2VRN(page));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(page))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register_t rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(page));
		rr_write(VA2VRN(page), rr0.word);
		srlz_d();
		srlz_i();
	}

	/* b ends up as half of floor(log2(cnt)); it selects the purge size. */
	while (c >>= 1)
		b++;
	b >>= 1;
	uint64_t ps;

	switch (b) {
	case 0: /* cnt 1 - 3 */
		ps = PAGE_WIDTH;
		break;
	case 1: /* cnt 4 - 15 */
		ps = PAGE_WIDTH + 2;
		va &= ~((1UL << ps) - 1);	/* align down to the purge size */
		break;
	case 2: /* cnt 16 - 63 */
		ps = PAGE_WIDTH + 4;
		va &= ~((1UL << ps) - 1);
		break;
	case 3: /* cnt 64 - 255 */
		ps = PAGE_WIDTH + 6;
		va &= ~((1UL << ps) - 1);
		break;
	case 4: /* cnt 256 - 1023 */
		ps = PAGE_WIDTH + 8;
		va &= ~((1UL << ps) - 1);
		break;
	case 5: /* cnt 1024 - 4095 */
		ps = PAGE_WIDTH + 10;
		va &= ~((1UL << ps) - 1);
		break;
	case 6: /* cnt 4096 - 16383 */
		ps = PAGE_WIDTH + 12;
		va &= ~((1UL << ps) - 1);
		break;
	case 7: /* cnt 16384 - 65535 */
	case 8: /* cnt 65536 - (256K - 1) */
		ps = PAGE_WIDTH + 14;
		va &= ~((1UL << ps) - 1);
		break;
	default:
		ps = PAGE_WIDTH + 18;
		va &= ~((1UL << ps) - 1);
		break;
	}

	/* Cover the whole range with local purges of size 1 << ps. */
	for (; va < (page + cnt * PAGE_SIZE); va += (1UL << ps))
		asm volatile (
		    "ptc.l %[va], %[ps] ;;"
		    :: [va] "r" (va),
		       [ps] "r" (ps << 2)
		);

	srlz_d();
	srlz_i();

	if (restore_rr) {
		rr_write(VA2VRN(page), rr.word);
		srlz_d();
		srlz_i();
	}
}
189
[95042fd]190/** Insert data into data translation cache.
191 *
[5bda2f3e]192 * @param va Virtual page address.
193 * @param asid Address space identifier.
194 * @param entry The rest of TLB entry as required by TLB insertion
195 * format.
196 *
[95042fd]197 */
[7f1c620]198void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
[b994a60]199{
[95042fd]200 tc_mapping_insert(va, asid, entry, true);
201}
[bc78c75]202
[95042fd]203/** Insert data into instruction translation cache.
204 *
[5bda2f3e]205 * @param va Virtual page address.
206 * @param asid Address space identifier.
207 * @param entry The rest of TLB entry as required by TLB insertion
208 * format.
[95042fd]209 */
[7f1c620]210void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
[b994a60]211{
[95042fd]212 tc_mapping_insert(va, asid, entry, false);
213}
[bc78c75]214
/** Insert data into instruction or data translation cache.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *              format.
 * @param dtc   If true, insert into data translation cache, use
 *              instruction translation cache otherwise.
 *
 */
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register_t rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register_t rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	/*
	 * With PSR.ic temporarily cleared, load IFA/ITIR and issue either
	 * itc.i or itc.d depending on the dtc argument, then restore PSR.
	 */
	asm volatile (
	    "mov r8 = psr ;;\n"
	    "rsm %[mask] ;;\n"			/* PSR_IC_MASK */
	    "srlz.d ;;\n"
	    "srlz.i ;;\n"
	    "mov cr.ifa = %[va]\n"		/* va */
	    "mov cr.itir = %[word1] ;;\n"	/* entry.word[1] */
	    "cmp.eq p6, p7 = %[dtc], r0 ;;\n"	/* decide between itc and dtc */
	    "(p6) itc.i %[word0] ;;\n"
	    "(p7) itc.d %[word0] ;;\n"
	    "mov psr.l = r8 ;;\n"
	    "srlz.d ;;\n"
	    :: [mask] "i" (PSR_IC_MASK),
	       [va] "r" (va),
	       [word0] "r" (entry.word[0]),
	       [word1] "r" (entry.word[1]),
	       [dtc] "r" (dtc)
	    : "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
271
[95042fd]272/** Insert data into instruction translation register.
273 *
[5bda2f3e]274 * @param va Virtual page address.
275 * @param asid Address space identifier.
276 * @param entry The rest of TLB entry as required by TLB insertion
277 * format.
278 * @param tr Translation register.
279 *
[95042fd]280 */
[5bda2f3e]281void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
[bc78c75]282{
[95042fd]283 tr_mapping_insert(va, asid, entry, false, tr);
[bc78c75]284}
285
[95042fd]286/** Insert data into data translation register.
287 *
[5bda2f3e]288 * @param va Virtual page address.
289 * @param asid Address space identifier.
290 * @param entry The rest of TLB entry as required by TLB insertion
291 * format.
292 * @param tr Translation register.
293 *
[95042fd]294 */
[5bda2f3e]295void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
[95042fd]296{
297 tr_mapping_insert(va, asid, entry, true, tr);
298}
[bc78c75]299
/** Insert data into instruction or data translation register.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *              format.
 * @param dtr   If true, insert into data translation register, use
 *              instruction translation register otherwise.
 * @param tr    Translation register.
 *
 */
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr,
    size_t tr)
{
	region_register_t rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register_t rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	/*
	 * With PSR.ic temporarily cleared, load IFA/ITIR and issue either
	 * itr.i or itr.d into the requested translation register, then
	 * restore PSR.
	 */
	asm volatile (
	    "mov r8 = psr ;;\n"
	    "rsm %[mask] ;;\n"			/* PSR_IC_MASK */
	    "srlz.d ;;\n"
	    "srlz.i ;;\n"
	    "mov cr.ifa = %[va]\n"		/* va */
	    "mov cr.itir = %[word1] ;;\n"	/* entry.word[1] */
	    "cmp.eq p6, p7 = %[dtr], r0 ;;\n"	/* decide between itr and dtr */
	    "(p6) itr.i itr[%[tr]] = %[word0] ;;\n"
	    "(p7) itr.d dtr[%[tr]] = %[word0] ;;\n"
	    "mov psr.l = r8 ;;\n"
	    "srlz.d ;;\n"
	    :: [mask] "i" (PSR_IC_MASK),
	       [va] "r" (va),
	       [word1] "r" (entry.word[1]),
	       [word0] "r" (entry.word[0]),
	       [tr] "r" (tr),
	       [dtr] "r" (dtr)
	    : "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
359
[a0d74fd]360/** Insert data into DTLB.
361 *
[5bda2f3e]362 * @param page Virtual page address including VRN bits.
363 * @param frame Physical frame address.
364 * @param dtr If true, insert into data translation register, use data
365 * translation cache otherwise.
366 * @param tr Translation register if dtr is true, ignored otherwise.
367 *
[a0d74fd]368 */
[5bda2f3e]369void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr,
[98000fb]370 size_t tr)
[a0d74fd]371{
372 tlb_entry_t entry;
[a35b458]373
[a0d74fd]374 entry.word[0] = 0;
375 entry.word[1] = 0;
[a35b458]376
[5bda2f3e]377 entry.p = true; /* present */
[a0d74fd]378 entry.ma = MA_WRITEBACK;
[5bda2f3e]379 entry.a = true; /* already accessed */
380 entry.d = true; /* already dirty */
[a0d74fd]381 entry.pl = PL_KERNEL;
382 entry.ar = AR_READ | AR_WRITE;
383 entry.ppn = frame >> PPN_SHIFT;
384 entry.ps = PAGE_WIDTH;
[a35b458]385
[a0d74fd]386 if (dtr)
387 dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
388 else
389 dtc_mapping_insert(page, ASID_KERNEL, entry);
390}
391
/** Purge kernel entries from DTR.
 *
 * Purge DTR entries used by the kernel.
 *
 * @param page  Virtual page address including VRN bits.
 * @param width Width of the purge in bits.
 *
 */
void dtr_purge(uintptr_t page, size_t width)
{
	/* ptr.d takes the purge size encoded in bits 7:2, hence the shift. */
	asm volatile (
	    "ptr.d %[page], %[width]\n"
	    :: [page] "r" (page),
	       [width] "r" (width << 2)
	);
}
408
[9ad03fe]409/** Copy content of PTE into data translation cache.
410 *
[5bda2f3e]411 * @param t PTE.
412 *
[9ad03fe]413 */
414void dtc_pte_copy(pte_t *t)
415{
416 tlb_entry_t entry;
[a35b458]417
[9ad03fe]418 entry.word[0] = 0;
419 entry.word[1] = 0;
[a35b458]420
[9ad03fe]421 entry.p = t->p;
422 entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
423 entry.a = t->a;
424 entry.d = t->d;
425 entry.pl = t->k ? PL_KERNEL : PL_USER;
426 entry.ar = t->w ? AR_WRITE : AR_READ;
427 entry.ppn = t->frame >> PPN_SHIFT;
428 entry.ps = PAGE_WIDTH;
[a35b458]429
[9ad03fe]430 dtc_mapping_insert(t->page, t->as->asid, entry);
[a35b458]431
[68091bd]432#ifdef CONFIG_VHPT
433 vhpt_mapping_insert(t->page, t->as->asid, entry);
[5bda2f3e]434#endif
[9ad03fe]435}
436
437/** Copy content of PTE into instruction translation cache.
438 *
[5bda2f3e]439 * @param t PTE.
440 *
[9ad03fe]441 */
442void itc_pte_copy(pte_t *t)
443{
444 tlb_entry_t entry;
[a35b458]445
[9ad03fe]446 entry.word[0] = 0;
447 entry.word[1] = 0;
[a35b458]448
[63e27ef]449 assert(t->x);
[a35b458]450
[9ad03fe]451 entry.p = t->p;
452 entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
453 entry.a = t->a;
454 entry.pl = t->k ? PL_KERNEL : PL_USER;
455 entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
456 entry.ppn = t->frame >> PPN_SHIFT;
457 entry.ps = PAGE_WIDTH;
[a35b458]458
[9ad03fe]459 itc_mapping_insert(t->page, t->as->asid, entry);
[a35b458]460
[68091bd]461#ifdef CONFIG_VHPT
462 vhpt_mapping_insert(t->page, t->as->asid, entry);
[5bda2f3e]463#endif
[9ad03fe]464}
465
[4484c16]466static bool is_kernel_fault(istate_t *istate, uintptr_t va)
[0fd9b35]467{
468 region_register_t rr;
469
[4484c16]470 if (istate_from_uspace(istate))
471 return false;
472
[0fd9b35]473 rr.word = rr_read(VA2VRN(va));
474 rid_t rid = rr.map.rid;
475 return (RID2ASID(rid) == ASID_KERNEL) && (VA2VRN(va) == VRN_KERNEL);
476}
477
[9ad03fe]478/** Instruction TLB fault handler for faults with VHPT turned off.
479 *
[9928240]480 * @param n Interruption vector.
[5bda2f3e]481 * @param istate Structure with saved interruption state.
482 *
[9ad03fe]483 */
[9928240]484void alternate_instruction_tlb_fault(unsigned int n, istate_t *istate)
[89298e3]485{
[7f1c620]486 uintptr_t va;
[38dc82d]487 pte_t t;
[a35b458]488
[180e132]489 assert(istate_from_uspace(istate));
[a35b458]490
[180e132]491 va = istate->cr_ifa; /* faulting address */
[0fd9b35]492
[38dc82d]493 bool found = page_mapping_find(AS, va, true, &t);
494 if (found) {
[63e27ef]495 assert(t.p);
[560b81c]496
[9ad03fe]497 /*
498 * The mapping was found in software page hash table.
499 * Insert it into data translation cache.
500 */
[38dc82d]501 itc_pte_copy(&t);
[9ad03fe]502 } else {
503 /*
504 * Forward the page fault to address space page fault handler.
505 */
[1dbc43f]506 as_page_fault(va, PF_ACCESS_EXEC, istate);
[9ad03fe]507 }
[95042fd]508}
[89298e3]509
[46321fb]510static int is_io_page_accessible(int page)
511{
[666773c]512 if (TASK->arch.iomap)
[38f6add]513 return bitmap_get(TASK->arch.iomap, page);
[666773c]514 else
515 return 0;
[46321fb]516}
517
/**
 * There is special handling of memory mapped legacy io, because of 4KB sized
 * access for userspace.
 *
 * @param va     Virtual address of page fault.
 * @param istate Structure with saved interruption state.
 *
 * @return One on success, zero on failure.
 *
 */
static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
{
	if ((va >= LEGACYIO_USER_BASE) && (va < LEGACYIO_USER_BASE + (1 << LEGACYIO_PAGE_WIDTH))) {
		if (TASK) {
			/* Index of the single I/O page within the legacy region. */
			uint64_t io_page = (va & ((1 << LEGACYIO_PAGE_WIDTH) - 1)) >>
			    LEGACYIO_SINGLE_PAGE_WIDTH;

			if (is_io_page_accessible(io_page)) {
				uint64_t page, frame;

				/* Virtual and physical base of the single I/O page. */
				page = LEGACYIO_USER_BASE +
				    (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page;
				frame = LEGACYIO_PHYS_BASE +
				    (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page;

				tlb_entry_t entry;

				entry.word[0] = 0;
				entry.word[1] = 0;

				entry.p = true;			/* present */
				entry.ma = MA_UNCACHEABLE;	/* device memory must not be cached */
				entry.a = true;			/* already accessed */
				entry.d = true;			/* already dirty */
				entry.pl = PL_USER;
				entry.ar = AR_READ | AR_WRITE;
				entry.ppn = frame >> PPN_SHIFT;
				entry.ps = LEGACYIO_SINGLE_PAGE_WIDTH;

				dtc_mapping_insert(page, TASK->as->asid, entry);
				return 1;
			} else {
				fault_if_from_uspace(istate,
				    "IO access fault at %p.", (void *) va);
			}
		}
	}

	return 0;
}
568
/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void alternate_data_tlb_fault(unsigned int n, istate_t *istate)
{
	if (istate->cr_isr.sp) {
		/*
		 * Speculative load. Deffer the exception until a more clever
		 * approach can be used. Currently if we try to find the
		 * mapping for the speculative load while in the kernel, we
		 * might introduce a livelock because of the possibly invalid
		 * values of the address.
		 */
		istate->cr_ipsr.ed = true;
		return;
	}

	uintptr_t va = istate->cr_ifa;	/* faulting address */
	as_t *as = AS;

	if (is_kernel_fault(istate, va)) {
		if (va < end_of_identity) {
			/*
			 * Create kernel identity mapping for low memory.
			 */
			dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
			return;
		} else {
			/* Non-identity kernel address: consult the kernel AS. */
			as = AS_KERNEL;
		}
	}

	pte_t t;
	bool found = page_mapping_find(as, va, true, &t);
	if (found) {
		assert(t.p);

		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into data translation cache.
		 */
		dtc_pte_copy(&t);
	} else {
		/* Legacy memory-mapped I/O may satisfy the fault. */
		if (try_memmap_io_insertion(va, istate))
			return;

		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		as_page_fault(va, PF_ACCESS_READ, istate);
	}
}
[89298e3]625
/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_nested_tlb_fault(unsigned int n, istate_t *istate)
{
	/* Reaching this handler indicates a kernel bug. */
	assert(false);
}
[89298e3]638
/** Data Dirty bit fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_dirty_bit_fault(unsigned int n, istate_t *istate)
{
	uintptr_t va;
	pte_t t;
	as_t *as = AS;

	va = istate->cr_ifa;	/* faulting address */

	/* Kernel faults are resolved against the kernel address space. */
	if (is_kernel_fault(istate, va))
		as = AS_KERNEL;

	bool found = page_mapping_find(as, va, true, &t);

	assert(found);
	assert(t.p);

	if (found && t.p && t.w) {
		/*
		 * Update the Dirty bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t.d = true;
		dtc_pte_copy(&t);
		page_mapping_update(as, va, true, &t);
	} else {
		/* Write to a non-writable page: let the AS handle it. */
		as_page_fault(va, PF_ACCESS_WRITE, istate);
	}
}
[89298e3]673
/** Instruction access bit fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void instruction_access_bit_fault(unsigned int n, istate_t *istate)
{
	uintptr_t va;
	pte_t t;

	assert(istate_from_uspace(istate));

	va = istate->cr_ifa;	/* faulting address */

	bool found = page_mapping_find(AS, va, true, &t);

	assert(found);
	assert(t.p);

	if (found && t.p && t.x) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into ITC.
		 */
		t.a = true;
		itc_pte_copy(&t);
		page_mapping_update(AS, va, true, &t);
	} else {
		/* Execute on a non-executable page: let the AS handle it. */
		as_page_fault(va, PF_ACCESS_EXEC, istate);
	}
}
[89298e3]706
/** Data access bit fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_access_bit_fault(unsigned int n, istate_t *istate)
{
	uintptr_t va;
	pte_t t;
	as_t *as = AS;

	va = istate->cr_ifa;	/* faulting address */

	/* Kernel faults are resolved against the kernel address space. */
	if (is_kernel_fault(istate, va))
		as = AS_KERNEL;

	bool found = page_mapping_find(as, va, true, &t);

	assert(found);
	assert(t.p);

	if (found && t.p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t.a = true;
		dtc_pte_copy(&t);
		page_mapping_update(as, va, true, &t);
	} else {
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p.",
			    (void *) va);
			panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, NULL);
		}
	}
}
745
/** Data access rights fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_access_rights_fault(unsigned int n, istate_t *istate)
{
	uintptr_t va;
	pte_t t;

	assert(istate_from_uspace(istate));

	va = istate->cr_ifa;	/* faulting address */

	/*
	 * Assume a write to a read-only page.
	 */
	bool found = page_mapping_find(AS, va, true, &t);

	assert(found);
	assert(t.p);
	assert(!t.w);

	as_page_fault(va, PF_ACCESS_WRITE, istate);
}
772
/** Page not present fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void page_not_present(unsigned int n, istate_t *istate)
{
	uintptr_t va;
	pte_t t;

	assert(istate_from_uspace(istate));

	va = istate->cr_ifa;	/* faulting address */

	bool found = page_mapping_find(AS, va, true, &t);

	assert(found);

	if (t.p) {
		/*
		 * If the Present bit is set in page hash table, just copy it
		 * and update ITC/DTC.
		 */
		if (t.x)
			itc_pte_copy(&t);
		else
			dtc_pte_copy(&t);
	} else {
		as_page_fault(va, PF_ACCESS_READ, istate);
	}
}
[b45c443]805
/** Architecture-specific TLB initialization; nothing is needed on ia64. */
void tlb_arch_init(void)
{
}
809
/** Print TLB contents; not implemented on ia64. */
void tlb_print(void)
{
}
813
[ee289cf0]814/** @}
[b45c443]815 */
Note: See TracBrowser for help on using the repository browser.