source: mainline/kernel/arch/ia64/src/mm/tlb.c@ 09ab0a9a

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 09ab0a9a was 09ab0a9a, checked in by Jiri Svoboda <jiri@…>, 7 years ago

Fix vertical spacing with new Ccheck revision.

  • Property mode set to 100644
File size: 18.4 KB
RevLine 
[36b01bb2]1/*
[df4ed85]2 * Copyright (c) 2006 Jakub Jermar
[36b01bb2]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[5bda2f3e]29/** @addtogroup ia64mm
[b45c443]30 * @{
31 */
32/** @file
33 */
34
[36b01bb2]35/*
36 * TLB management.
37 */
38
39#include <mm/tlb.h>
[a0d74fd]40#include <mm/asid.h>
[9ad03fe]41#include <mm/page.h>
42#include <mm/as.h>
[bc78c75]43#include <arch/mm/tlb.h>
[a0d74fd]44#include <arch/mm/page.h>
[68091bd]45#include <arch/mm/vhpt.h>
[05882233]46#include <barrier.h>
[2c49fbbe]47#include <arch/interrupt.h>
[7c322bd]48#include <arch/pal/pal.h>
49#include <arch/asm.h>
[63e27ef]50#include <assert.h>
[2c49fbbe]51#include <panic.h>
[1065603e]52#include <print.h>
[9ad03fe]53#include <arch.h>
[a175a67]54#include <interrupt.h>
[22f0561]55#include <arch/legacyio.h>
[5bda2f3e]56
/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
	ipl_t ipl;
	uintptr_t adr;
	uint32_t count1, count2, stride1, stride2;

	unsigned int i, j;

	/*
	 * Ask PAL for the parameters of the global purge loop: its base
	 * address, the outer/inner iteration counts and strides.
	 */
	adr = PAL_PTCE_INFO_BASE();
	count1 = PAL_PTCE_INFO_COUNT1();
	count2 = PAL_PTCE_INFO_COUNT2();
	stride1 = PAL_PTCE_INFO_STRIDE1();
	stride2 = PAL_PTCE_INFO_STRIDE2();

	/* The purge sequence must not be interrupted. */
	ipl = interrupts_disable();

	/*
	 * Walk the two-level loop prescribed by PAL, issuing one ptc.e
	 * (purge translation cache entry) per iteration.
	 */
	for (i = 0; i < count1; i++) {
		for (j = 0; j < count2; j++) {
			asm volatile (
			    "ptc.e %[adr] ;;"
			    :: [adr] "r" (adr)
			);
			adr += stride2;
		}
		adr += stride1;
	}

	interrupts_restore(ipl);

	/* Serialize the data and instruction streams. */
	srlz_d();
	srlz_i();

#ifdef CONFIG_VHPT
	/* Keep the VHPT consistent with the now-empty TLB. */
	vhpt_invalidate_all();
#endif
}
94
/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 *
 */
void tlb_invalidate_asid(asid_t asid)
{
	/*
	 * Selective purging by ASID is not implemented; fall back to
	 * invalidating the whole TLB. The asid argument is effectively
	 * ignored.
	 */
	tlb_invalidate_all();
}
[bc78c75]104
/** Invalidate TLB entries mapping a range of pages.
 *
 * @param asid Address space identifier.
 * @param page Address of the first page of the range.
 * @param cnt  Number of pages in the range.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
	region_register_t rr;
	bool restore_rr = false;
	int b = 0;
	int c = cnt;

	uintptr_t va;
	va = page;

	rr.word = rr_read(VA2VRN(page));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(page))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register_t rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(page));
		rr_write(VA2VRN(page), rr0.word);
		srlz_d();
		srlz_i();
	}

	/*
	 * Compute b = floor(log2(cnt)) / 2; it indexes the purge page
	 * size (ps) chosen in the switch below, so larger ranges are
	 * purged with fewer, coarser ptc.l operations.
	 */
	while (c >>= 1)
		b++;
	b >>= 1;
	uint64_t ps;

	switch (b) {
	case 0: /* cnt 1 - 3 */
		ps = PAGE_WIDTH;
		break;
	case 1: /* cnt 4 - 15 */
		ps = PAGE_WIDTH + 2;
		va &= ~((1UL << ps) - 1);
		break;
	case 2: /* cnt 16 - 63 */
		ps = PAGE_WIDTH + 4;
		va &= ~((1UL << ps) - 1);
		break;
	case 3: /* cnt 64 - 255 */
		ps = PAGE_WIDTH + 6;
		va &= ~((1UL << ps) - 1);
		break;
	case 4: /* cnt 256 - 1023 */
		ps = PAGE_WIDTH + 8;
		va &= ~((1UL << ps) - 1);
		break;
	case 5: /* cnt 1024 - 4095 */
		ps = PAGE_WIDTH + 10;
		va &= ~((1UL << ps) - 1);
		break;
	case 6: /* cnt 4096 - 16383 */
		ps = PAGE_WIDTH + 12;
		va &= ~((1UL << ps) - 1);
		break;
	case 7: /* cnt 16384 - 65535 */
	case 8: /* cnt 65536 - (256K - 1) */
		ps = PAGE_WIDTH + 14;
		va &= ~((1UL << ps) - 1);
		break;
	default:
		ps = PAGE_WIDTH + 18;
		va &= ~((1UL << ps) - 1);
		break;
	}

	/*
	 * va was aligned down to the purge size above, so stepping by
	 * (1 << ps) covers the entire [page, page + cnt * PAGE_SIZE) range.
	 */
	for (; va < (page + cnt * PAGE_SIZE); va += (1UL << ps))
		asm volatile (
		    "ptc.l %[va], %[ps] ;;"
		    :: [va] "r" (va),
		       [ps] "r" (ps << 2)
		);

	srlz_d();
	srlz_i();

	if (restore_rr) {
		/* Put back the original region register content. */
		rr_write(VA2VRN(page), rr.word);
		srlz_d();
		srlz_i();
	}
}
190
/** Insert data into data translation cache.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *              format.
 *
 */
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
	/* Thin wrapper: dtc == true selects the data translation cache. */
	tc_mapping_insert(va, asid, entry, true);
}
[bc78c75]203
/** Insert data into instruction translation cache.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *              format.
 */
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
	/* Thin wrapper: dtc == false selects the instruction translation cache. */
	tc_mapping_insert(va, asid, entry, false);
}
[bc78c75]215
/** Insert data into instruction or data translation cache.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *              format.
 * @param dtc   If true, insert into data translation cache, use
 *              instruction translation cache otherwise.
 *
 */
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register_t rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register_t rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	/*
	 * Perform the insertion with PSR.ic cleared (rsm below) so that
	 * setting cr.ifa/cr.itir and issuing itc is not interrupted by
	 * another interruption. The original PSR is kept in r8 and
	 * restored afterwards via psr.l.
	 */
	asm volatile (
	    "mov r8 = psr ;;\n"
	    "rsm %[mask] ;;\n"                 /* PSR_IC_MASK */
	    "srlz.d ;;\n"
	    "srlz.i ;;\n"
	    "mov cr.ifa = %[va]\n"             /* va */
	    "mov cr.itir = %[word1] ;;\n"      /* entry.word[1] */
	    "cmp.eq p6, p7 = %[dtc], r0 ;;\n"  /* decide between itc and dtc */
	    "(p6) itc.i %[word0] ;;\n"
	    "(p7) itc.d %[word0] ;;\n"
	    "mov psr.l = r8 ;;\n"
	    "srlz.d ;;\n"
	    :: [mask] "i" (PSR_IC_MASK),
	       [va] "r" (va),
	       [word0] "r" (entry.word[0]),
	       [word1] "r" (entry.word[1]),
	       [dtc] "r" (dtc)
	    : "p6", "p7", "r8"
	);

	if (restore_rr) {
		/* Put back the original region register content. */
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
272
/** Insert data into instruction translation register.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *              format.
 * @param tr    Translation register.
 *
 */
void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
{
	/* Thin wrapper: dtr == false selects an instruction translation register. */
	tr_mapping_insert(va, asid, entry, false, tr);
}
286
/** Insert data into data translation register.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *              format.
 * @param tr    Translation register.
 *
 */
void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
{
	/* Thin wrapper: dtr == true selects a data translation register. */
	tr_mapping_insert(va, asid, entry, true, tr);
}
[bc78c75]300
/** Insert data into instruction or data translation register.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *              format.
 * @param dtr   If true, insert into data translation register, use
 *              instruction translation register otherwise.
 * @param tr    Translation register.
 *
 */
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr,
    size_t tr)
{
	region_register_t rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register_t rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	/*
	 * Insert with PSR.ic cleared so the cr.ifa/cr.itir setup and the
	 * itr instruction form an uninterrupted sequence; the original
	 * PSR is saved in r8 and restored via psr.l.
	 */
	asm volatile (
	    "mov r8 = psr ;;\n"
	    "rsm %[mask] ;;\n"                      /* PSR_IC_MASK */
	    "srlz.d ;;\n"
	    "srlz.i ;;\n"
	    "mov cr.ifa = %[va]\n"                  /* va */
	    "mov cr.itir = %[word1] ;;\n"           /* entry.word[1] */
	    "cmp.eq p6, p7 = %[dtr], r0 ;;\n"       /* decide between itr and dtr */
	    "(p6) itr.i itr[%[tr]] = %[word0] ;;\n"
	    "(p7) itr.d dtr[%[tr]] = %[word0] ;;\n"
	    "mov psr.l = r8 ;;\n"
	    "srlz.d ;;\n"
	    :: [mask] "i" (PSR_IC_MASK),
	       [va] "r" (va),
	       [word1] "r" (entry.word[1]),
	       [word0] "r" (entry.word[0]),
	       [tr] "r" (tr),
	       [dtr] "r" (dtr)
	    : "p6", "p7", "r8"
	);

	if (restore_rr) {
		/* Put back the original region register content. */
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
360
[a0d74fd]361/** Insert data into DTLB.
362 *
[5bda2f3e]363 * @param page Virtual page address including VRN bits.
364 * @param frame Physical frame address.
365 * @param dtr If true, insert into data translation register, use data
366 * translation cache otherwise.
367 * @param tr Translation register if dtr is true, ignored otherwise.
368 *
[a0d74fd]369 */
[5bda2f3e]370void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr,
[98000fb]371 size_t tr)
[a0d74fd]372{
373 tlb_entry_t entry;
[a35b458]374
[a0d74fd]375 entry.word[0] = 0;
376 entry.word[1] = 0;
[a35b458]377
[5bda2f3e]378 entry.p = true; /* present */
[a0d74fd]379 entry.ma = MA_WRITEBACK;
[5bda2f3e]380 entry.a = true; /* already accessed */
381 entry.d = true; /* already dirty */
[a0d74fd]382 entry.pl = PL_KERNEL;
383 entry.ar = AR_READ | AR_WRITE;
384 entry.ppn = frame >> PPN_SHIFT;
385 entry.ps = PAGE_WIDTH;
[a35b458]386
[a0d74fd]387 if (dtr)
388 dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
389 else
390 dtc_mapping_insert(page, ASID_KERNEL, entry);
391}
392
/** Purge kernel entries from DTR.
 *
 * Purge DTR entries used by the kernel.
 *
 * @param page  Virtual page address including VRN bits.
 * @param width Width of the purge in bits.
 *
 */
void dtr_purge(uintptr_t page, size_t width)
{
	/*
	 * ptr.d purges the data translation register entry; the purge
	 * size is encoded in bits [7:2] of the second operand, hence
	 * the shift by 2.
	 */
	asm volatile (
	    "ptr.d %[page], %[width]\n"
	    :: [page] "r" (page),
	       [width] "r" (width << 2)
	);
}
409
[9ad03fe]410/** Copy content of PTE into data translation cache.
411 *
[5bda2f3e]412 * @param t PTE.
413 *
[9ad03fe]414 */
415void dtc_pte_copy(pte_t *t)
416{
417 tlb_entry_t entry;
[a35b458]418
[9ad03fe]419 entry.word[0] = 0;
420 entry.word[1] = 0;
[a35b458]421
[9ad03fe]422 entry.p = t->p;
423 entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
424 entry.a = t->a;
425 entry.d = t->d;
426 entry.pl = t->k ? PL_KERNEL : PL_USER;
427 entry.ar = t->w ? AR_WRITE : AR_READ;
428 entry.ppn = t->frame >> PPN_SHIFT;
429 entry.ps = PAGE_WIDTH;
[a35b458]430
[9ad03fe]431 dtc_mapping_insert(t->page, t->as->asid, entry);
[a35b458]432
[68091bd]433#ifdef CONFIG_VHPT
434 vhpt_mapping_insert(t->page, t->as->asid, entry);
[5bda2f3e]435#endif
[9ad03fe]436}
437
438/** Copy content of PTE into instruction translation cache.
439 *
[5bda2f3e]440 * @param t PTE.
441 *
[9ad03fe]442 */
443void itc_pte_copy(pte_t *t)
444{
445 tlb_entry_t entry;
[a35b458]446
[9ad03fe]447 entry.word[0] = 0;
448 entry.word[1] = 0;
[a35b458]449
[63e27ef]450 assert(t->x);
[a35b458]451
[9ad03fe]452 entry.p = t->p;
453 entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
454 entry.a = t->a;
455 entry.pl = t->k ? PL_KERNEL : PL_USER;
456 entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
457 entry.ppn = t->frame >> PPN_SHIFT;
458 entry.ps = PAGE_WIDTH;
[a35b458]459
[9ad03fe]460 itc_mapping_insert(t->page, t->as->asid, entry);
[a35b458]461
[68091bd]462#ifdef CONFIG_VHPT
463 vhpt_mapping_insert(t->page, t->as->asid, entry);
[5bda2f3e]464#endif
[9ad03fe]465}
466
[0fd9b35]467static bool is_kernel_fault(uintptr_t va)
468{
469 region_register_t rr;
470
471 rr.word = rr_read(VA2VRN(va));
472 rid_t rid = rr.map.rid;
473 return (RID2ASID(rid) == ASID_KERNEL) && (VA2VRN(va) == VRN_KERNEL);
474}
475
[9ad03fe]476/** Instruction TLB fault handler for faults with VHPT turned off.
477 *
[9928240]478 * @param n Interruption vector.
[5bda2f3e]479 * @param istate Structure with saved interruption state.
480 *
[9ad03fe]481 */
[9928240]482void alternate_instruction_tlb_fault(unsigned int n, istate_t *istate)
[89298e3]483{
[7f1c620]484 uintptr_t va;
[38dc82d]485 pte_t t;
[a35b458]486
[5bda2f3e]487 va = istate->cr_ifa; /* faulting address */
[a35b458]488
[63e27ef]489 assert(!is_kernel_fault(va));
[0fd9b35]490
[38dc82d]491 bool found = page_mapping_find(AS, va, true, &t);
492 if (found) {
[63e27ef]493 assert(t.p);
[560b81c]494
[9ad03fe]495 /*
496 * The mapping was found in software page hash table.
497 * Insert it into data translation cache.
498 */
[38dc82d]499 itc_pte_copy(&t);
[9ad03fe]500 } else {
501 /*
502 * Forward the page fault to address space page fault handler.
503 */
[1dbc43f]504 as_page_fault(va, PF_ACCESS_EXEC, istate);
[9ad03fe]505 }
[95042fd]506}
[89298e3]507
[46321fb]508static int is_io_page_accessible(int page)
509{
[666773c]510 if (TASK->arch.iomap)
[38f6add]511 return bitmap_get(TASK->arch.iomap, page);
[666773c]512 else
513 return 0;
[46321fb]514}
515
[666773c]516/**
517 * There is special handling of memory mapped legacy io, because of 4KB sized
518 * access for userspace.
[46321fb]519 *
[5bda2f3e]520 * @param va Virtual address of page fault.
521 * @param istate Structure with saved interruption state.
522 *
523 * @return One on success, zero on failure.
[46321fb]524 *
525 */
526static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
527{
[22f0561]528 if ((va >= LEGACYIO_USER_BASE) && (va < LEGACYIO_USER_BASE + (1 << LEGACYIO_PAGE_WIDTH))) {
[666773c]529 if (TASK) {
[22f0561]530 uint64_t io_page = (va & ((1 << LEGACYIO_PAGE_WIDTH) - 1)) >>
531 LEGACYIO_SINGLE_PAGE_WIDTH;
[a35b458]532
[666773c]533 if (is_io_page_accessible(io_page)) {
534 uint64_t page, frame;
[a35b458]535
[22f0561]536 page = LEGACYIO_USER_BASE +
537 (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page;
538 frame = LEGACYIO_PHYS_BASE +
539 (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page;
[a35b458]540
[46321fb]541 tlb_entry_t entry;
[a35b458]542
[46321fb]543 entry.word[0] = 0;
544 entry.word[1] = 0;
[a35b458]545
[5bda2f3e]546 entry.p = true; /* present */
547 entry.ma = MA_UNCACHEABLE;
548 entry.a = true; /* already accessed */
549 entry.d = true; /* already dirty */
[46321fb]550 entry.pl = PL_USER;
551 entry.ar = AR_READ | AR_WRITE;
[ef5de6d]552 entry.ppn = frame >> PPN_SHIFT;
[22f0561]553 entry.ps = LEGACYIO_SINGLE_PAGE_WIDTH;
[a35b458]554
[ef5de6d]555 dtc_mapping_insert(page, TASK->as->asid, entry);
[46321fb]556 return 1;
[666773c]557 } else {
558 fault_if_from_uspace(istate,
[7e752b2]559 "IO access fault at %p.", (void *) va);
[666773c]560 }
561 }
562 }
[a35b458]563
[46321fb]564 return 0;
565}
566
/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void alternate_data_tlb_fault(unsigned int n, istate_t *istate)
{
	if (istate->cr_isr.sp) {
		/*
		 * Speculative load. Defer the exception until a more clever
		 * approach can be used. Currently if we try to find the
		 * mapping for the speculative load while in the kernel, we
		 * might introduce a livelock because of the possibly invalid
		 * values of the address.
		 */
		istate->cr_ipsr.ed = true;
		return;
	}

	uintptr_t va = istate->cr_ifa;	/* faulting address */
	as_t *as = AS;

	if (is_kernel_fault(va)) {
		if (va < end_of_identity) {
			/*
			 * Create kernel identity mapping for low memory.
			 */
			dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
			return;
		} else {
			/* Non-identity kernel addresses are looked up in AS_KERNEL. */
			as = AS_KERNEL;
		}
	}

	pte_t t;
	bool found = page_mapping_find(as, va, true, &t);
	if (found) {
		assert(t.p);

		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into data translation cache.
		 */
		dtc_pte_copy(&t);
	} else {
		/* Memory-mapped legacy I/O gets a special-cased insertion. */
		if (try_memmap_io_insertion(va, istate))
			return;

		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		as_page_fault(va, PF_ACCESS_READ, istate);
	}
}
[89298e3]623
/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_nested_tlb_fault(unsigned int n, istate_t *istate)
{
	/* Reaching this handler indicates a kernel bug. */
	assert(false);
}
[89298e3]636
/** Data Dirty bit fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_dirty_bit_fault(unsigned int n, istate_t *istate)
{
	uintptr_t va;
	pte_t t;
	as_t *as = AS;

	va = istate->cr_ifa;	/* faulting address */

	if (is_kernel_fault(va))
		as = AS_KERNEL;

	bool found = page_mapping_find(as, va, true, &t);

	/* The asserts state the expectation for debug builds... */
	assert(found);
	assert(t.p);

	/* ...while the runtime test also guards NDEBUG builds and the
	 * read-only (!t.w) case. */
	if (found && t.p && t.w) {
		/*
		 * Update the Dirty bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t.d = true;
		dtc_pte_copy(&t);
		page_mapping_update(as, va, true, &t);
	} else {
		as_page_fault(va, PF_ACCESS_WRITE, istate);
	}
}
[89298e3]671
/** Instruction access bit fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void instruction_access_bit_fault(unsigned int n, istate_t *istate)
{
	uintptr_t va;
	pte_t t;

	va = istate->cr_ifa;	/* faulting address */

	assert(!is_kernel_fault(va));

	bool found = page_mapping_find(AS, va, true, &t);

	/* Debug-build expectation; the if below also covers NDEBUG builds. */
	assert(found);
	assert(t.p);

	if (found && t.p && t.x) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into ITC.
		 */
		t.a = true;
		itc_pte_copy(&t);
		page_mapping_update(AS, va, true, &t);
	} else {
		as_page_fault(va, PF_ACCESS_EXEC, istate);
	}
}
[89298e3]704
/** Data access bit fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_access_bit_fault(unsigned int n, istate_t *istate)
{
	uintptr_t va;
	pte_t t;
	as_t *as = AS;

	va = istate->cr_ifa;	/* faulting address */

	if (is_kernel_fault(va))
		as = AS_KERNEL;

	bool found = page_mapping_find(as, va, true, &t);

	/* Debug-build expectation; the if below also covers NDEBUG builds. */
	assert(found);
	assert(t.p);

	if (found && t.p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t.a = true;
		dtc_pte_copy(&t);
		page_mapping_update(as, va, true, &t);
	} else {
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p.",
			    (void *) va);
			panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, NULL);
		}
	}
}
743
[925be4e]744/** Data access rights fault handler.
745 *
[9928240]746 * @param n Interruption vector.
[925be4e]747 * @param istate Structure with saved interruption state.
[5bda2f3e]748 *
[925be4e]749 */
[9928240]750void data_access_rights_fault(unsigned int n, istate_t *istate)
[925be4e]751{
752 uintptr_t va;
[38dc82d]753 pte_t t;
[a35b458]754
[5bda2f3e]755 va = istate->cr_ifa; /* faulting address */
[0fd9b35]756
[63e27ef]757 assert(!is_kernel_fault(va));
[a35b458]758
[925be4e]759 /*
760 * Assume a write to a read-only page.
761 */
[38dc82d]762 bool found = page_mapping_find(AS, va, true, &t);
763
[63e27ef]764 assert(found);
765 assert(t.p);
766 assert(!t.w);
[38dc82d]767
[1dbc43f]768 as_page_fault(va, PF_ACCESS_WRITE, istate);
[925be4e]769}
770
[9ad03fe]771/** Page not present fault handler.
772 *
[9928240]773 * @param n Interruption vector.
[25d7709]774 * @param istate Structure with saved interruption state.
[5bda2f3e]775 *
[9ad03fe]776 */
[9928240]777void page_not_present(unsigned int n, istate_t *istate)
[95042fd]778{
[7f1c620]779 uintptr_t va;
[38dc82d]780 pte_t t;
[a35b458]781
[5bda2f3e]782 va = istate->cr_ifa; /* faulting address */
[a35b458]783
[63e27ef]784 assert(!is_kernel_fault(va));
[0fd9b35]785
[38dc82d]786 bool found = page_mapping_find(AS, va, true, &t);
787
[63e27ef]788 assert(found);
[a35b458]789
[38dc82d]790 if (t.p) {
[9ad03fe]791 /*
792 * If the Present bit is set in page hash table, just copy it
793 * and update ITC/DTC.
794 */
[38dc82d]795 if (t.x)
796 itc_pte_copy(&t);
[9ad03fe]797 else
[38dc82d]798 dtc_pte_copy(&t);
[9ad03fe]799 } else {
[1dbc43f]800 as_page_fault(va, PF_ACCESS_READ, istate);
[9ad03fe]801 }
[95042fd]802}
[b45c443]803
/** Architecture-specific TLB initialization; nothing to do on ia64. */
void tlb_arch_init(void)
{
}
807
/** Print TLB contents; not implemented on ia64. */
void tlb_print(void)
{
}
811
[ee289cf0]812/** @}
[b45c443]813 */
Note: See TracBrowser for help on using the repository browser.