source: mainline/kernel/arch/ia64/src/mm/tlb.c@a35b458

Last change on this file: a35b458, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

style: Remove trailing whitespace on _all_ lines, including empty ones, for particular file types.

Command used: tools/srepl '\s\+$' '' -- *.c *.h *.py *.sh *.s *.S *.ag

Currently, whitespace on empty lines is very inconsistent.
There are two basic choices: Either remove the whitespace, or keep empty lines
indented to the level of surrounding code. The former is AFAICT more common,
and also much easier to do automatically.

Alternatively, we could write a script for automatic indentation and use that
instead. However, if such a script exists, it's possible to use the indented
style locally, by having the editor apply the relevant conversions on load and
save, without affecting the remote repository. IMO, it makes more sense to adopt
the simpler rule.

/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ia64mm
 * @{
 */
/** @file
 */

/*
 * TLB management.
 */

#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/mm/vhpt.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <arch/pal/pal.h>
#include <arch/asm.h>
#include <assert.h>
#include <panic.h>
#include <print.h>
#include <arch.h>
#include <interrupt.h>
#include <arch/legacyio.h>

/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
    ipl_t ipl;
    uintptr_t adr;
    uint32_t count1, count2, stride1, stride2;

    unsigned int i, j;

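    /*
     * PAL procedure PTCE_INFO yields the base address, loop counts,
     * and strides with which the nested ptc.e loop below purges the
     * entire local TLB.
     */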
    adr = PAL_PTCE_INFO_BASE();
    count1 = PAL_PTCE_INFO_COUNT1();
    count2 = PAL_PTCE_INFO_COUNT2();
    stride1 = PAL_PTCE_INFO_STRIDE1();
    stride2 = PAL_PTCE_INFO_STRIDE2();

    ipl = interrupts_disable();

    for (i = 0; i < count1; i++) {
        for (j = 0; j < count2; j++) {
            asm volatile (
                "ptc.e %[adr] ;;"
                :: [adr] "r" (adr)
            );
            adr += stride2;
        }
        adr += stride1;
    }

    interrupts_restore(ipl);

    srlz_d();
    srlz_i();

#ifdef CONFIG_VHPT
    vhpt_invalidate_all();
#endif
}

/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 *
 */
void tlb_invalidate_asid(asid_t asid)
{
    tlb_invalidate_all();
}

/** Invalidate TLB entries for a range of pages.
 *
 * @param asid Address space identifier.
 * @param page Address of the first page whose entry is to be invalidated.
 * @param cnt  Number of consecutive pages to invalidate.
 *
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
    region_register_t rr;
    bool restore_rr = false;
    int b = 0;
    int c = cnt;

    uintptr_t va;
    va = page;

    rr.word = rr_read(VA2VRN(page));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(page))))) {
        /*
         * The selected region register does not contain the required
         * RID. Save the old content of the register and replace the RID.
         */
        region_register_t rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(page));
        rr_write(VA2VRN(page), rr0.word);
        srlz_d();
        srlz_i();
    }

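    /*
     * b ends up as floor(log2(cnt)) / 2. The switch below maps it to
     * a purge size ps (in bits); the loop that follows purges 2^ps
     * bytes per ptc.l instruction.
     */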
    while (c >>= 1)
        b++;
    b >>= 1;
    uint64_t ps;

    switch (b) {
    case 0: /* cnt 1 - 3 */
        ps = PAGE_WIDTH;
        break;
    case 1: /* cnt 4 - 15 */
        ps = PAGE_WIDTH + 2;
        va &= ~((1UL << ps) - 1);
        break;
    case 2: /* cnt 16 - 63 */
        ps = PAGE_WIDTH + 4;
        va &= ~((1UL << ps) - 1);
        break;
    case 3: /* cnt 64 - 255 */
        ps = PAGE_WIDTH + 6;
        va &= ~((1UL << ps) - 1);
        break;
    case 4: /* cnt 256 - 1023 */
        ps = PAGE_WIDTH + 8;
        va &= ~((1UL << ps) - 1);
        break;
    case 5: /* cnt 1024 - 4095 */
        ps = PAGE_WIDTH + 10;
        va &= ~((1UL << ps) - 1);
        break;
    case 6: /* cnt 4096 - 16383 */
        ps = PAGE_WIDTH + 12;
        va &= ~((1UL << ps) - 1);
        break;
    case 7: /* cnt 16384 - 65535 */
    case 8: /* cnt 65536 - (256K - 1) */
        ps = PAGE_WIDTH + 14;
        va &= ~((1UL << ps) - 1);
        break;
    default:
        ps = PAGE_WIDTH + 18;
        va &= ~((1UL << ps) - 1);
        break;
    }

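    /*
     * ptc.l takes the purge size in bits 7:2 of its second operand,
     * hence the ps << 2.
     */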
    for (; va < (page + cnt * PAGE_SIZE); va += (1UL << ps))
        asm volatile (
            "ptc.l %[va], %[ps] ;;"
            :: [va] "r" (va),
               [ps] "r" (ps << 2)
        );

    srlz_d();
    srlz_i();

    if (restore_rr) {
        rr_write(VA2VRN(page), rr.word);
        srlz_d();
        srlz_i();
    }
}

/** Insert data into the data translation cache.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB
 *              insertion format.
 *
 */
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, true);
}

/** Insert data into the instruction translation cache.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB
 *              insertion format.
 *
 */
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, false);
}

/** Insert data into the instruction or data translation cache.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB
 *              insertion format.
 * @param dtc   If true, insert into the data translation cache,
 *              otherwise insert into the instruction translation cache.
 *
 */
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
{
    region_register_t rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain the required
         * RID. Save the old content of the register and replace the RID.
         */
        region_register_t rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

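    /*
     * cr.ifa and cr.itir are interruption resources; they may only be
     * written, and the insertion performed, while interruption
     * collection is disabled, hence the rsm that clears PSR.ic below.
     */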
    asm volatile (
        "mov r8 = psr ;;\n"
        "rsm %[mask] ;;\n"                 /* PSR_IC_MASK */
        "srlz.d ;;\n"
        "srlz.i ;;\n"
        "mov cr.ifa = %[va]\n"             /* va */
        "mov cr.itir = %[word1] ;;\n"      /* entry.word[1] */
        "cmp.eq p6, p7 = %[dtc], r0 ;;\n"  /* decide between itc and dtc */
        "(p6) itc.i %[word0] ;;\n"
        "(p7) itc.d %[word0] ;;\n"
        "mov psr.l = r8 ;;\n"
        "srlz.d ;;\n"
        :: [mask] "i" (PSR_IC_MASK),
           [va] "r" (va),
           [word0] "r" (entry.word[0]),
           [word1] "r" (entry.word[1]),
           [dtc] "r" (dtc)
        : "p6", "p7", "r8"
    );

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}

/** Insert data into an instruction translation register.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB
 *              insertion format.
 * @param tr    Translation register.
 *
 */
void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
{
    tr_mapping_insert(va, asid, entry, false, tr);
}

/** Insert data into a data translation register.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB
 *              insertion format.
 * @param tr    Translation register.
 *
 */
void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
{
    tr_mapping_insert(va, asid, entry, true, tr);
}

/** Insert data into an instruction or data translation register.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB
 *              insertion format.
 * @param dtr   If true, insert into a data translation register,
 *              otherwise insert into an instruction translation register.
 * @param tr    Translation register.
 *
 */
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr,
    size_t tr)
{
    region_register_t rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain the required
         * RID. Save the old content of the register and replace the RID.
         */
        region_register_t rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

    asm volatile (
        "mov r8 = psr ;;\n"
        "rsm %[mask] ;;\n"                 /* PSR_IC_MASK */
        "srlz.d ;;\n"
        "srlz.i ;;\n"
        "mov cr.ifa = %[va]\n"             /* va */
        "mov cr.itir = %[word1] ;;\n"      /* entry.word[1] */
        "cmp.eq p6, p7 = %[dtr], r0 ;;\n"  /* decide between itr and dtr */
        "(p6) itr.i itr[%[tr]] = %[word0] ;;\n"
        "(p7) itr.d dtr[%[tr]] = %[word0] ;;\n"
        "mov psr.l = r8 ;;\n"
        "srlz.d ;;\n"
        :: [mask] "i" (PSR_IC_MASK),
           [va] "r" (va),
           [word1] "r" (entry.word[1]),
           [word0] "r" (entry.word[0]),
           [tr] "r" (tr),
           [dtr] "r" (dtr)
        : "p6", "p7", "r8"
    );

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}

/** Insert data into the DTLB.
 *
 * @param page  Virtual page address including VRN bits.
 * @param frame Physical frame address.
 * @param dtr   If true, insert into a data translation register,
 *              otherwise insert into the data translation cache.
 * @param tr    Translation register if dtr is true, ignored otherwise.
 *
 */
void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr,
    size_t tr)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    entry.p = true;           /* present */
    entry.ma = MA_WRITEBACK;
    entry.a = true;           /* already accessed */
    entry.d = true;           /* already dirty */
    entry.pl = PL_KERNEL;
    entry.ar = AR_READ | AR_WRITE;
    entry.ppn = frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    if (dtr)
        dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
    else
        dtc_mapping_insert(page, ASID_KERNEL, entry);
}

/** Purge kernel entries from DTR.
 *
 * Purge DTR entries used by the kernel.
 *
 * @param page  Virtual page address including VRN bits.
 * @param width Width of the purge in bits.
 *
 */
void dtr_purge(uintptr_t page, size_t width)
{
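    /*
     * As with ptc.l above, the purge size (in bits) is passed in
     * bits 7:2 of the second operand, hence the width << 2.
     */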
    asm volatile (
        "ptr.d %[page], %[width]\n"
        :: [page] "r" (page),
           [width] "r" (width << 2)
    );
}

/** Copy the content of a PTE into the data translation cache.
 *
 * @param t PTE.
 *
 */
void dtc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.d = t->d;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->w ? AR_WRITE : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    dtc_mapping_insert(t->page, t->as->asid, entry);

#ifdef CONFIG_VHPT
    vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

/** Copy the content of a PTE into the instruction translation cache.
 *
 * @param t PTE.
 *
 */
void itc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    assert(t->x);

    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    itc_mapping_insert(t->page, t->as->asid, entry);

#ifdef CONFIG_VHPT
    vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

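/** Decide whether a faulting address is a kernel address.
 *
 * @param va Faulting virtual address.
 *
 * @return True if the effective RID translates to the kernel ASID and
 *         the address lies in the kernel region, false otherwise.
 */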
static bool is_kernel_fault(uintptr_t va)
{
    region_register_t rr;

    rr.word = rr_read(VA2VRN(va));
    rid_t rid = rr.map.rid;
    return (RID2ASID(rid) == ASID_KERNEL) && (VA2VRN(va) == VRN_KERNEL);
}

/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void alternate_instruction_tlb_fault(unsigned int n, istate_t *istate)
{
    uintptr_t va;
    pte_t t;

    va = istate->cr_ifa; /* faulting address */

    assert(!is_kernel_fault(va));

    bool found = page_mapping_find(AS, va, true, &t);
    if (found) {
        assert(t.p);

        /*
         * The mapping was found in the software page hash table.
         * Insert it into the instruction translation cache.
         */
        itc_pte_copy(&t);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        as_page_fault(va, PF_ACCESS_EXEC, istate);
    }
}

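/** Check whether the current task may access a legacy I/O page.
 *
 * @param page Index of the page within the legacy I/O area.
 *
 * @return Nonzero if the task's I/O bitmap grants access to the page,
 *         zero otherwise.
 */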
static int is_io_page_accessible(int page)
{
    if (TASK->arch.iomap)
        return bitmap_get(TASK->arch.iomap, page);
    else
        return 0;
}

/** Try to insert a mapping for a memory-mapped legacy I/O page.
 *
 * Memory-mapped legacy I/O needs special handling because userspace
 * accesses it in 4 KB pages.
 *
 * @param va     Virtual address of the page fault.
 * @param istate Structure with saved interruption state.
 *
 * @return One on success, zero on failure.
 *
 */
static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
{
    if ((va >= LEGACYIO_USER_BASE) &&
        (va < LEGACYIO_USER_BASE + (1 << LEGACYIO_PAGE_WIDTH))) {
        if (TASK) {
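            /*
             * Index of the faulting 4 KB page within the legacy I/O
             * area.
             */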
            uint64_t io_page = (va & ((1 << LEGACYIO_PAGE_WIDTH) - 1)) >>
                LEGACYIO_SINGLE_PAGE_WIDTH;

            if (is_io_page_accessible(io_page)) {
                uint64_t page, frame;

                page = LEGACYIO_USER_BASE +
                    (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page;
                frame = LEGACYIO_PHYS_BASE +
                    (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page;

                tlb_entry_t entry;

                entry.word[0] = 0;
                entry.word[1] = 0;

                entry.p = true;           /* present */
                entry.ma = MA_UNCACHEABLE;
                entry.a = true;           /* already accessed */
                entry.d = true;           /* already dirty */
                entry.pl = PL_USER;
                entry.ar = AR_READ | AR_WRITE;
                entry.ppn = frame >> PPN_SHIFT;
                entry.ps = LEGACYIO_SINGLE_PAGE_WIDTH;

                dtc_mapping_insert(page, TASK->as->asid, entry);
                return 1;
            } else {
                fault_if_from_uspace(istate,
                    "IO access fault at %p.", (void *) va);
            }
        }
    }

    return 0;
}

/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void alternate_data_tlb_fault(unsigned int n, istate_t *istate)
{
    if (istate->cr_isr.sp) {
        /*
         * Speculative load. Defer the exception until a more clever
         * approach can be used. Currently, if we tried to find the
         * mapping for the speculative load while in the kernel, we
         * might introduce a livelock because of the possibly invalid
         * values of the address.
         */
        istate->cr_ipsr.ed = true;
        return;
    }

    uintptr_t va = istate->cr_ifa; /* faulting address */
    as_t *as = AS;

    if (is_kernel_fault(va)) {
        if (va < end_of_identity) {
            /*
             * Create a kernel identity mapping for low memory.
             */
            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
            return;
        } else {
            as = AS_KERNEL;
        }
    }

    pte_t t;
    bool found = page_mapping_find(as, va, true, &t);
    if (found) {
        assert(t.p);

        /*
         * The mapping was found in the software page hash table.
         * Insert it into the data translation cache.
         */
        dtc_pte_copy(&t);
    } else {
        if (try_memmap_io_insertion(va, istate))
            return;

        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        as_page_fault(va, PF_ACCESS_READ, istate);
    }
}

/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_nested_tlb_fault(unsigned int n, istate_t *istate)
{
    assert(false);
}

/** Data Dirty bit fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_dirty_bit_fault(unsigned int n, istate_t *istate)
{
    uintptr_t va;
    pte_t t;
    as_t *as = AS;

    va = istate->cr_ifa; /* faulting address */

    if (is_kernel_fault(va))
        as = AS_KERNEL;

    bool found = page_mapping_find(as, va, true, &t);

    assert(found);
    assert(t.p);

    if (found && t.p && t.w) {
        /*
         * Update the Dirty bit in the page tables and reinsert
         * the mapping into the DTC.
         */
        t.d = true;
        dtc_pte_copy(&t);
        page_mapping_update(as, va, true, &t);
    } else {
        as_page_fault(va, PF_ACCESS_WRITE, istate);
    }
}

/** Instruction access bit fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void instruction_access_bit_fault(unsigned int n, istate_t *istate)
{
    uintptr_t va;
    pte_t t;

    va = istate->cr_ifa; /* faulting address */

    assert(!is_kernel_fault(va));

    bool found = page_mapping_find(AS, va, true, &t);

    assert(found);
    assert(t.p);

    if (found && t.p && t.x) {
        /*
         * Update the Accessed bit in the page tables and reinsert
         * the mapping into the ITC.
         */
        t.a = true;
        itc_pte_copy(&t);
        page_mapping_update(AS, va, true, &t);
    } else {
        as_page_fault(va, PF_ACCESS_EXEC, istate);
    }
}

/** Data access bit fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_access_bit_fault(unsigned int n, istate_t *istate)
{
    uintptr_t va;
    pte_t t;
    as_t *as = AS;

    va = istate->cr_ifa; /* faulting address */

    if (is_kernel_fault(va))
        as = AS_KERNEL;

    bool found = page_mapping_find(as, va, true, &t);

    assert(found);
    assert(t.p);

    if (found && t.p) {
        /*
         * Update the Accessed bit in the page tables and reinsert
         * the mapping into the DTC.
         */
        t.a = true;
        dtc_pte_copy(&t);
        page_mapping_update(as, va, true, &t);
    } else {
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %p.",
                (void *) va);
            panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, NULL);
        }
    }
}

/** Data access rights fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_access_rights_fault(unsigned int n, istate_t *istate)
{
    uintptr_t va;
    pte_t t;

    va = istate->cr_ifa; /* faulting address */

    assert(!is_kernel_fault(va));

    /*
     * Assume a write to a read-only page.
     */
    bool found = page_mapping_find(AS, va, true, &t);

    assert(found);
    assert(t.p);
    assert(!t.w);

    as_page_fault(va, PF_ACCESS_WRITE, istate);
}

/** Page not present fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void page_not_present(unsigned int n, istate_t *istate)
{
    uintptr_t va;
    pte_t t;

    va = istate->cr_ifa; /* faulting address */

    assert(!is_kernel_fault(va));

    bool found = page_mapping_find(AS, va, true, &t);

    assert(found);

    if (t.p) {
        /*
         * If the Present bit is set in the page hash table, just copy
         * the mapping and update the ITC/DTC.
         */
        if (t.x)
            itc_pte_copy(&t);
        else
            dtc_pte_copy(&t);
    } else {
        as_page_fault(va, PF_ACCESS_READ, istate);
    }
}

void tlb_arch_init(void)
{
}

void tlb_print(void)
{
}

/** @}
 */