source: mainline/kernel/arch/ia64/src/mm/tlb.c@705ca2b

Last change on this file since 705ca2b was bab75df6, checked in by Jiri Svoboda <jiri@…>, 7 years ago

Let kernel code get printf via the standard stdio header. Clean up unused includes.

/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_ia64_mm
 * @{
 */
/** @file
 */

/*
 * TLB management.
 */
#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/mm/vhpt.h>
#include <barrier.h>
#include <arch/interrupt.h>
#include <arch/pal/pal.h>
#include <arch/asm.h>
#include <assert.h>
#include <panic.h>
#include <arch.h>
#include <interrupt.h>
#include <arch/legacyio.h>

56/** Invalidate all TLB entries. */
57void tlb_invalidate_all(void)
58{
59 ipl_t ipl;
60 uintptr_t adr;
61 uint32_t count1, count2, stride1, stride2;
62
63 unsigned int i, j;
64
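	/*
	 * PAL firmware describes how to flush the local translation cache:
	 * a base address plus two nested loop counts with their respective
	 * strides, obtained via the PAL_PTCE_INFO call.
	 */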
	adr = PAL_PTCE_INFO_BASE();
	count1 = PAL_PTCE_INFO_COUNT1();
	count2 = PAL_PTCE_INFO_COUNT2();
	stride1 = PAL_PTCE_INFO_STRIDE1();
	stride2 = PAL_PTCE_INFO_STRIDE2();

	ipl = interrupts_disable();

	for (i = 0; i < count1; i++) {
		for (j = 0; j < count2; j++) {
			asm volatile (
			    "ptc.e %[adr] ;;"
			    :: [adr] "r" (adr)
			);
			adr += stride2;
		}
		adr += stride1;
	}

	interrupts_restore(ipl);

	srlz_d();
	srlz_i();

#ifdef CONFIG_VHPT
	vhpt_invalidate_all();
#endif
}

/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 *
 */
void tlb_invalidate_asid(asid_t asid)
{
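	/* No ASID-selective purge is implemented; flush the entire TLB. */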
	tlb_invalidate_all();
}

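/** Invalidate entries mapping a range of pages in an address space.
 *
 * @param asid Address space identifier.
 * @param page Address of the first page whose entries are invalidated.
 * @param cnt  Number of pages.
 *
 */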
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
	region_register_t rr;
	bool restore_rr = false;
	int b = 0;
	int c = cnt;

	uintptr_t va;
	va = page;

	rr.word = rr_read(VA2VRN(page));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(page))))) {
		/*
		 * The selected region register does not contain the required
		 * RID. Save the old content of the register and replace the
		 * RID.
		 */
		region_register_t rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(page));
		rr_write(VA2VRN(page), rr0.word);
		srlz_d();
		srlz_i();
	}

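	/*
	 * Compute b = floor(log2(cnt)) / 2. Each increment of b quadruples
	 * the page count covered, so the purge page size chosen below grows
	 * by two bits accordingly.
	 */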
	while (c >>= 1)
		b++;
	b >>= 1;
	uint64_t ps;

	switch (b) {
	case 0: /* cnt 1 - 3 */
		ps = PAGE_WIDTH;
		break;
	case 1: /* cnt 4 - 15 */
		ps = PAGE_WIDTH + 2;
		va &= ~((1UL << ps) - 1);
		break;
	case 2: /* cnt 16 - 63 */
		ps = PAGE_WIDTH + 4;
		va &= ~((1UL << ps) - 1);
		break;
	case 3: /* cnt 64 - 255 */
		ps = PAGE_WIDTH + 6;
		va &= ~((1UL << ps) - 1);
		break;
	case 4: /* cnt 256 - 1023 */
		ps = PAGE_WIDTH + 8;
		va &= ~((1UL << ps) - 1);
		break;
	case 5: /* cnt 1024 - 4095 */
		ps = PAGE_WIDTH + 10;
		va &= ~((1UL << ps) - 1);
		break;
	case 6: /* cnt 4096 - 16383 */
		ps = PAGE_WIDTH + 12;
		va &= ~((1UL << ps) - 1);
		break;
	case 7: /* cnt 16384 - 65535 */
	case 8: /* cnt 65536 - (256K - 1) */
		ps = PAGE_WIDTH + 14;
		va &= ~((1UL << ps) - 1);
		break;
	default:
		ps = PAGE_WIDTH + 18;
		va &= ~((1UL << ps) - 1);
		break;
	}

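	/*
	 * ptc.l purges the local TLB; the purge page size is encoded in
	 * bits 7:2 of the second operand, hence the shift by two below.
	 */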
	for (; va < (page + cnt * PAGE_SIZE); va += (1UL << ps))
		asm volatile (
		    "ptc.l %[va], %[ps] ;;"
		    :: [va] "r" (va),
		       [ps] "r" (ps << 2)
		);

	srlz_d();
	srlz_i();

	if (restore_rr) {
		rr_write(VA2VRN(page), rr.word);
		srlz_d();
		srlz_i();
	}
}

/** Insert data into data translation cache.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of the TLB entry, as required by the TLB
 *              insertion format.
 *
 */
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, true);
}

/** Insert data into instruction translation cache.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of the TLB entry, as required by the TLB
 *              insertion format.
 */
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, false);
}

/** Insert data into instruction or data translation cache.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of the TLB entry, as required by the TLB
 *              insertion format.
 * @param dtc   If true, insert into the data translation cache;
 *              otherwise insert into the instruction translation cache.
 *
 */
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register_t rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain the required
		 * RID. Save the old content of the register and replace the
		 * RID.
		 */
		region_register_t rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

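	/*
	 * cr.ifa, cr.itir and the itc insertion itself may only be accessed
	 * with PSR.ic cleared, so interruption collection is turned off
	 * around the insertion and PSR is restored afterwards.
	 */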
	asm volatile (
	    "mov r8 = psr ;;\n"
	    "rsm %[mask] ;;\n" /* PSR_IC_MASK */
	    "srlz.d ;;\n"
	    "srlz.i ;;\n"
	    "mov cr.ifa = %[va]\n" /* va */
	    "mov cr.itir = %[word1] ;;\n" /* entry.word[1] */
	    "cmp.eq p6, p7 = %[dtc], r0 ;;\n" /* decide between itc and dtc */
	    "(p6) itc.i %[word0] ;;\n"
	    "(p7) itc.d %[word0] ;;\n"
	    "mov psr.l = r8 ;;\n"
	    "srlz.d ;;\n"
	    :: [mask] "i" (PSR_IC_MASK),
	       [va] "r" (va),
	       [word0] "r" (entry.word[0]),
	       [word1] "r" (entry.word[1]),
	       [dtc] "r" (dtc)
	    : "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}

/** Insert data into instruction translation register.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of the TLB entry, as required by the TLB
 *              insertion format.
 * @param tr    Translation register.
 *
 */
void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
{
	tr_mapping_insert(va, asid, entry, false, tr);
}

/** Insert data into data translation register.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of the TLB entry, as required by the TLB
 *              insertion format.
 * @param tr    Translation register.
 *
 */
void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
{
	tr_mapping_insert(va, asid, entry, true, tr);
}

/** Insert data into instruction or data translation register.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of the TLB entry, as required by the TLB
 *              insertion format.
 * @param dtr   If true, insert into the data translation register;
 *              otherwise insert into the instruction translation register.
 * @param tr    Translation register.
 *
 */
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr,
    size_t tr)
{
	region_register_t rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain the required
		 * RID. Save the old content of the register and replace the
		 * RID.
		 */
		region_register_t rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

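	/*
	 * Same PSR.ic handling as in tc_mapping_insert(); the itr.i/itr.d
	 * forms install the entry into the pinned translation register
	 * selected by tr.
	 */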
	asm volatile (
	    "mov r8 = psr ;;\n"
	    "rsm %[mask] ;;\n" /* PSR_IC_MASK */
	    "srlz.d ;;\n"
	    "srlz.i ;;\n"
	    "mov cr.ifa = %[va]\n" /* va */
	    "mov cr.itir = %[word1] ;;\n" /* entry.word[1] */
	    "cmp.eq p6, p7 = %[dtr], r0 ;;\n" /* decide between itr and dtr */
	    "(p6) itr.i itr[%[tr]] = %[word0] ;;\n"
	    "(p7) itr.d dtr[%[tr]] = %[word0] ;;\n"
	    "mov psr.l = r8 ;;\n"
	    "srlz.d ;;\n"
	    :: [mask] "i" (PSR_IC_MASK),
	       [va] "r" (va),
	       [word1] "r" (entry.word[1]),
	       [word0] "r" (entry.word[0]),
	       [tr] "r" (tr),
	       [dtr] "r" (dtr)
	    : "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}

/** Insert data into DTLB.
 *
 * @param page  Virtual page address including VRN bits.
 * @param frame Physical frame address.
 * @param dtr   If true, insert into the data translation register;
 *              otherwise insert into the data translation cache.
 * @param tr    Translation register if dtr is true, ignored otherwise.
 *
 */
void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr,
    size_t tr)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	entry.p = true; /* present */
	entry.ma = MA_WRITEBACK;
	entry.a = true; /* already accessed */
	entry.d = true; /* already dirty */
	entry.pl = PL_KERNEL;
	entry.ar = AR_READ | AR_WRITE;
	entry.ppn = frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	if (dtr)
		dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
	else
		dtc_mapping_insert(page, ASID_KERNEL, entry);
}

/** Purge kernel entries from DTR.
 *
 * Purge DTR entries used by the kernel.
 *
 * @param page  Virtual page address including VRN bits.
 * @param width Width of the purge in bits.
 *
 */
void dtr_purge(uintptr_t page, size_t width)
{
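	/* Like ptc.l, ptr.d takes the purge size in bits 7:2 of its second operand. */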
	asm volatile (
	    "ptr.d %[page], %[width]\n"
	    :: [page] "r" (page),
	       [width] "r" (width << 2)
	);
}

/** Copy content of PTE into data translation cache.
 *
 * @param t PTE.
 *
 */
void dtc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.d = t->d;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->w ? AR_WRITE : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	dtc_mapping_insert(t->page, t->as->asid, entry);

#ifdef CONFIG_VHPT
	vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

/** Copy content of PTE into instruction translation cache.
 *
 * @param t PTE.
 *
 */
void itc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	assert(t->x);

	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	itc_mapping_insert(t->page, t->as->asid, entry);

#ifdef CONFIG_VHPT
	vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

static bool is_kernel_fault(uintptr_t va)
{
	region_register_t rr;

	rr.word = rr_read(VA2VRN(va));
	rid_t rid = rr.map.rid;
	return (RID2ASID(rid) == ASID_KERNEL) && (VA2VRN(va) == VRN_KERNEL);
}

/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void alternate_instruction_tlb_fault(unsigned int n, istate_t *istate)
{
	uintptr_t va;
	pte_t t;

	va = istate->cr_ifa; /* faulting address */

	assert(!is_kernel_fault(va));

	bool found = page_mapping_find(AS, va, true, &t);
	if (found) {
		assert(t.p);

		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into the instruction translation cache.
		 */
		itc_pte_copy(&t);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		as_page_fault(va, PF_ACCESS_EXEC, istate);
	}
}

static int is_io_page_accessible(int page)
{
	if (TASK->arch.iomap)
		return bitmap_get(TASK->arch.iomap, page);
	else
		return 0;
}

/**
 * Memory-mapped legacy I/O needs special handling, because userspace
 * accesses it in 4 KB sub-pages rather than in whole kernel-sized pages.
 *
 * @param va     Virtual address of the page fault.
 * @param istate Structure with saved interruption state.
 *
 * @return One on success, zero on failure.
 *
 */
static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
{
	if ((va >= LEGACYIO_USER_BASE) &&
	    (va < LEGACYIO_USER_BASE + (1 << LEGACYIO_PAGE_WIDTH))) {
		if (TASK) {
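			/*
			 * Index of the faulting sub-page within the legacy
			 * I/O window; accessibility is checked per sub-page
			 * against the task's I/O bitmap.
			 */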
			uint64_t io_page = (va & ((1 << LEGACYIO_PAGE_WIDTH) - 1)) >>
			    LEGACYIO_SINGLE_PAGE_WIDTH;

			if (is_io_page_accessible(io_page)) {
				uint64_t page, frame;

				page = LEGACYIO_USER_BASE +
				    (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page;
				frame = LEGACYIO_PHYS_BASE +
				    (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page;

				tlb_entry_t entry;

				entry.word[0] = 0;
				entry.word[1] = 0;

				entry.p = true; /* present */
				entry.ma = MA_UNCACHEABLE;
				entry.a = true; /* already accessed */
				entry.d = true; /* already dirty */
				entry.pl = PL_USER;
				entry.ar = AR_READ | AR_WRITE;
				entry.ppn = frame >> PPN_SHIFT;
				entry.ps = LEGACYIO_SINGLE_PAGE_WIDTH;

				dtc_mapping_insert(page, TASK->as->asid, entry);
				return 1;
			} else {
				fault_if_from_uspace(istate,
				    "IO access fault at %p.", (void *) va);
			}
		}
	}

	return 0;
}

/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void alternate_data_tlb_fault(unsigned int n, istate_t *istate)
{
	if (istate->cr_isr.sp) {
		/*
		 * Speculative load. Defer the exception until a more clever
		 * approach can be used. Currently, if we try to find the
		 * mapping for the speculative load while in the kernel, we
		 * might introduce a livelock because of the possibly invalid
		 * values of the address.
		 */
		istate->cr_ipsr.ed = true;
		return;
	}

	uintptr_t va = istate->cr_ifa; /* faulting address */
	as_t *as = AS;

	if (is_kernel_fault(va)) {
		if (va < end_of_identity) {
			/*
			 * Create kernel identity mapping for low memory.
			 */
			dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
			return;
		} else {
			as = AS_KERNEL;
		}
	}

	pte_t t;
	bool found = page_mapping_find(as, va, true, &t);
	if (found) {
		assert(t.p);

		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into the data translation cache.
		 */
		dtc_pte_copy(&t);
	} else {
		if (try_memmap_io_insertion(va, istate))
			return;

		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		as_page_fault(va, PF_ACCESS_READ, istate);
	}
}

/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_nested_tlb_fault(unsigned int n, istate_t *istate)
{
	assert(false);
}

/** Data Dirty bit fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_dirty_bit_fault(unsigned int n, istate_t *istate)
{
	uintptr_t va;
	pte_t t;
	as_t *as = AS;

	va = istate->cr_ifa; /* faulting address */

	if (is_kernel_fault(va))
		as = AS_KERNEL;

	bool found = page_mapping_find(as, va, true, &t);

	assert(found);
	assert(t.p);

	if (found && t.p && t.w) {
		/*
		 * Update the Dirty bit in page tables and reinsert
		 * the mapping into the DTC.
		 */
		t.d = true;
		dtc_pte_copy(&t);
		page_mapping_update(as, va, true, &t);
	} else {
		as_page_fault(va, PF_ACCESS_WRITE, istate);
	}
}

/** Instruction access bit fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void instruction_access_bit_fault(unsigned int n, istate_t *istate)
{
	uintptr_t va;
	pte_t t;

	va = istate->cr_ifa; /* faulting address */

	assert(!is_kernel_fault(va));

	bool found = page_mapping_find(AS, va, true, &t);

	assert(found);
	assert(t.p);

	if (found && t.p && t.x) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into the ITC.
		 */
		t.a = true;
		itc_pte_copy(&t);
		page_mapping_update(AS, va, true, &t);
	} else {
		as_page_fault(va, PF_ACCESS_EXEC, istate);
	}
}

/** Data access bit fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_access_bit_fault(unsigned int n, istate_t *istate)
{
	uintptr_t va;
	pte_t t;
	as_t *as = AS;

	va = istate->cr_ifa; /* faulting address */

	if (is_kernel_fault(va))
		as = AS_KERNEL;

	bool found = page_mapping_find(as, va, true, &t);

	assert(found);
	assert(t.p);

	if (found && t.p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into the DTC.
		 */
		t.a = true;
		dtc_pte_copy(&t);
		page_mapping_update(as, va, true, &t);
	} else {
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p.",
			    (void *) va);
			panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, NULL);
		}
	}
}

/** Data access rights fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_access_rights_fault(unsigned int n, istate_t *istate)
{
	uintptr_t va;
	pte_t t;

	va = istate->cr_ifa; /* faulting address */

	assert(!is_kernel_fault(va));

	/*
	 * Assume a write to a read-only page.
	 */
	bool found = page_mapping_find(AS, va, true, &t);

	assert(found);
	assert(t.p);
	assert(!t.w);

	as_page_fault(va, PF_ACCESS_WRITE, istate);
}

/** Page not present fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void page_not_present(unsigned int n, istate_t *istate)
{
	uintptr_t va;
	pte_t t;

	va = istate->cr_ifa; /* faulting address */

	assert(!is_kernel_fault(va));

	bool found = page_mapping_find(AS, va, true, &t);

	assert(found);

	if (t.p) {
		/*
		 * If the Present bit is set in the page hash table, just copy
		 * the mapping and update the ITC/DTC.
		 */
		if (t.x)
			itc_pte_copy(&t);
		else
			dtc_pte_copy(&t);
	} else {
		as_page_fault(va, PF_ACCESS_READ, istate);
	}
}

void tlb_arch_init(void)
{
}

void tlb_print(void)
{
}

/** @}
 */