source: mainline/kernel/arch/ia64/src/mm/tlb.c@346b12a2

Last change on this file: 346b12a2, checked in by Jakub Jermar <jakub@…>, 9 years ago

Add page_mapping_update()

page_mapping_update() can be used to safely update the accessed and dirty
bits of a PTE in the actual page tables.

/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ia64mm
 * @{
 */
/** @file
 */

/*
 * TLB management.
 */

#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/mm/vhpt.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <arch/pal/pal.h>
#include <arch/asm.h>
#include <panic.h>
#include <print.h>
#include <arch.h>
#include <interrupt.h>
#include <arch/legacyio.h>

/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
    ipl_t ipl;
    uintptr_t adr;
    uint32_t count1, count2, stride1, stride2;

    unsigned int i, j;

    adr = PAL_PTCE_INFO_BASE();
    count1 = PAL_PTCE_INFO_COUNT1();
    count2 = PAL_PTCE_INFO_COUNT2();
    stride1 = PAL_PTCE_INFO_STRIDE1();
    stride2 = PAL_PTCE_INFO_STRIDE2();

    ipl = interrupts_disable();

    for (i = 0; i < count1; i++) {
        for (j = 0; j < count2; j++) {
            asm volatile (
                "ptc.e %[adr] ;;"
                :: [adr] "r" (adr)
            );
            adr += stride2;
        }
        adr += stride1;
    }

    interrupts_restore(ipl);

    srlz_d();
    srlz_i();

#ifdef CONFIG_VHPT
    vhpt_invalidate_all();
#endif
}
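
/*
 * Illustrative sketch (not part of the original file): the ptc.e walk
 * above restated as plain C, with hypothetical PAL values, to show the
 * generated address sequence. Note that adr is deliberately never reset
 * between outer iterations; the strides accumulate, following the
 * PAL_PTCE_INFO convention.
 */
#if 0
static void ptce_walk_example(void)
{
    /* hypothetical PAL_PTCE_INFO values */
    uintptr_t adr = 0x8000;
    uint32_t count1 = 2, count2 = 4;
    uint32_t stride1 = 0x2000, stride2 = 0x1000;
    unsigned int i, j;

    for (i = 0; i < count1; i++) {
        for (j = 0; j < count2; j++) {
            /*
             * ptc.e would be issued here, visiting:
             * 0x8000, 0x9000, 0xa000, 0xb000,
             * then 0xe000, 0xf000, 0x10000, 0x11000
             */
            adr += stride2;
        }
        adr += stride1;
    }
}
#endif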

/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 *
 */
void tlb_invalidate_asid(asid_t asid)
{
    tlb_invalidate_all();
}

/** Invalidate TLB entries for a range of pages.
 *
 * @param asid Address space identifier.
 * @param page Address of the first page whose entries are invalidated.
 * @param cnt  Number of pages.
 *
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
    region_register_t rr;
    bool restore_rr = false;
    int b = 0;
    int c = cnt;

    uintptr_t va;
    va = page;

    rr.word = rr_read(VA2VRN(page));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(page))))) {
        /*
         * The selected region register does not contain the required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register_t rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(page));
        rr_write(VA2VRN(page), rr0.word);
        srlz_d();
        srlz_i();
    }

    while (c >>= 1)
        b++;
    b >>= 1;
    uint64_t ps;

    switch (b) {
    case 0: /* cnt 1 - 3 */
        ps = PAGE_WIDTH;
        break;
    case 1: /* cnt 4 - 15 */
        ps = PAGE_WIDTH + 2;
        va &= ~((1UL << ps) - 1);
        break;
    case 2: /* cnt 16 - 63 */
        ps = PAGE_WIDTH + 4;
        va &= ~((1UL << ps) - 1);
        break;
    case 3: /* cnt 64 - 255 */
        ps = PAGE_WIDTH + 6;
        va &= ~((1UL << ps) - 1);
        break;
    case 4: /* cnt 256 - 1023 */
        ps = PAGE_WIDTH + 8;
        va &= ~((1UL << ps) - 1);
        break;
    case 5: /* cnt 1024 - 4095 */
        ps = PAGE_WIDTH + 10;
        va &= ~((1UL << ps) - 1);
        break;
    case 6: /* cnt 4096 - 16383 */
        ps = PAGE_WIDTH + 12;
        va &= ~((1UL << ps) - 1);
        break;
    case 7: /* cnt 16384 - 65535 */
    case 8: /* cnt 65536 - (256K - 1) */
        ps = PAGE_WIDTH + 14;
        va &= ~((1UL << ps) - 1);
        break;
    default:
        ps = PAGE_WIDTH + 18;
        va &= ~((1UL << ps) - 1);
        break;
    }

    for (; va < (page + cnt * PAGE_SIZE); va += (1UL << ps))
        asm volatile (
            "ptc.l %[va], %[ps] ;;"
            :: [va] "r" (va),
               [ps] "r" (ps << 2)
        );

    srlz_d();
    srlz_i();

    if (restore_rr) {
        rr_write(VA2VRN(page), rr.word);
        srlz_d();
        srlz_i();
    }
}
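
/*
 * Worked example (explanatory, not in the original source): for
 * cnt = 300, the while loop computes b = floor(log2(300)) = 8 and
 * b >>= 1 halves it to 4, selecting the "cnt 256 - 1023" case, i.e.
 * ps = PAGE_WIDTH + 8. va is then aligned down to a 2^ps boundary and
 * the ptc.l loop purges the range in 2^ps-byte steps, trading purge
 * precision for fewer purge instructions. The hypothetical helper
 * below restates the rounding logic in one expression.
 */
#if 0
static uint64_t purge_width_example(size_t cnt, unsigned int page_width)
{
    int b = 0;
    int c = cnt;

    while (c >>= 1)
        b++;      /* b = floor(log2(cnt)) */
    b >>= 1;      /* one switch case covers two powers of two */

    /* cases 0..6 map to page_width + 2 * b; larger counts are capped */
    return (b > 8) ? page_width + 18 :
        (b >= 7) ? page_width + 14 : page_width + 2 * b;
}
#endif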

/** Insert data into data translation cache.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *              format.
 *
 */
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, true);
}

/** Insert data into instruction translation cache.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *              format.
 */
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, false);
}

/** Insert data into instruction or data translation cache.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *              format.
 * @param dtc   If true, insert into data translation cache, use
 *              instruction translation cache otherwise.
 *
 */
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
{
    region_register_t rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain the required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register_t rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

    asm volatile (
        "mov r8 = psr ;;\n"
        "rsm %[mask] ;;\n"                 /* PSR_IC_MASK */
        "srlz.d ;;\n"
        "srlz.i ;;\n"
        "mov cr.ifa = %[va]\n"             /* va */
        "mov cr.itir = %[word1] ;;\n"      /* entry.word[1] */
        "cmp.eq p6, p7 = %[dtc], r0 ;;\n"  /* decide between itc and dtc */
        "(p6) itc.i %[word0] ;;\n"
        "(p7) itc.d %[word0] ;;\n"
        "mov psr.l = r8 ;;\n"
        "srlz.d ;;\n"
        :: [mask] "i" (PSR_IC_MASK),
           [va] "r" (va),
           [word0] "r" (entry.word[0]),
           [word1] "r" (entry.word[1]),
           [dtc] "r" (dtc)
        : "p6", "p7", "r8"
    );

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
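
/*
 * Note on the asm sequence above (explanatory, not in the original
 * file): the interruption control registers cr.ifa and cr.itir, and the
 * itc.i/itc.d instructions themselves, may only be used with PSR.ic
 * cleared. The block therefore saves psr to r8, clears PSR.ic with rsm,
 * serializes, programs cr.ifa (page address) and cr.itir (page size and
 * key from entry.word[1]), inserts entry.word[0] into the chosen
 * translation cache under the p6/p7 predicates, and finally restores
 * psr.l and serializes again. A hypothetical caller, modeled on the
 * entry setup used elsewhere in this file, looks like this:
 */
#if 0
static void tc_insert_example(uintptr_t page, as_t *as, uintptr_t frame)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    entry.p = true;                  /* present */
    entry.ma = MA_WRITEBACK;
    entry.a = true;                  /* already accessed */
    entry.d = true;                  /* already dirty */
    entry.pl = PL_USER;
    entry.ar = AR_READ | AR_WRITE;
    entry.ppn = frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    dtc_mapping_insert(page, as->asid, entry);
}
#endif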

/** Insert data into instruction translation register.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *              format.
 * @param tr    Translation register.
 *
 */
void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
{
    tr_mapping_insert(va, asid, entry, false, tr);
}

/** Insert data into data translation register.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *              format.
 * @param tr    Translation register.
 *
 */
void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
{
    tr_mapping_insert(va, asid, entry, true, tr);
}

/** Insert data into instruction or data translation register.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *              format.
 * @param dtr   If true, insert into data translation register, use
 *              instruction translation register otherwise.
 * @param tr    Translation register.
 *
 */
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr,
    size_t tr)
{
    region_register_t rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain the required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register_t rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

    asm volatile (
        "mov r8 = psr ;;\n"
        "rsm %[mask] ;;\n"                 /* PSR_IC_MASK */
        "srlz.d ;;\n"
        "srlz.i ;;\n"
        "mov cr.ifa = %[va]\n"             /* va */
        "mov cr.itir = %[word1] ;;\n"      /* entry.word[1] */
        "cmp.eq p6, p7 = %[dtr], r0 ;;\n"  /* decide between itr and dtr */
        "(p6) itr.i itr[%[tr]] = %[word0] ;;\n"
        "(p7) itr.d dtr[%[tr]] = %[word0] ;;\n"
        "mov psr.l = r8 ;;\n"
        "srlz.d ;;\n"
        :: [mask] "i" (PSR_IC_MASK),
           [va] "r" (va),
           [word1] "r" (entry.word[1]),
           [word0] "r" (entry.word[0]),
           [tr] "r" (tr),
           [dtr] "r" (dtr)
        : "p6", "p7", "r8"
    );

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}

/** Insert data into DTLB.
 *
 * @param page  Virtual page address including VRN bits.
 * @param frame Physical frame address.
 * @param dtr   If true, insert into data translation register, use data
 *              translation cache otherwise.
 * @param tr    Translation register if dtr is true, ignored otherwise.
 *
 */
void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr,
    size_t tr)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    entry.p = true;          /* present */
    entry.ma = MA_WRITEBACK;
    entry.a = true;          /* already accessed */
    entry.d = true;          /* already dirty */
    entry.pl = PL_KERNEL;
    entry.ar = AR_READ | AR_WRITE;
    entry.ppn = frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    if (dtr)
        dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
    else
        dtc_mapping_insert(page, ASID_KERNEL, entry);
}
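
/*
 * Usage sketch (hypothetical addresses and register index, for
 * illustration only): the dtr/tr arguments select between a pinned and
 * an evictable mapping.
 *
 *     // pinned: occupies dtr[1] and can never be evicted from the TLB
 *     dtlb_kernel_mapping_insert(page, frame, true, 1);
 *
 *     // cached: may be silently evicted and re-inserted on a later fault
 *     dtlb_kernel_mapping_insert(page, frame, false, 0);
 *
 * Translation registers are a scarce per-CPU resource, so pinning is
 * reserved for mappings that must survive any TC replacement, while
 * ordinary kernel mappings go through the translation cache.
 */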

/** Purge kernel entries from DTR.
 *
 * Purge DTR entries used by the kernel.
 *
 * @param page  Virtual page address including VRN bits.
 * @param width Width of the purge in bits.
 *
 */
void dtr_purge(uintptr_t page, size_t width)
{
    asm volatile (
        "ptr.d %[page], %[width]\n"
        :: [page] "r" (page),
           [width] "r" (width << 2)
    );
}

/** Copy content of PTE into data translation cache.
 *
 * @param t PTE.
 *
 */
void dtc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.d = t->d;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->w ? AR_WRITE : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    dtc_mapping_insert(t->page, t->as->asid, entry);

#ifdef CONFIG_VHPT
    vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

/** Copy content of PTE into instruction translation cache.
 *
 * @param t PTE.
 *
 */
void itc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    ASSERT(t->x);

    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    itc_mapping_insert(t->page, t->as->asid, entry);

#ifdef CONFIG_VHPT
    vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

static bool is_kernel_fault(uintptr_t va)
{
    region_register_t rr;

    rr.word = rr_read(VA2VRN(va));
    rid_t rid = rr.map.rid;
    return (RID2ASID(rid) == ASID_KERNEL) && (VA2VRN(va) == VRN_KERNEL);
}
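
/*
 * Background note (explanatory, not in the original file): on ia64 the
 * three most significant bits of a virtual address select one of the
 * eight region registers, so VA2VRN(va) is essentially va >> 61. A
 * fault counts as a kernel fault only when the region register selected
 * by the faulting address currently holds the kernel RID and the
 * address itself lies in the kernel region (VRN_KERNEL); checking both
 * guards against misattributing a fault while a region register is
 * temporarily repopulated with a foreign RID, as done by the insertion
 * routines above.
 */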

/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void alternate_instruction_tlb_fault(unsigned int n, istate_t *istate)
{
    uintptr_t va;
    pte_t t;

    va = istate->cr_ifa; /* faulting address */

    ASSERT(!is_kernel_fault(va));

    bool found = page_mapping_find(AS, va, true, &t);
    if (found) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into the instruction translation cache.
         */
        itc_pte_copy(&t);
    } else {
        /*
         * Forward the page fault to the address space page fault handler.
         */
        as_page_fault(va, PF_ACCESS_EXEC, istate);
    }
}

static int is_io_page_accessible(int page)
{
    if (TASK->arch.iomap)
        return bitmap_get(TASK->arch.iomap, page);
    else
        return 0;
}

/**
 * Memory-mapped legacy I/O is handled specially because userspace
 * accesses it in 4 KiB-sized pages.
 *
 * @param va     Virtual address of the page fault.
 * @param istate Structure with saved interruption state.
 *
 * @return One on success, zero on failure.
 *
 */
static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
{
    if ((va >= LEGACYIO_USER_BASE) &&
        (va < LEGACYIO_USER_BASE + (1 << LEGACYIO_PAGE_WIDTH))) {
        if (TASK) {
            uint64_t io_page = (va & ((1 << LEGACYIO_PAGE_WIDTH) - 1)) >>
                LEGACYIO_SINGLE_PAGE_WIDTH;

            if (is_io_page_accessible(io_page)) {
                uint64_t page, frame;

                page = LEGACYIO_USER_BASE +
                    (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page;
                frame = LEGACYIO_PHYS_BASE +
                    (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page;

                tlb_entry_t entry;

                entry.word[0] = 0;
                entry.word[1] = 0;

                entry.p = true;           /* present */
                entry.ma = MA_UNCACHEABLE;
                entry.a = true;           /* already accessed */
                entry.d = true;           /* already dirty */
                entry.pl = PL_USER;
                entry.ar = AR_READ | AR_WRITE;
                entry.ppn = frame >> PPN_SHIFT;
                entry.ps = LEGACYIO_SINGLE_PAGE_WIDTH;

                dtc_mapping_insert(page, TASK->as->asid, entry);
                return 1;
            } else {
                fault_if_from_uspace(istate,
                    "IO access fault at %p.", (void *) va);
            }
        }
    }

    return 0;
}
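
/*
 * Worked example (explanatory; the concrete width is an assumption, not
 * taken from this file): with LEGACYIO_SINGLE_PAGE_WIDTH = 12, as the
 * 4 KiB granularity mentioned above suggests, a faulting address
 * va = LEGACYIO_USER_BASE + 0x5123 yields io_page = 0x5123 >> 12 = 5.
 * If bit 5 of the task's iomap is set, a 4 KiB uncacheable user mapping
 * from LEGACYIO_USER_BASE + 5 * 4096 onto LEGACYIO_PHYS_BASE + 5 * 4096
 * is inserted into the DTC and the faulting access is retried;
 * otherwise the task is reported for an I/O access fault.
 */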

/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void alternate_data_tlb_fault(unsigned int n, istate_t *istate)
{
    if (istate->cr_isr.sp) {
        /*
         * Speculative load. Defer the exception until a more clever
         * approach can be used. Currently, if we tried to find the
         * mapping for the speculative load while in the kernel, we
         * might introduce a livelock because the faulting address may
         * be invalid.
         */
        istate->cr_ipsr.ed = true;
        return;
    }

    uintptr_t va = istate->cr_ifa; /* faulting address */
    as_t *as = AS;

    if (is_kernel_fault(va)) {
        if (va < end_of_identity) {
            /*
             * Create a kernel identity mapping for low memory.
             */
            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
            return;
        } else {
            as = AS_KERNEL;
        }
    }

    pte_t t;
    bool found = page_mapping_find(as, va, true, &t);
    if (found) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into the data translation cache.
         */
        dtc_pte_copy(&t);
    } else {
        if (try_memmap_io_insertion(va, istate))
            return;

        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        as_page_fault(va, PF_ACCESS_READ, istate);
    }
}

/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_nested_tlb_fault(unsigned int n, istate_t *istate)
{
    ASSERT(false);
}

/** Data Dirty bit fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_dirty_bit_fault(unsigned int n, istate_t *istate)
{
    uintptr_t va;
    pte_t t;
    as_t *as = AS;

    va = istate->cr_ifa; /* faulting address */

    if (is_kernel_fault(va))
        as = AS_KERNEL;

    bool found = page_mapping_find(as, va, true, &t);

    ASSERT(found);
    ASSERT(t.p);

    if (found && t.p && t.w) {
        /*
         * Update the Dirty bit in the page tables and reinsert
         * the mapping into the DTC.
         */
        t.d = true;
        dtc_pte_copy(&t);
        page_mapping_update(as, va, true, &t);
    } else {
        as_page_fault(va, PF_ACCESS_WRITE, istate);
    }
}

/** Instruction access bit fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void instruction_access_bit_fault(unsigned int n, istate_t *istate)
{
    uintptr_t va;
    pte_t t;

    va = istate->cr_ifa; /* faulting address */

    ASSERT(!is_kernel_fault(va));

    bool found = page_mapping_find(AS, va, true, &t);

    ASSERT(found);
    ASSERT(t.p);

    if (found && t.p && t.x) {
        /*
         * Update the Accessed bit in the page tables and reinsert
         * the mapping into the ITC.
         */
        t.a = true;
        itc_pte_copy(&t);
        page_mapping_update(AS, va, true, &t);
    } else {
        as_page_fault(va, PF_ACCESS_EXEC, istate);
    }
}

/** Data access bit fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_access_bit_fault(unsigned int n, istate_t *istate)
{
    uintptr_t va;
    pte_t t;
    as_t *as = AS;

    va = istate->cr_ifa; /* faulting address */

    if (is_kernel_fault(va))
        as = AS_KERNEL;

    bool found = page_mapping_find(as, va, true, &t);

    ASSERT(found);
    ASSERT(t.p);

    if (found && t.p) {
        /*
         * Update the Accessed bit in the page tables and reinsert
         * the mapping into the DTC.
         */
        t.a = true;
        dtc_pte_copy(&t);
        page_mapping_update(as, va, true, &t);
    } else {
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %p.",
                (void *) va);
            panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, NULL);
        }
    }
}

/** Data access rights fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_access_rights_fault(unsigned int n, istate_t *istate)
{
    uintptr_t va;
    pte_t t;

    va = istate->cr_ifa; /* faulting address */

    ASSERT(!is_kernel_fault(va));

    /*
     * Assume a write to a read-only page.
     */
    bool found = page_mapping_find(AS, va, true, &t);

    ASSERT(found);
    ASSERT(t.p);
    ASSERT(!t.w);

    as_page_fault(va, PF_ACCESS_WRITE, istate);
}

/** Page not present fault handler.
 *
 * @param n      Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void page_not_present(unsigned int n, istate_t *istate)
{
    uintptr_t va;
    pte_t t;

    va = istate->cr_ifa; /* faulting address */

    ASSERT(!is_kernel_fault(va));

    bool found = page_mapping_find(AS, va, true, &t);

    ASSERT(found);

    if (t.p) {
        /*
         * If the Present bit is set in the page hash table, just copy
         * the mapping and update the ITC/DTC.
         */
        if (t.x)
            itc_pte_copy(&t);
        else
            dtc_pte_copy(&t);
    } else {
        as_page_fault(va, PF_ACCESS_READ, istate);
    }
}

void tlb_arch_init(void)
{
}

void tlb_print(void)
{
}

/** @}
 */