source: mainline/kernel/arch/ia64/src/mm/tlb.c@efb48eb

Last change on this file since efb48eb was 0fd9b35, checked in by Jakub Jermar <jakub@…>, 14 years ago

Limit kernel identity on ia64 to low memory and make sure to use
the kernel address space for kernel non-identity page table
lookups.

/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ia64mm
 * @{
 */
/** @file
 */

/*
 * TLB management.
 */

#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/mm/vhpt.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <arch/pal/pal.h>
#include <arch/asm.h>
#include <panic.h>
#include <print.h>
#include <arch.h>
#include <interrupt.h>

#define IO_FRAME_BASE 0xFFFFC000000

/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
	ipl_t ipl;
	uintptr_t adr;
	uint32_t count1, count2, stride1, stride2;

	unsigned int i, j;

	adr = PAL_PTCE_INFO_BASE();
	count1 = PAL_PTCE_INFO_COUNT1();
	count2 = PAL_PTCE_INFO_COUNT2();
	stride1 = PAL_PTCE_INFO_STRIDE1();
	stride2 = PAL_PTCE_INFO_STRIDE2();

	ipl = interrupts_disable();

	for (i = 0; i < count1; i++) {
		for (j = 0; j < count2; j++) {
			asm volatile (
			    "ptc.e %[adr] ;;"
			    :: [adr] "r" (adr)
			);
			adr += stride2;
		}
		adr += stride1;
	}

	interrupts_restore(ipl);

	srlz_d();
	srlz_i();

#ifdef CONFIG_VHPT
	vhpt_invalidate_all();
#endif
}
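
/*
 * Note on the loop above: PAL_PTCE_INFO returns the processor-specific
 * recipe for flushing the entire local translation cache with ptc.e --
 * a base address plus two counts and two strides. Executing ptc.e at
 * each of the count1 x count2 addresses purges the whole TC. With
 * illustrative values count1 = 1, count2 = 8 and stride2 = 32, the loop
 * would issue eight ptc.e instructions at addresses 32 bytes apart.
 */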

/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 *
 */
void tlb_invalidate_asid(asid_t asid)
{
	tlb_invalidate_all();
}
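
/*
 * This port does not track which translation cache entries belong to a
 * particular address space, so tlb_invalidate_asid() conservatively
 * flushes the entire TLB. A finer-grained variant could purge only the
 * regions tagged with the ASID's RIDs, at the cost of extra bookkeeping.
 */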
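
/** Invalidate entries belonging to an address space range.
 *
 * @param asid Address space identifier.
 * @param page Address of the first page whose entry is to be invalidated.
 * @param cnt  Number of pages.
 *
 */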
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
	region_register_t rr;
	bool restore_rr = false;
	int b = 0;
	int c = cnt;

	uintptr_t va;
	va = page;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain the required
		 * RID. Save the old content of the register and replace the
		 * RID.
		 */
		region_register_t rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	while (c >>= 1)
		b++;
	b >>= 1;
	uint64_t ps;

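	/*
	 * At this point, b == floor(log2(cnt)) / 2 and the switch below maps
	 * it to the purge page size ps. Worked example: for cnt == 100, c is
	 * shifted six times before reaching zero, so b == 6 >> 1 == 3 and ps
	 * becomes PAGE_WIDTH + 6 (the "cnt 64 - 255" case); the purge loop
	 * then steps through the range in 2^(PAGE_WIDTH + 6)-byte strides.
	 */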
	switch (b) {
	case 0: /* cnt 1 - 3 */
		ps = PAGE_WIDTH;
		break;
	case 1: /* cnt 4 - 15 */
		ps = PAGE_WIDTH + 2;
		va &= ~((1ULL << ps) - 1);
		break;
	case 2: /* cnt 16 - 63 */
		ps = PAGE_WIDTH + 4;
		va &= ~((1ULL << ps) - 1);
		break;
	case 3: /* cnt 64 - 255 */
		ps = PAGE_WIDTH + 6;
		va &= ~((1ULL << ps) - 1);
		break;
	case 4: /* cnt 256 - 1023 */
		ps = PAGE_WIDTH + 8;
		va &= ~((1ULL << ps) - 1);
		break;
	case 5: /* cnt 1024 - 4095 */
		ps = PAGE_WIDTH + 10;
		va &= ~((1ULL << ps) - 1);
		break;
	case 6: /* cnt 4096 - 16383 */
		ps = PAGE_WIDTH + 12;
		va &= ~((1ULL << ps) - 1);
		break;
	case 7: /* cnt 16384 - 65535 */
	case 8: /* cnt 65536 - (256K - 1) */
		ps = PAGE_WIDTH + 14;
		va &= ~((1ULL << ps) - 1);
		break;
	default:
		ps = PAGE_WIDTH + 18;
		va &= ~((1ULL << ps) - 1);
		break;
	}

	for (; va < (page + cnt * PAGE_SIZE); va += (1ULL << ps))
		asm volatile (
		    "ptc.l %[va], %[ps] ;;"
		    :: [va] "r" (va),
		       [ps] "r" (ps << 2)
		);

	srlz_d();
	srlz_i();

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}

/** Insert data into data translation cache.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *              format.
 *
 */
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, true);
}

/** Insert data into instruction translation cache.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *              format.
 *
 */
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, false);
}

/** Insert data into instruction or data translation cache.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *              format.
 * @param dtc   If true, insert into the data translation cache;
 *              otherwise, use the instruction translation cache.
 *
 */
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register_t rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain the required
		 * RID. Save the old content of the register and replace the
		 * RID.
		 */
		region_register_t rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	asm volatile (
	    "mov r8 = psr ;;\n"
	    "rsm %[mask] ;;\n"                 /* PSR_IC_MASK */
	    "srlz.d ;;\n"
	    "srlz.i ;;\n"
	    "mov cr.ifa = %[va]\n"             /* va */
	    "mov cr.itir = %[word1] ;;\n"      /* entry.word[1] */
	    "cmp.eq p6, p7 = %[dtc], r0 ;;\n"  /* decide between itc and dtc */
	    "(p6) itc.i %[word0] ;;\n"
	    "(p7) itc.d %[word0] ;;\n"
	    "mov psr.l = r8 ;;\n"
	    "srlz.d ;;\n"
	    :: [mask] "i" (PSR_IC_MASK),
	       [va] "r" (va),
	       [word0] "r" (entry.word[0]),
	       [word1] "r" (entry.word[1]),
	       [dtc] "r" (dtc)
	    : "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
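
/*
 * Note on the asm block above: the translation cache insertions itc.i
 * and itc.d must execute with PSR.ic (interruption collection) cleared
 * -- hence the rsm -- since they take their inputs from cr.ifa and
 * cr.itir, which an interruption taken in between could otherwise
 * overwrite. The original PSR is saved in r8 and restored once the
 * insertion is done.
 */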

/** Insert data into instruction translation register.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *              format.
 * @param tr    Translation register.
 *
 */
void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
{
	tr_mapping_insert(va, asid, entry, false, tr);
}

/** Insert data into data translation register.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *              format.
 * @param tr    Translation register.
 *
 */
void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
{
	tr_mapping_insert(va, asid, entry, true, tr);
}

/** Insert data into instruction or data translation register.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *              format.
 * @param dtr   If true, insert into the data translation register;
 *              otherwise, use the instruction translation register.
 * @param tr    Translation register.
 *
 */
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr,
    size_t tr)
{
	region_register_t rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain the required
		 * RID. Save the old content of the register and replace the
		 * RID.
		 */
		region_register_t rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	asm volatile (
	    "mov r8 = psr ;;\n"
	    "rsm %[mask] ;;\n"                 /* PSR_IC_MASK */
	    "srlz.d ;;\n"
	    "srlz.i ;;\n"
	    "mov cr.ifa = %[va]\n"             /* va */
	    "mov cr.itir = %[word1] ;;\n"      /* entry.word[1] */
	    "cmp.eq p6, p7 = %[dtr], r0 ;;\n"  /* decide between itr and dtr */
	    "(p6) itr.i itr[%[tr]] = %[word0] ;;\n"
	    "(p7) itr.d dtr[%[tr]] = %[word0] ;;\n"
	    "mov psr.l = r8 ;;\n"
	    "srlz.d ;;\n"
	    :: [mask] "i" (PSR_IC_MASK),
	       [va] "r" (va),
	       [word1] "r" (entry.word[1]),
	       [word0] "r" (entry.word[0]),
	       [tr] "r" (tr),
	       [dtr] "r" (dtr)
	    : "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
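
/*
 * Unlike translation cache entries, which the processor may evict at any
 * time, the translation register entries inserted above are pinned: they
 * remain in place until explicitly purged (cf. dtr_purge() below). That
 * makes them suitable for mappings that must never fault, such as the
 * kernel mappings set up by dtlb_kernel_mapping_insert().
 */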

/** Insert data into DTLB.
 *
 * @param page  Virtual page address including VRN bits.
 * @param frame Physical frame address.
 * @param dtr   If true, insert into the data translation register;
 *              otherwise, use the data translation cache.
 * @param tr    Translation register if dtr is true, ignored otherwise.
 *
 */
void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr,
    size_t tr)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	entry.p = true;  /* present */
	entry.ma = MA_WRITEBACK;
	entry.a = true;  /* already accessed */
	entry.d = true;  /* already dirty */
	entry.pl = PL_KERNEL;
	entry.ar = AR_READ | AR_WRITE;
	entry.ppn = frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	if (dtr)
		dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
	else
		dtc_mapping_insert(page, ASID_KERNEL, entry);
}
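
/*
 * A minimal usage sketch (this is what alternate_data_tlb_fault() below
 * does for low-memory kernel faults): map a kernel virtual address onto
 * its identity frame through the data translation cache:
 *
 *	dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
 *
 * Passing dtr == true and a translation register number instead pins the
 * mapping so that it can never be evicted.
 */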

/** Purge kernel entries from DTR.
 *
 * Purge DTR entries used by the kernel.
 *
 * @param page  Virtual page address including VRN bits.
 * @param width Width of the purge in bits.
 *
 */
void dtr_purge(uintptr_t page, size_t width)
{
	asm volatile (
	    "ptr.d %[page], %[width]\n"
	    :: [page] "r" (page),
	       [width] "r" (width << 2)
	);
}

/** Copy content of PTE into data translation cache.
 *
 * @param t PTE.
 *
 */
void dtc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.d = t->d;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->w ? AR_WRITE : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	dtc_mapping_insert(t->page, t->as->asid, entry);

#ifdef CONFIG_VHPT
	vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

/** Copy content of PTE into instruction translation cache.
 *
 * @param t PTE.
 *
 */
void itc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	ASSERT(t->x);

	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	itc_mapping_insert(t->page, t->as->asid, entry);

#ifdef CONFIG_VHPT
	vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

static bool is_kernel_fault(uintptr_t va)
{
	region_register_t rr;

	rr.word = rr_read(VA2VRN(va));
	rid_t rid = rr.map.rid;
	return (RID2ASID(rid) == ASID_KERNEL) && (VA2VRN(va) == VRN_KERNEL);
}

/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate)
{
	uintptr_t va;
	pte_t *t;

	va = istate->cr_ifa;  /* faulting address */

	ASSERT(!is_kernel_fault(va));

	t = page_mapping_find(AS, va, true);
	if (t) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into the instruction translation cache.
		 */
		itc_pte_copy(t);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p.",
			    (void *) va);
			panic_memtrap(istate, PF_ACCESS_EXEC, va, NULL);
		}
	}
}

static int is_io_page_accessible(int page)
{
	if (TASK->arch.iomap)
		return bitmap_get(TASK->arch.iomap, page);
	else
		return 0;
}

/**
 * Memory-mapped legacy I/O is handled specially here, because userspace
 * accesses it at 4 KiB granularity.
 *
 * @param va     Virtual address of the page fault.
 * @param istate Structure with saved interruption state.
 *
 * @return One on success, zero on failure.
 *
 */
static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
{
	if ((va >= IO_OFFSET) && (va < IO_OFFSET + (1 << IO_PAGE_WIDTH))) {
		if (TASK) {
			uint64_t io_page = (va & ((1 << IO_PAGE_WIDTH) - 1)) >>
			    USPACE_IO_PAGE_WIDTH;

			if (is_io_page_accessible(io_page)) {
				uint64_t page, frame;

				page = IO_OFFSET +
				    (1 << USPACE_IO_PAGE_WIDTH) * io_page;
				frame = IO_FRAME_BASE +
				    (1 << USPACE_IO_PAGE_WIDTH) * io_page;

				tlb_entry_t entry;

				entry.word[0] = 0;
				entry.word[1] = 0;

				entry.p = true;  /* present */
				entry.ma = MA_UNCACHEABLE;
				entry.a = true;  /* already accessed */
				entry.d = true;  /* already dirty */
				entry.pl = PL_USER;
				entry.ar = AR_READ | AR_WRITE;
				entry.ppn = frame >> PPN_SHIFT;
				entry.ps = USPACE_IO_PAGE_WIDTH;

				dtc_mapping_insert(page, TASK->as->asid, entry);
				return 1;
			} else {
				fault_if_from_uspace(istate,
				    "IO access fault at %p.", (void *) va);
			}
		}
	}

	return 0;
}
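
/*
 * Worked example for the arithmetic above, assuming USPACE_IO_PAGE_WIDTH
 * is 12 (the 4 KiB granularity mentioned in the comment): a fault at
 * va == IO_OFFSET + 0x5008 yields io_page == 0x5008 >> 12 == 5. If bit 5
 * of the task's I/O bitmap is set, the 4 KiB page at
 * IO_OFFSET + 5 * 4 KiB is mapped uncacheable onto
 * IO_FRAME_BASE + 5 * 4 KiB with user-level read/write rights.
 */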

/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
{
	if (istate->cr_isr.sp) {
		/*
		 * Speculative load. Defer the exception until a more clever
		 * approach can be used. Currently, if we tried to find the
		 * mapping for the speculative load while in the kernel, we
		 * might introduce a livelock because the faulting address
		 * may be invalid.
		 */
		istate->cr_ipsr.ed = true;
		return;
	}

	uintptr_t va = istate->cr_ifa;  /* faulting address */
	as_t *as = AS;

	if (is_kernel_fault(va)) {
		if (va < end_of_identity) {
			/*
			 * Create kernel identity mapping for low memory.
			 */
			dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
			return;
		} else {
			as = AS_KERNEL;
		}
	}

	pte_t *entry = page_mapping_find(as, va, true);
	if (entry) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into the data translation cache.
		 */
		dtc_pte_copy(entry);
	} else {
		if (try_memmap_io_insertion(va, istate))
			return;

		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p.",
			    (void *) va);
			panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, NULL);
		}
	}
}

/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
{
	ASSERT(false);
}

/** Data Dirty bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
{
	uintptr_t va;
	pte_t *t;
	as_t *as = AS;

	va = istate->cr_ifa;  /* faulting address */

	if (is_kernel_fault(va))
		as = AS_KERNEL;

	t = page_mapping_find(as, va, true);
	ASSERT((t) && (t->p));
	if ((t) && (t->p) && (t->w)) {
		/*
		 * Update the Dirty bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->d = true;
		dtc_pte_copy(t);
	} else {
		if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p.",
			    (void *) va);
			panic_memtrap(istate, PF_ACCESS_WRITE, va, NULL);
		}
	}
}

/** Instruction access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
{
	uintptr_t va;
	pte_t *t;

	va = istate->cr_ifa;  /* faulting address */

	ASSERT(!is_kernel_fault(va));

	t = page_mapping_find(AS, va, true);
	ASSERT((t) && (t->p));
	if ((t) && (t->p) && (t->x)) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into ITC.
		 */
		t->a = true;
		itc_pte_copy(t);
	} else {
		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p.",
			    (void *) va);
			panic_memtrap(istate, PF_ACCESS_EXEC, va, NULL);
		}
	}
}

/** Data access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_access_bit_fault(uint64_t vector, istate_t *istate)
{
	uintptr_t va;
	pte_t *t;
	as_t *as = AS;

	va = istate->cr_ifa;  /* faulting address */

	if (is_kernel_fault(va))
		as = AS_KERNEL;

	t = page_mapping_find(as, va, true);
	ASSERT((t) && (t->p));
	if ((t) && (t->p)) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->a = true;
		dtc_pte_copy(t);
	} else {
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p.",
			    (void *) va);
			panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, NULL);
		}
	}
}

/** Data access rights fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void data_access_rights_fault(uint64_t vector, istate_t *istate)
{
	uintptr_t va;
	pte_t *t;

	va = istate->cr_ifa;  /* faulting address */

	ASSERT(!is_kernel_fault(va));

	/*
	 * Assume a write to a read-only page.
	 */
	t = page_mapping_find(AS, va, true);
	ASSERT((t) && (t->p));
	ASSERT(!t->w);
	if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
		fault_if_from_uspace(istate, "Page fault at %p.",
		    (void *) va);
		panic_memtrap(istate, PF_ACCESS_WRITE, va, NULL);
	}
}

/** Page not present fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 *
 */
void page_not_present(uint64_t vector, istate_t *istate)
{
	uintptr_t va;
	pte_t *t;

	va = istate->cr_ifa;  /* faulting address */

	ASSERT(!is_kernel_fault(va));

	t = page_mapping_find(AS, va, true);
	ASSERT(t);

	if (t->p) {
		/*
		 * If the Present bit is set in the page hash table, just copy
		 * it and update the ITC/DTC.
		 */
		if (t->x)
			itc_pte_copy(t);
		else
			dtc_pte_copy(t);
	} else {
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p.",
			    (void *) va);
			panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, NULL);
		}
	}
}

void tlb_arch_init(void)
{
}

void tlb_print(void)
{
}

/** @}
 */