source: mainline/kernel/arch/ia64/src/mm/tlb.c@ 98000fb

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 98000fb was 98000fb, checked in by Martin Decky <martin@…>, 16 years ago

remove redundant index_t and count_t types (which were always quite ambiguous and not actually needed)

  • Property mode set to 100644
File size: 17.8 KB
Line 
1/*
2 * Copyright (c) 2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup ia64mm
30 * @{
31 */
32/** @file
33 */
34
35/*
36 * TLB management.
37 */
38
39#include <mm/tlb.h>
40#include <mm/asid.h>
41#include <mm/page.h>
42#include <mm/as.h>
43#include <arch/mm/tlb.h>
44#include <arch/mm/page.h>
45#include <arch/mm/vhpt.h>
46#include <arch/barrier.h>
47#include <arch/interrupt.h>
48#include <arch/pal/pal.h>
49#include <arch/asm.h>
50#include <panic.h>
51#include <print.h>
52#include <arch.h>
53#include <interrupt.h>
54
55/** Invalidate all TLB entries. */
/** Invalidate all TLB entries.
 *
 * Walks the machine-specific purge loop described by PAL_PTCE_INFO
 * (base address, two loop counts and two strides — queried from PAL
 * firmware below) and issues ptc.e at each step, which purges both
 * instruction and data translation caches on the local processor.
 * Runs with interrupts disabled so the loop is not interleaved with
 * other TLB activity on this CPU.
 */
void tlb_invalidate_all(void)
{
	ipl_t ipl;
	uintptr_t adr;
	uint32_t count1, count2, stride1, stride2;

	unsigned int i, j;

	/* Parameters of the ptc.e purge loop, as reported by PAL firmware. */
	adr = PAL_PTCE_INFO_BASE();
	count1 = PAL_PTCE_INFO_COUNT1();
	count2 = PAL_PTCE_INFO_COUNT2();
	stride1 = PAL_PTCE_INFO_STRIDE1();
	stride2 = PAL_PTCE_INFO_STRIDE2();

	ipl = interrupts_disable();

	/*
	 * Outer loop advances by stride1, inner loop by stride2 — the
	 * nested-loop/dual-stride shape mandated by the PTCE_INFO contract.
	 */
	for (i = 0; i < count1; i++) {
		for (j = 0; j < count2; j++) {
			asm volatile (
				"ptc.e %0 ;;"
				:
				: "r" (adr)
			);
			adr += stride2;
		}
		adr += stride1;
	}

	interrupts_restore(ipl);

	/* Serialize so the purge is visible to subsequent data and instruction accesses. */
	srlz_d();
	srlz_i();
#ifdef CONFIG_VHPT
	vhpt_invalidate_all();
#endif
}
92
93/** Invalidate entries belonging to an address space.
94 *
95 * @param asid Address space identifier.
96 */
/** Invalidate entries belonging to an address space.
 *
 * Note: the implementation is deliberately coarse — it ignores @a asid
 * and purges the whole TLB via tlb_invalidate_all().
 *
 * @param asid Address space identifier (currently unused).
 */
void tlb_invalidate_asid(asid_t asid)
{
	tlb_invalidate_all();
}
101
102
103void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
104{
105 region_register rr;
106 bool restore_rr = false;
107 int b = 0;
108 int c = cnt;
109
110 uintptr_t va;
111 va = page;
112
113 rr.word = rr_read(VA2VRN(va));
114 if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
115 /*
116 * The selected region register does not contain required RID.
117 * Save the old content of the register and replace the RID.
118 */
119 region_register rr0;
120
121 rr0 = rr;
122 rr0.map.rid = ASID2RID(asid, VA2VRN(va));
123 rr_write(VA2VRN(va), rr0.word);
124 srlz_d();
125 srlz_i();
126 }
127
128 while(c >>= 1)
129 b++;
130 b >>= 1;
131 uint64_t ps;
132
133 switch (b) {
134 case 0: /* cnt 1 - 3 */
135 ps = PAGE_WIDTH;
136 break;
137 case 1: /* cnt 4 - 15 */
138 ps = PAGE_WIDTH + 2;
139 va &= ~((1 << ps) - 1);
140 break;
141 case 2: /* cnt 16 - 63 */
142 ps = PAGE_WIDTH + 4;
143 va &= ~((1 << ps) - 1);
144 break;
145 case 3: /* cnt 64 - 255 */
146 ps = PAGE_WIDTH + 6;
147 va &= ~((1 << ps) - 1);
148 break;
149 case 4: /* cnt 256 - 1023 */
150 ps = PAGE_WIDTH + 8;
151 va &= ~((1 << ps) - 1);
152 break;
153 case 5: /* cnt 1024 - 4095 */
154 ps = PAGE_WIDTH + 10;
155 va &= ~((1 << ps) - 1);
156 break;
157 case 6: /* cnt 4096 - 16383 */
158 ps = PAGE_WIDTH + 12;
159 va &= ~((1 << ps) - 1);
160 break;
161 case 7: /* cnt 16384 - 65535 */
162 case 8: /* cnt 65536 - (256K - 1) */
163 ps = PAGE_WIDTH + 14;
164 va &= ~((1 << ps) - 1);
165 break;
166 default:
167 ps = PAGE_WIDTH + 18;
168 va &= ~((1 << ps) - 1);
169 break;
170 }
171 for(; va < (page + cnt * PAGE_SIZE); va += (1 << ps))
172 asm volatile ("ptc.l %0, %1;;" :: "r" (va), "r" (ps << 2));
173 srlz_d();
174 srlz_i();
175
176 if (restore_rr) {
177 rr_write(VA2VRN(va), rr.word);
178 srlz_d();
179 srlz_i();
180 }
181}
182
183/** Insert data into data translation cache.
184 *
185 * @param va Virtual page address.
186 * @param asid Address space identifier.
187 * @param entry The rest of TLB entry as required by TLB insertion
188 * format.
189 */
/** Insert data into data translation cache.
 *
 * Thin wrapper: forwards to tc_mapping_insert() with dtc == true.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *     format.
 */
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, true);
}
194
195/** Insert data into instruction translation cache.
196 *
197 * @param va Virtual page address.
198 * @param asid Address space identifier.
199 * @param entry The rest of TLB entry as required by TLB insertion
200 * format.
201 */
/** Insert data into instruction translation cache.
 *
 * Thin wrapper: forwards to tc_mapping_insert() with dtc == false.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *     format.
 */
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, false);
}
206
207/** Insert data into instruction or data translation cache.
208 *
209 * @param va Virtual page address.
210 * @param asid Address space identifier.
211 * @param entry The rest of TLB entry as required by TLB insertion
212 * format.
213 * @param dtc If true, insert into data translation cache, use
214 * instruction translation cache otherwise.
215 */
/** Insert data into instruction or data translation cache.
 *
 * If the region register for va's region does not already hold the RID
 * derived from asid, it is temporarily rewritten for the duration of
 * the insertion and restored afterwards.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *     format.
 * @param dtc If true, insert into data translation cache, use
 *     instruction translation cache otherwise.
 */
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	/*
	 * The insertion sequence: save PSR, disable interruption
	 * collection (PSR.ic), load cr.ifa/cr.itir with the target
	 * address and entry attributes, then issue itc.i or itc.d
	 * depending on the dtc flag (tested via cmp against r0), and
	 * finally restore PSR.
	 */
	asm volatile (
		"mov r8 = psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa = %1\n"		/* va */
		"mov cr.itir = %2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7 = %4,r0;;\n"	/* decide between itc and dtc */
		"(p6) itc.i %3;;\n"
		"(p7) itc.d %3;;\n"
		"mov psr.l = r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]),
		  "r" (entry.word[0]), "r" (dtc)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
260
261/** Insert data into instruction translation register.
262 *
263 * @param va Virtual page address.
264 * @param asid Address space identifier.
265 * @param entry The rest of TLB entry as required by TLB insertion
266 * format.
267 * @param tr Translation register.
268 */
/** Insert data into instruction translation register.
 *
 * Thin wrapper: forwards to tr_mapping_insert() with dtr == false.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *     format.
 * @param tr Translation register.
 */
void
itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
{
	tr_mapping_insert(va, asid, entry, false, tr);
}
274
275/** Insert data into data translation register.
276 *
277 * @param va Virtual page address.
278 * @param asid Address space identifier.
279 * @param entry The rest of TLB entry as required by TLB insertion
280 * format.
281 * @param tr Translation register.
282 */
/** Insert data into data translation register.
 *
 * Thin wrapper: forwards to tr_mapping_insert() with dtr == true.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *     format.
 * @param tr Translation register.
 */
void
dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
{
	tr_mapping_insert(va, asid, entry, true, tr);
}
288
289/** Insert data into instruction or data translation register.
290 *
291 * @param va Virtual page address.
292 * @param asid Address space identifier.
293 * @param entry The rest of TLB entry as required by TLB insertion
294 * format.
295 * @param dtr If true, insert into data translation register, use
296 * instruction translation register otherwise.
297 * @param tr Translation register.
298 */
/** Insert data into instruction or data translation register.
 *
 * Translation registers are pinned entries that are never evicted by
 * hardware. As in tc_mapping_insert(), the region register for va's
 * region is temporarily loaded with the RID derived from asid if it
 * does not already match, and restored afterwards.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 *     format.
 * @param dtr If true, insert into data translation register, use
 *     instruction translation register otherwise.
 * @param tr Translation register.
 */
void
tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr,
    size_t tr)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	/*
	 * Same PSR.ic-disabled sequence as tc_mapping_insert(), except the
	 * entry is placed into the pinned register slot itr[tr] or dtr[tr].
	 */
	asm volatile (
		"mov r8 = psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa = %1\n"		/* va */
		"mov cr.itir = %2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7 = %5,r0;;\n"	/* decide between itr and dtr */
		"(p6) itr.i itr[%4] = %3;;\n"
		"(p7) itr.d dtr[%4] = %3;;\n"
		"mov psr.l = r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]),
		  "r" (entry.word[0]), "r" (tr), "r" (dtr)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
345
346/** Insert data into DTLB.
347 *
348 * @param page Virtual page address including VRN bits.
349 * @param frame Physical frame address.
350 * @param dtr If true, insert into data translation register, use data
351 * translation cache otherwise.
352 * @param tr Translation register if dtr is true, ignored otherwise.
353 */
354void
355dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr,
356 size_t tr)
357{
358 tlb_entry_t entry;
359
360 entry.word[0] = 0;
361 entry.word[1] = 0;
362
363 entry.p = true; /* present */
364 entry.ma = MA_WRITEBACK;
365 entry.a = true; /* already accessed */
366 entry.d = true; /* already dirty */
367 entry.pl = PL_KERNEL;
368 entry.ar = AR_READ | AR_WRITE;
369 entry.ppn = frame >> PPN_SHIFT;
370 entry.ps = PAGE_WIDTH;
371
372 if (dtr)
373 dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
374 else
375 dtc_mapping_insert(page, ASID_KERNEL, entry);
376}
377
378/** Purge kernel entries from DTR.
379 *
380 * Purge DTR entries used by the kernel.
381 *
382 * @param page Virtual page address including VRN bits.
383 * @param width Width of the purge in bits.
384 */
/** Purge kernel entries from DTR.
 *
 * Purge DTR entries used by the kernel. Issues a single ptr.d with the
 * purge size encoded in bits 7:2 of the second operand (hence width << 2).
 *
 * @param page Virtual page address including VRN bits.
 * @param width Width of the purge in bits.
 */
void dtr_purge(uintptr_t page, size_t width)
{
	asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width << 2));
}
389
390
391/** Copy content of PTE into data translation cache.
392 *
393 * @param t PTE.
394 */
395void dtc_pte_copy(pte_t *t)
396{
397 tlb_entry_t entry;
398
399 entry.word[0] = 0;
400 entry.word[1] = 0;
401
402 entry.p = t->p;
403 entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
404 entry.a = t->a;
405 entry.d = t->d;
406 entry.pl = t->k ? PL_KERNEL : PL_USER;
407 entry.ar = t->w ? AR_WRITE : AR_READ;
408 entry.ppn = t->frame >> PPN_SHIFT;
409 entry.ps = PAGE_WIDTH;
410
411 dtc_mapping_insert(t->page, t->as->asid, entry);
412#ifdef CONFIG_VHPT
413 vhpt_mapping_insert(t->page, t->as->asid, entry);
414#endif
415}
416
417/** Copy content of PTE into instruction translation cache.
418 *
419 * @param t PTE.
420 */
421void itc_pte_copy(pte_t *t)
422{
423 tlb_entry_t entry;
424
425 entry.word[0] = 0;
426 entry.word[1] = 0;
427
428 ASSERT(t->x);
429
430 entry.p = t->p;
431 entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
432 entry.a = t->a;
433 entry.pl = t->k ? PL_KERNEL : PL_USER;
434 entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
435 entry.ppn = t->frame >> PPN_SHIFT;
436 entry.ps = PAGE_WIDTH;
437
438 itc_mapping_insert(t->page, t->as->asid, entry);
439#ifdef CONFIG_VHPT
440 vhpt_mapping_insert(t->page, t->as->asid, entry);
441#endif
442}
443
444/** Instruction TLB fault handler for faults with VHPT turned off.
445 *
446 * @param vector Interruption vector.
447 * @param istate Structure with saved interruption state.
448 */
449void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate)
450{
451 region_register rr;
452 rid_t rid;
453 uintptr_t va;
454 pte_t *t;
455
456 va = istate->cr_ifa; /* faulting address */
457 rr.word = rr_read(VA2VRN(va));
458 rid = rr.map.rid;
459
460 page_table_lock(AS, true);
461 t = page_mapping_find(AS, va);
462 if (t) {
463 /*
464 * The mapping was found in software page hash table.
465 * Insert it into data translation cache.
466 */
467 itc_pte_copy(t);
468 page_table_unlock(AS, true);
469 } else {
470 /*
471 * Forward the page fault to address space page fault handler.
472 */
473 page_table_unlock(AS, true);
474 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
475 fault_if_from_uspace(istate,"Page fault at %p.",va);
476 panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid,
477 istate->cr_iip);
478 }
479 }
480}
481
482static int is_io_page_accessible(int page)
483{
484 if (TASK->arch.iomap)
485 return bitmap_get(TASK->arch.iomap, page);
486 else
487 return 0;
488}
489
490#define IO_FRAME_BASE 0xFFFFC000000
491
492/**
493 * There is special handling of memory mapped legacy io, because of 4KB sized
494 * access for userspace.
495 *
496 * @param va Virtual address of page fault.
497 * @param istate Structure with saved interruption state.
498 *
499 * @return One on success, zero on failure.
500 */
501static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
502{
503 if ((va >= IO_OFFSET ) && (va < IO_OFFSET + (1 << IO_PAGE_WIDTH))) {
504 if (TASK) {
505 uint64_t io_page = (va & ((1 << IO_PAGE_WIDTH) - 1)) >>
506 USPACE_IO_PAGE_WIDTH;
507
508 if (is_io_page_accessible(io_page)) {
509 uint64_t page, frame;
510
511 page = IO_OFFSET +
512 (1 << USPACE_IO_PAGE_WIDTH) * io_page;
513 frame = IO_FRAME_BASE +
514 (1 << USPACE_IO_PAGE_WIDTH) * io_page;
515
516 tlb_entry_t entry;
517
518 entry.word[0] = 0;
519 entry.word[1] = 0;
520
521 entry.p = true; /* present */
522 entry.ma = MA_UNCACHEABLE;
523 entry.a = true; /* already accessed */
524 entry.d = true; /* already dirty */
525 entry.pl = PL_USER;
526 entry.ar = AR_READ | AR_WRITE;
527 entry.ppn = frame >> PPN_SHIFT;
528 entry.ps = USPACE_IO_PAGE_WIDTH;
529
530 dtc_mapping_insert(page, TASK->as->asid, entry);
531 return 1;
532 } else {
533 fault_if_from_uspace(istate,
534 "IO access fault at %p.", va);
535 }
536 }
537 }
538
539 return 0;
540}
541
542/** Data TLB fault handler for faults with VHPT turned off.
543 *
544 * @param vector Interruption vector.
545 * @param istate Structure with saved interruption state.
546 */
547void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
548{
549 region_register rr;
550 rid_t rid;
551 uintptr_t va;
552 pte_t *t;
553
554 va = istate->cr_ifa; /* faulting address */
555 rr.word = rr_read(VA2VRN(va));
556 rid = rr.map.rid;
557 if (RID2ASID(rid) == ASID_KERNEL) {
558 if (VA2VRN(va) == VRN_KERNEL) {
559 /*
560 * Provide KA2PA(identity) mapping for faulting piece of
561 * kernel address space.
562 */
563 dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
564 return;
565 }
566 }
567
568 page_table_lock(AS, true);
569 t = page_mapping_find(AS, va);
570 if (t) {
571 /*
572 * The mapping was found in the software page hash table.
573 * Insert it into data translation cache.
574 */
575 dtc_pte_copy(t);
576 page_table_unlock(AS, true);
577 } else {
578 page_table_unlock(AS, true);
579 if (try_memmap_io_insertion(va, istate))
580 return;
581 /*
582 * Forward the page fault to the address space page fault
583 * handler.
584 */
585 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
586 fault_if_from_uspace(istate,"Page fault at %p.",va);
587 panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid,
588 istate->cr_iip);
589 }
590 }
591}
592
593/** Data nested TLB fault handler.
594 *
595 * This fault should not occur.
596 *
597 * @param vector Interruption vector.
598 * @param istate Structure with saved interruption state.
599 */
/** Data nested TLB fault handler.
 *
 * This fault should not occur, so the handler unconditionally panics.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
{
	panic("%s.", __func__);
}
604
605/** Data Dirty bit fault handler.
606 *
607 * @param vector Interruption vector.
608 * @param istate Structure with saved interruption state.
609 */
610void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
611{
612 region_register rr;
613 rid_t rid;
614 uintptr_t va;
615 pte_t *t;
616
617 va = istate->cr_ifa; /* faulting address */
618 rr.word = rr_read(VA2VRN(va));
619 rid = rr.map.rid;
620
621 page_table_lock(AS, true);
622 t = page_mapping_find(AS, va);
623 ASSERT(t && t->p);
624 if (t && t->p && t->w) {
625 /*
626 * Update the Dirty bit in page tables and reinsert
627 * the mapping into DTC.
628 */
629 t->d = true;
630 dtc_pte_copy(t);
631 } else {
632 if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
633 fault_if_from_uspace(istate,"Page fault at %p.",va);
634 panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid,
635 istate->cr_iip);
636 }
637 }
638 page_table_unlock(AS, true);
639}
640
641/** Instruction access bit fault handler.
642 *
643 * @param vector Interruption vector.
644 * @param istate Structure with saved interruption state.
645 */
646void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
647{
648 region_register rr;
649 rid_t rid;
650 uintptr_t va;
651 pte_t *t;
652
653 va = istate->cr_ifa; /* faulting address */
654 rr.word = rr_read(VA2VRN(va));
655 rid = rr.map.rid;
656
657 page_table_lock(AS, true);
658 t = page_mapping_find(AS, va);
659 ASSERT(t && t->p);
660 if (t && t->p && t->x) {
661 /*
662 * Update the Accessed bit in page tables and reinsert
663 * the mapping into ITC.
664 */
665 t->a = true;
666 itc_pte_copy(t);
667 } else {
668 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
669 fault_if_from_uspace(istate, "Page fault at %p.", va);
670 panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid,
671 istate->cr_iip);
672 }
673 }
674 page_table_unlock(AS, true);
675}
676
677/** Data access bit fault handler.
678 *
679 * @param vector Interruption vector.
680 * @param istate Structure with saved interruption state.
681 */
682void data_access_bit_fault(uint64_t vector, istate_t *istate)
683{
684 region_register rr;
685 rid_t rid;
686 uintptr_t va;
687 pte_t *t;
688
689 va = istate->cr_ifa; /* faulting address */
690 rr.word = rr_read(VA2VRN(va));
691 rid = rr.map.rid;
692
693 page_table_lock(AS, true);
694 t = page_mapping_find(AS, va);
695 ASSERT(t && t->p);
696 if (t && t->p) {
697 /*
698 * Update the Accessed bit in page tables and reinsert
699 * the mapping into DTC.
700 */
701 t->a = true;
702 dtc_pte_copy(t);
703 } else {
704 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
705 fault_if_from_uspace(istate, "Page fault at %p.", va);
706 panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid,
707 istate->cr_iip);
708 }
709 }
710 page_table_unlock(AS, true);
711}
712
713/** Page not present fault handler.
714 *
715 * @param vector Interruption vector.
716 * @param istate Structure with saved interruption state.
717 */
718void page_not_present(uint64_t vector, istate_t *istate)
719{
720 region_register rr;
721 rid_t rid;
722 uintptr_t va;
723 pte_t *t;
724
725 va = istate->cr_ifa; /* faulting address */
726 rr.word = rr_read(VA2VRN(va));
727 rid = rr.map.rid;
728
729 page_table_lock(AS, true);
730 t = page_mapping_find(AS, va);
731 ASSERT(t);
732
733 if (t->p) {
734 /*
735 * If the Present bit is set in page hash table, just copy it
736 * and update ITC/DTC.
737 */
738 if (t->x)
739 itc_pte_copy(t);
740 else
741 dtc_pte_copy(t);
742 page_table_unlock(AS, true);
743 } else {
744 page_table_unlock(AS, true);
745 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
746 fault_if_from_uspace(istate, "Page fault at %p.", va);
747 panic("%s: va=%p, rid=%d.", __func__, va, rid);
748 }
749 }
750}
751
/** Architecture-specific TLB initialization — no-op on ia64. */
void tlb_arch_init(void)
{
}
755
/** Print the TLB contents — not implemented on ia64; intentionally a no-op. */
void tlb_print(void)
{
}
759
760/** @}
761 */
Note: See TracBrowser for help on using the repository browser.