source: mainline/kernel/arch/ia64/src/mm/tlb.c@ 8ccd2ea

Branches: lfn, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Last change on this file since 8ccd2ea was 3ee8a075, checked in by Jakub Jermar <jakub@…>, 18 years ago

Replace gcc-specific FUNCTION with C99 func.
suncc's xregs=no%float can be used only on sparc64.

  • Property mode set to 100644
File size: 16.4 KB
RevLine 
[36b01bb2]1/*
[df4ed85]2 * Copyright (c) 2006 Jakub Jermar
[36b01bb2]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[ee289cf0]29/** @addtogroup ia64mm
[b45c443]30 * @{
31 */
32/** @file
33 */
34
[36b01bb2]35/*
36 * TLB management.
37 */
38
39#include <mm/tlb.h>
[a0d74fd]40#include <mm/asid.h>
[9ad03fe]41#include <mm/page.h>
42#include <mm/as.h>
[bc78c75]43#include <arch/mm/tlb.h>
[a0d74fd]44#include <arch/mm/page.h>
[68091bd]45#include <arch/mm/vhpt.h>
[89298e3]46#include <arch/barrier.h>
[2c49fbbe]47#include <arch/interrupt.h>
[7c322bd]48#include <arch/pal/pal.h>
49#include <arch/asm.h>
[2c49fbbe]50#include <panic.h>
[1065603e]51#include <print.h>
[9ad03fe]52#include <arch.h>
[a175a67]53#include <interrupt.h>
[36b01bb2]54
/** Invalidate all TLB entries.
 *
 * Uses the PAL-described ptc.e loop: PAL publishes a base address and a
 * two-level (count1 x count2) iteration with two strides; issuing ptc.e
 * on every address of that pattern purges the entire local TLB.
 */
void tlb_invalidate_all(void)
{
	ipl_t ipl;
	uintptr_t adr;
	uint32_t count1, count2, stride1, stride2;
	
	int i, j;
	
	/* Fetch the ptc.e iteration parameters from PAL. */
	adr = PAL_PTCE_INFO_BASE();
	count1 = PAL_PTCE_INFO_COUNT1();
	count2 = PAL_PTCE_INFO_COUNT2();
	stride1 = PAL_PTCE_INFO_STRIDE1();
	stride2 = PAL_PTCE_INFO_STRIDE2();
	
	/* The ptc.e sequence must not be interleaved with other TLB work. */
	ipl = interrupts_disable();
	
	for(i = 0; i < count1; i++) {
		for(j = 0; j < count2; j++) {
			asm volatile (
				"ptc.e %0 ;;"
				:
				: "r" (adr)
			);
			adr += stride2;
		}
		adr += stride1;
	}
	
	interrupts_restore(ipl);
	
	/* Serialize data and instruction streams after the purge. */
	srlz_d();
	srlz_i();
#ifdef CONFIG_VHPT
	/* The VHPT mirrors TLB contents and must be flushed as well. */
	vhpt_invalidate_all();
#endif
}
92
/** Invalidate entries belonging to an address space.
 *
 * No selective per-ASID purge is implemented here; the whole TLB is
 * invalidated instead, which is correct but coarse-grained.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
	tlb_invalidate_all();
}
[bc78c75]101
[a82500ce]102
[7f1c620]103void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
[a82500ce]104{
[d0cf9de]105 region_register rr;
106 bool restore_rr = false;
[1065603e]107 int b = 0;
108 int c = cnt;
[9bda3af6]109
[7f1c620]110 uintptr_t va;
[1065603e]111 va = page;
[d0cf9de]112
113 rr.word = rr_read(VA2VRN(va));
114 if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
115 /*
116 * The selected region register does not contain required RID.
117 * Save the old content of the register and replace the RID.
118 */
119 region_register rr0;
120
121 rr0 = rr;
122 rr0.map.rid = ASID2RID(asid, VA2VRN(va));
123 rr_write(VA2VRN(va), rr0.word);
124 srlz_d();
125 srlz_i();
126 }
127
[1065603e]128 while(c >>= 1)
129 b++;
130 b >>= 1;
[7f1c620]131 uint64_t ps;
[d0cf9de]132
[1065603e]133 switch (b) {
[ee289cf0]134 case 0: /*cnt 1-3*/
135 ps = PAGE_WIDTH;
136 break;
137 case 1: /*cnt 4-15*/
138 /*cnt=((cnt-1)/4)+1;*/
139 ps = PAGE_WIDTH+2;
140 va &= ~((1<<ps)-1);
141 break;
142 case 2: /*cnt 16-63*/
143 /*cnt=((cnt-1)/16)+1;*/
144 ps = PAGE_WIDTH+4;
145 va &= ~((1<<ps)-1);
146 break;
147 case 3: /*cnt 64-255*/
148 /*cnt=((cnt-1)/64)+1;*/
149 ps = PAGE_WIDTH+6;
150 va &= ~((1<<ps)-1);
151 break;
152 case 4: /*cnt 256-1023*/
153 /*cnt=((cnt-1)/256)+1;*/
154 ps = PAGE_WIDTH+8;
155 va &= ~((1<<ps)-1);
156 break;
157 case 5: /*cnt 1024-4095*/
158 /*cnt=((cnt-1)/1024)+1;*/
159 ps = PAGE_WIDTH+10;
160 va &= ~((1<<ps)-1);
161 break;
162 case 6: /*cnt 4096-16383*/
163 /*cnt=((cnt-1)/4096)+1;*/
164 ps = PAGE_WIDTH+12;
165 va &= ~((1<<ps)-1);
166 break;
167 case 7: /*cnt 16384-65535*/
168 case 8: /*cnt 65536-(256K-1)*/
169 /*cnt=((cnt-1)/16384)+1;*/
170 ps = PAGE_WIDTH+14;
171 va &= ~((1<<ps)-1);
172 break;
173 default:
174 /*cnt=((cnt-1)/(16384*16))+1;*/
175 ps=PAGE_WIDTH+18;
176 va&=~((1<<ps)-1);
177 break;
[d0cf9de]178 }
[9bda3af6]179 /*cnt+=(page!=va);*/
[1065603e]180 for(; va<(page+cnt*(PAGE_SIZE)); va += (1<<ps)) {
[e7b7be3f]181 asm volatile (
[9bda3af6]182 "ptc.l %0,%1;;"
183 :
[1065603e]184 : "r" (va), "r" (ps<<2)
[9bda3af6]185 );
[d0cf9de]186 }
187 srlz_d();
188 srlz_i();
189
190 if (restore_rr) {
191 rr_write(VA2VRN(va), rr.word);
192 srlz_d();
193 srlz_i();
194 }
[a82500ce]195}
196
/** Insert data into data translation cache.
 *
 * Thin wrapper over tc_mapping_insert() with dtc == true.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, true);
}
[bc78c75]207
/** Insert data into instruction translation cache.
 *
 * Thin wrapper over tc_mapping_insert() with dtc == false.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, false);
}
[bc78c75]218
/** Insert data into instruction or data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
 */
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}
	
	/*
	 * Perform the insertion with PSR.ic (interruption collection)
	 * disabled, as required for itc.  Predicates p6/p7 select between
	 * the instruction and data variant based on the dtc flag (%4).
	 */
	asm volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7 = %4,r0;;\n"	/* decide between itc and dtc */
		"(p6) itc.i %3;;\n"
		"(p7) itc.d %3;;\n"
		"mov psr.l=r8;;\n"		/* restore saved PSR */
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
		: "p6", "p7", "r8"
	);
	
	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
269
/** Insert data into instruction translation register.
 *
 * Thin wrapper over tr_mapping_insert() with dtr == false.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	tr_mapping_insert(va, asid, entry, false, tr);
}
281
/** Insert data into data translation register.
 *
 * Thin wrapper over tr_mapping_insert() with dtr == true.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	tr_mapping_insert(va, asid, entry, true, tr);
}
[bc78c75]293
/** Insert data into instruction or data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}
	
	/*
	 * Perform the insertion with PSR.ic disabled.  Predicates p6/p7
	 * select the instruction or data translation register file based
	 * on the dtr flag (%5); %4 is the register index.
	 */
	asm volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7=%5,r0;;\n"	/* decide between itr and dtr */
		"(p6) itr.i itr[%4]=%3;;\n"
		"(p7) itr.d dtr[%4]=%3;;\n"
		"mov psr.l=r8;;\n"		/* restore saved PSR */
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
		: "p6", "p7", "r8"
	);
	
	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
345
/** Insert data into DTLB.
 *
 * Builds a writeback, kernel-privilege, read/write TLB entry mapping
 * @a page to @a frame and inserts it either into a data translation
 * register (pinned) or the data translation cache.
 *
 * @param page Virtual page address including VRN bits.
 * @param frame Physical frame address.
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
 * @param tr Translation register if dtr is true, ignored otherwise.
 */
void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, index_t tr)
{
	tlb_entry_t entry;
	
	entry.word[0] = 0;
	entry.word[1] = 0;
	
	entry.p = true;			/* present */
	entry.ma = MA_WRITEBACK;
	entry.a = true;			/* already accessed */
	entry.d = true;			/* already dirty */
	entry.pl = PL_KERNEL;
	entry.ar = AR_READ | AR_WRITE;
	entry.ppn = frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;
	
	if (dtr)
		dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
	else
		dtc_mapping_insert(page, ASID_KERNEL, entry);
}
374
/** Purge kernel entries from DTR.
 *
 * Purge DTR entries used by the kernel.
 *
 * @param page Virtual page address including VRN bits.
 * @param width Width of the purge in bits.
 */
void dtr_purge(uintptr_t page, count_t width)
{
	/* ptr.d takes the purge size encoded in bits 7:2, hence width << 2. */
	asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width << 2));
}
386
387
/** Copy content of PTE into data translation cache.
 *
 * Translates the software PTE fields (present, cacheable, accessed,
 * dirty, kernel, writable) into the hardware TLB entry format and
 * inserts the result into the DTC (and the VHPT when configured).
 *
 * @param t PTE.
 */
void dtc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;
	
	entry.word[0] = 0;
	entry.word[1] = 0;
	
	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.d = t->d;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->w ? AR_WRITE : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;
	
	dtc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
	vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}
413
/** Copy content of PTE into instruction translation cache.
 *
 * Translates the software PTE fields into the hardware TLB entry format
 * and inserts the result into the ITC (and the VHPT when configured).
 * The PTE must be executable.
 *
 * @param t PTE.
 */
void itc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;
	
	entry.word[0] = 0;
	entry.word[1] = 0;
	
	ASSERT(t->x);
	
	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;
	
	itc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
	vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}
440
/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;
	
	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;
	
	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in software page hash table.
		 * Insert it into instruction translation cache.
		 */
		itc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to address space page fault handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate,"Page fault at %p",va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
		}
	}
}
[89298e3]477
/** Data TLB fault handler for faults with VHPT turned off.
 *
 * Kernel-region faults with the kernel ASID are satisfied with an
 * identity (KA2PA) mapping on the fly; all other faults consult the
 * software page hash table or fall through to the generic page fault
 * handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;
	
	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;
	if (RID2ASID(rid) == ASID_KERNEL) {
		if (VA2VRN(va) == VRN_KERNEL) {
			/*
			 * Provide KA2PA(identity) mapping for faulting piece of
			 * kernel address space.
			 */
			dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
			return;
		}
	}
	
	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into data translation cache.
		 */
		dtc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate,"Page fault at %p",va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
		}
	}
}
[89298e3]524
/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
{
	panic("%s\n", __func__);
}
[89298e3]536
/** Data Dirty bit fault handler.
 *
 * Raised on the first write to a clean page.  Marks the PTE dirty and
 * reinserts it into the DTC; a write to a non-writable page is forwarded
 * to the generic page fault handler (e.g. for copy-on-write handling).
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;
	
	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;
	
	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	ASSERT(t && t->p);
	if (t && t->p && t->w) {
		/*
		 * Update the Dirty bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->d = true;
		dtc_pte_copy(t);
	} else {
		if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate,"Page fault at %p",va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
			t->d = true;
			dtc_pte_copy(t);
		}
	}
	page_table_unlock(AS, true);
}
[89298e3]573
/** Instruction access bit fault handler.
 *
 * Raised on the first instruction fetch from a page with the Accessed
 * bit clear.  Marks the PTE accessed and reinserts it into the ITC;
 * non-executable pages are forwarded to the generic page fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;
	
	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;
	
	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	ASSERT(t && t->p);
	if (t && t->p && t->x) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into ITC.
		 */
		t->a = true;
		itc_pte_copy(t);
	} else {
		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate,"Page fault at %p",va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
			t->a = true;
			itc_pte_copy(t);
		}
	}
	page_table_unlock(AS, true);
}
[89298e3]610
[9ad03fe]611/** Data access bit fault handler.
612 *
613 * @param vector Interruption vector.
[25d7709]614 * @param istate Structure with saved interruption state.
[9ad03fe]615 */
[7f1c620]616void data_access_bit_fault(uint64_t vector, istate_t *istate)
[95042fd]617{
[567807b1]618 region_register rr;
619 rid_t rid;
[7f1c620]620 uintptr_t va;
[9ad03fe]621 pte_t *t;
622
[567807b1]623 va = istate->cr_ifa; /* faulting address */
624 rr.word = rr_read(VA2VRN(va));
625 rid = rr.map.rid;
626
[2299914]627 page_table_lock(AS, true);
[567807b1]628 t = page_mapping_find(AS, va);
[9ad03fe]629 ASSERT(t && t->p);
630 if (t && t->p) {
631 /*
632 * Update the Accessed bit in page tables and reinsert
633 * the mapping into DTC.
634 */
635 t->a = true;
636 dtc_pte_copy(t);
[567807b1]637 } else {
638 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
[fbf7b4c]639 fault_if_from_uspace(istate,"Page fault at %p",va);
[3ee8a075]640 panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
[567807b1]641 t->a = true;
642 itc_pte_copy(t);
643 }
[9ad03fe]644 }
[2299914]645 page_table_unlock(AS, true);
[89298e3]646}
647
/** Page not present fault handler.
 *
 * If the software page hash table holds a present mapping, it is copied
 * to the ITC or DTC depending on executability; otherwise the fault is
 * forwarded to the generic page fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void page_not_present(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;
	
	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;
	
	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	ASSERT(t);
	
	if (t->p) {
		/*
		 * If the Present bit is set in page hash table, just copy it
		 * and update ITC/DTC.
		 */
		if (t->x)
			itc_pte_copy(t);
		else
			dtc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate,"Page fault at %p",va);
			panic("%s: va=%p, rid=%d\n", __func__, va, rid);
		}
	}
}
[b45c443]686
[ee289cf0]687/** @}
[b45c443]688 */
Note: See TracBrowser for help on using the repository browser.