source: mainline/kernel/arch/ia64/src/mm/tlb.c@ 11675207

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 11675207 was 11675207, checked in by jermar <jermar@…>, 17 years ago

Move everything to kernel/.

  • Property mode set to 100644
File size: 16.6 KB
RevLine 
[36b01bb2]1/*
2 * Copyright (C) 2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[b45c443]29 /** @addtogroup ia64mm
30 * @{
31 */
32/** @file
33 */
34
[36b01bb2]35/*
36 * TLB management.
37 */
38
39#include <mm/tlb.h>
[a0d74fd]40#include <mm/asid.h>
[9ad03fe]41#include <mm/page.h>
42#include <mm/as.h>
[bc78c75]43#include <arch/mm/tlb.h>
[a0d74fd]44#include <arch/mm/page.h>
[68091bd]45#include <arch/mm/vhpt.h>
[89298e3]46#include <arch/barrier.h>
[2c49fbbe]47#include <arch/interrupt.h>
[7c322bd]48#include <arch/pal/pal.h>
49#include <arch/asm.h>
[95042fd]50#include <typedefs.h>
[2c49fbbe]51#include <panic.h>
[1065603e]52#include <print.h>
[9ad03fe]53#include <arch.h>
[a175a67]54#include <interrupt.h>
[36b01bb2]55
[ef67bab]56/** Invalidate all TLB entries. */
[36b01bb2]57void tlb_invalidate_all(void)
58{
[1065603e]59 ipl_t ipl;
[7f1c620]60 uintptr_t adr;
61 uint32_t count1, count2, stride1, stride2;
[7c322bd]62
63 int i,j;
64
[1065603e]65 adr = PAL_PTCE_INFO_BASE();
66 count1 = PAL_PTCE_INFO_COUNT1();
67 count2 = PAL_PTCE_INFO_COUNT2();
68 stride1 = PAL_PTCE_INFO_STRIDE1();
69 stride2 = PAL_PTCE_INFO_STRIDE2();
[7c322bd]70
[1065603e]71 ipl = interrupts_disable();
72
73 for(i = 0; i < count1; i++) {
74 for(j = 0; j < count2; j++) {
75 __asm__ volatile (
76 "ptc.e %0 ;;"
[7c322bd]77 :
[1065603e]78 : "r" (adr)
[7c322bd]79 );
[1065603e]80 adr += stride2;
[7c322bd]81 }
[1065603e]82 adr += stride1;
[7c322bd]83 }
84
[1065603e]85 interrupts_restore(ipl);
[7c322bd]86
87 srlz_d();
88 srlz_i();
[68091bd]89#ifdef CONFIG_VHPT
90 vhpt_invalidate_all();
91#endif
[36b01bb2]92}
93
94/** Invalidate entries belonging to an address space.
95 *
96 * @param asid Address space identifier.
97 */
98void tlb_invalidate_asid(asid_t asid)
99{
[a82500ce]100 tlb_invalidate_all();
[36b01bb2]101}
[bc78c75]102
[a82500ce]103
[7f1c620]104void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
[a82500ce]105{
[d0cf9de]106 region_register rr;
107 bool restore_rr = false;
[1065603e]108 int b = 0;
109 int c = cnt;
[9bda3af6]110
[7f1c620]111 uintptr_t va;
[1065603e]112 va = page;
[d0cf9de]113
114 rr.word = rr_read(VA2VRN(va));
115 if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
116 /*
117 * The selected region register does not contain required RID.
118 * Save the old content of the register and replace the RID.
119 */
120 region_register rr0;
121
122 rr0 = rr;
123 rr0.map.rid = ASID2RID(asid, VA2VRN(va));
124 rr_write(VA2VRN(va), rr0.word);
125 srlz_d();
126 srlz_i();
127 }
128
[1065603e]129 while(c >>= 1)
130 b++;
131 b >>= 1;
[7f1c620]132 uint64_t ps;
[d0cf9de]133
[1065603e]134 switch (b) {
[d0cf9de]135 case 0: /*cnt 1-3*/
[1065603e]136 ps = PAGE_WIDTH;
[d0cf9de]137 break;
138 case 1: /*cnt 4-15*/
[9bda3af6]139 /*cnt=((cnt-1)/4)+1;*/
[1065603e]140 ps = PAGE_WIDTH+2;
141 va &= ~((1<<ps)-1);
[d0cf9de]142 break;
143 case 2: /*cnt 16-63*/
[9bda3af6]144 /*cnt=((cnt-1)/16)+1;*/
[1065603e]145 ps = PAGE_WIDTH+4;
146 va &= ~((1<<ps)-1);
[d0cf9de]147 break;
148 case 3: /*cnt 64-255*/
[9bda3af6]149 /*cnt=((cnt-1)/64)+1;*/
[1065603e]150 ps = PAGE_WIDTH+6;
151 va &= ~((1<<ps)-1);
[d0cf9de]152 break;
153 case 4: /*cnt 256-1023*/
[9bda3af6]154 /*cnt=((cnt-1)/256)+1;*/
[1065603e]155 ps = PAGE_WIDTH+8;
156 va &= ~((1<<ps)-1);
[d0cf9de]157 break;
158 case 5: /*cnt 1024-4095*/
[9bda3af6]159 /*cnt=((cnt-1)/1024)+1;*/
[1065603e]160 ps = PAGE_WIDTH+10;
161 va &= ~((1<<ps)-1);
[d0cf9de]162 break;
163 case 6: /*cnt 4096-16383*/
[9bda3af6]164 /*cnt=((cnt-1)/4096)+1;*/
[1065603e]165 ps = PAGE_WIDTH+12;
166 va &= ~((1<<ps)-1);
[d0cf9de]167 break;
168 case 7: /*cnt 16384-65535*/
169 case 8: /*cnt 65536-(256K-1)*/
[9bda3af6]170 /*cnt=((cnt-1)/16384)+1;*/
[1065603e]171 ps = PAGE_WIDTH+14;
172 va &= ~((1<<ps)-1);
[d0cf9de]173 break;
174 default:
[9bda3af6]175 /*cnt=((cnt-1)/(16384*16))+1;*/
[d0cf9de]176 ps=PAGE_WIDTH+18;
177 va&=~((1<<ps)-1);
178 break;
179 }
[9bda3af6]180 /*cnt+=(page!=va);*/
[1065603e]181 for(; va<(page+cnt*(PAGE_SIZE)); va += (1<<ps)) {
182 __asm__ volatile (
[9bda3af6]183 "ptc.l %0,%1;;"
184 :
[1065603e]185 : "r" (va), "r" (ps<<2)
[9bda3af6]186 );
[d0cf9de]187 }
188 srlz_d();
189 srlz_i();
190
191 if (restore_rr) {
192 rr_write(VA2VRN(va), rr.word);
193 srlz_d();
194 srlz_i();
195 }
[a82500ce]196}
197
[95042fd]198/** Insert data into data translation cache.
199 *
200 * @param va Virtual page address.
201 * @param asid Address space identifier.
202 * @param entry The rest of TLB entry as required by TLB insertion format.
203 */
[7f1c620]204void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
[b994a60]205{
[95042fd]206 tc_mapping_insert(va, asid, entry, true);
207}
[bc78c75]208
[95042fd]209/** Insert data into instruction translation cache.
210 *
211 * @param va Virtual page address.
212 * @param asid Address space identifier.
213 * @param entry The rest of TLB entry as required by TLB insertion format.
214 */
[7f1c620]215void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
[b994a60]216{
[95042fd]217 tc_mapping_insert(va, asid, entry, false);
218}
[bc78c75]219
[95042fd]220/** Insert data into instruction or data translation cache.
221 *
222 * @param va Virtual page address.
223 * @param asid Address space identifier.
224 * @param entry The rest of TLB entry as required by TLB insertion format.
225 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
226 */
[7f1c620]227void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
[bc78c75]228{
229 region_register rr;
[95042fd]230 bool restore_rr = false;
[bc78c75]231
[a0d74fd]232 rr.word = rr_read(VA2VRN(va));
233 if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
[95042fd]234 /*
235 * The selected region register does not contain required RID.
236 * Save the old content of the register and replace the RID.
237 */
[bc78c75]238 region_register rr0;
[95042fd]239
240 rr0 = rr;
[a0d74fd]241 rr0.map.rid = ASID2RID(asid, VA2VRN(va));
242 rr_write(VA2VRN(va), rr0.word);
[89298e3]243 srlz_d();
[95042fd]244 srlz_i();
245 }
246
247 __asm__ volatile (
248 "mov r8=psr;;\n"
[2c49fbbe]249 "rsm %0;;\n" /* PSR_IC_MASK */
[95042fd]250 "srlz.d;;\n"
251 "srlz.i;;\n"
252 "mov cr.ifa=%1\n" /* va */
253 "mov cr.itir=%2;;\n" /* entry.word[1] */
254 "cmp.eq p6,p7 = %4,r0;;\n" /* decide between itc and dtc */
255 "(p6) itc.i %3;;\n"
256 "(p7) itc.d %3;;\n"
257 "mov psr.l=r8;;\n"
258 "srlz.d;;\n"
259 :
[2c49fbbe]260 : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
261 : "p6", "p7", "r8"
[95042fd]262 );
263
264 if (restore_rr) {
[a0d74fd]265 rr_write(VA2VRN(va), rr.word);
[95042fd]266 srlz_d();
267 srlz_i();
[bc78c75]268 }
269}
270
[95042fd]271/** Insert data into instruction translation register.
272 *
273 * @param va Virtual page address.
274 * @param asid Address space identifier.
275 * @param entry The rest of TLB entry as required by TLB insertion format.
276 * @param tr Translation register.
277 */
[7f1c620]278void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
[bc78c75]279{
[95042fd]280 tr_mapping_insert(va, asid, entry, false, tr);
[bc78c75]281}
282
[95042fd]283/** Insert data into data translation register.
284 *
285 * @param va Virtual page address.
286 * @param asid Address space identifier.
287 * @param entry The rest of TLB entry as required by TLB insertion format.
288 * @param tr Translation register.
289 */
[7f1c620]290void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
[95042fd]291{
292 tr_mapping_insert(va, asid, entry, true, tr);
293}
[bc78c75]294
[95042fd]295/** Insert data into instruction or data translation register.
296 *
297 * @param va Virtual page address.
298 * @param asid Address space identifier.
299 * @param entry The rest of TLB entry as required by TLB insertion format.
[abbc16e]300 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
[95042fd]301 * @param tr Translation register.
302 */
[7f1c620]303void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
[89298e3]304{
305 region_register rr;
[95042fd]306 bool restore_rr = false;
[89298e3]307
[a0d74fd]308 rr.word = rr_read(VA2VRN(va));
309 if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
[95042fd]310 /*
311 * The selected region register does not contain required RID.
312 * Save the old content of the register and replace the RID.
313 */
[89298e3]314 region_register rr0;
[95042fd]315
316 rr0 = rr;
[a0d74fd]317 rr0.map.rid = ASID2RID(asid, VA2VRN(va));
318 rr_write(VA2VRN(va), rr0.word);
[89298e3]319 srlz_d();
[95042fd]320 srlz_i();
[89298e3]321 }
322
[95042fd]323 __asm__ volatile (
324 "mov r8=psr;;\n"
[2c49fbbe]325 "rsm %0;;\n" /* PSR_IC_MASK */
[95042fd]326 "srlz.d;;\n"
327 "srlz.i;;\n"
328 "mov cr.ifa=%1\n" /* va */
329 "mov cr.itir=%2;;\n" /* entry.word[1] */
330 "cmp.eq p6,p7=%5,r0;;\n" /* decide between itr and dtr */
331 "(p6) itr.i itr[%4]=%3;;\n"
332 "(p7) itr.d dtr[%4]=%3;;\n"
333 "mov psr.l=r8;;\n"
334 "srlz.d;;\n"
335 :
[2c49fbbe]336 : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
337 : "p6", "p7", "r8"
[95042fd]338 );
339
340 if (restore_rr) {
[a0d74fd]341 rr_write(VA2VRN(va), rr.word);
[95042fd]342 srlz_d();
343 srlz_i();
344 }
[89298e3]345}
346
[a0d74fd]347/** Insert data into DTLB.
348 *
[208259c]349 * @param page Virtual page address including VRN bits.
350 * @param frame Physical frame address.
[a0d74fd]351 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
352 * @param tr Translation register if dtr is true, ignored otherwise.
353 */
[7f1c620]354void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, index_t tr)
[a0d74fd]355{
356 tlb_entry_t entry;
357
358 entry.word[0] = 0;
359 entry.word[1] = 0;
360
361 entry.p = true; /* present */
362 entry.ma = MA_WRITEBACK;
363 entry.a = true; /* already accessed */
364 entry.d = true; /* already dirty */
365 entry.pl = PL_KERNEL;
366 entry.ar = AR_READ | AR_WRITE;
367 entry.ppn = frame >> PPN_SHIFT;
368 entry.ps = PAGE_WIDTH;
369
370 if (dtr)
371 dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
372 else
373 dtc_mapping_insert(page, ASID_KERNEL, entry);
374}
375
[208259c]376/** Purge kernel entries from DTR.
377 *
378 * Purge DTR entries used by the kernel.
379 *
380 * @param page Virtual page address including VRN bits.
381 * @param width Width of the purge in bits.
382 */
[7f1c620]383void dtr_purge(uintptr_t page, count_t width)
[208259c]384{
385 __asm__ volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width<<2));
386}
387
388
[9ad03fe]389/** Copy content of PTE into data translation cache.
390 *
391 * @param t PTE.
392 */
393void dtc_pte_copy(pte_t *t)
394{
395 tlb_entry_t entry;
396
397 entry.word[0] = 0;
398 entry.word[1] = 0;
399
400 entry.p = t->p;
401 entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
402 entry.a = t->a;
403 entry.d = t->d;
404 entry.pl = t->k ? PL_KERNEL : PL_USER;
405 entry.ar = t->w ? AR_WRITE : AR_READ;
406 entry.ppn = t->frame >> PPN_SHIFT;
407 entry.ps = PAGE_WIDTH;
408
409 dtc_mapping_insert(t->page, t->as->asid, entry);
[68091bd]410#ifdef CONFIG_VHPT
411 vhpt_mapping_insert(t->page, t->as->asid, entry);
412#endif
[9ad03fe]413}
414
415/** Copy content of PTE into instruction translation cache.
416 *
417 * @param t PTE.
418 */
419void itc_pte_copy(pte_t *t)
420{
421 tlb_entry_t entry;
422
423 entry.word[0] = 0;
424 entry.word[1] = 0;
425
426 ASSERT(t->x);
427
428 entry.p = t->p;
429 entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
430 entry.a = t->a;
431 entry.pl = t->k ? PL_KERNEL : PL_USER;
432 entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
433 entry.ppn = t->frame >> PPN_SHIFT;
434 entry.ps = PAGE_WIDTH;
435
436 itc_mapping_insert(t->page, t->as->asid, entry);
[68091bd]437#ifdef CONFIG_VHPT
438 vhpt_mapping_insert(t->page, t->as->asid, entry);
439#endif
[9ad03fe]440}
441
/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in software page hash table.
		 * Insert it into instruction translation cache.
		 */
		itc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to address space page fault handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate,"Page fault at %p",va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
		}
	}
}
[89298e3]478
[9ad03fe]479/** Data TLB fault handler for faults with VHPT turned off.
[a0d74fd]480 *
481 * @param vector Interruption vector.
[25d7709]482 * @param istate Structure with saved interruption state.
[a0d74fd]483 */
[7f1c620]484void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
[95042fd]485{
[a0d74fd]486 region_register rr;
487 rid_t rid;
[7f1c620]488 uintptr_t va;
[9ad03fe]489 pte_t *t;
[a0d74fd]490
[25d7709]491 va = istate->cr_ifa; /* faulting address */
[a0d74fd]492 rr.word = rr_read(VA2VRN(va));
493 rid = rr.map.rid;
494 if (RID2ASID(rid) == ASID_KERNEL) {
495 if (VA2VRN(va) == VRN_KERNEL) {
496 /*
497 * Provide KA2PA(identity) mapping for faulting piece of
498 * kernel address space.
499 */
[9ad03fe]500 dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
[a0d74fd]501 return;
502 }
503 }
[b994a60]504
[2299914]505 page_table_lock(AS, true);
[9ad03fe]506 t = page_mapping_find(AS, va);
507 if (t) {
508 /*
509 * The mapping was found in software page hash table.
510 * Insert it into data translation cache.
511 */
512 dtc_pte_copy(t);
[2299914]513 page_table_unlock(AS, true);
[9ad03fe]514 } else {
515 /*
516 * Forward the page fault to address space page fault handler.
517 */
[2299914]518 page_table_unlock(AS, true);
[567807b1]519 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
[fbf7b4c]520 fault_if_from_uspace(istate,"Page fault at %p",va);
[cf85e24c]521 panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
[9ad03fe]522 }
523 }
[95042fd]524}
[89298e3]525
[9ad03fe]526/** Data nested TLB fault handler.
527 *
528 * This fault should not occur.
529 *
530 * @param vector Interruption vector.
[25d7709]531 * @param istate Structure with saved interruption state.
[9ad03fe]532 */
[7f1c620]533void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
[95042fd]534{
535 panic("%s\n", __FUNCTION__);
536}
[89298e3]537
[9ad03fe]538/** Data Dirty bit fault handler.
539 *
540 * @param vector Interruption vector.
[25d7709]541 * @param istate Structure with saved interruption state.
[9ad03fe]542 */
[7f1c620]543void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
[95042fd]544{
[567807b1]545 region_register rr;
546 rid_t rid;
[7f1c620]547 uintptr_t va;
[9ad03fe]548 pte_t *t;
[567807b1]549
550 va = istate->cr_ifa; /* faulting address */
551 rr.word = rr_read(VA2VRN(va));
552 rid = rr.map.rid;
[9ad03fe]553
[2299914]554 page_table_lock(AS, true);
[567807b1]555 t = page_mapping_find(AS, va);
[9ad03fe]556 ASSERT(t && t->p);
[567807b1]557 if (t && t->p && t->w) {
[9ad03fe]558 /*
559 * Update the Dirty bit in page tables and reinsert
560 * the mapping into DTC.
561 */
562 t->d = true;
563 dtc_pte_copy(t);
[567807b1]564 } else {
565 if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
[fbf7b4c]566 fault_if_from_uspace(istate,"Page fault at %p",va);
[567807b1]567 panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
568 t->d = true;
569 dtc_pte_copy(t);
570 }
[9ad03fe]571 }
[2299914]572 page_table_unlock(AS, true);
[95042fd]573}
[89298e3]574
[9ad03fe]575/** Instruction access bit fault handler.
576 *
577 * @param vector Interruption vector.
[25d7709]578 * @param istate Structure with saved interruption state.
[9ad03fe]579 */
[7f1c620]580void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
[95042fd]581{
[567807b1]582 region_register rr;
583 rid_t rid;
[7f1c620]584 uintptr_t va;
[567807b1]585 pte_t *t;
586
587 va = istate->cr_ifa; /* faulting address */
588 rr.word = rr_read(VA2VRN(va));
589 rid = rr.map.rid;
[9ad03fe]590
[2299914]591 page_table_lock(AS, true);
[567807b1]592 t = page_mapping_find(AS, va);
[9ad03fe]593 ASSERT(t && t->p);
[567807b1]594 if (t && t->p && t->x) {
[9ad03fe]595 /*
596 * Update the Accessed bit in page tables and reinsert
597 * the mapping into ITC.
598 */
599 t->a = true;
600 itc_pte_copy(t);
[567807b1]601 } else {
602 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
[fbf7b4c]603 fault_if_from_uspace(istate,"Page fault at %p",va);
[567807b1]604 panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
605 t->a = true;
606 itc_pte_copy(t);
607 }
[9ad03fe]608 }
[2299914]609 page_table_unlock(AS, true);
[95042fd]610}
[89298e3]611
[9ad03fe]612/** Data access bit fault handler.
613 *
614 * @param vector Interruption vector.
[25d7709]615 * @param istate Structure with saved interruption state.
[9ad03fe]616 */
[7f1c620]617void data_access_bit_fault(uint64_t vector, istate_t *istate)
[95042fd]618{
[567807b1]619 region_register rr;
620 rid_t rid;
[7f1c620]621 uintptr_t va;
[9ad03fe]622 pte_t *t;
623
[567807b1]624 va = istate->cr_ifa; /* faulting address */
625 rr.word = rr_read(VA2VRN(va));
626 rid = rr.map.rid;
627
[2299914]628 page_table_lock(AS, true);
[567807b1]629 t = page_mapping_find(AS, va);
[9ad03fe]630 ASSERT(t && t->p);
631 if (t && t->p) {
632 /*
633 * Update the Accessed bit in page tables and reinsert
634 * the mapping into DTC.
635 */
636 t->a = true;
637 dtc_pte_copy(t);
[567807b1]638 } else {
639 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
[fbf7b4c]640 fault_if_from_uspace(istate,"Page fault at %p",va);
[567807b1]641 panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
642 t->a = true;
643 itc_pte_copy(t);
644 }
[9ad03fe]645 }
[2299914]646 page_table_unlock(AS, true);
[89298e3]647}
648
[9ad03fe]649/** Page not present fault handler.
650 *
651 * @param vector Interruption vector.
[25d7709]652 * @param istate Structure with saved interruption state.
[9ad03fe]653 */
[7f1c620]654void page_not_present(uint64_t vector, istate_t *istate)
[95042fd]655{
[9ad03fe]656 region_register rr;
[567807b1]657 rid_t rid;
[7f1c620]658 uintptr_t va;
[9ad03fe]659 pte_t *t;
660
[25d7709]661 va = istate->cr_ifa; /* faulting address */
[567807b1]662 rr.word = rr_read(VA2VRN(va));
663 rid = rr.map.rid;
664
[2299914]665 page_table_lock(AS, true);
[9ad03fe]666 t = page_mapping_find(AS, va);
667 ASSERT(t);
668
669 if (t->p) {
670 /*
671 * If the Present bit is set in page hash table, just copy it
672 * and update ITC/DTC.
673 */
674 if (t->x)
675 itc_pte_copy(t);
676 else
677 dtc_pte_copy(t);
[2299914]678 page_table_unlock(AS, true);
[9ad03fe]679 } else {
[2299914]680 page_table_unlock(AS, true);
[567807b1]681 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
[fbf7b4c]682 fault_if_from_uspace(istate,"Page fault at %p",va);
[567807b1]683 panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rid);
[9ad03fe]684 }
685 }
[95042fd]686}
[b45c443]687
688 /** @}
689 */
690
Note: See TracBrowser for help on using the repository browser.