source: mainline/kernel/arch/ia64/src/mm/tlb.c@ d8c0dc5

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since d8c0dc5 was ef5de6d, checked in by Jakub Jermar <jakub@…>, 17 years ago

Remove most of the ia64 dead / commented out code.

  • Property mode set to 100644
File size: 17.6 KB
RevLine 
[36b01bb2]1/*
[df4ed85]2 * Copyright (c) 2006 Jakub Jermar
[36b01bb2]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[ee289cf0]29/** @addtogroup ia64mm
[b45c443]30 * @{
31 */
32/** @file
33 */
34
[36b01bb2]35/*
36 * TLB management.
37 */
38
39#include <mm/tlb.h>
[a0d74fd]40#include <mm/asid.h>
[9ad03fe]41#include <mm/page.h>
42#include <mm/as.h>
[bc78c75]43#include <arch/mm/tlb.h>
[a0d74fd]44#include <arch/mm/page.h>
[68091bd]45#include <arch/mm/vhpt.h>
[89298e3]46#include <arch/barrier.h>
[2c49fbbe]47#include <arch/interrupt.h>
[7c322bd]48#include <arch/pal/pal.h>
49#include <arch/asm.h>
[2c49fbbe]50#include <panic.h>
[1065603e]51#include <print.h>
[9ad03fe]52#include <arch.h>
[a175a67]53#include <interrupt.h>
[36b01bb2]54
/** Invalidate all TLB entries.
 *
 * Walks the address grid prescribed by PAL (base, two counts, two
 * strides) and issues a ptc.e purge at every point, which by the
 * architecture's contract flushes the entire TLB.
 */
void tlb_invalidate_all(void)
{
	ipl_t ipl;
	uintptr_t adr;
	uint32_t count1, count2, stride1, stride2;
	
	unsigned int i, j;
	
	/* Query PAL for the platform-specific ptc.e loop parameters. */
	adr = PAL_PTCE_INFO_BASE();
	count1 = PAL_PTCE_INFO_COUNT1();
	count2 = PAL_PTCE_INFO_COUNT2();
	stride1 = PAL_PTCE_INFO_STRIDE1();
	stride2 = PAL_PTCE_INFO_STRIDE2();
	
	/* The purge sequence must not be interleaved with other activity. */
	ipl = interrupts_disable();
	
	for (i = 0; i < count1; i++) {
		for (j = 0; j < count2; j++) {
			asm volatile (
				"ptc.e %0 ;;"
				:
				: "r" (adr)
			);
			adr += stride2;	/* inner stride between purges */
		}
		adr += stride1;	/* outer stride between purge rows */
	}
	
	interrupts_restore(ipl);
	
	/* Serialize data and instruction streams after the purge. */
	srlz_d();
	srlz_i();
#ifdef CONFIG_VHPT
	/* Keep the hardware walker's hash table consistent with the TLB. */
	vhpt_invalidate_all();
#endif
}
92
/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
	/*
	 * Selective purge by ASID/RID is not implemented here; flushing
	 * the whole TLB is coarse but trivially covers the given ASID.
	 */
	tlb_invalidate_all();
}
[bc78c75]101
[a82500ce]102
[7f1c620]103void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
[a82500ce]104{
[d0cf9de]105 region_register rr;
106 bool restore_rr = false;
[1065603e]107 int b = 0;
108 int c = cnt;
[9bda3af6]109
[7f1c620]110 uintptr_t va;
[1065603e]111 va = page;
[d0cf9de]112
113 rr.word = rr_read(VA2VRN(va));
114 if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
115 /*
116 * The selected region register does not contain required RID.
117 * Save the old content of the register and replace the RID.
118 */
119 region_register rr0;
120
121 rr0 = rr;
122 rr0.map.rid = ASID2RID(asid, VA2VRN(va));
123 rr_write(VA2VRN(va), rr0.word);
124 srlz_d();
125 srlz_i();
126 }
127
[1065603e]128 while(c >>= 1)
129 b++;
130 b >>= 1;
[7f1c620]131 uint64_t ps;
[d0cf9de]132
[1065603e]133 switch (b) {
[ee289cf0]134 case 0: /*cnt 1-3*/
135 ps = PAGE_WIDTH;
136 break;
137 case 1: /*cnt 4-15*/
138 ps = PAGE_WIDTH+2;
139 va &= ~((1<<ps)-1);
140 break;
141 case 2: /*cnt 16-63*/
142 ps = PAGE_WIDTH+4;
143 va &= ~((1<<ps)-1);
144 break;
145 case 3: /*cnt 64-255*/
146 ps = PAGE_WIDTH+6;
147 va &= ~((1<<ps)-1);
148 break;
149 case 4: /*cnt 256-1023*/
150 ps = PAGE_WIDTH+8;
151 va &= ~((1<<ps)-1);
152 break;
153 case 5: /*cnt 1024-4095*/
154 ps = PAGE_WIDTH+10;
155 va &= ~((1<<ps)-1);
156 break;
157 case 6: /*cnt 4096-16383*/
158 ps = PAGE_WIDTH+12;
159 va &= ~((1<<ps)-1);
160 break;
161 case 7: /*cnt 16384-65535*/
162 case 8: /*cnt 65536-(256K-1)*/
163 ps = PAGE_WIDTH+14;
164 va &= ~((1<<ps)-1);
165 break;
166 default:
167 ps=PAGE_WIDTH+18;
168 va&=~((1<<ps)-1);
169 break;
[d0cf9de]170 }
[1065603e]171 for(; va<(page+cnt*(PAGE_SIZE)); va += (1<<ps)) {
[e7b7be3f]172 asm volatile (
[9bda3af6]173 "ptc.l %0,%1;;"
174 :
[1065603e]175 : "r" (va), "r" (ps<<2)
[9bda3af6]176 );
[d0cf9de]177 }
178 srlz_d();
179 srlz_i();
180
181 if (restore_rr) {
182 rr_write(VA2VRN(va), rr.word);
183 srlz_d();
184 srlz_i();
185 }
[a82500ce]186}
187
[95042fd]188/** Insert data into data translation cache.
189 *
190 * @param va Virtual page address.
191 * @param asid Address space identifier.
192 * @param entry The rest of TLB entry as required by TLB insertion format.
193 */
[7f1c620]194void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
[b994a60]195{
[95042fd]196 tc_mapping_insert(va, asid, entry, true);
197}
[bc78c75]198
[95042fd]199/** Insert data into instruction translation cache.
200 *
201 * @param va Virtual page address.
202 * @param asid Address space identifier.
203 * @param entry The rest of TLB entry as required by TLB insertion format.
204 */
[7f1c620]205void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
[b994a60]206{
[95042fd]207 tc_mapping_insert(va, asid, entry, false);
208}
[bc78c75]209
/** Insert data into instruction or data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
 */
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register rr;
	bool restore_rr = false;
	
	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;
		
		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}
	
	/*
	 * With PSR.ic temporarily cleared, load cr.ifa (faulting address)
	 * and cr.itir (page size/key) and issue the insertion. Predicates
	 * p6/p7 select itc.i vs. itc.d based on the dtc argument; the
	 * original PSR is kept in r8 and restored afterwards.
	 */
	asm volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n" /* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n" /* va */
		"mov cr.itir=%2;;\n" /* entry.word[1] */
		"cmp.eq p6,p7 = %4,r0;;\n" /* decide between itc and dtc */
		"(p6) itc.i %3;;\n"
		"(p7) itc.d %3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
		: "p6", "p7", "r8"
	);
	
	if (restore_rr) {
		/* Put the original RID back and serialize. */
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
260
[95042fd]261/** Insert data into instruction translation register.
262 *
263 * @param va Virtual page address.
264 * @param asid Address space identifier.
265 * @param entry The rest of TLB entry as required by TLB insertion format.
266 * @param tr Translation register.
267 */
[7f1c620]268void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
[bc78c75]269{
[95042fd]270 tr_mapping_insert(va, asid, entry, false, tr);
[bc78c75]271}
272
[95042fd]273/** Insert data into data translation register.
274 *
275 * @param va Virtual page address.
276 * @param asid Address space identifier.
277 * @param entry The rest of TLB entry as required by TLB insertion format.
278 * @param tr Translation register.
279 */
[7f1c620]280void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
[95042fd]281{
282 tr_mapping_insert(va, asid, entry, true, tr);
283}
[bc78c75]284
/** Insert data into instruction or data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
	region_register rr;
	bool restore_rr = false;
	
	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;
		
		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}
	
	/*
	 * Same sequence as tc_mapping_insert(), but the entry is pinned
	 * into translation register slot tr (itr.i/itr.d) rather than
	 * inserted into the replaceable translation cache. PSR.ic is
	 * cleared around the insertion; PSR is saved in r8.
	 */
	asm volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n" /* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n" /* va */
		"mov cr.itir=%2;;\n" /* entry.word[1] */
		"cmp.eq p6,p7=%5,r0;;\n" /* decide between itr and dtr */
		"(p6) itr.i itr[%4]=%3;;\n"
		"(p7) itr.d dtr[%4]=%3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
		: "p6", "p7", "r8"
	);
	
	if (restore_rr) {
		/* Put the original RID back and serialize. */
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
336
[a0d74fd]337/** Insert data into DTLB.
338 *
[208259c]339 * @param page Virtual page address including VRN bits.
340 * @param frame Physical frame address.
[a0d74fd]341 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
342 * @param tr Translation register if dtr is true, ignored otherwise.
343 */
[7f1c620]344void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, index_t tr)
[a0d74fd]345{
346 tlb_entry_t entry;
347
348 entry.word[0] = 0;
349 entry.word[1] = 0;
350
351 entry.p = true; /* present */
352 entry.ma = MA_WRITEBACK;
353 entry.a = true; /* already accessed */
354 entry.d = true; /* already dirty */
355 entry.pl = PL_KERNEL;
356 entry.ar = AR_READ | AR_WRITE;
357 entry.ppn = frame >> PPN_SHIFT;
358 entry.ps = PAGE_WIDTH;
359
360 if (dtr)
361 dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
362 else
363 dtc_mapping_insert(page, ASID_KERNEL, entry);
364}
365
/** Purge kernel entries from DTR.
 *
 * Purge DTR entries used by the kernel.
 *
 * @param page Virtual page address including VRN bits.
 * @param width Width of the purge in bits.
 */
void dtr_purge(uintptr_t page, count_t width)
{
	/* ptr.d purges the data TR; the size operand carries width in bits 7:2. */
	asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width<<2));
}
377
378
[9ad03fe]379/** Copy content of PTE into data translation cache.
380 *
381 * @param t PTE.
382 */
383void dtc_pte_copy(pte_t *t)
384{
385 tlb_entry_t entry;
386
387 entry.word[0] = 0;
388 entry.word[1] = 0;
389
390 entry.p = t->p;
391 entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
392 entry.a = t->a;
393 entry.d = t->d;
394 entry.pl = t->k ? PL_KERNEL : PL_USER;
395 entry.ar = t->w ? AR_WRITE : AR_READ;
396 entry.ppn = t->frame >> PPN_SHIFT;
397 entry.ps = PAGE_WIDTH;
398
399 dtc_mapping_insert(t->page, t->as->asid, entry);
[68091bd]400#ifdef CONFIG_VHPT
401 vhpt_mapping_insert(t->page, t->as->asid, entry);
402#endif
[9ad03fe]403}
404
405/** Copy content of PTE into instruction translation cache.
406 *
407 * @param t PTE.
408 */
409void itc_pte_copy(pte_t *t)
410{
411 tlb_entry_t entry;
412
413 entry.word[0] = 0;
414 entry.word[1] = 0;
415
416 ASSERT(t->x);
417
418 entry.p = t->p;
419 entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
420 entry.a = t->a;
421 entry.pl = t->k ? PL_KERNEL : PL_USER;
422 entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
423 entry.ppn = t->frame >> PPN_SHIFT;
424 entry.ps = PAGE_WIDTH;
425
426 itc_mapping_insert(t->page, t->as->asid, entry);
[68091bd]427#ifdef CONFIG_VHPT
428 vhpt_mapping_insert(t->page, t->as->asid, entry);
429#endif
[9ad03fe]430}
431
/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;
	
	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;	/* kept for the diagnostic panic message */
	
	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in software page hash table.
		 * Insert it into instruction translation cache.
		 */
		itc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to address space page fault handler.
		 * The page tables are unlocked first.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate,"Page fault at %p",va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
		}
	}
}
[89298e3]468
[46321fb]469
470
471static int is_io_page_accessible(int page)
472{
473 if(TASK->arch.iomap) return bitmap_get(TASK->arch.iomap,page);
474 else return 0;
475}
476
477#define IO_FRAME_BASE 0xFFFFC000000
478
479/** There is special handling of memmaped lagacy io, because
480 * of 4KB sized access
481 * only for userspace
482 *
483 * @param va virtual address of page fault
484 * @param istate Structure with saved interruption state.
485 *
486 *
487 * @return 1 on success, 0 on fail
488 */
489static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
490{
491 if((va >= IO_OFFSET ) && (va < IO_OFFSET + (1<<IO_PAGE_WIDTH)))
492 if(TASK){
493
494 uint64_t io_page=(va & ((1<<IO_PAGE_WIDTH)-1)) >> (USPACE_IO_PAGE_WIDTH);
495 if(is_io_page_accessible(io_page)){
496 uint64_t page,frame;
497
498 page = IO_OFFSET + (1 << USPACE_IO_PAGE_WIDTH) * io_page;
499 frame = IO_FRAME_BASE + (1 << USPACE_IO_PAGE_WIDTH) * io_page;
500
501
502 tlb_entry_t entry;
503
504 entry.word[0] = 0;
505 entry.word[1] = 0;
506
507 entry.p = true; /* present */
508 entry.ma = MA_UNCACHEABLE;
509 entry.a = true; /* already accessed */
510 entry.d = true; /* already dirty */
511 entry.pl = PL_USER;
512 entry.ar = AR_READ | AR_WRITE;
[ef5de6d]513 entry.ppn = frame >> PPN_SHIFT;
[46321fb]514 entry.ps = USPACE_IO_PAGE_WIDTH;
515
[ef5de6d]516 dtc_mapping_insert(page, TASK->as->asid, entry);
[46321fb]517 return 1;
518 }else {
519 fault_if_from_uspace(istate,"IO access fault at %p",va);
520 return 0;
521 }
522 } else
523 return 0;
524 else
525 return 0;
526
527 return 0;
528
529}
530
531
532
533
/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;
	
	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;
	if (RID2ASID(rid) == ASID_KERNEL) {
		if (VA2VRN(va) == VRN_KERNEL) {
			/*
			 * Provide KA2PA(identity) mapping for faulting piece of
			 * kernel address space.
			 */
			dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
			return;
		}
	}
	
	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into data translation cache.
		 */
		dtc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		page_table_unlock(AS, true);
		/* Memory-mapped legacy IO pages get special treatment. */
		if (try_memmap_io_insertion(va,istate)) return;
		/*
		 * Forward the page fault to the address space page fault handler.
		 */
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate,"Page fault at %p",va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
		}
	}
}
[89298e3]581
/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
{
	/* A nested DTLB fault indicates a kernel bug; treat it as fatal. */
	panic("%s\n", __func__);
}
[89298e3]593
/** Data Dirty bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;
	
	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;	/* kept for the diagnostic panic message */
	
	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	/*
	 * NOTE(review): the ASSERT contradicts the guarded test below; with
	 * assertions compiled out, t may be NULL here and the else branch
	 * would dereference it after a failed as_page_fault() -- confirm
	 * the intended invariant.
	 */
	ASSERT(t && t->p);
	if (t && t->p && t->w) {
		/*
		 * Update the Dirty bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->d = true;
		dtc_pte_copy(t);
	} else {
		/* Missing or read-only page: defer to the generic handler. */
		if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate,"Page fault at %p",va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
			/*
			 * NOTE(review): the two statements below appear
			 * unreachable if panic() does not return -- confirm.
			 */
			t->d = true;
			dtc_pte_copy(t);
		}
	}
	page_table_unlock(AS, true);
}
[89298e3]630
/** Instruction access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;
	
	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;	/* kept for the diagnostic panic message */
	
	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	/*
	 * NOTE(review): the ASSERT contradicts the guarded test below; with
	 * assertions compiled out, t may be NULL in the else branch -- confirm.
	 */
	ASSERT(t && t->p);
	if (t && t->p && t->x) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into ITC.
		 */
		t->a = true;
		itc_pte_copy(t);
	} else {
		/* Missing or non-executable page: defer to the generic handler. */
		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate,"Page fault at %p",va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
			/*
			 * NOTE(review): the two statements below appear
			 * unreachable if panic() does not return -- confirm.
			 */
			t->a = true;
			itc_pte_copy(t);
		}
	}
	page_table_unlock(AS, true);
}
[89298e3]667
[9ad03fe]668/** Data access bit fault handler.
669 *
670 * @param vector Interruption vector.
[25d7709]671 * @param istate Structure with saved interruption state.
[9ad03fe]672 */
[7f1c620]673void data_access_bit_fault(uint64_t vector, istate_t *istate)
[95042fd]674{
[567807b1]675 region_register rr;
676 rid_t rid;
[7f1c620]677 uintptr_t va;
[9ad03fe]678 pte_t *t;
679
[567807b1]680 va = istate->cr_ifa; /* faulting address */
681 rr.word = rr_read(VA2VRN(va));
682 rid = rr.map.rid;
683
[2299914]684 page_table_lock(AS, true);
[567807b1]685 t = page_mapping_find(AS, va);
[9ad03fe]686 ASSERT(t && t->p);
687 if (t && t->p) {
688 /*
689 * Update the Accessed bit in page tables and reinsert
690 * the mapping into DTC.
691 */
692 t->a = true;
693 dtc_pte_copy(t);
[567807b1]694 } else {
695 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
[fbf7b4c]696 fault_if_from_uspace(istate,"Page fault at %p",va);
[3ee8a075]697 panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
[567807b1]698 t->a = true;
699 itc_pte_copy(t);
700 }
[9ad03fe]701 }
[2299914]702 page_table_unlock(AS, true);
[89298e3]703}
704
/** Page not present fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void page_not_present(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;
	
	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;	/* kept for the diagnostic panic message */
	
	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	ASSERT(t);
	
	if (t->p) {
		/*
		 * If the Present bit is set in page hash table, just copy it
		 * and update ITC/DTC.
		 */
		if (t->x)
			itc_pte_copy(t);
		else
			dtc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		/* Release the lock before forwarding to the generic handler. */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate,"Page fault at %p",va);
			panic("%s: va=%p, rid=%d\n", __func__, va, rid);
		}
	}
}
[b45c443]743
[ee289cf0]744/** @}
[b45c443]745 */
Note: See TracBrowser for help on using the repository browser.