source: mainline/kernel/arch/ia64/src/mm/tlb.c@ 50b3d30

Last change on this file since 50b3d30 was 46321fb, checked in by Jakub Vana <jakub.vana@…>, 17 years ago

IA64: Userspace I/O support

File size: 18.0 KB
/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ia64mm
 * @{
 */
/** @file
 */

/*
 * TLB management.
 */

#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/mm/vhpt.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <arch/pal/pal.h>
#include <arch/asm.h>
#include <panic.h>
#include <print.h>
#include <arch.h>
#include <interrupt.h>

/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
	ipl_t ipl;
	uintptr_t adr;
	uint32_t count1, count2, stride1, stride2;

	unsigned int i, j;

	adr = PAL_PTCE_INFO_BASE();
	count1 = PAL_PTCE_INFO_COUNT1();
	count2 = PAL_PTCE_INFO_COUNT2();
	stride1 = PAL_PTCE_INFO_STRIDE1();
	stride2 = PAL_PTCE_INFO_STRIDE2();

	ipl = interrupts_disable();

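	/*
	 * Note added for clarity: PAL_PTCE_INFO describes the loop needed to
	 * flush the entire local TLB with ptc.e. Starting at the base address
	 * returned by the firmware, the outer loop runs count1 times advancing
	 * by stride1 and the inner loop runs count2 times advancing by
	 * stride2, which is exactly what the nested loops below do.
	 */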
	for (i = 0; i < count1; i++) {
		for (j = 0; j < count2; j++) {
			asm volatile (
				"ptc.e %0 ;;"
				:
				: "r" (adr)
			);
			adr += stride2;
		}
		adr += stride1;
	}

	interrupts_restore(ipl);

	srlz_d();
	srlz_i();
#ifdef CONFIG_VHPT
	vhpt_invalidate_all();
#endif
}

/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
	tlb_invalidate_all();
}

/** Invalidate entries mapping a range of pages.
 *
 * @param asid Address space identifier.
 * @param page Address of the first page whose entry is to be invalidated.
 * @param cnt Number of entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
	region_register rr;
	bool restore_rr = false;
	int b = 0;
	int c = cnt;

	uintptr_t va;
	va = page;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	while (c >>= 1)
		b++;
	b >>= 1;
	uint64_t ps;

	switch (b) {
	case 0: /* cnt 1 - 3 */
		ps = PAGE_WIDTH;
		break;
	case 1: /* cnt 4 - 15 */
		ps = PAGE_WIDTH + 2;
		va &= ~((1ULL << ps) - 1);
		break;
	case 2: /* cnt 16 - 63 */
		ps = PAGE_WIDTH + 4;
		va &= ~((1ULL << ps) - 1);
		break;
	case 3: /* cnt 64 - 255 */
		ps = PAGE_WIDTH + 6;
		va &= ~((1ULL << ps) - 1);
		break;
	case 4: /* cnt 256 - 1023 */
		ps = PAGE_WIDTH + 8;
		va &= ~((1ULL << ps) - 1);
		break;
	case 5: /* cnt 1024 - 4095 */
		ps = PAGE_WIDTH + 10;
		va &= ~((1ULL << ps) - 1);
		break;
	case 6: /* cnt 4096 - 16383 */
		ps = PAGE_WIDTH + 12;
		va &= ~((1ULL << ps) - 1);
		break;
	case 7: /* cnt 16384 - 65535 */
	case 8: /* cnt 65536 - (256K - 1) */
		ps = PAGE_WIDTH + 14;
		va &= ~((1ULL << ps) - 1);
		break;
	default:
		ps = PAGE_WIDTH + 18;
		va &= ~((1ULL << ps) - 1);
		break;
	}
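	/*
	 * Worked example added for clarity: for cnt == 100 the loop above
	 * shifts c six times, so b == 6 >> 1 == 3 and ps == PAGE_WIDTH + 6,
	 * i.e. va is rounded down to a 2^ps boundary and the range is purged
	 * in 64-page steps. The ptc.l instruction below takes the purge size
	 * in bits 7:2 of its second operand, hence the ps << 2.
	 */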
	for (; va < (page + cnt * PAGE_SIZE); va += (1ULL << ps)) {
		asm volatile (
			"ptc.l %0, %1;;"
			:
			: "r" (va), "r" (ps << 2)
		);
	}
	srlz_d();
	srlz_i();

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}

/** Insert data into data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 */
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, true);
}

/** Insert data into instruction translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 */
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, false);
}

/** Insert data into instruction or data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 * @param dtc If true, insert into the data translation cache; otherwise insert
 *     into the instruction translation cache.
 */
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

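	/*
	 * Note added for clarity: the sequence below saves the PSR into r8 and
	 * clears PSR.ic (the rsm) before touching cr.ifa/cr.itir and issuing
	 * itc, since these operations must execute with interruption
	 * collection disabled; the original PSR is restored afterwards.
	 */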
	asm volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7 = %4,r0;;\n"	/* decide between itc and dtc */
		"(p6) itc.i %3;;\n"
		"(p7) itc.d %3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}

/** Insert data into instruction translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	tr_mapping_insert(va, asid, entry, false, tr);
}

/** Insert data into data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	tr_mapping_insert(va, asid, entry, true, tr);
}

/** Insert data into instruction or data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 * @param dtr If true, insert into a data translation register; otherwise insert
 *     into an instruction translation register.
 * @param tr Translation register.
 */
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	asm volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7=%5,r0;;\n"	/* decide between itr and dtr */
		"(p6) itr.i itr[%4]=%3;;\n"
		"(p7) itr.d dtr[%4]=%3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
345
[a0d74fd]346/** Insert data into DTLB.
347 *
[208259c]348 * @param page Virtual page address including VRN bits.
349 * @param frame Physical frame address.
[a0d74fd]350 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
351 * @param tr Translation register if dtr is true, ignored otherwise.
352 */
[7f1c620]353void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, index_t tr)
[a0d74fd]354{
355 tlb_entry_t entry;
356
357 entry.word[0] = 0;
358 entry.word[1] = 0;
359
360 entry.p = true; /* present */
361 entry.ma = MA_WRITEBACK;
362 entry.a = true; /* already accessed */
363 entry.d = true; /* already dirty */
364 entry.pl = PL_KERNEL;
365 entry.ar = AR_READ | AR_WRITE;
366 entry.ppn = frame >> PPN_SHIFT;
367 entry.ps = PAGE_WIDTH;
368
369 if (dtr)
370 dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
371 else
372 dtc_mapping_insert(page, ASID_KERNEL, entry);
373}
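
/*
 * Usage note (added): the alternate data TLB fault handler below relies on
 * this routine to lazily establish the identity mapping for kernel addresses:
 *
 *	dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
 *
 * i.e. a cacheable, kernel-only, read/write entry inserted into the DTC.
 */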

/** Purge kernel entries from DTR.
 *
 * Purge DTR entries used by the kernel.
 *
 * @param page Virtual page address including VRN bits.
 * @param width Width of the purge in bits.
 */
void dtr_purge(uintptr_t page, count_t width)
{
	asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width << 2));
}

/** Copy content of PTE into data translation cache.
 *
 * @param t PTE.
 */
void dtc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.d = t->d;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->w ? AR_WRITE : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	dtc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
	vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

/** Copy content of PTE into instruction translation cache.
 *
 * @param t PTE.
 */
void itc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	ASSERT(t->x);

	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	itc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
	vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into the instruction translation cache.
		 */
		itc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p", va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
		}
	}
}

/** Check whether the task is allowed to access the given legacy I/O page.
 *
 * @param page Index of the userspace I/O page within the I/O window.
 *
 * @return Non-zero if the current task's I/O bitmap permits the access,
 *     zero otherwise.
 */
static int is_io_page_accessible(int page)
{
	if (TASK->arch.iomap)
		return bitmap_get(TASK->arch.iomap, page);
	else
		return 0;
}

#define IO_FRAME_BASE 0xFFFFC000000
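
/*
 * Note added for clarity: try_memmap_io_insertion() below derives the legacy
 * I/O page index from the offset of the faulting address within the I/O
 * window,
 *
 *	io_page = (va & ((1 << IO_PAGE_WIDTH) - 1)) >> USPACE_IO_PAGE_WIDTH,
 *
 * checks it against the task's I/O bitmap and, if permitted, maps the
 * USPACE_IO_PAGE_WIDTH-sized page at IO_OFFSET onto the matching frame at
 * IO_FRAME_BASE as an uncacheable, user-accessible read/write DTC entry.
 */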

/** Handle memory-mapped legacy I/O accesses from userspace.
 *
 * Memory-mapped legacy I/O is handled specially because it is accessed
 * in 4 KB pages, and only on behalf of userspace.
 *
 * @param va Virtual address of the page fault.
 * @param istate Structure with saved interruption state.
 *
 * @return 1 on success, 0 on failure.
 */
static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
{
	if ((va >= IO_OFFSET) && (va < IO_OFFSET + (1 << IO_PAGE_WIDTH))) {
		if (TASK) {
			uint64_t io_page = (va & ((1 << IO_PAGE_WIDTH) - 1)) >>
			    USPACE_IO_PAGE_WIDTH;

			if (is_io_page_accessible(io_page)) {
				uint64_t page, frame;

				page = IO_OFFSET + (1 << USPACE_IO_PAGE_WIDTH) * io_page;
				frame = IO_FRAME_BASE + (1 << USPACE_IO_PAGE_WIDTH) * io_page;

				tlb_entry_t entry;

				entry.word[0] = 0;
				entry.word[1] = 0;

				entry.p = true;			/* present */
				entry.ma = MA_UNCACHEABLE;
				entry.a = true;			/* already accessed */
				entry.d = true;			/* already dirty */
				entry.pl = PL_USER;
				entry.ar = AR_READ | AR_WRITE;
				entry.ppn = frame >> PPN_SHIFT;	/* TODO: compute the frame */
				entry.ps = USPACE_IO_PAGE_WIDTH;

				dtc_mapping_insert(page, TASK->as->asid, entry);	/* TODO: determine the ASID */
				return 1;
			} else {
				fault_if_from_uspace(istate, "IO access fault at %p", va);
				return 0;
			}
		} else
			return 0;
	} else
		return 0;

	return 0;
}

/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;
	if (RID2ASID(rid) == ASID_KERNEL) {
		if (VA2VRN(va) == VRN_KERNEL) {
			/*
			 * Provide KA2PA identity mapping for the faulting
			 * piece of the kernel address space.
			 */
			dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
			return;
		}
	}

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into the data translation cache.
		 */
		dtc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		page_table_unlock(AS, true);
		if (try_memmap_io_insertion(va, istate))
			return;
		/*
		 * Forward the page fault to the address space page fault handler.
		 */
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p", va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
		}
	}
}

/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
{
	panic("%s\n", __func__);
}

/** Data Dirty bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	ASSERT(t && t->p);
	if (t && t->p && t->w) {
		/*
		 * Update the Dirty bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->d = true;
		dtc_pte_copy(t);
	} else {
		if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p", va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
			t->d = true;
			dtc_pte_copy(t);
		}
	}
	page_table_unlock(AS, true);
}

/** Instruction access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	ASSERT(t && t->p);
	if (t && t->p && t->x) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into ITC.
		 */
		t->a = true;
		itc_pte_copy(t);
	} else {
		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p", va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
			t->a = true;
			itc_pte_copy(t);
		}
	}
	page_table_unlock(AS, true);
}

/** Data access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_access_bit_fault(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->a = true;
		dtc_pte_copy(t);
	} else {
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p", va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
			t->a = true;
			dtc_pte_copy(t);
		}
	}
	page_table_unlock(AS, true);
}

/** Page not present fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void page_not_present(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	ASSERT(t);

	if (t->p) {
		/*
		 * If the Present bit is set in page hash table, just copy it
		 * and update ITC/DTC.
		 */
		if (t->x)
			itc_pte_copy(t);
		else
			dtc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p", va);
			panic("%s: va=%p, rid=%d\n", __func__, va, rid);
		}
	}
}

/** @}
 */