source: mainline/kernel/arch/ia64/src/mm/tlb.c@ 98000fb

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 98000fb was 98000fb, checked in by Martin Decky <martin@…>, 16 years ago

remove redundant index_t and count_t types (which were always quite ambiguous and not actually needed)

  • Property mode set to 100644
File size: 17.8 KB
RevLine 
[36b01bb2]1/*
[df4ed85]2 * Copyright (c) 2006 Jakub Jermar
[36b01bb2]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[ee289cf0]29/** @addtogroup ia64mm
[b45c443]30 * @{
31 */
32/** @file
33 */
34
[36b01bb2]35/*
36 * TLB management.
37 */
38
39#include <mm/tlb.h>
[a0d74fd]40#include <mm/asid.h>
[9ad03fe]41#include <mm/page.h>
42#include <mm/as.h>
[bc78c75]43#include <arch/mm/tlb.h>
[a0d74fd]44#include <arch/mm/page.h>
[68091bd]45#include <arch/mm/vhpt.h>
[89298e3]46#include <arch/barrier.h>
[2c49fbbe]47#include <arch/interrupt.h>
[7c322bd]48#include <arch/pal/pal.h>
49#include <arch/asm.h>
[2c49fbbe]50#include <panic.h>
[1065603e]51#include <print.h>
[9ad03fe]52#include <arch.h>
[a175a67]53#include <interrupt.h>
[36b01bb2]54
[ef67bab]55/** Invalidate all TLB entries. */
[36b01bb2]56void tlb_invalidate_all(void)
57{
[ee289cf0]58 ipl_t ipl;
59 uintptr_t adr;
60 uint32_t count1, count2, stride1, stride2;
[7c322bd]61
[6c441cf8]62 unsigned int i, j;
[7c322bd]63
[ee289cf0]64 adr = PAL_PTCE_INFO_BASE();
65 count1 = PAL_PTCE_INFO_COUNT1();
66 count2 = PAL_PTCE_INFO_COUNT2();
67 stride1 = PAL_PTCE_INFO_STRIDE1();
68 stride2 = PAL_PTCE_INFO_STRIDE2();
[7c322bd]69
[ee289cf0]70 ipl = interrupts_disable();
71
[6c441cf8]72 for (i = 0; i < count1; i++) {
73 for (j = 0; j < count2; j++) {
[e7b7be3f]74 asm volatile (
[ee289cf0]75 "ptc.e %0 ;;"
76 :
77 : "r" (adr)
78 );
79 adr += stride2;
[7c322bd]80 }
[ee289cf0]81 adr += stride1;
82 }
[7c322bd]83
[ee289cf0]84 interrupts_restore(ipl);
[7c322bd]85
[ee289cf0]86 srlz_d();
87 srlz_i();
[68091bd]88#ifdef CONFIG_VHPT
[ee289cf0]89 vhpt_invalidate_all();
[68091bd]90#endif
[36b01bb2]91}
92
93/** Invalidate entries belonging to an address space.
94 *
[666773c]95 * @param asid Address space identifier.
[36b01bb2]96 */
97void tlb_invalidate_asid(asid_t asid)
98{
[a82500ce]99 tlb_invalidate_all();
[36b01bb2]100}
[bc78c75]101
[a82500ce]102
[98000fb]103void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
[a82500ce]104{
[d0cf9de]105 region_register rr;
106 bool restore_rr = false;
[1065603e]107 int b = 0;
108 int c = cnt;
[9bda3af6]109
[7f1c620]110 uintptr_t va;
[1065603e]111 va = page;
[d0cf9de]112
113 rr.word = rr_read(VA2VRN(va));
114 if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
115 /*
116 * The selected region register does not contain required RID.
117 * Save the old content of the register and replace the RID.
118 */
119 region_register rr0;
120
121 rr0 = rr;
122 rr0.map.rid = ASID2RID(asid, VA2VRN(va));
123 rr_write(VA2VRN(va), rr0.word);
124 srlz_d();
125 srlz_i();
126 }
127
[1065603e]128 while(c >>= 1)
129 b++;
130 b >>= 1;
[7f1c620]131 uint64_t ps;
[d0cf9de]132
[1065603e]133 switch (b) {
[666773c]134 case 0: /* cnt 1 - 3 */
[ee289cf0]135 ps = PAGE_WIDTH;
136 break;
[666773c]137 case 1: /* cnt 4 - 15 */
138 ps = PAGE_WIDTH + 2;
139 va &= ~((1 << ps) - 1);
[ee289cf0]140 break;
[666773c]141 case 2: /* cnt 16 - 63 */
142 ps = PAGE_WIDTH + 4;
143 va &= ~((1 << ps) - 1);
[ee289cf0]144 break;
[666773c]145 case 3: /* cnt 64 - 255 */
146 ps = PAGE_WIDTH + 6;
147 va &= ~((1 << ps) - 1);
[ee289cf0]148 break;
[666773c]149 case 4: /* cnt 256 - 1023 */
150 ps = PAGE_WIDTH + 8;
151 va &= ~((1 << ps) - 1);
[ee289cf0]152 break;
[666773c]153 case 5: /* cnt 1024 - 4095 */
154 ps = PAGE_WIDTH + 10;
155 va &= ~((1 << ps) - 1);
[ee289cf0]156 break;
[666773c]157 case 6: /* cnt 4096 - 16383 */
158 ps = PAGE_WIDTH + 12;
159 va &= ~((1 << ps) - 1);
[ee289cf0]160 break;
[666773c]161 case 7: /* cnt 16384 - 65535 */
162 case 8: /* cnt 65536 - (256K - 1) */
163 ps = PAGE_WIDTH + 14;
164 va &= ~((1 << ps) - 1);
[ee289cf0]165 break;
166 default:
[666773c]167 ps = PAGE_WIDTH + 18;
168 va &= ~((1 << ps) - 1);
[ee289cf0]169 break;
[d0cf9de]170 }
[666773c]171 for(; va < (page + cnt * PAGE_SIZE); va += (1 << ps))
172 asm volatile ("ptc.l %0, %1;;" :: "r" (va), "r" (ps << 2));
[d0cf9de]173 srlz_d();
174 srlz_i();
175
176 if (restore_rr) {
177 rr_write(VA2VRN(va), rr.word);
178 srlz_d();
179 srlz_i();
180 }
[a82500ce]181}
182
[95042fd]183/** Insert data into data translation cache.
184 *
[666773c]185 * @param va Virtual page address.
186 * @param asid Address space identifier.
187 * @param entry The rest of TLB entry as required by TLB insertion
188 * format.
[95042fd]189 */
[7f1c620]190void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
[b994a60]191{
[95042fd]192 tc_mapping_insert(va, asid, entry, true);
193}
[bc78c75]194
[95042fd]195/** Insert data into instruction translation cache.
196 *
[666773c]197 * @param va Virtual page address.
198 * @param asid Address space identifier.
199 * @param entry The rest of TLB entry as required by TLB insertion
200 * format.
[95042fd]201 */
[7f1c620]202void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
[b994a60]203{
[95042fd]204 tc_mapping_insert(va, asid, entry, false);
205}
[bc78c75]206
/** Insert data into instruction or data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 * format.
 * @param dtc If true, insert into data translation cache, use
 * instruction translation cache otherwise.
 */
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register rr;
	bool restore_rr = false;

	/*
	 * The insertion takes the RID from the region register selected by
	 * the VRN bits of va; make sure it holds the RID for this ASID.
	 */
	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	asm volatile (
		"mov r8 = psr;;\n"		/* preserve PSR in r8 */
		"rsm %0;;\n"			/* clear PSR.ic (PSR_IC_MASK) */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa = %1\n"		/* va */
		"mov cr.itir = %2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7 = %4,r0;;\n"	/* decide between itc and dtc */
		"(p6) itc.i %3;;\n"
		"(p7) itc.d %3;;\n"
		"mov psr.l = r8;;\n"		/* restore PSR */
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]),
		"r" (entry.word[0]), "r" (dtc)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		/* Put the original region register contents back. */
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
260
[95042fd]261/** Insert data into instruction translation register.
262 *
[666773c]263 * @param va Virtual page address.
264 * @param asid Address space identifier.
265 * @param entry The rest of TLB entry as required by TLB insertion
266 * format.
267 * @param tr Translation register.
[95042fd]268 */
[666773c]269void
[98000fb]270itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
[bc78c75]271{
[95042fd]272 tr_mapping_insert(va, asid, entry, false, tr);
[bc78c75]273}
274
[95042fd]275/** Insert data into data translation register.
276 *
[666773c]277 * @param va Virtual page address.
278 * @param asid Address space identifier.
279 * @param entry The rest of TLB entry as required by TLB insertion
280 * format.
281 * @param tr Translation register.
[95042fd]282 */
[666773c]283void
[98000fb]284dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
[95042fd]285{
286 tr_mapping_insert(va, asid, entry, true, tr);
287}
[bc78c75]288
/** Insert data into instruction or data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion
 * format.
 * @param dtr If true, insert into data translation register, use
 * instruction translation register otherwise.
 * @param tr Translation register.
 */
void
tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr,
    size_t tr)
{
	region_register rr;
	bool restore_rr = false;

	/*
	 * The insertion takes the RID from the region register selected by
	 * the VRN bits of va; make sure it holds the RID for this ASID.
	 */
	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	asm volatile (
		"mov r8 = psr;;\n"		/* preserve PSR in r8 */
		"rsm %0;;\n"			/* clear PSR.ic (PSR_IC_MASK) */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa = %1\n"		/* va */
		"mov cr.itir = %2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7 = %5,r0;;\n"	/* decide between itr and dtr */
		"(p6) itr.i itr[%4] = %3;;\n"
		"(p7) itr.d dtr[%4] = %3;;\n"
		"mov psr.l = r8;;\n"		/* restore PSR */
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]),
		"r" (entry.word[0]), "r" (tr), "r" (dtr)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		/* Put the original region register contents back. */
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
345
[a0d74fd]346/** Insert data into DTLB.
347 *
[666773c]348 * @param page Virtual page address including VRN bits.
349 * @param frame Physical frame address.
350 * @param dtr If true, insert into data translation register, use data
351 * translation cache otherwise.
352 * @param tr Translation register if dtr is true, ignored otherwise.
[a0d74fd]353 */
[666773c]354void
355dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr,
[98000fb]356 size_t tr)
[a0d74fd]357{
358 tlb_entry_t entry;
359
360 entry.word[0] = 0;
361 entry.word[1] = 0;
362
363 entry.p = true; /* present */
364 entry.ma = MA_WRITEBACK;
365 entry.a = true; /* already accessed */
366 entry.d = true; /* already dirty */
367 entry.pl = PL_KERNEL;
368 entry.ar = AR_READ | AR_WRITE;
369 entry.ppn = frame >> PPN_SHIFT;
370 entry.ps = PAGE_WIDTH;
371
372 if (dtr)
373 dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
374 else
375 dtc_mapping_insert(page, ASID_KERNEL, entry);
376}
377
/** Purge kernel entries from DTR.
 *
 * Purge DTR entries used by the kernel.
 *
 * @param page Virtual page address including VRN bits.
 * @param width Width of the purge in bits.
 */
void dtr_purge(uintptr_t page, size_t width)
{
	/* ptr.d encodes the purge size in bits 7:2, hence the shift by 2. */
	asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width << 2));
}
389
390
[9ad03fe]391/** Copy content of PTE into data translation cache.
392 *
[666773c]393 * @param t PTE.
[9ad03fe]394 */
395void dtc_pte_copy(pte_t *t)
396{
397 tlb_entry_t entry;
398
399 entry.word[0] = 0;
400 entry.word[1] = 0;
401
402 entry.p = t->p;
403 entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
404 entry.a = t->a;
405 entry.d = t->d;
406 entry.pl = t->k ? PL_KERNEL : PL_USER;
407 entry.ar = t->w ? AR_WRITE : AR_READ;
408 entry.ppn = t->frame >> PPN_SHIFT;
409 entry.ps = PAGE_WIDTH;
410
411 dtc_mapping_insert(t->page, t->as->asid, entry);
[68091bd]412#ifdef CONFIG_VHPT
413 vhpt_mapping_insert(t->page, t->as->asid, entry);
414#endif
[9ad03fe]415}
416
417/** Copy content of PTE into instruction translation cache.
418 *
[666773c]419 * @param t PTE.
[9ad03fe]420 */
421void itc_pte_copy(pte_t *t)
422{
423 tlb_entry_t entry;
424
425 entry.word[0] = 0;
426 entry.word[1] = 0;
427
428 ASSERT(t->x);
429
430 entry.p = t->p;
431 entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
432 entry.a = t->a;
433 entry.pl = t->k ? PL_KERNEL : PL_USER;
434 entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
435 entry.ppn = t->frame >> PPN_SHIFT;
436 entry.ps = PAGE_WIDTH;
437
438 itc_mapping_insert(t->page, t->as->asid, entry);
[68091bd]439#ifdef CONFIG_VHPT
440 vhpt_mapping_insert(t->page, t->as->asid, entry);
441#endif
[9ad03fe]442}
443
444/** Instruction TLB fault handler for faults with VHPT turned off.
445 *
[666773c]446 * @param vector Interruption vector.
447 * @param istate Structure with saved interruption state.
[9ad03fe]448 */
[7f1c620]449void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate)
[89298e3]450{
[9ad03fe]451 region_register rr;
[567807b1]452 rid_t rid;
[7f1c620]453 uintptr_t va;
[9ad03fe]454 pte_t *t;
455
[25d7709]456 va = istate->cr_ifa; /* faulting address */
[567807b1]457 rr.word = rr_read(VA2VRN(va));
458 rid = rr.map.rid;
459
[2299914]460 page_table_lock(AS, true);
[9ad03fe]461 t = page_mapping_find(AS, va);
462 if (t) {
463 /*
464 * The mapping was found in software page hash table.
465 * Insert it into data translation cache.
466 */
467 itc_pte_copy(t);
[2299914]468 page_table_unlock(AS, true);
[9ad03fe]469 } else {
470 /*
471 * Forward the page fault to address space page fault handler.
472 */
[2299914]473 page_table_unlock(AS, true);
[567807b1]474 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
[f651e80]475 fault_if_from_uspace(istate,"Page fault at %p.",va);
476 panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid,
[666773c]477 istate->cr_iip);
[9ad03fe]478 }
479 }
[95042fd]480}
[89298e3]481
[46321fb]482static int is_io_page_accessible(int page)
483{
[666773c]484 if (TASK->arch.iomap)
[38f6add]485 return bitmap_get(TASK->arch.iomap, page);
[666773c]486 else
487 return 0;
[46321fb]488}
489
490#define IO_FRAME_BASE 0xFFFFC000000
491
[666773c]492/**
493 * There is special handling of memory mapped legacy io, because of 4KB sized
494 * access for userspace.
[46321fb]495 *
[666773c]496 * @param va Virtual address of page fault.
497 * @param istate Structure with saved interruption state.
[46321fb]498 *
[666773c]499 * @return One on success, zero on failure.
[46321fb]500 */
501static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
502{
[666773c]503 if ((va >= IO_OFFSET ) && (va < IO_OFFSET + (1 << IO_PAGE_WIDTH))) {
504 if (TASK) {
505 uint64_t io_page = (va & ((1 << IO_PAGE_WIDTH) - 1)) >>
506 USPACE_IO_PAGE_WIDTH;
[46321fb]507
[666773c]508 if (is_io_page_accessible(io_page)) {
509 uint64_t page, frame;
[46321fb]510
[666773c]511 page = IO_OFFSET +
512 (1 << USPACE_IO_PAGE_WIDTH) * io_page;
513 frame = IO_FRAME_BASE +
514 (1 << USPACE_IO_PAGE_WIDTH) * io_page;
[46321fb]515
516 tlb_entry_t entry;
517
518 entry.word[0] = 0;
519 entry.word[1] = 0;
520
[666773c]521 entry.p = true; /* present */
[46321fb]522 entry.ma = MA_UNCACHEABLE;
[666773c]523 entry.a = true; /* already accessed */
524 entry.d = true; /* already dirty */
[46321fb]525 entry.pl = PL_USER;
526 entry.ar = AR_READ | AR_WRITE;
[ef5de6d]527 entry.ppn = frame >> PPN_SHIFT;
[46321fb]528 entry.ps = USPACE_IO_PAGE_WIDTH;
529
[ef5de6d]530 dtc_mapping_insert(page, TASK->as->asid, entry);
[46321fb]531 return 1;
[666773c]532 } else {
533 fault_if_from_uspace(istate,
[f651e80]534 "IO access fault at %p.", va);
[666773c]535 }
536 }
537 }
[46321fb]538
539 return 0;
540}
541
[9ad03fe]542/** Data TLB fault handler for faults with VHPT turned off.
[a0d74fd]543 *
[666773c]544 * @param vector Interruption vector.
545 * @param istate Structure with saved interruption state.
[a0d74fd]546 */
[7f1c620]547void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
[95042fd]548{
[a0d74fd]549 region_register rr;
550 rid_t rid;
[7f1c620]551 uintptr_t va;
[9ad03fe]552 pte_t *t;
[a0d74fd]553
[25d7709]554 va = istate->cr_ifa; /* faulting address */
[a0d74fd]555 rr.word = rr_read(VA2VRN(va));
556 rid = rr.map.rid;
557 if (RID2ASID(rid) == ASID_KERNEL) {
558 if (VA2VRN(va) == VRN_KERNEL) {
559 /*
560 * Provide KA2PA(identity) mapping for faulting piece of
561 * kernel address space.
562 */
[9ad03fe]563 dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
[a0d74fd]564 return;
565 }
566 }
[b994a60]567
[2299914]568 page_table_lock(AS, true);
[9ad03fe]569 t = page_mapping_find(AS, va);
570 if (t) {
571 /*
[f47fd19]572 * The mapping was found in the software page hash table.
[9ad03fe]573 * Insert it into data translation cache.
574 */
575 dtc_pte_copy(t);
[2299914]576 page_table_unlock(AS, true);
[9ad03fe]577 } else {
[46321fb]578 page_table_unlock(AS, true);
[666773c]579 if (try_memmap_io_insertion(va, istate))
580 return;
[9ad03fe]581 /*
[666773c]582 * Forward the page fault to the address space page fault
583 * handler.
[9ad03fe]584 */
[567807b1]585 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
[f651e80]586 fault_if_from_uspace(istate,"Page fault at %p.",va);
587 panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid,
[666773c]588 istate->cr_iip);
[9ad03fe]589 }
590 }
[95042fd]591}
[89298e3]592
[9ad03fe]593/** Data nested TLB fault handler.
594 *
595 * This fault should not occur.
596 *
[666773c]597 * @param vector Interruption vector.
598 * @param istate Structure with saved interruption state.
[9ad03fe]599 */
[7f1c620]600void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
[95042fd]601{
[f651e80]602 panic("%s.", __func__);
[95042fd]603}
[89298e3]604
[9ad03fe]605/** Data Dirty bit fault handler.
606 *
[666773c]607 * @param vector Interruption vector.
608 * @param istate Structure with saved interruption state.
[9ad03fe]609 */
[7f1c620]610void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
[95042fd]611{
[567807b1]612 region_register rr;
613 rid_t rid;
[7f1c620]614 uintptr_t va;
[9ad03fe]615 pte_t *t;
[567807b1]616
617 va = istate->cr_ifa; /* faulting address */
618 rr.word = rr_read(VA2VRN(va));
619 rid = rr.map.rid;
[9ad03fe]620
[2299914]621 page_table_lock(AS, true);
[567807b1]622 t = page_mapping_find(AS, va);
[9ad03fe]623 ASSERT(t && t->p);
[567807b1]624 if (t && t->p && t->w) {
[9ad03fe]625 /*
626 * Update the Dirty bit in page tables and reinsert
627 * the mapping into DTC.
628 */
629 t->d = true;
630 dtc_pte_copy(t);
[567807b1]631 } else {
632 if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
[f651e80]633 fault_if_from_uspace(istate,"Page fault at %p.",va);
634 panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid,
[666773c]635 istate->cr_iip);
[567807b1]636 }
[9ad03fe]637 }
[2299914]638 page_table_unlock(AS, true);
[95042fd]639}
[89298e3]640
[9ad03fe]641/** Instruction access bit fault handler.
642 *
[666773c]643 * @param vector Interruption vector.
644 * @param istate Structure with saved interruption state.
[9ad03fe]645 */
[7f1c620]646void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
[95042fd]647{
[567807b1]648 region_register rr;
649 rid_t rid;
[7f1c620]650 uintptr_t va;
[567807b1]651 pte_t *t;
652
653 va = istate->cr_ifa; /* faulting address */
654 rr.word = rr_read(VA2VRN(va));
655 rid = rr.map.rid;
[9ad03fe]656
[2299914]657 page_table_lock(AS, true);
[567807b1]658 t = page_mapping_find(AS, va);
[9ad03fe]659 ASSERT(t && t->p);
[567807b1]660 if (t && t->p && t->x) {
[9ad03fe]661 /*
662 * Update the Accessed bit in page tables and reinsert
663 * the mapping into ITC.
664 */
665 t->a = true;
666 itc_pte_copy(t);
[567807b1]667 } else {
668 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
[f651e80]669 fault_if_from_uspace(istate, "Page fault at %p.", va);
670 panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid,
[666773c]671 istate->cr_iip);
[567807b1]672 }
[9ad03fe]673 }
[2299914]674 page_table_unlock(AS, true);
[95042fd]675}
[89298e3]676
[9ad03fe]677/** Data access bit fault handler.
678 *
679 * @param vector Interruption vector.
[25d7709]680 * @param istate Structure with saved interruption state.
[9ad03fe]681 */
[7f1c620]682void data_access_bit_fault(uint64_t vector, istate_t *istate)
[95042fd]683{
[567807b1]684 region_register rr;
685 rid_t rid;
[7f1c620]686 uintptr_t va;
[9ad03fe]687 pte_t *t;
688
[567807b1]689 va = istate->cr_ifa; /* faulting address */
690 rr.word = rr_read(VA2VRN(va));
691 rid = rr.map.rid;
692
[2299914]693 page_table_lock(AS, true);
[567807b1]694 t = page_mapping_find(AS, va);
[9ad03fe]695 ASSERT(t && t->p);
696 if (t && t->p) {
697 /*
698 * Update the Accessed bit in page tables and reinsert
699 * the mapping into DTC.
700 */
701 t->a = true;
702 dtc_pte_copy(t);
[567807b1]703 } else {
704 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
[f651e80]705 fault_if_from_uspace(istate, "Page fault at %p.", va);
706 panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid,
[666773c]707 istate->cr_iip);
[567807b1]708 }
[9ad03fe]709 }
[2299914]710 page_table_unlock(AS, true);
[89298e3]711}
712
[9ad03fe]713/** Page not present fault handler.
714 *
715 * @param vector Interruption vector.
[25d7709]716 * @param istate Structure with saved interruption state.
[9ad03fe]717 */
[7f1c620]718void page_not_present(uint64_t vector, istate_t *istate)
[95042fd]719{
[9ad03fe]720 region_register rr;
[567807b1]721 rid_t rid;
[7f1c620]722 uintptr_t va;
[9ad03fe]723 pte_t *t;
724
[25d7709]725 va = istate->cr_ifa; /* faulting address */
[567807b1]726 rr.word = rr_read(VA2VRN(va));
727 rid = rr.map.rid;
728
[2299914]729 page_table_lock(AS, true);
[9ad03fe]730 t = page_mapping_find(AS, va);
731 ASSERT(t);
732
733 if (t->p) {
734 /*
735 * If the Present bit is set in page hash table, just copy it
736 * and update ITC/DTC.
737 */
738 if (t->x)
739 itc_pte_copy(t);
740 else
741 dtc_pte_copy(t);
[2299914]742 page_table_unlock(AS, true);
[9ad03fe]743 } else {
[2299914]744 page_table_unlock(AS, true);
[567807b1]745 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
[f651e80]746 fault_if_from_uspace(istate, "Page fault at %p.", va);
747 panic("%s: va=%p, rid=%d.", __func__, va, rid);
[9ad03fe]748 }
749 }
[95042fd]750}
[b45c443]751
/** Architecture-specific TLB initialization (no-op on ia64). */
void tlb_arch_init(void)
{
}
755
/** Print TLB contents (not implemented on ia64). */
void tlb_print(void)
{
}
759
[ee289cf0]760/** @}
[b45c443]761 */
Note: See TracBrowser for help on using the repository browser.