source: mainline/kernel/arch/ia64/src/mm/tlb.c@ 8ccd2ea

Branches containing this file: lfn, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Last change on this file since 8ccd2ea was 3ee8a075, checked in by Jakub Jermar <jakub@…>, 18 years ago

Replace gcc-specific FUNCTION with C99 func.
suncc's xregs=no%float can be used only on sparc64.

  • Property mode set to 100644
File size: 16.4 KB
Line 
1/*
2 * Copyright (c) 2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup ia64mm
30 * @{
31 */
32/** @file
33 */
34
35/*
36 * TLB management.
37 */
38
39#include <mm/tlb.h>
40#include <mm/asid.h>
41#include <mm/page.h>
42#include <mm/as.h>
43#include <arch/mm/tlb.h>
44#include <arch/mm/page.h>
45#include <arch/mm/vhpt.h>
46#include <arch/barrier.h>
47#include <arch/interrupt.h>
48#include <arch/pal/pal.h>
49#include <arch/asm.h>
50#include <panic.h>
51#include <print.h>
52#include <arch.h>
53#include <interrupt.h>
54
55/** Invalidate all TLB entries. */
56void tlb_invalidate_all(void)
57{
58 ipl_t ipl;
59 uintptr_t adr;
60 uint32_t count1, count2, stride1, stride2;
61
62 int i, j;
63
64 adr = PAL_PTCE_INFO_BASE();
65 count1 = PAL_PTCE_INFO_COUNT1();
66 count2 = PAL_PTCE_INFO_COUNT2();
67 stride1 = PAL_PTCE_INFO_STRIDE1();
68 stride2 = PAL_PTCE_INFO_STRIDE2();
69
70 ipl = interrupts_disable();
71
72 for(i = 0; i < count1; i++) {
73 for(j = 0; j < count2; j++) {
74 asm volatile (
75 "ptc.e %0 ;;"
76 :
77 : "r" (adr)
78 );
79 adr += stride2;
80 }
81 adr += stride1;
82 }
83
84 interrupts_restore(ipl);
85
86 srlz_d();
87 srlz_i();
88#ifdef CONFIG_VHPT
89 vhpt_invalidate_all();
90#endif
91}
92
93/** Invalidate entries belonging to an address space.
94 *
95 * @param asid Address space identifier.
96 */
97void tlb_invalidate_asid(asid_t asid)
98{
99 tlb_invalidate_all();
100}
101
102
103void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
104{
105 region_register rr;
106 bool restore_rr = false;
107 int b = 0;
108 int c = cnt;
109
110 uintptr_t va;
111 va = page;
112
113 rr.word = rr_read(VA2VRN(va));
114 if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
115 /*
116 * The selected region register does not contain required RID.
117 * Save the old content of the register and replace the RID.
118 */
119 region_register rr0;
120
121 rr0 = rr;
122 rr0.map.rid = ASID2RID(asid, VA2VRN(va));
123 rr_write(VA2VRN(va), rr0.word);
124 srlz_d();
125 srlz_i();
126 }
127
128 while(c >>= 1)
129 b++;
130 b >>= 1;
131 uint64_t ps;
132
133 switch (b) {
134 case 0: /*cnt 1-3*/
135 ps = PAGE_WIDTH;
136 break;
137 case 1: /*cnt 4-15*/
138 /*cnt=((cnt-1)/4)+1;*/
139 ps = PAGE_WIDTH+2;
140 va &= ~((1<<ps)-1);
141 break;
142 case 2: /*cnt 16-63*/
143 /*cnt=((cnt-1)/16)+1;*/
144 ps = PAGE_WIDTH+4;
145 va &= ~((1<<ps)-1);
146 break;
147 case 3: /*cnt 64-255*/
148 /*cnt=((cnt-1)/64)+1;*/
149 ps = PAGE_WIDTH+6;
150 va &= ~((1<<ps)-1);
151 break;
152 case 4: /*cnt 256-1023*/
153 /*cnt=((cnt-1)/256)+1;*/
154 ps = PAGE_WIDTH+8;
155 va &= ~((1<<ps)-1);
156 break;
157 case 5: /*cnt 1024-4095*/
158 /*cnt=((cnt-1)/1024)+1;*/
159 ps = PAGE_WIDTH+10;
160 va &= ~((1<<ps)-1);
161 break;
162 case 6: /*cnt 4096-16383*/
163 /*cnt=((cnt-1)/4096)+1;*/
164 ps = PAGE_WIDTH+12;
165 va &= ~((1<<ps)-1);
166 break;
167 case 7: /*cnt 16384-65535*/
168 case 8: /*cnt 65536-(256K-1)*/
169 /*cnt=((cnt-1)/16384)+1;*/
170 ps = PAGE_WIDTH+14;
171 va &= ~((1<<ps)-1);
172 break;
173 default:
174 /*cnt=((cnt-1)/(16384*16))+1;*/
175 ps=PAGE_WIDTH+18;
176 va&=~((1<<ps)-1);
177 break;
178 }
179 /*cnt+=(page!=va);*/
180 for(; va<(page+cnt*(PAGE_SIZE)); va += (1<<ps)) {
181 asm volatile (
182 "ptc.l %0,%1;;"
183 :
184 : "r" (va), "r" (ps<<2)
185 );
186 }
187 srlz_d();
188 srlz_i();
189
190 if (restore_rr) {
191 rr_write(VA2VRN(va), rr.word);
192 srlz_d();
193 srlz_i();
194 }
195}
196
197/** Insert data into data translation cache.
198 *
199 * @param va Virtual page address.
200 * @param asid Address space identifier.
201 * @param entry The rest of TLB entry as required by TLB insertion format.
202 */
203void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
204{
205 tc_mapping_insert(va, asid, entry, true);
206}
207
208/** Insert data into instruction translation cache.
209 *
210 * @param va Virtual page address.
211 * @param asid Address space identifier.
212 * @param entry The rest of TLB entry as required by TLB insertion format.
213 */
214void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
215{
216 tc_mapping_insert(va, asid, entry, false);
217}
218
219/** Insert data into instruction or data translation cache.
220 *
221 * @param va Virtual page address.
222 * @param asid Address space identifier.
223 * @param entry The rest of TLB entry as required by TLB insertion format.
224 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
225 */
226void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
227{
228 region_register rr;
229 bool restore_rr = false;
230
231 rr.word = rr_read(VA2VRN(va));
232 if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
233 /*
234 * The selected region register does not contain required RID.
235 * Save the old content of the register and replace the RID.
236 */
237 region_register rr0;
238
239 rr0 = rr;
240 rr0.map.rid = ASID2RID(asid, VA2VRN(va));
241 rr_write(VA2VRN(va), rr0.word);
242 srlz_d();
243 srlz_i();
244 }
245
246 asm volatile (
247 "mov r8=psr;;\n"
248 "rsm %0;;\n" /* PSR_IC_MASK */
249 "srlz.d;;\n"
250 "srlz.i;;\n"
251 "mov cr.ifa=%1\n" /* va */
252 "mov cr.itir=%2;;\n" /* entry.word[1] */
253 "cmp.eq p6,p7 = %4,r0;;\n" /* decide between itc and dtc */
254 "(p6) itc.i %3;;\n"
255 "(p7) itc.d %3;;\n"
256 "mov psr.l=r8;;\n"
257 "srlz.d;;\n"
258 :
259 : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
260 : "p6", "p7", "r8"
261 );
262
263 if (restore_rr) {
264 rr_write(VA2VRN(va), rr.word);
265 srlz_d();
266 srlz_i();
267 }
268}
269
270/** Insert data into instruction translation register.
271 *
272 * @param va Virtual page address.
273 * @param asid Address space identifier.
274 * @param entry The rest of TLB entry as required by TLB insertion format.
275 * @param tr Translation register.
276 */
277void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
278{
279 tr_mapping_insert(va, asid, entry, false, tr);
280}
281
282/** Insert data into data translation register.
283 *
284 * @param va Virtual page address.
285 * @param asid Address space identifier.
286 * @param entry The rest of TLB entry as required by TLB insertion format.
287 * @param tr Translation register.
288 */
289void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
290{
291 tr_mapping_insert(va, asid, entry, true, tr);
292}
293
294/** Insert data into instruction or data translation register.
295 *
296 * @param va Virtual page address.
297 * @param asid Address space identifier.
298 * @param entry The rest of TLB entry as required by TLB insertion format.
299 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
300 * @param tr Translation register.
301 */
302void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
303{
304 region_register rr;
305 bool restore_rr = false;
306
307 rr.word = rr_read(VA2VRN(va));
308 if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
309 /*
310 * The selected region register does not contain required RID.
311 * Save the old content of the register and replace the RID.
312 */
313 region_register rr0;
314
315 rr0 = rr;
316 rr0.map.rid = ASID2RID(asid, VA2VRN(va));
317 rr_write(VA2VRN(va), rr0.word);
318 srlz_d();
319 srlz_i();
320 }
321
322 asm volatile (
323 "mov r8=psr;;\n"
324 "rsm %0;;\n" /* PSR_IC_MASK */
325 "srlz.d;;\n"
326 "srlz.i;;\n"
327 "mov cr.ifa=%1\n" /* va */
328 "mov cr.itir=%2;;\n" /* entry.word[1] */
329 "cmp.eq p6,p7=%5,r0;;\n" /* decide between itr and dtr */
330 "(p6) itr.i itr[%4]=%3;;\n"
331 "(p7) itr.d dtr[%4]=%3;;\n"
332 "mov psr.l=r8;;\n"
333 "srlz.d;;\n"
334 :
335 : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
336 : "p6", "p7", "r8"
337 );
338
339 if (restore_rr) {
340 rr_write(VA2VRN(va), rr.word);
341 srlz_d();
342 srlz_i();
343 }
344}
345
346/** Insert data into DTLB.
347 *
348 * @param page Virtual page address including VRN bits.
349 * @param frame Physical frame address.
350 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
351 * @param tr Translation register if dtr is true, ignored otherwise.
352 */
353void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, index_t tr)
354{
355 tlb_entry_t entry;
356
357 entry.word[0] = 0;
358 entry.word[1] = 0;
359
360 entry.p = true; /* present */
361 entry.ma = MA_WRITEBACK;
362 entry.a = true; /* already accessed */
363 entry.d = true; /* already dirty */
364 entry.pl = PL_KERNEL;
365 entry.ar = AR_READ | AR_WRITE;
366 entry.ppn = frame >> PPN_SHIFT;
367 entry.ps = PAGE_WIDTH;
368
369 if (dtr)
370 dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
371 else
372 dtc_mapping_insert(page, ASID_KERNEL, entry);
373}
374
375/** Purge kernel entries from DTR.
376 *
377 * Purge DTR entries used by the kernel.
378 *
379 * @param page Virtual page address including VRN bits.
380 * @param width Width of the purge in bits.
381 */
382void dtr_purge(uintptr_t page, count_t width)
383{
384 asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width<<2));
385}
386
387
388/** Copy content of PTE into data translation cache.
389 *
390 * @param t PTE.
391 */
392void dtc_pte_copy(pte_t *t)
393{
394 tlb_entry_t entry;
395
396 entry.word[0] = 0;
397 entry.word[1] = 0;
398
399 entry.p = t->p;
400 entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
401 entry.a = t->a;
402 entry.d = t->d;
403 entry.pl = t->k ? PL_KERNEL : PL_USER;
404 entry.ar = t->w ? AR_WRITE : AR_READ;
405 entry.ppn = t->frame >> PPN_SHIFT;
406 entry.ps = PAGE_WIDTH;
407
408 dtc_mapping_insert(t->page, t->as->asid, entry);
409#ifdef CONFIG_VHPT
410 vhpt_mapping_insert(t->page, t->as->asid, entry);
411#endif
412}
413
414/** Copy content of PTE into instruction translation cache.
415 *
416 * @param t PTE.
417 */
418void itc_pte_copy(pte_t *t)
419{
420 tlb_entry_t entry;
421
422 entry.word[0] = 0;
423 entry.word[1] = 0;
424
425 ASSERT(t->x);
426
427 entry.p = t->p;
428 entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
429 entry.a = t->a;
430 entry.pl = t->k ? PL_KERNEL : PL_USER;
431 entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
432 entry.ppn = t->frame >> PPN_SHIFT;
433 entry.ps = PAGE_WIDTH;
434
435 itc_mapping_insert(t->page, t->as->asid, entry);
436#ifdef CONFIG_VHPT
437 vhpt_mapping_insert(t->page, t->as->asid, entry);
438#endif
439}
440
441/** Instruction TLB fault handler for faults with VHPT turned off.
442 *
443 * @param vector Interruption vector.
444 * @param istate Structure with saved interruption state.
445 */
446void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate)
447{
448 region_register rr;
449 rid_t rid;
450 uintptr_t va;
451 pte_t *t;
452
453 va = istate->cr_ifa; /* faulting address */
454 rr.word = rr_read(VA2VRN(va));
455 rid = rr.map.rid;
456
457 page_table_lock(AS, true);
458 t = page_mapping_find(AS, va);
459 if (t) {
460 /*
461 * The mapping was found in software page hash table.
462 * Insert it into data translation cache.
463 */
464 itc_pte_copy(t);
465 page_table_unlock(AS, true);
466 } else {
467 /*
468 * Forward the page fault to address space page fault handler.
469 */
470 page_table_unlock(AS, true);
471 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
472 fault_if_from_uspace(istate,"Page fault at %p",va);
473 panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
474 }
475 }
476}
477
478/** Data TLB fault handler for faults with VHPT turned off.
479 *
480 * @param vector Interruption vector.
481 * @param istate Structure with saved interruption state.
482 */
483void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
484{
485 region_register rr;
486 rid_t rid;
487 uintptr_t va;
488 pte_t *t;
489
490 va = istate->cr_ifa; /* faulting address */
491 rr.word = rr_read(VA2VRN(va));
492 rid = rr.map.rid;
493 if (RID2ASID(rid) == ASID_KERNEL) {
494 if (VA2VRN(va) == VRN_KERNEL) {
495 /*
496 * Provide KA2PA(identity) mapping for faulting piece of
497 * kernel address space.
498 */
499 dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
500 return;
501 }
502 }
503
504 page_table_lock(AS, true);
505 t = page_mapping_find(AS, va);
506 if (t) {
507 /*
508 * The mapping was found in the software page hash table.
509 * Insert it into data translation cache.
510 */
511 dtc_pte_copy(t);
512 page_table_unlock(AS, true);
513 } else {
514 /*
515 * Forward the page fault to the address space page fault handler.
516 */
517 page_table_unlock(AS, true);
518 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
519 fault_if_from_uspace(istate,"Page fault at %p",va);
520 panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
521 }
522 }
523}
524
525/** Data nested TLB fault handler.
526 *
527 * This fault should not occur.
528 *
529 * @param vector Interruption vector.
530 * @param istate Structure with saved interruption state.
531 */
532void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
533{
534 panic("%s\n", __func__);
535}
536
537/** Data Dirty bit fault handler.
538 *
539 * @param vector Interruption vector.
540 * @param istate Structure with saved interruption state.
541 */
542void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
543{
544 region_register rr;
545 rid_t rid;
546 uintptr_t va;
547 pte_t *t;
548
549 va = istate->cr_ifa; /* faulting address */
550 rr.word = rr_read(VA2VRN(va));
551 rid = rr.map.rid;
552
553 page_table_lock(AS, true);
554 t = page_mapping_find(AS, va);
555 ASSERT(t && t->p);
556 if (t && t->p && t->w) {
557 /*
558 * Update the Dirty bit in page tables and reinsert
559 * the mapping into DTC.
560 */
561 t->d = true;
562 dtc_pte_copy(t);
563 } else {
564 if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
565 fault_if_from_uspace(istate,"Page fault at %p",va);
566 panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
567 t->d = true;
568 dtc_pte_copy(t);
569 }
570 }
571 page_table_unlock(AS, true);
572}
573
574/** Instruction access bit fault handler.
575 *
576 * @param vector Interruption vector.
577 * @param istate Structure with saved interruption state.
578 */
579void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
580{
581 region_register rr;
582 rid_t rid;
583 uintptr_t va;
584 pte_t *t;
585
586 va = istate->cr_ifa; /* faulting address */
587 rr.word = rr_read(VA2VRN(va));
588 rid = rr.map.rid;
589
590 page_table_lock(AS, true);
591 t = page_mapping_find(AS, va);
592 ASSERT(t && t->p);
593 if (t && t->p && t->x) {
594 /*
595 * Update the Accessed bit in page tables and reinsert
596 * the mapping into ITC.
597 */
598 t->a = true;
599 itc_pte_copy(t);
600 } else {
601 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
602 fault_if_from_uspace(istate,"Page fault at %p",va);
603 panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
604 t->a = true;
605 itc_pte_copy(t);
606 }
607 }
608 page_table_unlock(AS, true);
609}
610
611/** Data access bit fault handler.
612 *
613 * @param vector Interruption vector.
614 * @param istate Structure with saved interruption state.
615 */
616void data_access_bit_fault(uint64_t vector, istate_t *istate)
617{
618 region_register rr;
619 rid_t rid;
620 uintptr_t va;
621 pte_t *t;
622
623 va = istate->cr_ifa; /* faulting address */
624 rr.word = rr_read(VA2VRN(va));
625 rid = rr.map.rid;
626
627 page_table_lock(AS, true);
628 t = page_mapping_find(AS, va);
629 ASSERT(t && t->p);
630 if (t && t->p) {
631 /*
632 * Update the Accessed bit in page tables and reinsert
633 * the mapping into DTC.
634 */
635 t->a = true;
636 dtc_pte_copy(t);
637 } else {
638 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
639 fault_if_from_uspace(istate,"Page fault at %p",va);
640 panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
641 t->a = true;
642 itc_pte_copy(t);
643 }
644 }
645 page_table_unlock(AS, true);
646}
647
648/** Page not present fault handler.
649 *
650 * @param vector Interruption vector.
651 * @param istate Structure with saved interruption state.
652 */
653void page_not_present(uint64_t vector, istate_t *istate)
654{
655 region_register rr;
656 rid_t rid;
657 uintptr_t va;
658 pte_t *t;
659
660 va = istate->cr_ifa; /* faulting address */
661 rr.word = rr_read(VA2VRN(va));
662 rid = rr.map.rid;
663
664 page_table_lock(AS, true);
665 t = page_mapping_find(AS, va);
666 ASSERT(t);
667
668 if (t->p) {
669 /*
670 * If the Present bit is set in page hash table, just copy it
671 * and update ITC/DTC.
672 */
673 if (t->x)
674 itc_pte_copy(t);
675 else
676 dtc_pte_copy(t);
677 page_table_unlock(AS, true);
678 } else {
679 page_table_unlock(AS, true);
680 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
681 fault_if_from_uspace(istate,"Page fault at %p",va);
682 panic("%s: va=%p, rid=%d\n", __func__, va, rid);
683 }
684 }
685}
686
687/** @}
688 */
Note: See TracBrowser for help on using the repository browser.