source: mainline/arch/ia64/src/mm/tlb.c@ a175a67

Last change on this file was a175a67, checked in by Jakub Vana <jakub.vana@…>, 19 years ago

itanium faulting task kill

/*
 * Copyright (C) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TLB management.
 */

#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/mm/vhpt.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <arch/pal/pal.h>
#include <arch/asm.h>
#include <typedefs.h>
#include <panic.h>
#include <print.h>
#include <arch.h>
#include <interrupt.h>

/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
    ipl_t ipl;
    __address adr;
    __u32 count1, count2, stride1, stride2;

    int i, j;

    adr = PAL_PTCE_INFO_BASE();
    count1 = PAL_PTCE_INFO_COUNT1();
    count2 = PAL_PTCE_INFO_COUNT2();
    stride1 = PAL_PTCE_INFO_STRIDE1();
    stride2 = PAL_PTCE_INFO_STRIDE2();

    ipl = interrupts_disable();

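    /*
     * Walk the purge loop described by PAL_PTCE_INFO: the nested loop
     * bounds (count1, count2) and strides (stride1, stride2) come from
     * firmware and together cover every entry of the local TLB with
     * ptc.e.
     */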
    for (i = 0; i < count1; i++) {
        for (j = 0; j < count2; j++) {
            __asm__ volatile (
                "ptc.e %0 ;;"
                :
                : "r" (adr)
            );
            adr += stride2;
        }
        adr += stride1;
    }

    interrupts_restore(ipl);

    srlz_d();
    srlz_i();
#ifdef CONFIG_VHPT
    vhpt_invalidate_all();
#endif
}

/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
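    /*
     * The current implementation is coarse: instead of purging only the
     * entries tagged with this ASID, the whole TLB is invalidated.
     */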
    tlb_invalidate_all();
}


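/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 * @param page Address of the first page whose entry is to be invalidated.
 * @param cnt Number of entries to invalidate.
 */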
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt)
{
    region_register rr;
    bool restore_rr = false;
    int b = 0;
    int c = cnt;

    __address va;
    va = page;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

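    /*
     * Choose the purge page size for ptc.l: b becomes floor(log2(cnt)) / 2,
     * so ps grows by two bits (a factor of four pages) for every fourfold
     * increase in cnt. The starting address is then aligned down to the
     * chosen size so that each ptc.l purges a naturally aligned block.
     */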
    while (c >>= 1)
        b++;
    b >>= 1;
    __u64 ps;

    switch (b) {
    case 0: /*cnt 1-3*/
        ps = PAGE_WIDTH;
        break;
    case 1: /*cnt 4-15*/
        /*cnt=((cnt-1)/4)+1;*/
        ps = PAGE_WIDTH + 2;
        va &= ~((1UL << ps) - 1);
        break;
    case 2: /*cnt 16-63*/
        /*cnt=((cnt-1)/16)+1;*/
        ps = PAGE_WIDTH + 4;
        va &= ~((1UL << ps) - 1);
        break;
    case 3: /*cnt 64-255*/
        /*cnt=((cnt-1)/64)+1;*/
        ps = PAGE_WIDTH + 6;
        va &= ~((1UL << ps) - 1);
        break;
    case 4: /*cnt 256-1023*/
        /*cnt=((cnt-1)/256)+1;*/
        ps = PAGE_WIDTH + 8;
        va &= ~((1UL << ps) - 1);
        break;
    case 5: /*cnt 1024-4095*/
        /*cnt=((cnt-1)/1024)+1;*/
        ps = PAGE_WIDTH + 10;
        va &= ~((1UL << ps) - 1);
        break;
    case 6: /*cnt 4096-16383*/
        /*cnt=((cnt-1)/4096)+1;*/
        ps = PAGE_WIDTH + 12;
        va &= ~((1UL << ps) - 1);
        break;
    case 7: /*cnt 16384-65535*/
    case 8: /*cnt 65536-(256K-1)*/
        /*cnt=((cnt-1)/16384)+1;*/
        ps = PAGE_WIDTH + 14;
        va &= ~((1UL << ps) - 1);
        break;
    default:
        /*cnt=((cnt-1)/(16384*16))+1;*/
        ps = PAGE_WIDTH + 18;
        va &= ~((1UL << ps) - 1);
        break;
    }
    /*cnt+=(page!=va);*/
    for (; va < (page + cnt * PAGE_SIZE); va += (1UL << ps)) {
        __asm__ volatile (
            "ptc.l %0,%1;;"
            :
            : "r" (va), "r" (ps << 2)
        );
    }
    srlz_d();
    srlz_i();

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}


/** Insert data into data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, true);
}

/** Insert data into instruction translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, false);
}

/** Insert data into instruction or data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
 */
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

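    /*
     * With PSR.ic temporarily cleared, load cr.ifa and cr.itir with the
     * faulting address and the second entry word, then let predicates
     * p6/p7 select between itc.i and itc.d for the first entry word.
     * PSR is restored from r8 afterwards.
     */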
    __asm__ volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n" /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n" /* va */
        "mov cr.itir=%2;;\n" /* entry.word[1] */
        "cmp.eq p6,p7 = %4,r0;;\n" /* decide between itc and dtc */
        "(p6) itc.i %3;;\n"
        "(p7) itc.d %3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
        : "p6", "p7", "r8"
    );

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}

/** Insert data into instruction translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, false, tr);
}

/** Insert data into data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, true, tr);
}

/** Insert data into instruction or data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

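    /*
     * Same sequence as in tc_mapping_insert(), except that the
     * translation is installed into translation register slot tr
     * (itr.i/itr.d) instead of the translation cache.
     */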
    __asm__ volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n" /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n" /* va */
        "mov cr.itir=%2;;\n" /* entry.word[1] */
        "cmp.eq p6,p7=%5,r0;;\n" /* decide between itr and dtr */
        "(p6) itr.i itr[%4]=%3;;\n"
        "(p7) itr.d dtr[%4]=%3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
        : "p6", "p7", "r8"
    );

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}

/** Insert data into DTLB.
 *
 * @param page Virtual page address.
 * @param frame Physical frame address.
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
 * @param tr Translation register if dtr is true, ignored otherwise.
 */
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    entry.p = true; /* present */
    entry.ma = MA_WRITEBACK;
    entry.a = true; /* already accessed */
    entry.d = true; /* already dirty */
    entry.pl = PL_KERNEL;
    entry.ar = AR_READ | AR_WRITE;
    entry.ppn = frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    if (dtr)
        dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
    else
        dtc_mapping_insert(page, ASID_KERNEL, entry);
}

/** Copy content of PTE into data translation cache.
 *
 * @param t PTE.
 */
void dtc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

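    /*
     * Translate the generic PTE bits into the ia64 TLB insertion format:
     * memory attribute, privilege level and access rights are derived
     * from the cacheable, kernel and writable flags of the PTE.
     */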
    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.d = t->d;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->w ? AR_WRITE : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    dtc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
    vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

/** Copy content of PTE into instruction translation cache.
 *
 * @param t PTE.
 */
void itc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    ASSERT(t->x);

    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    itc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
    vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(__u64 vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    __address va;
    pte_t *t;

    va = istate->cr_ifa; /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in software page hash table.
         * Insert it into instruction translation cache.
         */
        itc_pte_copy(t);
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to address space page fault handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %P", va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
        }
    }
}

/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(__u64 vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    __address va;
    pte_t *t;

    va = istate->cr_ifa; /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;
    if (RID2ASID(rid) == ASID_KERNEL) {
        if (VA2VRN(va) == VRN_KERNEL) {
            /*
             * Provide KA2PA(identity) mapping for faulting piece of
             * kernel address space.
             */
            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
            return;
        }
    }

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in software page hash table.
         * Insert it into data translation cache.
         */
        dtc_pte_copy(t);
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to address space page fault handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %P", va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
        }
    }
}

/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_nested_tlb_fault(__u64 vector, istate_t *istate)
{
    panic("%s\n", __FUNCTION__);
}

/** Data Dirty bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_dirty_bit_fault(__u64 vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    __address va;
    pte_t *t;

    va = istate->cr_ifa; /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    ASSERT(t && t->p);
    if (t && t->p && t->w) {
        /*
         * Update the Dirty bit in page tables and reinsert
         * the mapping into DTC.
         */
        t->d = true;
        dtc_pte_copy(t);
    } else {
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %P", va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
            t->d = true;
            dtc_pte_copy(t);
        }
    }
    page_table_unlock(AS, true);
}

/** Instruction access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void instruction_access_bit_fault(__u64 vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    __address va;
    pte_t *t;

    va = istate->cr_ifa; /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    ASSERT(t && t->p);
    if (t && t->p && t->x) {
        /*
         * Update the Accessed bit in page tables and reinsert
         * the mapping into ITC.
         */
        t->a = true;
        itc_pte_copy(t);
    } else {
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %P", va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
            t->a = true;
            itc_pte_copy(t);
        }
    }
    page_table_unlock(AS, true);
}

/** Data access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_access_bit_fault(__u64 vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    __address va;
    pte_t *t;

    va = istate->cr_ifa; /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    ASSERT(t && t->p);
    if (t && t->p) {
        /*
         * Update the Accessed bit in page tables and reinsert
         * the mapping into DTC.
         */
        t->a = true;
        dtc_pte_copy(t);
    } else {
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %P", va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
            t->a = true;
            dtc_pte_copy(t);
        }
    }
    page_table_unlock(AS, true);
}

/** Page not present fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void page_not_present(__u64 vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    __address va;
    pte_t *t;

    va = istate->cr_ifa; /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    ASSERT(t);

    if (t->p) {
        /*
         * If the Present bit is set in page hash table, just copy it
         * and update ITC/DTC.
         */
        if (t->x)
            itc_pte_copy(t);
        else
            dtc_pte_copy(t);
        page_table_unlock(AS, true);
    } else {
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %P", va);
            panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rid);
        }
    }
}