source: mainline/arch/ia64/src/mm/tlb.c @ 9bda3af6

Last change on this file was 9bda3af6, checked in by Jakub Vana <jakub.vana@…>, 19 years ago:

Better purge page selection.

/*
 * Copyright (C) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TLB management.
 */

#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <arch/pal/pal.h>
#include <arch/asm.h>
#include <typedefs.h>
#include <panic.h>
#include <arch.h>

/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
	ipl_t ipl;
	__address adr;
	__u32 count1, count2, stride1, stride2;
	int i, j;

	adr = PAL_PTCE_INFO_BASE();
	count1 = PAL_PTCE_INFO_COUNT1();
	count2 = PAL_PTCE_INFO_COUNT2();
	stride1 = PAL_PTCE_INFO_STRIDE1();
	stride2 = PAL_PTCE_INFO_STRIDE2();

	ipl = interrupts_disable();

	for (i = 0; i < count1; i++) {
		for (j = 0; j < count2; j++) {
			__asm__ volatile (
				"ptc.e %0;;"
				:
				: "r" (adr)
			);
			adr += stride2;
		}
		adr += stride1;
	}

	interrupts_restore(ipl);

	srlz_d();
	srlz_i();
}
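
/*
 * Illustration only (not referenced by the kernel): the nested ptc.e loop
 * above walks the iteration space advertised by PAL_PTCE_INFO. Because adr
 * only accumulates, the address passed to the j-th ptc.e of the i-th outer
 * pass is
 *
 *	adr = base + i * (stride1 + count2 * stride2) + j * stride2
 *
 * e.g. with count1 == 2 and count2 == 2 the purged addresses are base,
 * base + stride2, base + 2 * stride2 + stride1, base + 3 * stride2 + stride1.
 */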

/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
	/* TODO */
	tlb_invalidate_all();
}

/** Invalidate entries covering a range of pages in an address space.
 *
 * @param asid Address space identifier.
 * @param page Address of the first page whose entries are to be invalidated.
 * @param cnt Number of pages.
 */
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt)
{
	region_register rr;
	bool restore_rr = false;
	int b = 0;
	int c = cnt;
	__address va;
	__u64 ps;

	va = page;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	/*
	 * Select the purge page size: b becomes floor(log2(cnt)) and is then
	 * halved, so each case below covers a power-of-four range of counts.
	 */
	while (c >>= 1)
		b++;
	b >>= 1;

	switch (b) {
	case 0:	/* cnt 1 - 3 */
		ps = PAGE_WIDTH;
		break;
	case 1:	/* cnt 4 - 15 */
		/* cnt = ((cnt - 1) / 4) + 1; */
		ps = PAGE_WIDTH + 2;
		va &= ~((1 << ps) - 1);
		break;
	case 2:	/* cnt 16 - 63 */
		/* cnt = ((cnt - 1) / 16) + 1; */
		ps = PAGE_WIDTH + 4;
		va &= ~((1 << ps) - 1);
		break;
	case 3:	/* cnt 64 - 255 */
		/* cnt = ((cnt - 1) / 64) + 1; */
		ps = PAGE_WIDTH + 6;
		va &= ~((1 << ps) - 1);
		break;
	case 4:	/* cnt 256 - 1023 */
		/* cnt = ((cnt - 1) / 256) + 1; */
		ps = PAGE_WIDTH + 8;
		va &= ~((1 << ps) - 1);
		break;
	case 5:	/* cnt 1024 - 4095 */
		/* cnt = ((cnt - 1) / 1024) + 1; */
		ps = PAGE_WIDTH + 10;
		va &= ~((1 << ps) - 1);
		break;
	case 6:	/* cnt 4096 - 16383 */
		/* cnt = ((cnt - 1) / 4096) + 1; */
		ps = PAGE_WIDTH + 12;
		va &= ~((1 << ps) - 1);
		break;
	case 7:	/* cnt 16384 - 65535 */
	case 8:	/* cnt 65536 - (256K - 1) */
		/* cnt = ((cnt - 1) / 16384) + 1; */
		ps = PAGE_WIDTH + 14;
		va &= ~((1 << ps) - 1);
		break;
	default:
		/* cnt = ((cnt - 1) / (16384 * 16)) + 1; */
		ps = PAGE_WIDTH + 18;
		va &= ~((1 << ps) - 1);
		break;
	}

	/* cnt += (page != va); */
	/* Purge in steps of the chosen page size until the whole range is covered. */
	for (; va < (page + cnt * PAGE_SIZE); va += (1 << ps)) {
		__asm__ volatile (
			"ptc.l %0,%1;;"
			:
			: "r" (va), "r" (ps << 2)
		);
	}
	srlz_d();
	srlz_i();

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
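
/*
 * Worked example for the purge-size selection above (illustration only,
 * assuming 16K base pages, i.e. PAGE_WIDTH == 14): for cnt == 20 the shift
 * loop yields b == floor(log2(20)) == 4, halved to 2, so ps == PAGE_WIDTH + 4
 * and the range is purged with ptc.l in 16-page (256K) steps, with va first
 * aligned down to a 16-page boundary. The purge may therefore cover more
 * than the requested range, but never less.
 */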

/** Insert data into data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, true);
}

/** Insert data into instruction translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, false);
}

/** Insert data into instruction or data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
 */
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	__asm__ volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7 = %4,r0;;\n"	/* decide between itc and dtc */
		"(p6) itc.i %3;;\n"
		"(p7) itc.d %3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
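
/*
 * Rough C-level sketch of the inline assembly in tc_mapping_insert()
 * (illustration only): interruption collection is disabled while cr.ifa and
 * cr.itir are programmed and the entry is inserted, then psr.l is restored:
 *
 *	save psr in r8;
 *	clear psr.ic and serialize;
 *	cr.ifa = va;
 *	cr.itir = entry.word[1];
 *	if (!dtc)
 *		insert entry.word[0] into the ITC;	// itc.i
 *	else
 *		insert entry.word[0] into the DTC;	// itc.d
 *	restore psr.l from r8 and serialize;
 */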

/** Insert data into instruction translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	tr_mapping_insert(va, asid, entry, false, tr);
}

/** Insert data into data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	tr_mapping_insert(va, asid, entry, true, tr);
}

/** Insert data into instruction or data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	__asm__ volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7=%5,r0;;\n"	/* decide between itr and dtr */
		"(p6) itr.i itr[%4]=%3;;\n"
		"(p7) itr.d dtr[%4]=%3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}

/** Insert data into DTLB.
 *
 * @param page Virtual page address.
 * @param frame Physical frame address.
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
 * @param tr Translation register if dtr is true, ignored otherwise.
 */
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	entry.p = true;			/* present */
	entry.ma = MA_WRITEBACK;
	entry.a = true;			/* already accessed */
	entry.d = true;			/* already dirty */
	entry.pl = PL_KERNEL;
	entry.ar = AR_READ | AR_WRITE;
	entry.ppn = frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	if (dtr)
		dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
	else
		dtc_mapping_insert(page, ASID_KERNEL, entry);
}
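
/*
 * Usage sketch (illustration only): a caller that wants one kernel page
 * pinned in data translation register 1 rather than cached in the DTC could
 * write
 *
 *	dtlb_kernel_mapping_insert(page, KA2PA(page), true, 1);
 *
 * The alternate data TLB fault handler below uses the DTC variant
 * (dtr == false), since cached entries may be evicted and refilled on demand.
 */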

/** Copy content of PTE into data translation cache.
 *
 * @param t PTE.
 */
void dtc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.d = t->d;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->w ? AR_WRITE : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	dtc_mapping_insert(t->page, t->as->asid, entry);
}

/** Copy content of PTE into instruction translation cache.
 *
 * @param t PTE.
 */
void itc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	ASSERT(t->x);

	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	itc_mapping_insert(t->page, t->as->asid, entry);
}

/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
	region_register rr;
	__address va;
	pte_t *t;

	va = pstate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in software page hash table.
		 * Insert it into instruction translation cache.
		 */
		itc_pte_copy(t);
	} else {
		/*
		 * Forward the page fault to address space page fault handler.
		 */
		if (!as_page_fault(va)) {
			panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
		}
	}
}

/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
	region_register rr;
	rid_t rid;
	__address va;
	pte_t *t;

	va = pstate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;
	if (RID2ASID(rid) == ASID_KERNEL) {
		if (VA2VRN(va) == VRN_KERNEL) {
			/*
			 * Provide KA2PA(identity) mapping for faulting piece of
			 * kernel address space.
			 */
			dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
			return;
		}
	}

	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in software page hash table.
		 * Insert it into data translation cache.
		 */
		dtc_pte_copy(t);
	} else {
		/*
		 * Forward the page fault to address space page fault handler.
		 */
		if (!as_page_fault(va)) {
			panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
		}
	}
}

/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_nested_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
	panic("%s\n", __FUNCTION__);
}

/** Data Dirty bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_dirty_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
	pte_t *t;

	t = page_mapping_find(AS, pstate->cr_ifa);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Dirty bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->d = true;
		dtc_pte_copy(t);
	}
}

/** Instruction access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void instruction_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
	pte_t *t;

	t = page_mapping_find(AS, pstate->cr_ifa);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into ITC.
		 */
		t->a = true;
		itc_pte_copy(t);
	}
}

/** Data access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
	pte_t *t;

	t = page_mapping_find(AS, pstate->cr_ifa);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->a = true;
		dtc_pte_copy(t);
	}
}

/** Page not present fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void page_not_present(__u64 vector, struct exception_regdump *pstate)
{
	region_register rr;
	__address va;
	pte_t *t;

	va = pstate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	t = page_mapping_find(AS, va);
	ASSERT(t);

	if (t->p) {
		/*
		 * If the Present bit is set in page hash table, just copy it
		 * and update ITC/DTC.
		 */
		if (t->x)
			itc_pte_copy(t);
		else
			dtc_pte_copy(t);
	} else {
		if (!as_page_fault(va)) {
			panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
		}
	}
}