source: mainline/arch/ia64/src/mm/tlb.c@ 9bda3af6

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 9bda3af6 was 9bda3af6, checked in by Jakub Vana <jakub.vana@…>, 19 years ago

Better purge page selection.

  • Property mode set to 100644
File size: 14.1 KB
Line 
1/*
2 * Copyright (C) 2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/*
30 * TLB management.
31 */
32
33#include <mm/tlb.h>
34#include <mm/asid.h>
35#include <mm/page.h>
36#include <mm/as.h>
37#include <arch/mm/tlb.h>
38#include <arch/mm/page.h>
39#include <arch/barrier.h>
40#include <arch/interrupt.h>
41#include <arch/pal/pal.h>
42#include <arch/asm.h>
43#include <typedefs.h>
44#include <panic.h>
45#include <arch.h>
46
47
48
49/** Invalidate all TLB entries. */
50void tlb_invalidate_all(void)
51{
52 __address adr;
53 __u32 count1,count2,stride1,stride2;
54
55 int i,j;
56
57 adr=PAL_PTCE_INFO_BASE();
58 count1=PAL_PTCE_INFO_COUNT1();
59 count2=PAL_PTCE_INFO_COUNT2();
60 stride1=PAL_PTCE_INFO_STRIDE1();
61 stride2=PAL_PTCE_INFO_STRIDE2();
62
63 interrupts_disable();
64
65 for(i=0;i<count1;i++)
66 {
67 for(j=0;j<count2;j++)
68 {
69 asm volatile
70 (
71 "ptc.e %0;;"
72 :
73 :"r" (adr)
74 );
75 adr+=stride2;
76 }
77 adr+=stride1;
78 }
79
80 interrupts_enable();
81
82 srlz_d();
83 srlz_i();
84}
85
86/** Invalidate entries belonging to an address space.
87 *
88 * @param asid Address space identifier.
89 */
90void tlb_invalidate_asid(asid_t asid)
91{
92 /* TODO */
93 tlb_invalidate_all();
94}
95
96
97void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt)
98{
99
100
101 region_register rr;
102 bool restore_rr = false;
103 int b=0;
104 int c=cnt;
105
106 __address va;
107 va=page;
108
109 rr.word = rr_read(VA2VRN(va));
110 if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
111 /*
112 * The selected region register does not contain required RID.
113 * Save the old content of the register and replace the RID.
114 */
115 region_register rr0;
116
117 rr0 = rr;
118 rr0.map.rid = ASID2RID(asid, VA2VRN(va));
119 rr_write(VA2VRN(va), rr0.word);
120 srlz_d();
121 srlz_i();
122 }
123
124 while(c>>=1) b++;
125 b>>=1;
126 __u64 ps;
127
128 switch(b)
129 {
130 case 0: /*cnt 1-3*/
131 {
132 ps=PAGE_WIDTH;
133 break;
134 }
135 case 1: /*cnt 4-15*/
136 {
137 /*cnt=((cnt-1)/4)+1;*/
138 ps=PAGE_WIDTH+2;
139 va&=~((1<<ps)-1);
140 break;
141 }
142 case 2: /*cnt 16-63*/
143 {
144 /*cnt=((cnt-1)/16)+1;*/
145 ps=PAGE_WIDTH+4;
146 va&=~((1<<ps)-1);
147 break;
148 }
149 case 3: /*cnt 64-255*/
150 {
151 /*cnt=((cnt-1)/64)+1;*/
152 ps=PAGE_WIDTH+6;
153 va&=~((1<<ps)-1);
154 break;
155 }
156 case 4: /*cnt 256-1023*/
157 {
158 /*cnt=((cnt-1)/256)+1;*/
159 ps=PAGE_WIDTH+8;
160 va&=~((1<<ps)-1);
161 break;
162 }
163 case 5: /*cnt 1024-4095*/
164 {
165 /*cnt=((cnt-1)/1024)+1;*/
166 ps=PAGE_WIDTH+10;
167 va&=~((1<<ps)-1);
168 break;
169 }
170 case 6: /*cnt 4096-16383*/
171 {
172 /*cnt=((cnt-1)/4096)+1;*/
173 ps=PAGE_WIDTH+12;
174 va&=~((1<<ps)-1);
175 break;
176 }
177 case 7: /*cnt 16384-65535*/
178 case 8: /*cnt 65536-(256K-1)*/
179 {
180 /*cnt=((cnt-1)/16384)+1;*/
181 ps=PAGE_WIDTH+14;
182 va&=~((1<<ps)-1);
183 break;
184 }
185 default:
186 {
187 /*cnt=((cnt-1)/(16384*16))+1;*/
188 ps=PAGE_WIDTH+18;
189 va&=~((1<<ps)-1);
190 break;
191 }
192 }
193 /*cnt+=(page!=va);*/
194 for(;va<(page+cnt*(PAGE_SIZE));va+=(1<<ps)) {
195 __asm__ volatile
196 (
197 "ptc.l %0,%1;;"
198 :
199 : "r"(va), "r"(ps<<2)
200 );
201 }
202 srlz_d();
203 srlz_i();
204
205
206 if (restore_rr) {
207 rr_write(VA2VRN(va), rr.word);
208 srlz_d();
209 srlz_i();
210 }
211
212
213}
214
215
216/** Insert data into data translation cache.
217 *
218 * @param va Virtual page address.
219 * @param asid Address space identifier.
220 * @param entry The rest of TLB entry as required by TLB insertion format.
221 */
222void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
223{
224 tc_mapping_insert(va, asid, entry, true);
225}
226
227/** Insert data into instruction translation cache.
228 *
229 * @param va Virtual page address.
230 * @param asid Address space identifier.
231 * @param entry The rest of TLB entry as required by TLB insertion format.
232 */
233void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
234{
235 tc_mapping_insert(va, asid, entry, false);
236}
237
/** Insert data into instruction or data translation cache.
 *
 * If the region register covering va does not already hold the RID
 * derived from asid, it is temporarily rewritten for the duration of
 * the insert and restored afterwards.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
 */
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	/*
	 * PSR.ic is cleared (rsm) while cr.ifa/cr.itir are programmed and
	 * the itc insert executes, then PSR is restored from r8. The
	 * statement order inside this asm is architecturally mandated.
	 */
	__asm__ volatile (
		"mov r8=psr;;\n"		/* save PSR */
		"rsm %0;;\n"			/* clear PSR.ic (PSR_IC_MASK) */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7 = %4,r0;;\n"	/* decide between itc and dtc */
		"(p6) itc.i %3;;\n"
		"(p7) itc.d %3;;\n"
		"mov psr.l=r8;;\n"		/* restore lower PSR half from r8 */
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		/* Put back the original region register content. */
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
288
289/** Insert data into instruction translation register.
290 *
291 * @param va Virtual page address.
292 * @param asid Address space identifier.
293 * @param entry The rest of TLB entry as required by TLB insertion format.
294 * @param tr Translation register.
295 */
296void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
297{
298 tr_mapping_insert(va, asid, entry, false, tr);
299}
300
301/** Insert data into data translation register.
302 *
303 * @param va Virtual page address.
304 * @param asid Address space identifier.
305 * @param entry The rest of TLB entry as required by TLB insertion format.
306 * @param tr Translation register.
307 */
308void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
309{
310 tr_mapping_insert(va, asid, entry, true, tr);
311}
312
/** Insert data into instruction or data translation register.
 *
 * If the region register covering va does not already hold the RID
 * derived from asid, it is temporarily rewritten for the duration of
 * the insert and restored afterwards.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	/*
	 * PSR.ic is cleared (rsm) while cr.ifa/cr.itir are programmed and
	 * the itr insert executes, then PSR is restored from r8. The
	 * statement order inside this asm is architecturally mandated.
	 */
	__asm__ volatile (
		"mov r8=psr;;\n"		/* save PSR */
		"rsm %0;;\n"			/* clear PSR.ic (PSR_IC_MASK) */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7=%5,r0;;\n"	/* decide between itr and dtr */
		"(p6) itr.i itr[%4]=%3;;\n"
		"(p7) itr.d dtr[%4]=%3;;\n"
		"mov psr.l=r8;;\n"		/* restore lower PSR half from r8 */
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		/* Put back the original region register content. */
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
364
/** Insert a kernel read/write mapping into DTLB.
 *
 * Builds a present, accessed, dirty, writeback, kernel-privilege entry
 * of PAGE_WIDTH size and inserts it either into a data translation
 * register or into the data translation cache.
 *
 * @param page Virtual page address to map.
 * @param frame Physical frame address to map the page to.
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
 * @param tr Translation register if dtr is true, ignored otherwise.
 */
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	entry.p = true;			/* present */
	entry.ma = MA_WRITEBACK;
	entry.a = true;			/* already accessed */
	entry.d = true;			/* already dirty */
	entry.pl = PL_KERNEL;
	entry.ar = AR_READ | AR_WRITE;
	entry.ppn = frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	if (dtr)
		dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
	else
		dtc_mapping_insert(page, ASID_KERNEL, entry);
}
394
395/** Copy content of PTE into data translation cache.
396 *
397 * @param t PTE.
398 */
399void dtc_pte_copy(pte_t *t)
400{
401 tlb_entry_t entry;
402
403 entry.word[0] = 0;
404 entry.word[1] = 0;
405
406 entry.p = t->p;
407 entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
408 entry.a = t->a;
409 entry.d = t->d;
410 entry.pl = t->k ? PL_KERNEL : PL_USER;
411 entry.ar = t->w ? AR_WRITE : AR_READ;
412 entry.ppn = t->frame >> PPN_SHIFT;
413 entry.ps = PAGE_WIDTH;
414
415 dtc_mapping_insert(t->page, t->as->asid, entry);
416}
417
418/** Copy content of PTE into instruction translation cache.
419 *
420 * @param t PTE.
421 */
422void itc_pte_copy(pte_t *t)
423{
424 tlb_entry_t entry;
425
426 entry.word[0] = 0;
427 entry.word[1] = 0;
428
429 ASSERT(t->x);
430
431 entry.p = t->p;
432 entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
433 entry.a = t->a;
434 entry.pl = t->k ? PL_KERNEL : PL_USER;
435 entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
436 entry.ppn = t->frame >> PPN_SHIFT;
437 entry.ps = PAGE_WIDTH;
438
439 itc_mapping_insert(t->page, t->as->asid, entry);
440}
441
442/** Instruction TLB fault handler for faults with VHPT turned off.
443 *
444 * @param vector Interruption vector.
445 * @param pstate Structure with saved interruption state.
446 */
447void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate)
448{
449 region_register rr;
450 __address va;
451 pte_t *t;
452
453 va = pstate->cr_ifa; /* faulting address */
454 t = page_mapping_find(AS, va);
455 if (t) {
456 /*
457 * The mapping was found in software page hash table.
458 * Insert it into data translation cache.
459 */
460 itc_pte_copy(t);
461 } else {
462 /*
463 * Forward the page fault to address space page fault handler.
464 */
465 if (!as_page_fault(va)) {
466 panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
467 }
468 }
469}
470
471/** Data TLB fault handler for faults with VHPT turned off.
472 *
473 * @param vector Interruption vector.
474 * @param pstate Structure with saved interruption state.
475 */
476void alternate_data_tlb_fault(__u64 vector, struct exception_regdump *pstate)
477{
478 region_register rr;
479 rid_t rid;
480 __address va;
481 pte_t *t;
482
483 va = pstate->cr_ifa; /* faulting address */
484 rr.word = rr_read(VA2VRN(va));
485 rid = rr.map.rid;
486 if (RID2ASID(rid) == ASID_KERNEL) {
487 if (VA2VRN(va) == VRN_KERNEL) {
488 /*
489 * Provide KA2PA(identity) mapping for faulting piece of
490 * kernel address space.
491 */
492 dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
493 return;
494 }
495 }
496
497 t = page_mapping_find(AS, va);
498 if (t) {
499 /*
500 * The mapping was found in software page hash table.
501 * Insert it into data translation cache.
502 */
503 dtc_pte_copy(t);
504 } else {
505 /*
506 * Forward the page fault to address space page fault handler.
507 */
508 if (!as_page_fault(va)) {
509 panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
510 }
511 }
512}
513
514/** Data nested TLB fault handler.
515 *
516 * This fault should not occur.
517 *
518 * @param vector Interruption vector.
519 * @param pstate Structure with saved interruption state.
520 */
521void data_nested_tlb_fault(__u64 vector, struct exception_regdump *pstate)
522{
523 panic("%s\n", __FUNCTION__);
524}
525
526/** Data Dirty bit fault handler.
527 *
528 * @param vector Interruption vector.
529 * @param pstate Structure with saved interruption state.
530 */
531void data_dirty_bit_fault(__u64 vector, struct exception_regdump *pstate)
532{
533 pte_t *t;
534
535 t = page_mapping_find(AS, pstate->cr_ifa);
536 ASSERT(t && t->p);
537 if (t && t->p) {
538 /*
539 * Update the Dirty bit in page tables and reinsert
540 * the mapping into DTC.
541 */
542 t->d = true;
543 dtc_pte_copy(t);
544 }
545}
546
547/** Instruction access bit fault handler.
548 *
549 * @param vector Interruption vector.
550 * @param pstate Structure with saved interruption state.
551 */
552void instruction_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
553{
554 pte_t *t;
555
556 t = page_mapping_find(AS, pstate->cr_ifa);
557 ASSERT(t && t->p);
558 if (t && t->p) {
559 /*
560 * Update the Accessed bit in page tables and reinsert
561 * the mapping into ITC.
562 */
563 t->a = true;
564 itc_pte_copy(t);
565 }
566}
567
568/** Data access bit fault handler.
569 *
570 * @param vector Interruption vector.
571 * @param pstate Structure with saved interruption state.
572 */
573void data_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
574{
575 pte_t *t;
576
577 t = page_mapping_find(AS, pstate->cr_ifa);
578 ASSERT(t && t->p);
579 if (t && t->p) {
580 /*
581 * Update the Accessed bit in page tables and reinsert
582 * the mapping into DTC.
583 */
584 t->a = true;
585 dtc_pte_copy(t);
586 }
587}
588
589/** Page not present fault handler.
590 *
591 * @param vector Interruption vector.
592 * @param pstate Structure with saved interruption state.
593 */
594void page_not_present(__u64 vector, struct exception_regdump *pstate)
595{
596 region_register rr;
597 __address va;
598 pte_t *t;
599
600 va = pstate->cr_ifa; /* faulting address */
601 t = page_mapping_find(AS, va);
602 ASSERT(t);
603
604 if (t->p) {
605 /*
606 * If the Present bit is set in page hash table, just copy it
607 * and update ITC/DTC.
608 */
609 if (t->x)
610 itc_pte_copy(t);
611 else
612 dtc_pte_copy(t);
613 } else {
614 if (!as_page_fault(va)) {
615 panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
616 }
617 }
618}
Note: See TracBrowser for help on using the repository browser.