source: mainline/arch/ia64/src/mm/tlb.c@e3c762cd

Last change on this file since e3c762cd was e3c762cd, checked in by Jakub Jermar <jakub@…>, 19 years ago

Complete implementation of copy_from_uspace() and copy_to_uspace()
for amd64 and ia32. Other architectures still compile and run,
but need to implement their own assembly-only memcpy(), memcpy_from_uspace(),
memcpy_to_uspace() and their failover parts. For these architectures
only dummy implementations are provided.

/*
 * Copyright (C) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TLB management.
 */

#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/mm/vhpt.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <arch/pal/pal.h>
#include <arch/asm.h>
#include <typedefs.h>
#include <panic.h>
#include <print.h>
#include <arch.h>

/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
	ipl_t ipl;
	__address adr;
	__u32 count1, count2, stride1, stride2;
	int i, j;

	adr = PAL_PTCE_INFO_BASE();
	count1 = PAL_PTCE_INFO_COUNT1();
	count2 = PAL_PTCE_INFO_COUNT2();
	stride1 = PAL_PTCE_INFO_STRIDE1();
	stride2 = PAL_PTCE_INFO_STRIDE2();

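	/*
	 * Walk the purge domain reported by PAL_PTCE_INFO: count1
	 * iterations with stride1 in the outer loop, count2 iterations
	 * with stride2 in the inner loop, issuing ptc.e on each address.
	 * This is the firmware-prescribed way to flush the whole local TLB.
	 */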
	ipl = interrupts_disable();

	for (i = 0; i < count1; i++) {
		for (j = 0; j < count2; j++) {
			__asm__ volatile (
				"ptc.e %0 ;;"
				:
				: "r" (adr)
			);
			adr += stride2;
		}
		adr += stride1;
	}

	interrupts_restore(ipl);

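	/*
	 * Serialize: srlz.d makes the purges visible to subsequent data
	 * accesses, srlz.i to subsequent instruction fetches.
	 */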
	srlz_d();
	srlz_i();
#ifdef CONFIG_VHPT
	vhpt_invalidate_all();
#endif
}

/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
	tlb_invalidate_all();
}

/** Invalidate pages belonging to an address space.
 *
 * @param asid Address space identifier.
 * @param page First page whose TLB entry is to be invalidated.
 * @param cnt Number of consecutive pages to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt)
{
	region_register rr;
	bool restore_rr = false;
	int b = 0;
	int c = cnt;
	__address va = page;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

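	/*
	 * Compute b = floor(log2(cnt)) / 2; the switch below then picks
	 * the purge page size 2^(PAGE_WIDTH + 2b) and aligns va down to
	 * it. E.g. cnt == 20 yields b == 2, i.e. a 16-page purge granule.
	 */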
	while (c >>= 1)
		b++;
	b >>= 1;
	__u64 ps;

	switch (b) {
	case 0: /* cnt 1 - 3 */
		ps = PAGE_WIDTH;
		break;
	case 1: /* cnt 4 - 15 */
		ps = PAGE_WIDTH + 2;
		va &= ~((1UL << ps) - 1);
		break;
	case 2: /* cnt 16 - 63 */
		ps = PAGE_WIDTH + 4;
		va &= ~((1UL << ps) - 1);
		break;
	case 3: /* cnt 64 - 255 */
		ps = PAGE_WIDTH + 6;
		va &= ~((1UL << ps) - 1);
		break;
	case 4: /* cnt 256 - 1023 */
		ps = PAGE_WIDTH + 8;
		va &= ~((1UL << ps) - 1);
		break;
	case 5: /* cnt 1024 - 4095 */
		ps = PAGE_WIDTH + 10;
		va &= ~((1UL << ps) - 1);
		break;
	case 6: /* cnt 4096 - 16383 */
		ps = PAGE_WIDTH + 12;
		va &= ~((1UL << ps) - 1);
		break;
	case 7: /* cnt 16384 - 65535 */
	case 8: /* cnt 65536 - (256K - 1) */
		ps = PAGE_WIDTH + 14;
		va &= ~((1UL << ps) - 1);
		break;
	default:
		ps = PAGE_WIDTH + 18;
		va &= ~((1UL << ps) - 1);
		break;
	}
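	/*
	 * Purge the range with ptc.l. The purge page size is encoded in
	 * bits 7:2 of the second operand, hence the ps << 2 below.
	 */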
	for (; va < (page + cnt * PAGE_SIZE); va += (1UL << ps)) {
		__asm__ volatile (
			"ptc.l %0,%1;;"
			:
			: "r" (va), "r" (ps << 2)
		);
	}
	srlz_d();
	srlz_i();

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}

/** Insert data into data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, true);
}

/** Insert data into instruction translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, false);
}

/** Insert data into instruction or data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
 */
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

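	/*
	 * PSR.ic must stay cleared while cr.ifa and cr.itir are programmed
	 * and the insertion is issued; predicates p6/p7 select between
	 * itc.i and itc.d according to the dtc argument.
	 */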
	__asm__ volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7 = %4,r0;;\n"	/* decide between itc and dtc */
		"(p6) itc.i %3;;\n"
		"(p7) itc.d %3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}

/** Insert data into instruction translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	tr_mapping_insert(va, asid, entry, false, tr);
}

/** Insert data into data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	tr_mapping_insert(va, asid, entry, true, tr);
}

/** Insert data into instruction or data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

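	/*
	 * Same PSR.ic discipline as in tc_mapping_insert(), but the entry
	 * is placed into the pinned translation register tr (itr.i/itr.d),
	 * so it survives translation cache purges.
	 */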
	__asm__ volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7=%5,r0;;\n"	/* decide between itr and dtr */
		"(p6) itr.i itr[%4]=%3;;\n"
		"(p7) itr.d dtr[%4]=%3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}

/** Insert data into DTLB.
 *
 * @param page Virtual page address.
 * @param frame Physical frame address.
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
 * @param tr Translation register if dtr is true, ignored otherwise.
 */
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

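	/*
	 * Preset the Accessed and Dirty bits so that a kernel mapping can
	 * never raise an Access-bit or Dirty-bit fault.
	 */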
	entry.p = true;			/* present */
	entry.ma = MA_WRITEBACK;
	entry.a = true;			/* already accessed */
	entry.d = true;			/* already dirty */
	entry.pl = PL_KERNEL;
	entry.ar = AR_READ | AR_WRITE;
	entry.ppn = frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	if (dtr)
		dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
	else
		dtc_mapping_insert(page, ASID_KERNEL, entry);
}
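
/*
 * Illustrative use (kva being some kernel virtual address): pin one page
 * of the kernel identity mapping into DTR slot 0:
 *
 *	dtlb_kernel_mapping_insert(kva, KA2PA(kva), true, 0);
 */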

/** Copy content of PTE into data translation cache.
 *
 * @param t PTE.
 */
void dtc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.d = t->d;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->w ? AR_WRITE : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	dtc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
	vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

/** Copy content of PTE into instruction translation cache.
 *
 * @param t PTE.
 */
void itc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	ASSERT(t->x);

	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	itc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
	vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(__u64 vector, istate_t *istate)
{
	region_register rr;
	__address va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in software page hash table.
		 * Insert it into instruction translation cache.
		 */
		itc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to address space page fault handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, istate) == AS_PF_FAULT) {
			panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rr.map.rid, istate->cr_iip);
		}
	}
}

/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(__u64 vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	__address va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;
	if (RID2ASID(rid) == ASID_KERNEL) {
		if (VA2VRN(va) == VRN_KERNEL) {
			/*
			 * Provide KA2PA(identity) mapping for faulting piece of
			 * kernel address space.
			 */
			dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
			return;
		}
	}

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in software page hash table.
		 * Insert it into data translation cache.
		 */
		dtc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to address space page fault handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, istate) == AS_PF_FAULT) {
			panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
		}
	}
}

/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_nested_tlb_fault(__u64 vector, istate_t *istate)
{
	panic("%s\n", __FUNCTION__);
}

/** Data Dirty bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_dirty_bit_fault(__u64 vector, istate_t *istate)
{
	pte_t *t;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, istate->cr_ifa);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Dirty bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->d = true;
		dtc_pte_copy(t);
	}
	page_table_unlock(AS, true);
}

/** Instruction access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void instruction_access_bit_fault(__u64 vector, istate_t *istate)
{
	pte_t *t;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, istate->cr_ifa);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into ITC.
		 */
		t->a = true;
		itc_pte_copy(t);
	}
	page_table_unlock(AS, true);
}

/** Data access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_access_bit_fault(__u64 vector, istate_t *istate)
{
	pte_t *t;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, istate->cr_ifa);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->a = true;
		dtc_pte_copy(t);
	}
	page_table_unlock(AS, true);
}

/** Page not present fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void page_not_present(__u64 vector, istate_t *istate)
{
	region_register rr;
	__address va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	ASSERT(t);

	if (t->p) {
		/*
		 * If the Present bit is set in page hash table, just copy it
		 * and update ITC/DTC.
		 */
		if (t->x)
			itc_pte_copy(t);
		else
			dtc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		page_table_unlock(AS, true);
		if (as_page_fault(va, istate) == AS_PF_FAULT) {
			panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rr.map.rid);
		}
	}
}