source: mainline/arch/ia64/src/mm/tlb.c@ a82500ce

Last change on this file since a82500ce was a82500ce, checked in by Jakub Vana <jakub.vana@…>, 19 years ago

Two frame stack (standard stack + RSE) on Itanium

/*
 * Copyright (C) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TLB management.
 */

#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <arch/pal/pal.h>
#include <arch/asm.h>
#include <typedefs.h>
#include <panic.h>
#include <arch.h>

/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
	__address adr;
	__u32 count1, count2, stride1, stride2;
	int i, j;

	adr = PAL_PTCE_INFO_BASE();
	count1 = PAL_PTCE_INFO_COUNT1();
	count2 = PAL_PTCE_INFO_COUNT2();
	stride1 = PAL_PTCE_INFO_STRIDE1();
	stride2 = PAL_PTCE_INFO_STRIDE2();

	interrupts_disable();

	for (i = 0; i < count1; i++) {
		for (j = 0; j < count2; j++) {
			asm volatile (
				"ptc.e %0;;"
				:
				: "r" (adr)
			);
			adr += stride2;
		}
		adr += stride1;
	}

	interrupts_enable();

	srlz_d();
	srlz_i();
}
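
/*
 * Illustration of the ptc.e sweep above (the numbers are only an example;
 * the real values come from PAL_PTCE_INFO): with count1 = 2, count2 = 4,
 * stride1 = 0x2000 and stride2 = 0x1000, the loops issue 2 * 4 = 8 ptc.e
 * purges starting at the PAL-provided base, advancing the address by
 * stride2 after every purge and by an additional stride1 after each inner
 * sweep.
 */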

/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
	/* TODO */
	tlb_invalidate_all();
}

/** Invalidate TLB entries for a range of pages.
 *
 * @param asid Address space identifier.
 * @param page Address of the first page whose entries are to be invalidated.
 * @param cnt Number of pages.
 */
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt)
{
	/* TODO */
}
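
/*
 * A possible implementation, given here only as a sketch in a comment:
 * switch the region register to the RID derived from asid the same way
 * tc_mapping_insert() does, purge each page from the local TLB with ptc.l
 * (the page size goes into bits 7:2 of the second operand), then restore
 * the region register and serialize:
 *
 *	for (i = 0; i < cnt; i++)
 *		asm volatile ("ptc.l %0,%1;;"
 *		    :
 *		    : "r" (page + i * PAGE_SIZE), "r" (PAGE_WIDTH << 2)
 *		    : "memory");
 *	srlz_d();
 *	srlz_i();
 */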

/** Insert data into data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, true);
}

/** Insert data into instruction translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, false);
}

/** Insert data into instruction or data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
 */
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	__asm__ volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7 = %4,r0;;\n"	/* decide between itc and dtc */
		"(p6) itc.i %3;;\n"
		"(p7) itc.d %3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}

/** Insert data into instruction translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	tr_mapping_insert(va, asid, entry, false, tr);
}

/** Insert data into data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	tr_mapping_insert(va, asid, entry, true, tr);
}

/** Insert data into instruction or data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	__asm__ volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7=%5,r0;;\n"	/* decide between itr and dtr */
		"(p6) itr.i itr[%4]=%3;;\n"
		"(p7) itr.d dtr[%4]=%3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}

/** Insert data into DTLB.
 *
 * @param page Virtual page address.
 * @param frame Physical frame address.
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
 * @param tr Translation register if dtr is true, ignored otherwise.
 */
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	entry.p = true;			/* present */
	entry.ma = MA_WRITEBACK;
	entry.a = true;			/* already accessed */
	entry.d = true;			/* already dirty */
	entry.pl = PL_KERNEL;
	entry.ar = AR_READ | AR_WRITE;
	entry.ppn = frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	if (dtr)
		dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
	else
		dtc_mapping_insert(page, ASID_KERNEL, entry);
}
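
/*
 * Usage illustration (the register index 0 is only an example): pinning the
 * identity mapping of a kernel virtual address va in data translation
 * register 0 could look like
 *
 *	dtlb_kernel_mapping_insert(va, KA2PA(va), true, 0);
 *
 * With dtr set to false the mapping goes into the data translation cache
 * instead, which is what alternate_data_tlb_fault() below does for faulting
 * kernel addresses.
 */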

/** Copy content of PTE into data translation cache.
 *
 * @param t PTE.
 */
void dtc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.d = t->d;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->w ? AR_WRITE : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	dtc_mapping_insert(t->page, t->as->asid, entry);
}

/** Copy content of PTE into instruction translation cache.
 *
 * @param t PTE.
 */
void itc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	ASSERT(t->x);

	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	itc_mapping_insert(t->page, t->as->asid, entry);
}

/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
	region_register rr;
	__address va;
	pte_t *t;

	va = pstate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in software page hash table.
		 * Insert it into instruction translation cache.
		 */
		itc_pte_copy(t);
	} else {
		/*
		 * Forward the page fault to address space page fault handler.
		 */
		if (!as_page_fault(va)) {
			panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
		}
	}
}

/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
	region_register rr;
	rid_t rid;
	__address va;
	pte_t *t;

	va = pstate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;
	if (RID2ASID(rid) == ASID_KERNEL) {
		if (VA2VRN(va) == VRN_KERNEL) {
			/*
			 * Provide KA2PA(identity) mapping for faulting piece of
			 * kernel address space.
			 */
			dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
			return;
		}
	}

	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in software page hash table.
		 * Insert it into data translation cache.
		 */
		dtc_pte_copy(t);
	} else {
		/*
		 * Forward the page fault to address space page fault handler.
		 */
		if (!as_page_fault(va)) {
			panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
		}
	}
}

/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_nested_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
	panic("%s\n", __FUNCTION__);
}

/** Data Dirty bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_dirty_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
	pte_t *t;

	t = page_mapping_find(AS, pstate->cr_ifa);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Dirty bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->d = true;
		dtc_pte_copy(t);
	}
}

/** Instruction access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void instruction_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
	pte_t *t;

	t = page_mapping_find(AS, pstate->cr_ifa);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into ITC.
		 */
		t->a = true;
		itc_pte_copy(t);
	}
}

/** Data access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
	pte_t *t;

	t = page_mapping_find(AS, pstate->cr_ifa);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->a = true;
		dtc_pte_copy(t);
	}
}

/** Page not present fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void page_not_present(__u64 vector, struct exception_regdump *pstate)
{
	region_register rr;
	__address va;
	pte_t *t;

	va = pstate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	t = page_mapping_find(AS, va);
	ASSERT(t);

	if (t->p) {
		/*
		 * If the Present bit is set in page hash table, just copy it
		 * and update ITC/DTC.
		 */
		if (t->x)
			itc_pte_copy(t);
		else
			dtc_pte_copy(t);
	} else {
		if (!as_page_fault(va)) {
			panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
		}
	}
}