source: mainline/kernel/arch/mips32/src/mm/tlb.c@ 0ab362c

Last change on this file since 0ab362c was aaa6af2, checked in by Jakub Jermar <jakub@…>, 14 years ago

Do not take AS->mutex when reading AS->asid in mips32 TLB-miss handler.
The asid is guaranteed to remain constant while somebody is using the
address space. Moreover, taking the mutex at the beginning of the
miss handler, i.e. before the page tables are searched, breaks kernel
non-identity when someone accesses a non-identity page while holding
e.g. a spinlock (the debug kernel will hit an assertion).

/*
 * Copyright (c) 2003-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup mips32mm
 * @{
 */
/** @file
 */

#include <arch/mm/tlb.h>
#include <mm/asid.h>
#include <mm/tlb.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/cp0.h>
#include <panic.h>
#include <arch.h>
#include <synch/mutex.h>
#include <print.h>
#include <debug.h>
#include <align.h>
#include <interrupt.h>
#include <symtab.h>

static void tlb_refill_fail(istate_t *);
static void tlb_invalid_fail(istate_t *);
static void tlb_modified_fail(istate_t *);

static pte_t *find_mapping_and_check(uintptr_t, int, istate_t *, int *);

/** Initialize TLB.
 *
 * Invalidate all entries and mark wired entries.
 */
void tlb_arch_init(void)
{
    int i;

    cp0_pagemask_write(TLB_PAGE_MASK_16K);
    cp0_entry_hi_write(0);
    cp0_entry_lo0_write(0);
    cp0_entry_lo1_write(0);

    /* Clear and initialize TLB. */

    for (i = 0; i < TLB_ENTRY_COUNT; i++) {
        cp0_index_write(i);
        tlbwi();
    }

    /*
     * The kernel is going to make use of some wired
     * entries (e.g. mapping kernel stacks in kseg3).
     */
    cp0_wired_write(TLB_WIRED);
}

/** Process TLB Refill Exception.
 *
 * @param istate Interrupted register context.
 */
void tlb_refill(istate_t *istate)
{
    entry_lo_t lo;
    entry_hi_t hi;
    asid_t asid;
    uintptr_t badvaddr;
    pte_t *pte;
    int pfrc;

    badvaddr = cp0_badvaddr_read();
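    /*
     * AS->asid is read without taking AS->mutex: the ASID is
     * guaranteed to stay constant while somebody is using the address
     * space, and taking the mutex here would break kernel non-identity
     * accesses made while holding a spinlock.
     */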
    asid = AS->asid;

    pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate, &pfrc);
    if (!pte) {
        switch (pfrc) {
        case AS_PF_FAULT:
            goto fail;
            break;
        case AS_PF_DEFER:
            /*
             * The page fault came during copy_from_uspace()
             * or copy_to_uspace().
             */
            return;
        default:
            panic("Unexpected pfrc (%d).", pfrc);
        }
    }

    /*
     * Record access to PTE.
     */
    pte->a = 1;

    tlb_prepare_entry_hi(&hi, asid, badvaddr);
    tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->cacheable,
        pte->pfn);

    /*
     * New entry is to be inserted into TLB
     */
    cp0_entry_hi_write(hi.value);
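    /*
     * Each TLB entry maps a pair of adjacent virtual pages: the even
     * page through EntryLo0 and the odd page through EntryLo1. Only
     * the half corresponding to badvaddr is filled in here.
     */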
    if ((badvaddr / PAGE_SIZE) % 2 == 0) {
        cp0_entry_lo0_write(lo.value);
        cp0_entry_lo1_write(0);
    } else {
        cp0_entry_lo0_write(0);
        cp0_entry_lo1_write(lo.value);
    }
    cp0_pagemask_write(TLB_PAGE_MASK_16K);
    tlbwr();

    return;

fail:
    tlb_refill_fail(istate);
}

/** Process TLB Invalid Exception.
 *
 * @param istate Interrupted register context.
 */
void tlb_invalid(istate_t *istate)
{
    tlb_index_t index;
    uintptr_t badvaddr;
    entry_lo_t lo;
    entry_hi_t hi;
    pte_t *pte;
    int pfrc;

    badvaddr = cp0_badvaddr_read();

    /*
     * Locate the faulting entry in TLB.
     */
    hi.value = cp0_entry_hi_read();
    tlb_prepare_entry_hi(&hi, hi.asid, badvaddr);
    cp0_entry_hi_write(hi.value);
    tlbp();
    index.value = cp0_index_read();

    /*
     * Fail if the entry is not in TLB.
     */
    if (index.p) {
        printf("TLB entry not found.\n");
        goto fail;
    }

    pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate, &pfrc);
    if (!pte) {
        switch (pfrc) {
        case AS_PF_FAULT:
            goto fail;
            break;
        case AS_PF_DEFER:
            /*
             * The page fault came during copy_from_uspace()
             * or copy_to_uspace().
             */
            return;
        default:
            panic("Unexpected pfrc (%d).", pfrc);
        }
    }

    /*
     * Read the faulting TLB entry.
     */
    tlbr();

    /*
     * Record access to PTE.
     */
    pte->a = 1;

    tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->cacheable,
        pte->pfn);

    /*
     * The entry is to be updated in TLB.
     */
    if ((badvaddr / PAGE_SIZE) % 2 == 0)
        cp0_entry_lo0_write(lo.value);
    else
        cp0_entry_lo1_write(lo.value);
    cp0_pagemask_write(TLB_PAGE_MASK_16K);
    tlbwi();

    return;

fail:
    tlb_invalid_fail(istate);
}

/** Process TLB Modified Exception.
 *
 * @param istate Interrupted register context.
 */
void tlb_modified(istate_t *istate)
{
    tlb_index_t index;
    uintptr_t badvaddr;
    entry_lo_t lo;
    entry_hi_t hi;
    pte_t *pte;
    int pfrc;

    badvaddr = cp0_badvaddr_read();

    /*
     * Locate the faulting entry in TLB.
     */
    hi.value = cp0_entry_hi_read();
    tlb_prepare_entry_hi(&hi, hi.asid, badvaddr);
    cp0_entry_hi_write(hi.value);
    tlbp();
    index.value = cp0_index_read();

    /*
     * Fail if the entry is not in TLB.
     */
    if (index.p) {
        printf("TLB entry not found.\n");
        goto fail;
    }

    pte = find_mapping_and_check(badvaddr, PF_ACCESS_WRITE, istate, &pfrc);
    if (!pte) {
        switch (pfrc) {
        case AS_PF_FAULT:
            goto fail;
            break;
        case AS_PF_DEFER:
            /*
             * The page fault came during copy_from_uspace()
             * or copy_to_uspace().
             */
            return;
        default:
            panic("Unexpected pfrc (%d).", pfrc);
        }
    }

    /*
     * Read the faulting TLB entry.
     */
    tlbr();

    /*
     * Record access and write to PTE.
     */
    pte->a = 1;
    pte->d = 1;

    tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->w, pte->cacheable,
        pte->pfn);

    /*
     * The entry is to be updated in TLB.
     */
    if ((badvaddr / PAGE_SIZE) % 2 == 0)
        cp0_entry_lo0_write(lo.value);
    else
        cp0_entry_lo1_write(lo.value);
    cp0_pagemask_write(TLB_PAGE_MASK_16K);
    tlbwi();

    return;

fail:
    tlb_modified_fail(istate);
}

void tlb_refill_fail(istate_t *istate)
{
    uintptr_t va = cp0_badvaddr_read();

    fault_if_from_uspace(istate, "TLB Refill Exception on %p.",
        (void *) va);
    panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, "TLB Refill Exception.");
}


void tlb_invalid_fail(istate_t *istate)
{
    uintptr_t va = cp0_badvaddr_read();

    fault_if_from_uspace(istate, "TLB Invalid Exception on %p.",
        (void *) va);
    panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, "TLB Invalid Exception.");
}

void tlb_modified_fail(istate_t *istate)
{
    uintptr_t va = cp0_badvaddr_read();

    fault_if_from_uspace(istate, "TLB Modified Exception on %p.",
        (void *) va);
    panic_memtrap(istate, PF_ACCESS_WRITE, va, "TLB Modified Exception.");
}

/** Try to find PTE for faulting address.
 *
 * @param badvaddr Faulting virtual address.
 * @param access Access mode that caused the fault.
 * @param istate Pointer to interrupted state.
 * @param pfrc Pointer to variable where as_page_fault() return code
 *             will be stored.
 *
 * @return PTE on success, NULL otherwise.
 */
pte_t *
find_mapping_and_check(uintptr_t badvaddr, int access, istate_t *istate,
    int *pfrc)
{
    entry_hi_t hi;
    pte_t *pte;

    hi.value = cp0_entry_hi_read();

    /*
     * Handler cannot succeed if the ASIDs don't match.
     */
    if (hi.asid != AS->asid) {
        printf("EntryHi.asid=%d, AS->asid=%d\n", hi.asid, AS->asid);
        return NULL;
    }

    /*
     * Check if the mapping exists in page tables.
     */
    pte = page_mapping_find(AS, badvaddr, true);
    if (pte && pte->p && (pte->w || access != PF_ACCESS_WRITE)) {
        /*
         * Mapping found in page tables.
         * Immediately succeed.
         */
        return pte;
    } else {
        int rc;

        /*
         * Mapping not found in page tables.
         * Resort to higher-level page fault handler.
         */
        switch (rc = as_page_fault(badvaddr, access, istate)) {
        case AS_PF_OK:
            /*
             * The higher-level page fault handler succeeded.
             * The mapping ought to be in place.
             */
            pte = page_mapping_find(AS, badvaddr, true);
            ASSERT(pte && pte->p);
            ASSERT(pte->w || access != PF_ACCESS_WRITE);
            return pte;
        case AS_PF_DEFER:
            *pfrc = AS_PF_DEFER;
            return NULL;
        case AS_PF_FAULT:
            *pfrc = AS_PF_FAULT;
            return NULL;
        default:
            panic("Unexpected rc (%d).", rc);
        }

    }
}

void
tlb_prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable,
    uintptr_t pfn)
{
    lo->value = 0;
    lo->g = g;
    lo->v = v;
    lo->d = d;
    lo->c = cacheable ? PAGE_CACHEABLE_EXC_WRITE : PAGE_UNCACHED;
    lo->pfn = pfn;
}

void tlb_prepare_entry_hi(entry_hi_t *hi, asid_t asid, uintptr_t addr)
{
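    /*
     * EntryHi.VPN2 identifies a pair of adjacent virtual pages,
     * hence the alignment to twice the page size.
     */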
    hi->value = ALIGN_DOWN(addr, PAGE_SIZE * 2);
    hi->asid = asid;
}

/** Print contents of TLB. */
void tlb_print(void)
{
    page_mask_t mask;
    entry_lo_t lo0, lo1;
    entry_hi_t hi, hi_save;
    unsigned int i;

    hi_save.value = cp0_entry_hi_read();

    printf("[nr] [asid] [vpn2] [mask] [gvdc] [pfn ]\n");

    for (i = 0; i < TLB_ENTRY_COUNT; i++) {
        cp0_index_write(i);
        tlbr();

        mask.value = cp0_pagemask_read();
        hi.value = cp0_entry_hi_read();
        lo0.value = cp0_entry_lo0_read();
        lo1.value = cp0_entry_lo1_read();

        printf("%-4u %-6u %#6x %#6x %1u%1u%1u%1u %#6x\n",
            i, hi.asid, hi.vpn2, mask.mask,
            lo0.g, lo0.v, lo0.d, lo0.c, lo0.pfn);
        printf(" %1u%1u%1u%1u %#6x\n",
            lo1.g, lo1.v, lo1.d, lo1.c, lo1.pfn);
    }

    cp0_entry_hi_write(hi_save.value);
}

/** Invalidate all non-wired TLB entries. */
void tlb_invalidate_all(void)
{
    ipl_t ipl;
    entry_lo_t lo0, lo1;
    entry_hi_t hi_save;
    int i;

    hi_save.value = cp0_entry_hi_read();
    ipl = interrupts_disable();

    for (i = TLB_WIRED; i < TLB_ENTRY_COUNT; i++) {
        cp0_index_write(i);
        tlbr();

        lo0.value = cp0_entry_lo0_read();
        lo1.value = cp0_entry_lo1_read();

        lo0.v = 0;
        lo1.v = 0;

        cp0_entry_lo0_write(lo0.value);
        cp0_entry_lo1_write(lo1.value);

        tlbwi();
    }

    interrupts_restore(ipl);
    cp0_entry_hi_write(hi_save.value);
}

/** Invalidate all TLB entries belonging to specified address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
    ipl_t ipl;
    entry_lo_t lo0, lo1;
    entry_hi_t hi, hi_save;
    int i;

    ASSERT(asid != ASID_INVALID);

    hi_save.value = cp0_entry_hi_read();
    ipl = interrupts_disable();

    for (i = 0; i < TLB_ENTRY_COUNT; i++) {
        cp0_index_write(i);
        tlbr();

        hi.value = cp0_entry_hi_read();

        if (hi.asid == asid) {
            lo0.value = cp0_entry_lo0_read();
            lo1.value = cp0_entry_lo1_read();

            lo0.v = 0;
            lo1.v = 0;

            cp0_entry_lo0_write(lo0.value);
            cp0_entry_lo1_write(lo1.value);

            tlbwi();
        }
    }

    interrupts_restore(ipl);
    cp0_entry_hi_write(hi_save.value);
}

/** Invalidate TLB entries for specified page range belonging to specified
 * address space.
 *
 * @param asid Address space identifier.
 * @param page First page whose TLB entry is to be invalidated.
 * @param cnt Number of pages to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
    unsigned int i;
    ipl_t ipl;
    entry_lo_t lo0, lo1;
    entry_hi_t hi, hi_save;
    tlb_index_t index;

    if (asid == ASID_INVALID)
        return;

    hi_save.value = cp0_entry_hi_read();
    ipl = interrupts_disable();

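    /*
     * Each TLB entry covers a pair of adjacent pages, so probing
     * every other page in the range is sufficient.
     */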
    for (i = 0; i < cnt + 1; i += 2) {
        hi.value = 0;
        tlb_prepare_entry_hi(&hi, asid, page + i * PAGE_SIZE);
        cp0_entry_hi_write(hi.value);

        tlbp();
        index.value = cp0_index_read();

        if (!index.p) {
            /*
             * Entry was found; the index register contains a
             * valid index.
             */
            tlbr();

            lo0.value = cp0_entry_lo0_read();
            lo1.value = cp0_entry_lo1_read();

            lo0.v = 0;
            lo1.v = 0;

            cp0_entry_lo0_write(lo0.value);
            cp0_entry_lo1_write(lo1.value);

            tlbwi();
        }
    }

    interrupts_restore(ipl);
    cp0_entry_hi_write(hi_save.value);
}

/** @}
 */