source: mainline/arch/mips32/src/mm/tlb.c@ e3c762cd

Last change on this file since e3c762cd was e3c762cd, checked in by Jakub Jermar <jakub@…>, 19 years ago

Complete implementation of copy_from_uspace() and copy_to_uspace()
for amd64 and ia32. Other architectures still compile and run,
but need to implement their own assembly-only memcpy(), memcpy_from_uspace(),
memcpy_to_uspace() and their failover parts. For these architectures
only dummy implementations are provided.

  • Property mode set to 100644
File size: 12.5 KB
/*
 * Copyright (C) 2003-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch/mm/tlb.h>
#include <mm/asid.h>
#include <mm/tlb.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/cp0.h>
#include <panic.h>
#include <arch.h>
#include <symtab.h>
#include <synch/spinlock.h>
#include <print.h>
#include <debug.h>
#include <align.h>

static void tlb_refill_fail(istate_t *istate);
static void tlb_invalid_fail(istate_t *istate);
static void tlb_modified_fail(istate_t *istate);

static pte_t *find_mapping_and_check(__address badvaddr, istate_t *istate, int *pfrc);

static void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, __address pfn);
static void prepare_entry_hi(entry_hi_t *hi, asid_t asid, __address addr);

/** Initialize TLB
 *
 * Initialize TLB.
 * Invalidate all entries and mark wired entries.
 */
void tlb_arch_init(void)
{
    int i;

    cp0_pagemask_write(TLB_PAGE_MASK_16K);
    cp0_entry_hi_write(0);
    cp0_entry_lo0_write(0);
    cp0_entry_lo1_write(0);

    /* Clear and initialize TLB. */

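    /*
     * With EntryHi and both EntryLo registers zeroed above, every tlbwi
     * below writes an invalid (V = 0) entry, so the whole TLB is flushed.
     */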
    for (i = 0; i < TLB_ENTRY_COUNT; i++) {
        cp0_index_write(i);
        tlbwi();
    }


    /*
     * The kernel is going to make use of some wired
     * entries (e.g. mapping kernel stacks in kseg3).
     */
    cp0_wired_write(TLB_WIRED);
}

/** Process TLB Refill Exception
 *
 * Process TLB Refill Exception.
 *
 * @param istate Interrupted register context.
 */
void tlb_refill(istate_t *istate)
{
    entry_lo_t lo;
    entry_hi_t hi;
    asid_t asid;
    __address badvaddr;
    pte_t *pte;
    int pfrc;

    badvaddr = cp0_badvaddr_read();

    spinlock_lock(&AS->lock);
    asid = AS->asid;
    spinlock_unlock(&AS->lock);

    page_table_lock(AS, true);

    pte = find_mapping_and_check(badvaddr, istate, &pfrc);
    if (!pte) {
        switch (pfrc) {
        case AS_PF_FAULT:
            goto fail;
            break;
        case AS_PF_DEFER:
            /*
             * The page fault came during copy_from_uspace()
             * or copy_to_uspace().
             */
            page_table_unlock(AS, true);
            return;
        default:
            panic("unexpected pfrc (%d)\n", pfrc);
        }
    }

    /*
     * Record access to PTE.
     */
    pte->a = 1;

    prepare_entry_hi(&hi, asid, badvaddr);
    prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->cacheable, pte->pfn);

    /*
     * New entry is to be inserted into TLB
     */
    cp0_entry_hi_write(hi.value);
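    /*
     * Each TLB entry maps a pair of adjacent virtual pages: EntryLo0
     * describes the even page and EntryLo1 the odd one. Fill in only
     * the half that faulted and leave the other half invalid.
     */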
    if ((badvaddr/PAGE_SIZE) % 2 == 0) {
        cp0_entry_lo0_write(lo.value);
        cp0_entry_lo1_write(0);
    }
    else {
        cp0_entry_lo0_write(0);
        cp0_entry_lo1_write(lo.value);
    }
    cp0_pagemask_write(TLB_PAGE_MASK_16K);
    tlbwr();

    page_table_unlock(AS, true);
    return;

fail:
    page_table_unlock(AS, true);
    tlb_refill_fail(istate);
}

/** Process TLB Invalid Exception
 *
 * Process TLB Invalid Exception.
 *
 * @param istate Interrupted register context.
 */
void tlb_invalid(istate_t *istate)
{
    tlb_index_t index;
    __address badvaddr;
    entry_lo_t lo;
    entry_hi_t hi;
    pte_t *pte;
    int pfrc;

    badvaddr = cp0_badvaddr_read();

    /*
     * Locate the faulting entry in TLB.
     */
    hi.value = cp0_entry_hi_read();
    prepare_entry_hi(&hi, hi.asid, badvaddr);
    cp0_entry_hi_write(hi.value);
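    /*
     * tlbp probes the TLB for an entry matching EntryHi (VPN2 and ASID);
     * if no entry matches, the P bit of the Index register gets set.
     */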
    tlbp();
    index.value = cp0_index_read();

    page_table_lock(AS, true);

    /*
     * Fail if the entry is not in TLB.
     */
    if (index.p) {
        printf("TLB entry not found.\n");
        goto fail;
    }

    pte = find_mapping_and_check(badvaddr, istate, &pfrc);
    if (!pte) {
        switch (pfrc) {
        case AS_PF_FAULT:
            goto fail;
            break;
        case AS_PF_DEFER:
            /*
             * The page fault came during copy_from_uspace()
             * or copy_to_uspace().
             */
            page_table_unlock(AS, true);
            return;
        default:
            panic("unexpected pfrc (%d)\n", pfrc);
        }
    }

    /*
     * Read the faulting TLB entry.
     */
    tlbr();

    /*
     * Record access to PTE.
     */
    pte->a = 1;

    prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->cacheable, pte->pfn);

    /*
     * The entry is to be updated in TLB.
     */
    if ((badvaddr/PAGE_SIZE) % 2 == 0)
        cp0_entry_lo0_write(lo.value);
    else
        cp0_entry_lo1_write(lo.value);
    cp0_pagemask_write(TLB_PAGE_MASK_16K);
    tlbwi();

    page_table_unlock(AS, true);
    return;

fail:
    page_table_unlock(AS, true);
    tlb_invalid_fail(istate);
}

/** Process TLB Modified Exception
 *
 * Process TLB Modified Exception.
 *
 * @param istate Interrupted register context.
 */
void tlb_modified(istate_t *istate)
{
    tlb_index_t index;
    __address badvaddr;
    entry_lo_t lo;
    entry_hi_t hi;
    pte_t *pte;
    int pfrc;

    badvaddr = cp0_badvaddr_read();

    /*
     * Locate the faulting entry in TLB.
     */
    hi.value = cp0_entry_hi_read();
    prepare_entry_hi(&hi, hi.asid, badvaddr);
    cp0_entry_hi_write(hi.value);
    tlbp();
    index.value = cp0_index_read();

    page_table_lock(AS, true);

    /*
     * Fail if the entry is not in TLB.
     */
    if (index.p) {
        printf("TLB entry not found.\n");
        goto fail;
    }

    pte = find_mapping_and_check(badvaddr, istate, &pfrc);
    if (!pte) {
        switch (pfrc) {
        case AS_PF_FAULT:
            goto fail;
            break;
        case AS_PF_DEFER:
            /*
             * The page fault came during copy_from_uspace()
             * or copy_to_uspace().
             */
            page_table_unlock(AS, true);
            return;
        default:
            panic("unexpected pfrc (%d)\n", pfrc);
        }
    }

    /*
     * Fail if the page is not writable.
     */
    if (!pte->w)
        goto fail;

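    /*
     * The Modified exception is raised on a store through an entry whose
     * D (dirty/write-enable) bit is clear. The page is writable, so mark
     * the PTE accessed and dirty and rewrite the entry with D set.
     */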
    /*
     * Read the faulting TLB entry.
     */
    tlbr();

    /*
     * Record access and write to PTE.
     */
    pte->a = 1;
    pte->d = 1;

    prepare_entry_lo(&lo, pte->g, pte->p, pte->w, pte->cacheable, pte->pfn);

    /*
     * The entry is to be updated in TLB.
     */
    if ((badvaddr/PAGE_SIZE) % 2 == 0)
        cp0_entry_lo0_write(lo.value);
    else
        cp0_entry_lo1_write(lo.value);
    cp0_pagemask_write(TLB_PAGE_MASK_16K);
    tlbwi();

    page_table_unlock(AS, true);
    return;

fail:
    page_table_unlock(AS, true);
    tlb_modified_fail(istate);
}

void tlb_refill_fail(istate_t *istate)
{
    char *symbol = "";
    char *sym2 = "";

    char *s = get_symtab_entry(istate->epc);
    if (s)
        symbol = s;
    s = get_symtab_entry(istate->ra);
    if (s)
        sym2 = s;
    panic("%X: TLB Refill Exception at %X(%s<-%s)\n", cp0_badvaddr_read(), istate->epc, symbol, sym2);
}


void tlb_invalid_fail(istate_t *istate)
{
    char *symbol = "";

    char *s = get_symtab_entry(istate->epc);
    if (s)
        symbol = s;
    panic("%X: TLB Invalid Exception at %X(%s)\n", cp0_badvaddr_read(), istate->epc, symbol);
}

void tlb_modified_fail(istate_t *istate)
{
    char *symbol = "";

    char *s = get_symtab_entry(istate->epc);
    if (s)
        symbol = s;
    panic("%X: TLB Modified Exception at %X(%s)\n", cp0_badvaddr_read(), istate->epc, symbol);
}

/** Try to find PTE for faulting address
 *
 * Try to find PTE for faulting address.
 * The AS->lock must be held on entry to this function.
 *
 * @param badvaddr Faulting virtual address.
 * @param istate Pointer to interrupted state.
 * @param pfrc Pointer to variable where as_page_fault() return code will be stored.
 *
 * @return PTE on success, NULL otherwise.
 */
pte_t *find_mapping_and_check(__address badvaddr, istate_t *istate, int *pfrc)
{
    entry_hi_t hi;
    pte_t *pte;

    hi.value = cp0_entry_hi_read();

    /*
     * Handler cannot succeed if the ASIDs don't match.
     */
    if (hi.asid != AS->asid) {
        printf("EntryHi.asid=%d, AS->asid=%d\n", hi.asid, AS->asid);
        return NULL;
    }

    /*
     * Check if the mapping exists in page tables.
     */
    pte = page_mapping_find(AS, badvaddr);
    if (pte && pte->p) {
        /*
         * Mapping found in page tables.
         * Immediately succeed.
         */
        return pte;
    } else {
        int rc;

        /*
         * Mapping not found in page tables.
         * Resort to higher-level page fault handler.
         */
        page_table_unlock(AS, true);
        switch (rc = as_page_fault(badvaddr, istate)) {
        case AS_PF_OK:
            /*
             * The higher-level page fault handler succeeded;
             * the mapping ought to be in place.
             */
            page_table_lock(AS, true);
            pte = page_mapping_find(AS, badvaddr);
            ASSERT(pte && pte->p);
            return pte;
            break;
        case AS_PF_DEFER:
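            /*
             * The fault happened during copy_from_uspace() or
             * copy_to_uspace() and will be resolved later; just
             * report the deferral to the caller.
             */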
            page_table_lock(AS, true);
            *pfrc = AS_PF_DEFER;
            return NULL;
            break;
        case AS_PF_FAULT:
            page_table_lock(AS, true);
            printf("Page fault.\n");
            *pfrc = AS_PF_FAULT;
            return NULL;
            break;
        default:
            panic("unexpected rc (%d)\n", rc);
        }

    }
}

void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, __address pfn)
{
    lo->value = 0;
    lo->g = g;
    lo->v = v;
    lo->d = d;
    lo->c = cacheable ? PAGE_CACHEABLE_EXC_WRITE : PAGE_UNCACHED;
    lo->pfn = pfn;
}

void prepare_entry_hi(entry_hi_t *hi, asid_t asid, __address addr)
{
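    /*
     * EntryHi.VPN2 names a double-page (an even/odd page pair), so the
     * address is aligned down to a 2 * PAGE_SIZE boundary before the
     * ASID is merged in.
     */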
    hi->value = ALIGN_DOWN(addr, PAGE_SIZE * 2);
    hi->asid = asid;
}

/** Print contents of TLB. */
void tlb_print(void)
{
    page_mask_t mask;
    entry_lo_t lo0, lo1;
    entry_hi_t hi, hi_save;
    int i;

    hi_save.value = cp0_entry_hi_read();

    printf("TLB:\n");
    for (i = 0; i < TLB_ENTRY_COUNT; i++) {
        cp0_index_write(i);
        tlbr();

        mask.value = cp0_pagemask_read();
        hi.value = cp0_entry_hi_read();
        lo0.value = cp0_entry_lo0_read();
        lo1.value = cp0_entry_lo1_read();

        printf("%d: asid=%d, vpn2=%d, mask=%d\tg[0]=%d, v[0]=%d, d[0]=%d, c[0]=%hhd, pfn[0]=%d\n"
               "\t\t\t\tg[1]=%d, v[1]=%d, d[1]=%d, c[1]=%hhd, pfn[1]=%d\n",
               i, hi.asid, hi.vpn2, mask.mask, lo0.g, lo0.v, lo0.d, lo0.c, lo0.pfn,
               lo1.g, lo1.v, lo1.d, lo1.c, lo1.pfn);
    }

    cp0_entry_hi_write(hi_save.value);
}

/** Invalidate all TLB entries that are not wired. */
void tlb_invalidate_all(void)
{
    ipl_t ipl;
    entry_lo_t lo0, lo1;
    entry_hi_t hi_save;
    int i;

    hi_save.value = cp0_entry_hi_read();
    ipl = interrupts_disable();

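    /* Start at TLB_WIRED so that the kernel's wired entries survive. */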
    for (i = TLB_WIRED; i < TLB_ENTRY_COUNT; i++) {
        cp0_index_write(i);
        tlbr();

        lo0.value = cp0_entry_lo0_read();
        lo1.value = cp0_entry_lo1_read();

        lo0.v = 0;
        lo1.v = 0;

        cp0_entry_lo0_write(lo0.value);
        cp0_entry_lo1_write(lo1.value);

        tlbwi();
    }

    interrupts_restore(ipl);
    cp0_entry_hi_write(hi_save.value);
}

/** Invalidate all TLB entries belonging to specified address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
    ipl_t ipl;
    entry_lo_t lo0, lo1;
    entry_hi_t hi, hi_save;
    int i;

    ASSERT(asid != ASID_INVALID);

    hi_save.value = cp0_entry_hi_read();
    ipl = interrupts_disable();

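    /*
     * There is no hardware operation for per-ASID invalidation, so walk
     * all entries and clear the valid bits of those tagged with the ASID.
     */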
    for (i = 0; i < TLB_ENTRY_COUNT; i++) {
        cp0_index_write(i);
        tlbr();

        hi.value = cp0_entry_hi_read();

        if (hi.asid == asid) {
            lo0.value = cp0_entry_lo0_read();
            lo1.value = cp0_entry_lo1_read();

            lo0.v = 0;
            lo1.v = 0;

            cp0_entry_lo0_write(lo0.value);
            cp0_entry_lo1_write(lo1.value);

            tlbwi();
        }
    }

    interrupts_restore(ipl);
    cp0_entry_hi_write(hi_save.value);
}

/** Invalidate TLB entries for specified page range belonging to specified address space.
 *
 * @param asid Address space identifier.
 * @param page First page whose TLB entry is to be invalidated.
 * @param cnt Number of entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt)
{
    int i;
    ipl_t ipl;
    entry_lo_t lo0, lo1;
    entry_hi_t hi, hi_save;
    tlb_index_t index;

    ASSERT(asid != ASID_INVALID);

    hi_save.value = cp0_entry_hi_read();
    ipl = interrupts_disable();

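    /*
     * Step by two pages per probe: one TLB entry covers an even/odd page
     * pair. The cnt + 1 bound makes sure the last page is still reached
     * when the range starts on an odd page.
     */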
    for (i = 0; i < cnt + 1; i += 2) {
        hi.value = 0;
        prepare_entry_hi(&hi, asid, page + i * PAGE_SIZE);
        cp0_entry_hi_write(hi.value);

        tlbp();
        index.value = cp0_index_read();

        if (!index.p) {
            /* Entry was found, index register contains valid index. */
            tlbr();

            lo0.value = cp0_entry_lo0_read();
            lo1.value = cp0_entry_lo1_read();

            lo0.v = 0;
            lo1.v = 0;

            cp0_entry_lo0_write(lo0.value);
            cp0_entry_lo1_write(lo1.value);

            tlbwi();
        }
    }

    interrupts_restore(ipl);
    cp0_entry_hi_write(hi_save.value);
}