source: mainline/kernel/arch/mips32/src/mm/tlb.c@ a35b458

/*
 * Copyright (c) 2003-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup mips32mm
 * @{
 */
/** @file
 */

#include <arch/mm/tlb.h>
#include <mm/asid.h>
#include <mm/tlb.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/cp0.h>
#include <panic.h>
#include <arch.h>
#include <synch/mutex.h>
#include <print.h>
#include <log.h>
#include <assert.h>
#include <align.h>
#include <interrupt.h>
#include <symtab.h>

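/*
 * Each MIPS32 TLB entry maps a pair of adjacent virtual pages: EntryHi.VPN2
 * identifies the pair, while EntryLo0 and EntryLo1 describe the even and odd
 * page of the pair, respectively. BANK_SELECT_BIT() tells which of the two
 * a given virtual address falls into.
 */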
#define PFN_SHIFT	12
#define VPN_SHIFT	12

#define ADDR2HI_VPN(a)		((a) >> VPN_SHIFT)
#define ADDR2HI_VPN2(a)		(ADDR2HI_VPN((a)) >> 1)

#define HI_VPN2ADDR(vpn)	((vpn) << VPN_SHIFT)
#define HI_VPN22ADDR(vpn2)	(HI_VPN2ADDR(vpn2) << 1)

#define LO_PFN2ADDR(pfn)	((pfn) << PFN_SHIFT)

#define BANK_SELECT_BIT(a)	(((a) >> PAGE_WIDTH) & 1)

/** Initialize TLB.
 *
 * Invalidate all entries and mark wired entries.
 */
void tlb_arch_init(void)
{
	int i;

	cp0_pagemask_write(TLB_PAGE_MASK_16K);
	cp0_entry_hi_write(0);
	cp0_entry_lo0_write(0);
	cp0_entry_lo1_write(0);

	/* Clear and initialize TLB. */

	for (i = 0; i < TLB_ENTRY_COUNT; i++) {
		cp0_index_write(i);
		tlbwi();
	}

	/*
	 * The kernel is going to make use of some wired
	 * entries (e.g. mapping kernel stacks in kseg3).
	 */
	cp0_wired_write(TLB_WIRED);
}

/** Process TLB Refill Exception.
 *
 * @param istate Interrupted register context.
 */
void tlb_refill(istate_t *istate)
{
	entry_lo_t lo;
	uintptr_t badvaddr;
	pte_t pte;

	badvaddr = cp0_badvaddr_read();

	bool found = page_mapping_find(AS, badvaddr, true, &pte);
	if (found && pte.p) {
		/*
		 * Record access to PTE.
		 */
		pte.a = 1;

		tlb_prepare_entry_lo(&lo, pte.g, pte.p, pte.d,
		    pte.cacheable, pte.pfn);

		page_mapping_update(AS, badvaddr, true, &pte);

		/*
		 * New entry is to be inserted into TLB
		 */
		if (BANK_SELECT_BIT(badvaddr) == 0) {
			cp0_entry_lo0_write(lo.value);
			cp0_entry_lo1_write(0);
		} else {
			cp0_entry_lo0_write(0);
			cp0_entry_lo1_write(lo.value);
		}
		cp0_pagemask_write(TLB_PAGE_MASK_16K);
		tlbwr();
		return;
	}

	(void) as_page_fault(badvaddr, PF_ACCESS_READ, istate);
}

/** Process TLB Invalid Exception.
 *
 * @param istate Interrupted register context.
 */
void tlb_invalid(istate_t *istate)
{
	entry_lo_t lo;
	tlb_index_t index;
	uintptr_t badvaddr;
	pte_t pte;

	/*
	 * Locate the faulting entry in TLB.
	 */
	tlbp();
	index.value = cp0_index_read();

#if defined(PROCESSOR_4Kc)
	/*
	 * This can happen on a 4Kc when Status.EXL is 1 and there is a TLB miss.
	 * EXL is 1 when interrupts are disabled. The combination of a TLB miss
	 * and disabled interrupts is possible in copy_to/from_uspace().
	 */
	if (index.p) {
		tlb_refill(istate);
		return;
	}
#endif

	assert(!index.p);

	badvaddr = cp0_badvaddr_read();

	bool found = page_mapping_find(AS, badvaddr, true, &pte);
	if (found && pte.p) {
		/*
		 * Read the faulting TLB entry.
		 */
		tlbr();

		/*
		 * Record access to PTE.
		 */
		pte.a = 1;

		tlb_prepare_entry_lo(&lo, pte.g, pte.p, pte.d,
		    pte.cacheable, pte.pfn);

		page_mapping_update(AS, badvaddr, true, &pte);

		/*
		 * The entry is to be updated in TLB.
		 */
		if (BANK_SELECT_BIT(badvaddr) == 0)
			cp0_entry_lo0_write(lo.value);
		else
			cp0_entry_lo1_write(lo.value);
		tlbwi();
		return;
	}

	(void) as_page_fault(badvaddr, PF_ACCESS_READ, istate);
}

/** Process TLB Modified Exception.
 *
 * @param istate Interrupted register context.
 */
void tlb_modified(istate_t *istate)
{
	entry_lo_t lo;
	tlb_index_t index;
	uintptr_t badvaddr;
	pte_t pte;

	badvaddr = cp0_badvaddr_read();

	/*
	 * Locate the faulting entry in TLB.
	 */
	tlbp();
	index.value = cp0_index_read();

	/*
	 * Emit warning if the entry is not in TLB.
	 *
	 * We do not assert on this because this could be a manifestation of
	 * an emulator bug, such as QEMU Bug #1128935:
	 * https://bugs.launchpad.net/qemu/+bug/1128935
	 */
	if (index.p) {
		log(LF_ARCH, LVL_WARN, "%s: TLBP failed in exception handler (badvaddr=%#"
		    PRIxn ", ASID=%d).\n", __func__, badvaddr,
		    AS ? AS->asid : -1);
		return;
	}

	bool found = page_mapping_find(AS, badvaddr, true, &pte);
	if (found && pte.p && pte.w) {
		/*
		 * Read the faulting TLB entry.
		 */
		tlbr();

		/*
		 * Record access and write to PTE.
		 */
		pte.a = 1;
		pte.d = 1;

		tlb_prepare_entry_lo(&lo, pte.g, pte.p, pte.w,
		    pte.cacheable, pte.pfn);

		page_mapping_update(AS, badvaddr, true, &pte);

		/*
		 * The entry is to be updated in TLB.
		 */
		if (BANK_SELECT_BIT(badvaddr) == 0)
			cp0_entry_lo0_write(lo.value);
		else
			cp0_entry_lo1_write(lo.value);
		tlbwi();
		return;
	}

	(void) as_page_fault(badvaddr, PF_ACCESS_WRITE, istate);
}

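/** Prepare an EntryLo register value describing a single page.
 *
 * @param lo        EntryLo structure to fill in.
 * @param g         Global bit.
 * @param v         Valid bit.
 * @param d         Dirty (write-enable) bit.
 * @param cacheable True if the page is cacheable.
 * @param pfn       Physical frame number of the page.
 */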
void
tlb_prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable,
    uintptr_t pfn)
{
	lo->value = 0;
	lo->g = g;
	lo->v = v;
	lo->d = d;
	lo->c = cacheable ? PAGE_CACHEABLE_EXC_WRITE : PAGE_UNCACHED;
	lo->pfn = pfn;
}

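/** Prepare an EntryHi register value for a TLB probe or write.
 *
 * @param hi   EntryHi structure to fill in.
 * @param asid Address space identifier.
 * @param addr Virtual address; it is aligned down to the page boundary.
 */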
void tlb_prepare_entry_hi(entry_hi_t *hi, asid_t asid, uintptr_t addr)
{
	hi->value = 0;
	hi->vpn2 = ADDR2HI_VPN2(ALIGN_DOWN(addr, PAGE_SIZE));
	hi->asid = asid;
}

/** Print contents of TLB. */
void tlb_print(void)
{
	page_mask_t mask, mask_save;
	entry_lo_t lo0, lo0_save, lo1, lo1_save;
	entry_hi_t hi, hi_save;
	unsigned int i;

	hi_save.value = cp0_entry_hi_read();
	lo0_save.value = cp0_entry_lo0_read();
	lo1_save.value = cp0_entry_lo1_read();
	mask_save.value = cp0_pagemask_read();

	printf("[nr] [asid] [vpn2 ] [mask] [gvdc] [pfn ]\n");

	for (i = 0; i < TLB_ENTRY_COUNT; i++) {
		cp0_index_write(i);
		tlbr();

		mask.value = cp0_pagemask_read();
		hi.value = cp0_entry_hi_read();
		lo0.value = cp0_entry_lo0_read();
		lo1.value = cp0_entry_lo1_read();

		printf("%-4u %-6u %0#10x %-#6x %1u%1u%1u%1u %0#10x\n",
		    i, hi.asid, HI_VPN22ADDR(hi.vpn2), mask.mask,
		    lo0.g, lo0.v, lo0.d, lo0.c, LO_PFN2ADDR(lo0.pfn));
		printf(" %1u%1u%1u%1u %0#10x\n",
		    lo1.g, lo1.v, lo1.d, lo1.c, LO_PFN2ADDR(lo1.pfn));
	}

	cp0_entry_hi_write(hi_save.value);
	cp0_entry_lo0_write(lo0_save.value);
	cp0_entry_lo1_write(lo1_save.value);
	cp0_pagemask_write(mask_save.value);
}

/** Invalidate all non-wired TLB entries. */
void tlb_invalidate_all(void)
{
	entry_lo_t lo0, lo1;
	entry_hi_t hi_save;
	int i;

	assert(interrupts_disabled());

	hi_save.value = cp0_entry_hi_read();

	for (i = TLB_WIRED; i < TLB_ENTRY_COUNT; i++) {
		cp0_index_write(i);
		tlbr();

		lo0.value = cp0_entry_lo0_read();
		lo1.value = cp0_entry_lo1_read();

		lo0.v = 0;
		lo1.v = 0;

		cp0_entry_lo0_write(lo0.value);
		cp0_entry_lo1_write(lo1.value);

		tlbwi();
	}

	cp0_entry_hi_write(hi_save.value);
}

/** Invalidate all TLB entries belonging to the specified address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
	entry_lo_t lo0, lo1;
	entry_hi_t hi, hi_save;
	int i;

	assert(interrupts_disabled());
	assert(asid != ASID_INVALID);

	hi_save.value = cp0_entry_hi_read();

	for (i = 0; i < TLB_ENTRY_COUNT; i++) {
		cp0_index_write(i);
		tlbr();

		hi.value = cp0_entry_hi_read();

		if (hi.asid == asid) {
			lo0.value = cp0_entry_lo0_read();
			lo1.value = cp0_entry_lo1_read();

			lo0.v = 0;
			lo1.v = 0;

			cp0_entry_lo0_write(lo0.value);
			cp0_entry_lo1_write(lo1.value);

			tlbwi();
		}
	}

	cp0_entry_hi_write(hi_save.value);
}

/** Invalidate TLB entries for the specified page range belonging to the
 * specified address space.
 *
 * @param asid Address space identifier.
 * @param page First page whose TLB entry is to be invalidated.
 * @param cnt Number of entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
	unsigned int i;
	entry_lo_t lo0, lo1;
	entry_hi_t hi, hi_save;
	tlb_index_t index;

	assert(interrupts_disabled());

	if (asid == ASID_INVALID)
		return;

	hi_save.value = cp0_entry_hi_read();

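	/*
	 * Each TLB entry covers a pair of pages, so it suffices to probe
	 * every other page; iterating up to cnt + 1 ensures that the last
	 * page is covered even when the range does not start on an even page.
	 */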
	for (i = 0; i < cnt + 1; i += 2) {
		tlb_prepare_entry_hi(&hi, asid, page + i * PAGE_SIZE);
		cp0_entry_hi_write(hi.value);

		tlbp();
		index.value = cp0_index_read();

		if (!index.p) {
			/*
			 * Entry was found, index register contains valid
			 * index.
			 */
			tlbr();

			lo0.value = cp0_entry_lo0_read();
			lo1.value = cp0_entry_lo1_read();

			lo0.v = 0;
			lo1.v = 0;

			cp0_entry_lo0_write(lo0.value);
			cp0_entry_lo1_write(lo1.value);

			tlbwi();
		}
	}

	cp0_entry_hi_write(hi_save.value);
}

/** @}
 */