source: mainline/kernel/arch/mips32/src/mm/tlb.c@ a35b458

Last change on this file since a35b458 was a35b458, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

style: Remove trailing whitespace on _all_ lines, including empty ones, for particular file types.

Command used: tools/srepl '\s\+$' '' -- *.c *.h *.py *.sh *.s *.S *.ag

Currently, whitespace on empty lines is very inconsistent.
There are two basic choices: Either remove the whitespace, or keep empty lines
indented to the level of surrounding code. The former is AFAICT more common,
and also much easier to do automatically.

Alternatively, we could write a script for automatic indentation, and use that
instead. However, if such a script exists, it's possible to use the indented
style locally, by having the editor apply the relevant conversions on load/save,
without affecting the remote repository. IMO, it makes more sense to adopt
the simpler rule.

  • Property mode set to 100644
File size: 9.5 KB
/*
 * Copyright (c) 2003-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup mips32mm
 * @{
 */
/** @file
 */

#include <arch/mm/tlb.h>
#include <mm/asid.h>
#include <mm/tlb.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/cp0.h>
#include <panic.h>
#include <arch.h>
#include <synch/mutex.h>
#include <print.h>
#include <log.h>
#include <assert.h>
#include <align.h>
#include <interrupt.h>
#include <symtab.h>

#define PFN_SHIFT 12
#define VPN_SHIFT 12

#define ADDR2HI_VPN(a) ((a) >> VPN_SHIFT)
#define ADDR2HI_VPN2(a) (ADDR2HI_VPN((a)) >> 1)

#define HI_VPN2ADDR(vpn) ((vpn) << VPN_SHIFT)
#define HI_VPN22ADDR(vpn2) (HI_VPN2ADDR(vpn2) << 1)

#define LO_PFN2ADDR(pfn) ((pfn) << PFN_SHIFT)

#define BANK_SELECT_BIT(a) (((a) >> PAGE_WIDTH) & 1)

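/*
 * Illustrative note (not part of the original source): a worked example of
 * the address arithmetic above, assuming 16 KiB pages (PAGE_WIDTH == 14),
 * as suggested by the use of TLB_PAGE_MASK_16K below. For the address
 * 0x00404000:
 *
 *   ADDR2HI_VPN(0x00404000)     == 0x00404000 >> 12       == 0x404
 *   ADDR2HI_VPN2(0x00404000)    == 0x404 >> 1              == 0x202
 *   BANK_SELECT_BIT(0x00404000) == (0x00404000 >> 14) & 1  == 1
 *
 * Each TLB entry maps an even/odd pair of pages; the bank select bit
 * decides whether a frame is written to EntryLo0 (even page) or EntryLo1
 * (odd page), as done in the exception handlers below.
 */
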
/** Initialize TLB.
 *
 * Invalidate all entries and mark wired entries.
 */
void tlb_arch_init(void)
{
    int i;

    cp0_pagemask_write(TLB_PAGE_MASK_16K);
    cp0_entry_hi_write(0);
    cp0_entry_lo0_write(0);
    cp0_entry_lo1_write(0);

    /* Clear and initialize TLB. */

    for (i = 0; i < TLB_ENTRY_COUNT; i++) {
        cp0_index_write(i);
        tlbwi();
    }

    /*
     * The kernel is going to make use of some wired
     * entries (e.g. mapping kernel stacks in kseg3).
     */
    cp0_wired_write(TLB_WIRED);
}

/** Process TLB Refill Exception.
 *
 * @param istate Interrupted register context.
 */
void tlb_refill(istate_t *istate)
{
    entry_lo_t lo;
    uintptr_t badvaddr;
    pte_t pte;

    badvaddr = cp0_badvaddr_read();

    bool found = page_mapping_find(AS, badvaddr, true, &pte);
    if (found && pte.p) {
        /*
         * Record access to PTE.
         */
        pte.a = 1;

        tlb_prepare_entry_lo(&lo, pte.g, pte.p, pte.d,
            pte.cacheable, pte.pfn);

        page_mapping_update(AS, badvaddr, true, &pte);

        /*
         * New entry is to be inserted into TLB
         */
        if (BANK_SELECT_BIT(badvaddr) == 0) {
            cp0_entry_lo0_write(lo.value);
            cp0_entry_lo1_write(0);
        } else {
            cp0_entry_lo0_write(0);
            cp0_entry_lo1_write(lo.value);
        }
        cp0_pagemask_write(TLB_PAGE_MASK_16K);
        tlbwr();
        return;
    }

    (void) as_page_fault(badvaddr, PF_ACCESS_READ, istate);
}

/** Process TLB Invalid Exception.
 *
 * @param istate Interrupted register context.
 */
void tlb_invalid(istate_t *istate)
{
    entry_lo_t lo;
    tlb_index_t index;
    uintptr_t badvaddr;
    pte_t pte;

    /*
     * Locate the faulting entry in TLB.
     */
    tlbp();
    index.value = cp0_index_read();

#if defined(PROCESSOR_4Kc)
    /*
     * This can happen on a 4Kc when Status.EXL is 1 and there is a TLB miss.
     * EXL is 1 when interrupts are disabled. The combination of a TLB miss
     * and disabled interrupts is possible in copy_to/from_uspace().
     */
    if (index.p) {
        tlb_refill(istate);
        return;
    }
#endif

    assert(!index.p);

    badvaddr = cp0_badvaddr_read();

    bool found = page_mapping_find(AS, badvaddr, true, &pte);
    if (found && pte.p) {
        /*
         * Read the faulting TLB entry.
         */
        tlbr();

        /*
         * Record access to PTE.
         */
        pte.a = 1;

        tlb_prepare_entry_lo(&lo, pte.g, pte.p, pte.d,
            pte.cacheable, pte.pfn);

        page_mapping_update(AS, badvaddr, true, &pte);

        /*
         * The entry is to be updated in TLB.
         */
        if (BANK_SELECT_BIT(badvaddr) == 0)
            cp0_entry_lo0_write(lo.value);
        else
            cp0_entry_lo1_write(lo.value);
        tlbwi();
        return;
    }

    (void) as_page_fault(badvaddr, PF_ACCESS_READ, istate);
}

/** Process TLB Modified Exception.
 *
 * @param istate Interrupted register context.
 */
void tlb_modified(istate_t *istate)
{
    entry_lo_t lo;
    tlb_index_t index;
    uintptr_t badvaddr;
    pte_t pte;

    badvaddr = cp0_badvaddr_read();

    /*
     * Locate the faulting entry in TLB.
     */
    tlbp();
    index.value = cp0_index_read();

    /*
     * Emit warning if the entry is not in TLB.
     *
     * We do not assert on this because this could be a manifestation of
     * an emulator bug, such as QEMU Bug #1128935:
     * https://bugs.launchpad.net/qemu/+bug/1128935
     */
    if (index.p) {
        log(LF_ARCH, LVL_WARN, "%s: TLBP failed in exception handler (badvaddr=%#"
            PRIxn ", ASID=%d).\n", __func__, badvaddr,
            AS ? AS->asid : -1);
        return;
    }

    bool found = page_mapping_find(AS, badvaddr, true, &pte);
    if (found && pte.p && pte.w) {
        /*
         * Read the faulting TLB entry.
         */
        tlbr();

        /*
         * Record access and write to PTE.
         */
        pte.a = 1;
        pte.d = 1;

        tlb_prepare_entry_lo(&lo, pte.g, pte.p, pte.w,
            pte.cacheable, pte.pfn);

        page_mapping_update(AS, badvaddr, true, &pte);

        /*
         * The entry is to be updated in TLB.
         */
        if (BANK_SELECT_BIT(badvaddr) == 0)
            cp0_entry_lo0_write(lo.value);
        else
            cp0_entry_lo1_write(lo.value);
        tlbwi();
        return;
    }

    (void) as_page_fault(badvaddr, PF_ACCESS_WRITE, istate);
}

void
tlb_prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable,
    uintptr_t pfn)
{
    lo->value = 0;
    lo->g = g;
    lo->v = v;
    lo->d = d;
    lo->c = cacheable ? PAGE_CACHEABLE_EXC_WRITE : PAGE_UNCACHED;
    lo->pfn = pfn;
}

void tlb_prepare_entry_hi(entry_hi_t *hi, asid_t asid, uintptr_t addr)
{
    hi->value = 0;
    hi->vpn2 = ADDR2HI_VPN2(ALIGN_DOWN(addr, PAGE_SIZE));
    hi->asid = asid;
}

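/*
 * Illustrative sketch (not part of the original file, hence guarded by
 * "#if 0"): how the two helpers above can be combined to fill one TLB slot
 * by hand. The function name and the choice of slot 0 are assumptions made
 * for the example only.
 */
#if 0
static void example_install_entry(asid_t asid, uintptr_t vaddr, uintptr_t pfn)
{
    entry_hi_t hi;
    entry_lo_t lo;

    /* Build the EntryHi/EntryLo register images for the mapping. */
    tlb_prepare_entry_hi(&hi, asid, vaddr);
    tlb_prepare_entry_lo(&lo, false, true, true, true, pfn);

    cp0_pagemask_write(TLB_PAGE_MASK_16K);
    cp0_entry_hi_write(hi.value);

    /* Place the frame into the even or odd half of the pair. */
    if (BANK_SELECT_BIT(vaddr) == 0) {
        cp0_entry_lo0_write(lo.value);
        cp0_entry_lo1_write(0);
    } else {
        cp0_entry_lo0_write(0);
        cp0_entry_lo1_write(lo.value);
    }

    /* Write the entry into a fixed slot selected via the Index register. */
    cp0_index_write(0);
    tlbwi();
}
#endif
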
/** Print contents of TLB. */
void tlb_print(void)
{
    page_mask_t mask, mask_save;
    entry_lo_t lo0, lo0_save, lo1, lo1_save;
    entry_hi_t hi, hi_save;
    unsigned int i;

    hi_save.value = cp0_entry_hi_read();
    lo0_save.value = cp0_entry_lo0_read();
    lo1_save.value = cp0_entry_lo1_read();
    mask_save.value = cp0_pagemask_read();

    printf("[nr] [asid] [vpn2 ] [mask] [gvdc] [pfn ]\n");

    for (i = 0; i < TLB_ENTRY_COUNT; i++) {
        cp0_index_write(i);
        tlbr();

        mask.value = cp0_pagemask_read();
        hi.value = cp0_entry_hi_read();
        lo0.value = cp0_entry_lo0_read();
        lo1.value = cp0_entry_lo1_read();

        printf("%-4u %-6u %0#10x %-#6x %1u%1u%1u%1u %0#10x\n",
            i, hi.asid, HI_VPN22ADDR(hi.vpn2), mask.mask,
            lo0.g, lo0.v, lo0.d, lo0.c, LO_PFN2ADDR(lo0.pfn));
        printf(" %1u%1u%1u%1u %0#10x\n",
            lo1.g, lo1.v, lo1.d, lo1.c, LO_PFN2ADDR(lo1.pfn));
    }

    cp0_entry_hi_write(hi_save.value);
    cp0_entry_lo0_write(lo0_save.value);
    cp0_entry_lo1_write(lo1_save.value);
    cp0_pagemask_write(mask_save.value);
}

/** Invalidate all non-wired TLB entries. */
void tlb_invalidate_all(void)
{
    entry_lo_t lo0, lo1;
    entry_hi_t hi_save;
    int i;

    assert(interrupts_disabled());

    hi_save.value = cp0_entry_hi_read();

    for (i = TLB_WIRED; i < TLB_ENTRY_COUNT; i++) {
        cp0_index_write(i);
        tlbr();

        lo0.value = cp0_entry_lo0_read();
        lo1.value = cp0_entry_lo1_read();

        lo0.v = 0;
        lo1.v = 0;

        cp0_entry_lo0_write(lo0.value);
        cp0_entry_lo1_write(lo1.value);

        tlbwi();
    }

    cp0_entry_hi_write(hi_save.value);
}

/** Invalidate all TLB entries belonging to specified address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
    entry_lo_t lo0, lo1;
    entry_hi_t hi, hi_save;
    int i;

    assert(interrupts_disabled());
    assert(asid != ASID_INVALID);

    hi_save.value = cp0_entry_hi_read();

    for (i = 0; i < TLB_ENTRY_COUNT; i++) {
        cp0_index_write(i);
        tlbr();

        hi.value = cp0_entry_hi_read();

        if (hi.asid == asid) {
            lo0.value = cp0_entry_lo0_read();
            lo1.value = cp0_entry_lo1_read();

            lo0.v = 0;
            lo1.v = 0;

            cp0_entry_lo0_write(lo0.value);
            cp0_entry_lo1_write(lo1.value);

            tlbwi();
        }
    }

    cp0_entry_hi_write(hi_save.value);
}

/** Invalidate TLB entries for the specified page range belonging to the
 * specified address space.
 *
 * @param asid Address space identifier.
 * @param page First page whose TLB entry is to be invalidated.
 * @param cnt Number of pages to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
    unsigned int i;
    entry_lo_t lo0, lo1;
    entry_hi_t hi, hi_save;
    tlb_index_t index;

    assert(interrupts_disabled());

    if (asid == ASID_INVALID)
        return;

    hi_save.value = cp0_entry_hi_read();

    /*
     * Each TLB entry maps an even/odd pair of pages, so probing every
     * other page of the range is sufficient.
     */
    for (i = 0; i < cnt + 1; i += 2) {
        tlb_prepare_entry_hi(&hi, asid, page + i * PAGE_SIZE);
        cp0_entry_hi_write(hi.value);

        tlbp();
        index.value = cp0_index_read();

        if (!index.p) {
            /*
             * Entry was found, index register contains valid
             * index.
             */
            tlbr();

            lo0.value = cp0_entry_lo0_read();
            lo1.value = cp0_entry_lo1_read();

            lo0.v = 0;
            lo1.v = 0;

            cp0_entry_lo0_write(lo0.value);
            cp0_entry_lo1_write(lo1.value);

            tlbwi();
        }
    }

    cp0_entry_hi_write(hi_save.value);
}

/** @}
 */