source: mainline/kernel/arch/sparc64/src/mm/sun4v/tlb.c

Last change on this file was bab75df6, checked in by Jiri Svoboda <jiri@…>, 7 years ago

Let kernel code get printf via the standard stdio header. Clean up unused includes.

/*
 * Copyright (c) 2005 Jakub Jermar
 * Copyright (c) 2008 Pavel Rimsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_sparc64_mm
 * @{
 */
/** @file
 */

#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/sun4v/hypercall.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/tte.h>
#include <arch/mm/tlb.h>
#include <arch/interrupt.h>
#include <assert.h>
#include <interrupt.h>
#include <arch.h>
#include <stdio.h>
#include <log.h>
#include <typedefs.h>
#include <config.h>
#include <arch/trap/trap.h>
#include <arch/trap/exception.h>
#include <panic.h>
#include <arch/asm.h>
#include <arch/cpu.h>
#include <arch/mm/pagesize.h>
#include <genarch/mm/page_ht.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif

static void itlb_pte_copy(pte_t *);
static void dtlb_pte_copy(pte_t *, bool);

/*
 * The assembly language routine passes a 64-bit parameter to the Data Access
 * MMU Miss and Data Access protection handlers. The parameter encapsulates
 * the virtual address of the faulting page and the faulting context. The most
 * significant 51 bits represent the VA of the faulting page and the least
 * significant 13 bits represent the faulting context. The following macros
 * extract the page and context out of the 64-bit parameter:
 */

/* extracts the VA of the faulting page */
#define DMISS_ADDRESS(page_and_ctx) (((page_and_ctx) >> 13) << 13)

/* extracts the faulting context */
#define DMISS_CONTEXT(page_and_ctx) ((page_and_ctx) & 0x1fff)
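
/*
 * Illustration (values chosen for this comment only): for
 * page_and_ctx == 0x40002005, DMISS_ADDRESS() clears the low 13 bits and
 * yields the page VA 0x40002000, while DMISS_CONTEXT() keeps only those
 * 13 bits and yields the context number 0x0005.
 */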

/**
 * Descriptions of fault types from the MMU fault status area.
 *
 * fault_types[i] contains the description of the error for which the IFT or
 * DFT field of the MMU fault status area is i.
 */
static const char *fault_types[] = {
	"unknown",
	"fast miss",
	"fast protection",
	"MMU miss",
	"invalid RA",
	"privileged violation",
	"protection violation",
	"NFO access",
	"so page/NFO side effect",
	"invalid VA",
	"invalid ASI",
	"nc atomic",
	"privileged action",
	"unknown",
	"unaligned access",
	"invalid page size"
};

/** Array of MMU fault status areas. */
extern mmu_fault_status_area_t mmu_fsas[MAX_NUM_STRANDS];

/*
 * Invalidate all non-locked DTLB and ITLB entries.
 */
void tlb_arch_init(void)
{
	tlb_invalidate_all();
}

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page      Virtual page address.
 * @param frame     Physical frame address.
 * @param pagesize  Page size.
 * @param locked    True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
	tte_data_t data;

	data.value = 0;
	data.v = true;
	data.nfo = false;
	data.ra = frame >> FRAME_WIDTH;
	data.ie = false;
	data.e = false;
	data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
	data.cv = cacheable;
#endif
	data.p = true;
	data.x = false;
	data.w = true;
	data.size = pagesize;

	if (locked) {
		__hypercall_fast4(
		    MMU_MAP_PERM_ADDR, page, 0, data.value, MMU_FLAG_DTLB);
	} else {
		__hypercall_hyperfast(
		    page, ASID_KERNEL, data.value, MMU_FLAG_DTLB, 0,
		    MMU_MAP_ADDR);
	}
}
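
/*
 * Illustrative call (hypothetical addresses, not taken from an actual caller):
 * dtlb_insert_mapping(0x404000, 0x1234000, PAGESIZE_8K, true, true) would ask
 * the hypervisor, via MMU_MAP_PERM_ADDR, for a locked, cacheable, writable 8K
 * mapping of virtual page 0x404000 onto physical frame 0x1234000. With
 * locked == false, the mapping would instead be installed through the
 * MMU_MAP_ADDR fast trap in the kernel context.
 */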

/** Copy PTE to DTLB.
 *
 * @param t  Page Table Entry to be copied.
 * @param ro If true, the entry will be created read-only, regardless of its
 *           w field.
 */
void dtlb_pte_copy(pte_t *t, bool ro)
{
	tte_data_t data;

	data.value = 0;
	data.v = true;
	data.nfo = false;
	data.ra = (t->frame) >> FRAME_WIDTH;
	data.ie = false;
	data.e = false;
	data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
	data.cv = t->c;
#endif
	data.p = t->k;
	data.x = false;
	data.w = ro ? false : t->w;
	data.size = PAGESIZE_8K;

	__hypercall_hyperfast(
	    t->page, t->as->asid, data.value, MMU_FLAG_DTLB, 0, MMU_MAP_ADDR);
}

/** Copy PTE to ITLB.
 *
 * @param t  Page Table Entry to be copied.
 */
void itlb_pte_copy(pte_t *t)
{
	tte_data_t data;

	data.value = 0;
	data.v = true;
	data.nfo = false;
	data.ra = (t->frame) >> FRAME_WIDTH;
	data.ie = false;
	data.e = false;
	data.cp = t->c;
	data.cv = false;
	data.p = t->k;
	data.x = true;
	data.w = false;
	data.size = PAGESIZE_8K;

	__hypercall_hyperfast(
	    t->page, t->as->asid, data.value, MMU_FLAG_ITLB, 0, MMU_MAP_ADDR);
}

/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(unsigned int tt, istate_t *istate)
{
	uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
	pte_t t;

	bool found = page_mapping_find(AS, va, true, &t);
	if (found && PTE_EXECUTABLE(&t)) {
		assert(t.p);

		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into ITLB.
		 */
		t.a = true;
		itlb_pte_copy(&t);
#ifdef CONFIG_TSB
		itsb_pte_copy(&t);
#endif
		page_mapping_update(AS, va, true, &t);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		as_page_fault(va, PF_ACCESS_EXEC, istate);
	}
}

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param tt     Trap type.
 * @param istate Interrupted state saved on the stack.
 */
void fast_data_access_mmu_miss(unsigned int tt, istate_t *istate)
{
	pte_t t;
	uintptr_t va = DMISS_ADDRESS(istate->tlb_tag_access);
	uint16_t ctx = DMISS_CONTEXT(istate->tlb_tag_access);
	as_t *as = AS;

	if (ctx == ASID_KERNEL) {
		if (va == 0) {
			/* NULL access in kernel */
			panic("NULL pointer dereference.");
		} else if (va >= end_of_identity) {
			/* Kernel non-identity */
			as = AS_KERNEL;
		} else {
			panic("Unexpected kernel page fault.");
		}
	}

	bool found = page_mapping_find(as, va, true, &t);
	if (found) {
		assert(t.p);

		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into DTLB.
		 */
		t.a = true;
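		/*
		 * The entry is inserted with the write bit cleared: the first
		 * store to the page will trap to fast_data_access_protection()
		 * below, which, for writable pages, sets the dirty bit and
		 * reinserts the mapping as writable.
		 */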
		dtlb_pte_copy(&t, true);
#ifdef CONFIG_TSB
		dtsb_pte_copy(&t, true);
#endif
		page_mapping_update(as, va, true, &t);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		as_page_fault(va, PF_ACCESS_READ, istate);
	}
}

/** DTLB protection fault handler.
 *
 * @param tt     Trap type.
 * @param istate Interrupted state saved on the stack.
 */
void fast_data_access_protection(unsigned int tt, istate_t *istate)
{
	pte_t t;
	uintptr_t va = DMISS_ADDRESS(istate->tlb_tag_access);
	uint16_t ctx = DMISS_CONTEXT(istate->tlb_tag_access);
	as_t *as = AS;

	if (ctx == ASID_KERNEL)
		as = AS_KERNEL;

	bool found = page_mapping_find(as, va, true, &t);
	if (found && PTE_WRITABLE(&t)) {
		assert(t.p);

		/*
		 * The mapping was found in the software page hash table and is
		 * writable. Demap the old mapping and insert an updated mapping
		 * into DTLB.
		 */
		t.a = true;
		t.d = true;
		mmu_demap_page(va, ctx, MMU_FLAG_DTLB);
		dtlb_pte_copy(&t, false);
#ifdef CONFIG_TSB
		dtsb_pte_copy(&t, false);
#endif
		page_mapping_update(as, va, true, &t);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		as_page_fault(va, PF_ACCESS_WRITE, istate);
	}
}

/*
 * On Niagara this function does not work, as supervisor software is isolated
 * from the TLB by the hypervisor and has no chance to investigate the TLB
 * entries.
 */
void tlb_print(void)
{
	log(LF_ARCH, LVL_WARN, "Operation not possible on Niagara.");
}

/**
 * Describes the exact condition which caused the last DMMU fault.
 */
void describe_dmmu_fault(void)
{
	uint64_t myid;
	__hypercall_fast_ret1(0, 0, 0, 0, 0, CPU_MYID, &myid);

	assert(mmu_fsas[myid].dft < 16);

	printf("condition which caused the fault: %s\n",
	    fault_types[mmu_fsas[myid].dft]);
}

/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
	uint64_t errno = __hypercall_fast3(MMU_DEMAP_ALL, 0, 0,
	    MMU_FLAG_DTLB | MMU_FLAG_ITLB);
	if (errno != HV_EOK)
		panic("Error code = %" PRIu64 ".\n", errno);
}

/** Invalidate all ITLB and DTLB entries that belong to the specified ASID
 * (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
	/* switch to nucleus because we are mapped by the primary context */
	nucleus_enter();

	__hypercall_fast4(MMU_DEMAP_CTX, 0, 0, asid,
	    MMU_FLAG_ITLB | MMU_FLAG_DTLB);

	nucleus_leave();
}

/** Invalidate all ITLB and DTLB entries for the specified page range in the
 * specified address space.
 *
 * @param asid Address Space ID.
 * @param page First page to sweep out of ITLB and DTLB.
 * @param cnt  Number of ITLB and DTLB entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
	unsigned int i;

	/* switch to nucleus because we are mapped by the primary context */
	nucleus_enter();

	for (i = 0; i < cnt; i++) {
		__hypercall_fast5(MMU_DEMAP_PAGE, 0, 0, page + i * PAGE_SIZE,
		    asid, MMU_FLAG_DTLB | MMU_FLAG_ITLB);
	}

	nucleus_leave();
}

/** @}
 */