source: mainline/kernel/arch/sparc64/src/mm/sun4v/tlb.c@96b02eb9

Last change on this file since 96b02eb9 was 96b02eb9, checked in by Martin Decky <martin@…>, 15 years ago

more unification of basic types

  • use sysarg_t and native_t (unsigned and signed variant) in both kernel and uspace
  • remove ipcarg_t in favour of sysarg_t

(no change in functionality)

/*
 * Copyright (c) 2005 Jakub Jermar
 * Copyright (c) 2008 Pavel Rimsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/sun4v/hypercall.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/tte.h>
#include <arch/mm/tlb.h>
#include <arch/interrupt.h>
#include <interrupt.h>
#include <arch.h>
#include <print.h>
#include <typedefs.h>
#include <config.h>
#include <arch/trap/trap.h>
#include <arch/trap/exception.h>
#include <panic.h>
#include <arch/asm.h>
#include <arch/cpu.h>
#include <arch/mm/pagesize.h>
#include <genarch/mm/page_ht.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif

static void itlb_pte_copy(pte_t *);
static void dtlb_pte_copy(pte_t *, bool);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *, uintptr_t,
    const char *);
static void do_fast_data_access_mmu_miss_fault(istate_t *, uint64_t,
    const char *);
static void do_fast_data_access_protection_fault(istate_t *,
    uint64_t, const char *);

/*
 * The assembly language routine passes a 64-bit parameter to the Data Access
 * MMU Miss and Data Access Protection handlers. The parameter encapsulates
 * the virtual address of the faulting page and the faulting context: the most
 * significant 51 bits represent the VA of the faulting page and the least
 * significant 13 bits represent the faulting context. The following macros
 * extract the page and context out of the 64-bit parameter:
 */

/* extracts the VA of the faulting page */
#define DMISS_ADDRESS(page_and_ctx) (((page_and_ctx) >> 13) << 13)

/* extracts the faulting context */
#define DMISS_CONTEXT(page_and_ctx) ((page_and_ctx) & 0x1fff)
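
/*
 * Worked example (illustrative value, not from the original source): for
 * page_and_ctx == 0x1000200a, DMISS_ADDRESS() clears the low 13 bits and
 * yields 0x10002000 (the 8K-aligned faulting page), while DMISS_CONTEXT()
 * keeps only those 13 bits and yields 0xa (the faulting context number).
 */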

/**
 * Descriptions of fault types from the MMU fault status area.
 *
 * fault_types[i] contains the description of the fault for which the IFT or
 * DFT field of the MMU fault status area is i.
 */
static const char *fault_types[] = {
	"unknown",
	"fast miss",
	"fast protection",
	"MMU miss",
	"invalid RA",
	"privileged violation",
	"protection violation",
	"NFO access",
	"so page/NFO side effect",
	"invalid VA",
	"invalid ASI",
	"nc atomic",
	"privileged action",
	"unknown",
	"unaligned access",
	"invalid page size"
};

/** Array of MMU fault status areas. */
extern mmu_fault_status_area_t mmu_fsas[MAX_NUM_STRANDS];

/** Invalidate all non-locked DTLB and ITLB entries. */
void tlb_arch_init(void)
{
	tlb_invalidate_all();
}

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page      Virtual page address.
 * @param frame     Physical frame address.
 * @param pagesize  Page size.
 * @param locked    True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
	tte_data_t data;

	data.value = 0;
	data.v = true;
	data.nfo = false;
	data.ra = frame >> FRAME_WIDTH;
	data.ie = false;
	data.e = false;
	data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
	data.cv = cacheable;
#endif
	data.p = true;
	data.x = false;
	data.w = true;
	data.size = pagesize;

	if (locked) {
		__hypercall_fast4(
		    MMU_MAP_PERM_ADDR, page, 0, data.value, MMU_FLAG_DTLB);
	} else {
		__hypercall_hyperfast(
		    page, ASID_KERNEL, data.value, MMU_FLAG_DTLB, 0,
		    MMU_MAP_ADDR);
	}
}
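
/*
 * Illustrative (hypothetical) use: create an unlocked, cacheable 8K kernel
 * mapping of physical frame pa at virtual address va:
 *
 *	dtlb_insert_mapping(va, pa, PAGESIZE_8K, false, true);
 *
 * With locked == true, the mapping would instead be installed as a permanent
 * entry via the MMU_MAP_PERM_ADDR hypercall.
 */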

/** Copy PTE to DTLB.
 *
 * @param t  Page Table Entry to be copied.
 * @param ro If true, the entry will be created read-only, regardless of its
 *           w field.
 */
void dtlb_pte_copy(pte_t *t, bool ro)
{
	tte_data_t data;

	data.value = 0;
	data.v = true;
	data.nfo = false;
	data.ra = (t->frame) >> FRAME_WIDTH;
	data.ie = false;
	data.e = false;
	data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
	data.cv = t->c;
#endif
	data.p = t->k;
	data.x = false;
	data.w = ro ? false : t->w;
	data.size = PAGESIZE_8K;

	__hypercall_hyperfast(
	    t->page, t->as->asid, data.value, MMU_FLAG_DTLB, 0, MMU_MAP_ADDR);
}

/** Copy PTE to ITLB.
 *
 * @param t Page Table Entry to be copied.
 */
void itlb_pte_copy(pte_t *t)
{
	tte_data_t data;

	data.value = 0;
	data.v = true;
	data.nfo = false;
	data.ra = (t->frame) >> FRAME_WIDTH;
	data.ie = false;
	data.e = false;
	data.cp = t->c;
	data.cv = false;
	data.p = t->k;
	data.x = true;
	data.w = false;
	data.size = PAGESIZE_8K;

	__hypercall_hyperfast(
	    t->page, t->as->asid, data.value, MMU_FLAG_ITLB, 0, MMU_MAP_ADDR);
}

/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(sysarg_t unused, istate_t *istate)
{
	uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
	pte_t *t;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);

	if (t && PTE_EXECUTABLE(t)) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into ITLB.
		 */
		t->a = true;
		itlb_pte_copy(t);
#ifdef CONFIG_TSB
		itsb_pte_copy(t);
#endif
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
			do_fast_instruction_access_mmu_miss_fault(istate,
			    istate->tpc, __func__);
		}
	}
}

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param page_and_ctx A 64-bit value describing the fault. The most
 *                     significant 51 bits of the value contain the virtual
 *                     address which caused the fault truncated to the page
 *                     boundary. The least significant 13 bits of the value
 *                     contain the number of the context in which the fault
 *                     occurred.
 * @param istate       Interrupted state saved on the stack.
 */
void fast_data_access_mmu_miss(uint64_t page_and_ctx, istate_t *istate)
{
	pte_t *t;
	uintptr_t va = DMISS_ADDRESS(page_and_ctx);
	uint16_t ctx = DMISS_CONTEXT(page_and_ctx);

	if (ctx == ASID_KERNEL) {
		if (va == 0) {
			/* NULL access in kernel */
			do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
			    __func__);
		}
		do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
		    "Unexpected kernel page fault.");
	}

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into DTLB.
		 */
		t->a = true;
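		/*
		 * The entry is inserted read-only (ro == true) even for
		 * writable pages: the first write then raises a fast data
		 * access protection fault, in whose handler below the dirty
		 * bit is set and a writable entry is installed.
		 */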
		dtlb_pte_copy(t, true);
#ifdef CONFIG_TSB
		dtsb_pte_copy(t, true);
#endif
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
			    __func__);
		}
	}
}

/** DTLB protection fault handler.
 *
 * @param page_and_ctx A 64-bit value describing the fault. The most
 *                     significant 51 bits of the value contain the virtual
 *                     address which caused the fault truncated to the page
 *                     boundary. The least significant 13 bits of the value
 *                     contain the number of the context in which the fault
 *                     occurred.
 * @param istate       Interrupted state saved on the stack.
 */
void fast_data_access_protection(uint64_t page_and_ctx, istate_t *istate)
{
	pte_t *t;
	uintptr_t va = DMISS_ADDRESS(page_and_ctx);
	uint16_t ctx = DMISS_CONTEXT(page_and_ctx);

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t && PTE_WRITABLE(t)) {
		/*
		 * The mapping was found in the software page hash table and is
		 * writable. Demap the old mapping and insert an updated
		 * mapping into DTLB.
		 */
		t->a = true;
		t->d = true;
		mmu_demap_page(va, ctx, MMU_FLAG_DTLB);
		dtlb_pte_copy(t, false);
#ifdef CONFIG_TSB
		dtsb_pte_copy(t, false);
#endif
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
			do_fast_data_access_protection_fault(istate,
			    page_and_ctx, __func__);
		}
	}
}

/*
 * On Niagara this function does not work, as supervisor software is isolated
 * from the TLB by the hypervisor and has no way to inspect the TLB entries.
 */
void tlb_print(void)
{
	printf("Operation not possible on Niagara.\n");
}

void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, uintptr_t va,
    const char *str)
{
	fault_if_from_uspace(istate, "%s, address=%p.", str,
	    (void *) va);
	panic_memtrap(istate, PF_ACCESS_EXEC, va, str);
}

void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    uint64_t page_and_ctx, const char *str)
{
	fault_if_from_uspace(istate, "%s, page=%p (asid=%" PRIu64 ").", str,
	    (void *) DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
	panic_memtrap(istate, PF_ACCESS_UNKNOWN, DMISS_ADDRESS(page_and_ctx),
	    str);
}

void do_fast_data_access_protection_fault(istate_t *istate,
    uint64_t page_and_ctx, const char *str)
{
	fault_if_from_uspace(istate, "%s, page=%p (asid=%" PRIu64 ").", str,
	    (void *) DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
	panic_memtrap(istate, PF_ACCESS_WRITE, DMISS_ADDRESS(page_and_ctx),
	    str);
}

/**
 * Describes the exact condition which caused the last DMMU fault.
 */
void describe_dmmu_fault(void)
{
	uint64_t myid;
	__hypercall_fast_ret1(0, 0, 0, 0, 0, CPU_MYID, &myid);

	/* The DFT field must index into the 16-entry fault_types[] array. */
	ASSERT(mmu_fsas[myid].dft < 16);

	printf("condition which caused the fault: %s\n",
	    fault_types[mmu_fsas[myid].dft]);
}

/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
	uint64_t errno = __hypercall_fast3(MMU_DEMAP_ALL, 0, 0,
	    MMU_FLAG_DTLB | MMU_FLAG_ITLB);
	if (errno != HV_EOK)
		panic("Error code = %" PRIu64 ".\n", errno);
}

/** Invalidate all ITLB and DTLB entries that belong to specified ASID
 * (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
	/* switch to nucleus because we are mapped by the primary context */
	nucleus_enter();

	__hypercall_fast4(MMU_DEMAP_CTX, 0, 0, asid,
	    MMU_FLAG_ITLB | MMU_FLAG_DTLB);

	nucleus_leave();
}

/** Invalidate all ITLB and DTLB entries for specified page range in specified
 * address space.
 *
 * @param asid Address Space ID.
 * @param page First page to sweep out from ITLB and DTLB.
 * @param cnt  Number of ITLB and DTLB entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
	unsigned int i;

	/* switch to nucleus because we are mapped by the primary context */
	nucleus_enter();

	for (i = 0; i < cnt; i++) {
		/* advance one page per iteration so the whole range is demapped */
		__hypercall_fast5(MMU_DEMAP_PAGE, 0, 0, page + i * PAGE_SIZE,
		    asid, MMU_FLAG_DTLB | MMU_FLAG_ITLB);
	}

	nucleus_leave();
}
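
/*
 * Illustrative (hypothetical) use: demap four consecutive 8K pages starting
 * at virtual address va in address space asid from both TLBs:
 *
 *	tlb_invalidate_pages(asid, va, 4);
 */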

/** @}
 */