source: mainline/kernel/arch/sparc64/src/mm/sun4v/tlb.c@3a0a4d8

Last change on this file since 3a0a4d8 was 730ff63, checked in by Jakub Jermar <jakub@…>, 12 years ago

Do not create kernel identity over the entire address space on sun4v.

  • Property mode set to 100644
File size: 10.2 KB
/*
 * Copyright (c) 2005 Jakub Jermar
 * Copyright (c) 2008 Pavel Rimsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/sun4v/hypercall.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/tte.h>
#include <arch/mm/tlb.h>
#include <arch/interrupt.h>
#include <interrupt.h>
#include <arch.h>
#include <print.h>
#include <typedefs.h>
#include <config.h>
#include <arch/trap/trap.h>
#include <arch/trap/exception.h>
#include <panic.h>
#include <arch/asm.h>
#include <arch/cpu.h>
#include <arch/mm/pagesize.h>
#include <genarch/mm/page_ht.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif

static void itlb_pte_copy(pte_t *);
static void dtlb_pte_copy(pte_t *, bool);

/*
 * The assembly language routine passes a 64-bit parameter to the Data Access
 * MMU Miss and Data Access protection handlers. The parameter encapsulates
 * the virtual address of the faulting page and the faulting context. The most
 * significant 51 bits represent the VA of the faulting page and the least
 * significant 13 bits represent the faulting context. The following macros
 * extract the page and context out of the 64-bit parameter:
 */

/* extracts the VA of the faulting page */
#define DMISS_ADDRESS(page_and_ctx) (((page_and_ctx) >> 13) << 13)

/* extracts the faulting context */
#define DMISS_CONTEXT(page_and_ctx) ((page_and_ctx) & 0x1fff)
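
/*
 * Worked example (hypothetical value): for page_and_ctx == 0x0000004002000005,
 * DMISS_ADDRESS() yields the page-aligned VA 0x0000004002000000 and
 * DMISS_CONTEXT() yields context number 5.
 */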

/**
 * Descriptions of fault types from the MMU fault status area.
 *
 * fault_types[i] contains the description of the error for which the IFT or
 * DFT field of the MMU fault status area is i.
 */
static const char *fault_types[] = {
        "unknown",
        "fast miss",
        "fast protection",
        "MMU miss",
        "invalid RA",
        "privileged violation",
        "protection violation",
        "NFO access",
        "so page/NFO side effect",
        "invalid VA",
        "invalid ASI",
        "nc atomic",
        "privileged action",
        "unknown",
        "unaligned access",
        "invalid page size"
};

/** Array of MMU fault status areas. */
extern mmu_fault_status_area_t mmu_fsas[MAX_NUM_STRANDS];

/*
 * Invalidate all non-locked DTLB and ITLB entries.
 */
void tlb_arch_init(void)
{
        tlb_invalidate_all();
}

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page      Virtual page address.
 * @param frame     Physical frame address.
 * @param pagesize  Page size.
 * @param locked    True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
        tte_data_t data;

        data.value = 0;
        data.v = true;
        data.nfo = false;
        data.ra = frame >> FRAME_WIDTH;
        data.ie = false;
        data.e = false;
        data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
        data.cv = cacheable;
#endif
        data.p = true;
        data.x = false;
        data.w = true;
        data.size = pagesize;

        if (locked) {
                __hypercall_fast4(
                    MMU_MAP_PERM_ADDR, page, 0, data.value, MMU_FLAG_DTLB);
        } else {
                __hypercall_hyperfast(
                    page, ASID_KERNEL, data.value, MMU_FLAG_DTLB, 0,
                    MMU_MAP_ADDR);
        }
}
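
/*
 * Usage sketch (illustrative, hypothetical addresses): a permanent,
 * non-cacheable 8K mapping of an I/O frame could be created with
 *
 *     dtlb_insert_mapping(0x0000000040000000, 0x000000001f000000,
 *         PAGESIZE_8K, true, false);
 *
 * which takes the locked branch above and installs the entry via the
 * MMU_MAP_PERM_ADDR hypercall; with locked == false the entry would be
 * inserted as an ordinary (replaceable) translation via MMU_MAP_ADDR.
 */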

/** Copy PTE to DTLB.
 *
 * @param t  Page Table Entry to be copied.
 * @param ro If true, the entry will be created read-only, regardless
 *           of its w field.
 */
void dtlb_pte_copy(pte_t *t, bool ro)
{
        tte_data_t data;

        data.value = 0;
        data.v = true;
        data.nfo = false;
        data.ra = (t->frame) >> FRAME_WIDTH;
        data.ie = false;
        data.e = false;
        data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
        data.cv = t->c;
#endif
        data.p = t->k;
        data.x = false;
        data.w = ro ? false : t->w;
        data.size = PAGESIZE_8K;

        __hypercall_hyperfast(
            t->page, t->as->asid, data.value, MMU_FLAG_DTLB, 0, MMU_MAP_ADDR);
}

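/*
 * Unlike dtlb_pte_copy() above, the ITLB counterpart below always inserts
 * the translation as executable, read-only and with the cv bit cleared.
 */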
/** Copy PTE to ITLB.
 *
 * @param t Page Table Entry to be copied.
 */
void itlb_pte_copy(pte_t *t)
{
        tte_data_t data;

        data.value = 0;
        data.v = true;
        data.nfo = false;
        data.ra = (t->frame) >> FRAME_WIDTH;
        data.ie = false;
        data.e = false;
        data.cp = t->c;
        data.cv = false;
        data.p = t->k;
        data.x = true;
        data.w = false;
        data.size = PAGESIZE_8K;

        __hypercall_hyperfast(
            t->page, t->as->asid, data.value, MMU_FLAG_ITLB, 0, MMU_MAP_ADDR);
}

/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(sysarg_t unused, istate_t *istate)
{
        uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
        pte_t *t;

        t = page_mapping_find(AS, va, true);

        if (t && PTE_EXECUTABLE(t)) {
                /*
                 * The mapping was found in the software page hash table.
                 * Insert it into ITLB.
                 */
                t->a = true;
                itlb_pte_copy(t);
#ifdef CONFIG_TSB
                itsb_pte_copy(t);
#endif
        } else {
                /*
                 * Forward the page fault to the address space page fault
                 * handler.
                 */
                as_page_fault(va, PF_ACCESS_EXEC, istate);
        }
}

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param page_and_ctx A 64-bit value describing the fault. The most
 *                     significant 51 bits of the value contain the virtual
 *                     address which caused the fault truncated to the page
 *                     boundary. The least significant 13 bits of the value
 *                     contain the number of the context in which the fault
 *                     occurred.
 * @param istate       Interrupted state saved on the stack.
 */
void fast_data_access_mmu_miss(uint64_t page_and_ctx, istate_t *istate)
{
        pte_t *t;
        uintptr_t va = DMISS_ADDRESS(page_and_ctx);
        uint16_t ctx = DMISS_CONTEXT(page_and_ctx);
        as_t *as = AS;

        if (ctx == ASID_KERNEL) {
                if (va == 0) {
                        /* NULL access in kernel */
                        panic("NULL pointer dereference.");
                } else if (va >= end_of_identity) {
                        /* Kernel non-identity */
                        as = AS_KERNEL;
                } else {
                        panic("Unexpected kernel page fault.");
                }
        }

        t = page_mapping_find(as, va, true);
        if (t) {
                /*
                 * The mapping was found in the software page hash table.
                 * Insert it into DTLB.
                 */
                t->a = true;
                dtlb_pte_copy(t, true);
#ifdef CONFIG_TSB
                dtsb_pte_copy(t, true);
#endif
        } else {
                /*
                 * Forward the page fault to the address space page fault
                 * handler.
                 */
                as_page_fault(va, PF_ACCESS_READ, istate);
        }
}

/** DTLB protection fault handler.
 *
 * @param page_and_ctx A 64-bit value describing the fault. The most
 *                     significant 51 bits of the value contain the virtual
 *                     address which caused the fault truncated to the page
 *                     boundary. The least significant 13 bits of the value
 *                     contain the number of the context in which the fault
 *                     occurred.
 * @param istate       Interrupted state saved on the stack.
 */
void fast_data_access_protection(uint64_t page_and_ctx, istate_t *istate)
{
        pte_t *t;
        uintptr_t va = DMISS_ADDRESS(page_and_ctx);
        uint16_t ctx = DMISS_CONTEXT(page_and_ctx);
        as_t *as = AS;

        if (ctx == ASID_KERNEL)
                as = AS_KERNEL;

        t = page_mapping_find(as, va, true);
        if (t && PTE_WRITABLE(t)) {
                /*
                 * The mapping was found in the software page hash table and is
                 * writable. Demap the old mapping and insert an updated mapping
                 * into DTLB.
                 */
                t->a = true;
                t->d = true;
                mmu_demap_page(va, ctx, MMU_FLAG_DTLB);
                dtlb_pte_copy(t, false);
#ifdef CONFIG_TSB
                dtsb_pte_copy(t, false);
#endif
        } else {
                /*
                 * Forward the page fault to the address space page fault
                 * handler.
                 */
                as_page_fault(va, PF_ACCESS_WRITE, istate);
        }
}

/*
 * On Niagara this function does not work, as supervisor software is isolated
 * from the TLB by the hypervisor and has no chance to investigate the TLB
 * entries.
 */
void tlb_print(void)
{
        printf("Operation not possible on Niagara.\n");
}

/**
 * Describes the exact condition which caused the last DMMU fault.
 */
void describe_dmmu_fault(void)
{
        uint64_t myid;
        __hypercall_fast_ret1(0, 0, 0, 0, 0, CPU_MYID, &myid);

        ASSERT(mmu_fsas[myid].dft < 16);

        printf("condition which caused the fault: %s\n",
            fault_types[mmu_fsas[myid].dft]);
}

/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
        uint64_t errno = __hypercall_fast3(MMU_DEMAP_ALL, 0, 0,
            MMU_FLAG_DTLB | MMU_FLAG_ITLB);
        if (errno != HV_EOK)
                panic("Error code = %" PRIu64 ".\n", errno);
}

/** Invalidate all ITLB and DTLB entries that belong to specified ASID
 * (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
        /* switch to nucleus because we are mapped by the primary context */
        nucleus_enter();

        __hypercall_fast4(MMU_DEMAP_CTX, 0, 0, asid,
            MMU_FLAG_ITLB | MMU_FLAG_DTLB);

        nucleus_leave();
}

/** Invalidate all ITLB and DTLB entries for specified page range in specified
 * address space.
 *
 * @param asid Address Space ID.
 * @param page First page to sweep out from ITLB and DTLB.
 * @param cnt  Number of ITLB and DTLB entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
        unsigned int i;

        /* switch to nucleus because we are mapped by the primary context */
        nucleus_enter();

        for (i = 0; i < cnt; i++) {
                __hypercall_fast5(MMU_DEMAP_PAGE, 0, 0, page + i * PAGE_SIZE,
                    asid, MMU_FLAG_DTLB | MMU_FLAG_ITLB);
        }

        nucleus_leave();
}
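
/*
 * Illustrative example (hypothetical values): tlb_invalidate_pages(asid, va, 3)
 * issues three MMU_DEMAP_PAGE hypercalls, one for each of va, va + PAGE_SIZE
 * and va + 2 * PAGE_SIZE, demapping the pages from both the ITLB and the DTLB
 * of the given context.
 */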

/** @}
 */