source: mainline/kernel/genarch/src/mm/page_pt.c@ 498ced1

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 498ced1 was a35b458, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

style: Remove trailing whitespace on _all_ lines, including empty ones, for particular file types.

Command used: tools/srepl '\s\+$' '' -- *.c *.h *.py *.sh *.s *.S *.ag

Currently, whitespace on empty lines is very inconsistent.
There are two basic choices: Either remove the whitespace, or keep empty lines
indented to the level of surrounding code. The former is AFAICT more common,
and also much easier to do automatically.

Alternatively, we could write a script for automatic indentation, and use that
instead. However, if such a script exists, it's possible to use the indented
style locally, by having the editor apply relevant conversions on load/save,
without affecting remote repository. IMO, it makes more sense to adopt
the simpler rule.

  • Property mode set to 100644
File size: 12.5 KB
Line 
1/*
2 * Copyright (c) 2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup genarchmm
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Virtual Address Translation for hierarchical 4-level page tables.
36 */
37
38#include <assert.h>
39#include <genarch/mm/page_pt.h>
40#include <mm/page.h>
41#include <mm/frame.h>
42#include <mm/km.h>
43#include <mm/as.h>
44#include <arch/mm/page.h>
45#include <arch/mm/as.h>
46#include <arch/barrier.h>
47#include <typedefs.h>
48#include <arch/asm.h>
49#include <mem.h>
50#include <align.h>
51#include <macros.h>
52#include <bitops.h>
53
54static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
55static void pt_mapping_remove(as_t *, uintptr_t);
56static bool pt_mapping_find(as_t *, uintptr_t, bool, pte_t *pte);
57static void pt_mapping_update(as_t *, uintptr_t, bool, pte_t *pte);
58static void pt_mapping_make_global(uintptr_t, size_t);
59
/** Hierarchical page table mapping operations.
 *
 * This table plugs the 4-level hierarchical page table implementation
 * into the generic page mapping interface (mm/page.h).
 */
page_mapping_operations_t pt_mapping_operations = {
	.mapping_insert = pt_mapping_insert,
	.mapping_remove = pt_mapping_remove,
	.mapping_find = pt_mapping_find,
	.mapping_update = pt_mapping_update,
	.mapping_make_global = pt_mapping_make_global
};
67
68/** Map page to frame using hierarchical page tables.
69 *
70 * Map virtual address page to physical address frame
71 * using flags.
72 *
73 * @param as Address space to wich page belongs.
74 * @param page Virtual address of the page to be mapped.
75 * @param frame Physical address of memory frame to which the mapping is done.
76 * @param flags Flags to be used for mapping.
77 *
78 */
79void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
80 unsigned int flags)
81{
82 pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
83
84 assert(page_table_locked(as));
85
86 if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
87 pte_t *newpt = (pte_t *)
88 PA2KA(frame_alloc(PTL1_FRAMES, FRAME_LOWMEM, PTL1_SIZE - 1));
89 memsetb(newpt, PTL1_SIZE, 0);
90 SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
91 SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page),
92 PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
93 PAGE_WRITE);
94 /*
95 * Make sure that a concurrent hardware page table walk or
96 * pt_mapping_find() will see the new PTL1 only after it is
97 * fully initialized.
98 */
99 write_barrier();
100 SET_PTL1_PRESENT(ptl0, PTL0_INDEX(page));
101 }
102
103 pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
104
105 if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
106 pte_t *newpt = (pte_t *)
107 PA2KA(frame_alloc(PTL2_FRAMES, FRAME_LOWMEM, PTL2_SIZE - 1));
108 memsetb(newpt, PTL2_SIZE, 0);
109 SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
110 SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page),
111 PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
112 PAGE_WRITE);
113 /*
114 * Make the new PTL2 visible only after it is fully initialized.
115 */
116 write_barrier();
117 SET_PTL2_PRESENT(ptl1, PTL1_INDEX(page));
118 }
119
120 pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
121
122 if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
123 pte_t *newpt = (pte_t *)
124 PA2KA(frame_alloc(PTL3_FRAMES, FRAME_LOWMEM, PTL2_SIZE - 1));
125 memsetb(newpt, PTL2_SIZE, 0);
126 SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
127 SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page),
128 PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
129 PAGE_WRITE);
130 /*
131 * Make the new PTL3 visible only after it is fully initialized.
132 */
133 write_barrier();
134 SET_PTL3_PRESENT(ptl2, PTL2_INDEX(page));
135 }
136
137 pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));
138
139 SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame);
140 SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags | PAGE_NOT_PRESENT);
141 /*
142 * Make the new mapping visible only after it is fully initialized.
143 */
144 write_barrier();
145 SET_FRAME_PRESENT(ptl3, PTL3_INDEX(page));
146}
147
/** Remove mapping of page from hierarchical page tables.
 *
 * Remove any mapping of page within address space as.
 * TLB shootdown should follow in order to make effects of
 * this call visible.
 *
 * Empty page tables except PTL0 are freed.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual address of the page to be demapped.
 *
 */
void pt_mapping_remove(as_t *as, uintptr_t page)
{
	assert(page_table_locked(as));

	/*
	 * First, remove the mapping, if it exists.
	 */

	/* Walk the tables; bail out early if the page was never mapped. */
	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	/*
	 * Destroy the mapping.
	 * Setting to PAGE_NOT_PRESENT is not sufficient.
	 * But we need SET_FRAME for possible PT coherence maintenance.
	 * At least on ARM.
	 */
	//TODO: Fix this inconsistency
	SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), PAGE_NOT_PRESENT);
	memsetb(&ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0);

	/*
	 * Second, free all empty tables along the way from PTL3 down to PTL0
	 * except those needed for sharing the kernel non-identity mappings.
	 */

	/* Check PTL3 */
	bool empty = true;

	unsigned int i;
	for (i = 0; i < PTL3_ENTRIES; i++) {
		if (PTE_VALID(&ptl3[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL3 is empty.
		 * Release the frame and remove PTL3 pointer from the parent
		 * table.
		 *
		 * The parent level depends on how many levels this
		 * architecture actually folds away (zero-entry levels
		 * do not exist as separate tables).
		 */
#if (PTL2_ENTRIES != 0)
		memsetb(&ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0);
#elif (PTL1_ENTRIES != 0)
		memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
#else
		/* PTL0 entries backing kernel non-identity mappings stay. */
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
#endif
		frame_free(KA2PA((uintptr_t) ptl3), PTL3_FRAMES);
	} else {
		/*
		 * PTL3 is not empty.
		 * Therefore, there must be a path from PTL0 to PTL3 and
		 * thus nothing to free in higher levels.
		 *
		 */
		return;
	}

	/* Check PTL2, empty is still true */
#if (PTL2_ENTRIES != 0)
	for (i = 0; i < PTL2_ENTRIES; i++) {
		if (PTE_VALID(&ptl2[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL2 is empty.
		 * Release the frame and remove PTL2 pointer from the parent
		 * table.
		 */
#if (PTL1_ENTRIES != 0)
		memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
#else
		/* PTL0 entries backing kernel non-identity mappings stay. */
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
#endif
		frame_free(KA2PA((uintptr_t) ptl2), PTL2_FRAMES);
	} else {
		/*
		 * PTL2 is not empty.
		 * Therefore, there must be a path from PTL0 to PTL2 and
		 * thus nothing to free in higher levels.
		 *
		 */
		return;
	}
#endif /* PTL2_ENTRIES != 0 */

	/* check PTL1, empty is still true */
#if (PTL1_ENTRIES != 0)
	for (i = 0; i < PTL1_ENTRIES; i++) {
		if (PTE_VALID(&ptl1[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL1 is empty.
		 * Release the frame and remove PTL1 pointer from the parent
		 * table.
		 */
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
		frame_free(KA2PA((uintptr_t) ptl1), PTL1_FRAMES);
	}
#endif /* PTL1_ENTRIES != 0 */
}
293
/** Walk the page tables and locate the PTE for a virtual page.
 *
 * Shared implementation backing pt_mapping_find() and
 * pt_mapping_update().
 *
 * @param as     Address space to which page belongs.
 * @param page   Virtual page.
 * @param nolock True if the page tables need not be locked.
 *
 * @return Pointer to the PTL3 entry for page, or NULL if any table on
 *         the path to it is not present.
 */
static pte_t *pt_mapping_find_internal(as_t *as, uintptr_t page, bool nolock)
{
	assert(nolock || page_table_locked(as));

	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

	/*
	 * Pairs with the write_barrier() in pt_mapping_insert(): read the
	 * PTL1 contents only after observing it as present.
	 */
	read_barrier();

	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

#if (PTL1_ENTRIES != 0)
	/*
	 * Always read ptl2 only after we are sure it is present.
	 */
	read_barrier();
#endif

	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

#if (PTL2_ENTRIES != 0)
	/*
	 * Always read ptl3 only after we are sure it is present.
	 */
	read_barrier();
#endif

	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	return &ptl3[PTL3_INDEX(page)];
}
330
331/** Find mapping for virtual page in hierarchical page tables.
332 *
333 * @param as Address space to which page belongs.
334 * @param page Virtual page.
335 * @param nolock True if the page tables need not be locked.
336 * @param[out] pte Structure that will receive a copy of the found PTE.
337 *
338 * @return True if the mapping was found, false otherwise.
339 */
340bool pt_mapping_find(as_t *as, uintptr_t page, bool nolock, pte_t *pte)
341{
342 pte_t *t = pt_mapping_find_internal(as, page, nolock);
343 if (t)
344 *pte = *t;
345 return t != NULL;
346}
347
348/** Update mapping for virtual page in hierarchical page tables.
349 *
350 * @param as Address space to which page belongs.
351 * @param page Virtual page.
352 * @param nolock True if the page tables need not be locked.
353 * @param[in] pte New PTE.
354 */
355void pt_mapping_update(as_t *as, uintptr_t page, bool nolock, pte_t *pte)
356{
357 pte_t *t = pt_mapping_find_internal(as, page, nolock);
358 if (!t)
359 panic("Updating non-existent PTE");
360
361 assert(PTE_VALID(t) == PTE_VALID(pte));
362 assert(PTE_PRESENT(t) == PTE_PRESENT(pte));
363 assert(PTE_GET_FRAME(t) == PTE_GET_FRAME(pte));
364 assert(PTE_WRITABLE(t) == PTE_WRITABLE(pte));
365 assert(PTE_EXECUTABLE(t) == PTE_EXECUTABLE(pte));
366
367 *t = *pte;
368}
369
370/** Return the size of the region mapped by a single PTL0 entry.
371 *
372 * @return Size of the region mapped by a single PTL0 entry.
373 */
374static uintptr_t ptl0_step_get(void)
375{
376 size_t va_bits;
377
378 va_bits = fnzb(PTL0_ENTRIES) + fnzb(PTL1_ENTRIES) + fnzb(PTL2_ENTRIES) +
379 fnzb(PTL3_ENTRIES) + PAGE_WIDTH;
380
381 return 1UL << (va_bits - fnzb(PTL0_ENTRIES));
382}
383
/** Make the mappings in the given range global across all address spaces.
 *
 * All PTL0 entries in the given range will be mapped to a next level page
 * table. The next level page table will be allocated and cleared.
 *
 * pt_mapping_remove() will never deallocate these page tables even when there
 * are no PTEs in them.
 *
 * @param base Base address corresponding to the first PTL0 entry that will be
 * altered by this function.
 * @param size Size in bytes defining the range of PTL0 entries that will be
 * altered by this function.
 *
 */
void pt_mapping_make_global(uintptr_t base, size_t size)
{
	assert(size > 0);

	/* Operate on the kernel address space's PTL0. */
	uintptr_t ptl0 = PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);
	uintptr_t ptl0_step = ptl0_step_get();
	size_t frames;

	/* The next level below PTL0 is the first level with nonzero entries. */
#if (PTL1_ENTRIES != 0)
	frames = PTL1_FRAMES;
#elif (PTL2_ENTRIES != 0)
	frames = PTL2_FRAMES;
#else
	frames = PTL3_FRAMES;
#endif

	/*
	 * NOTE(review): the loop condition appears to be written as
	 * `addr - 1 < base + size - 1` (rather than `addr < base + size`)
	 * to stay correct when base + size wraps around to 0 at the top of
	 * the address space — confirm before simplifying.
	 */
	for (uintptr_t addr = ALIGN_DOWN(base, ptl0_step);
	    addr - 1 < base + size - 1;
	    addr += ptl0_step) {
		if (GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(addr)) != 0) {
			assert(overlaps(addr, ptl0_step,
			    config.identity_base, config.identity_size));

			/*
			 * This PTL0 entry also maps the kernel identity region,
			 * so it is already global and initialized.
			 */
			continue;
		}

		uintptr_t l1 = PA2KA(frame_alloc(frames, FRAME_LOWMEM, 0));
		memsetb((void *) l1, FRAMES2SIZE(frames), 0);
		SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(addr), KA2PA(l1));
		SET_PTL1_FLAGS(ptl0, PTL0_INDEX(addr),
		    PAGE_PRESENT | PAGE_USER | PAGE_CACHEABLE |
		    PAGE_EXEC | PAGE_WRITE | PAGE_READ);
	}
}
437
438/** @}
439 */
Note: See TracBrowser for help on using the repository browser.