source: mainline/kernel/genarch/src/mm/page_pt.c

Last change on this file was b169619, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 20 months ago

Deduplicate mem functions

A number of functions are duplicated between the kernel, libc, and
potentially the boot loader as well. The mem*() functions are the first
such offenders. All this duplicated code will be moved to the 'common'
directory.
/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_genarch_mm
 * @{
 */

/**
 * @file
 * @brief Virtual Address Translation for hierarchical 4-level page tables.
 */

#include <assert.h>
#include <genarch/mm/page_pt.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/km.h>
#include <mm/as.h>
#include <arch/mm/page.h>
#include <arch/mm/as.h>
#include <barrier.h>
#include <typedefs.h>
#include <arch/asm.h>
#include <memw.h>
#include <align.h>
#include <macros.h>
#include <bitops.h>

static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
static void pt_mapping_remove(as_t *, uintptr_t);
static bool pt_mapping_find(as_t *, uintptr_t, bool, pte_t *pte);
static void pt_mapping_update(as_t *, uintptr_t, bool, pte_t *pte);
static void pt_mapping_make_global(uintptr_t, size_t);

[61eb2ce2]60const page_mapping_operations_t pt_mapping_operations = {
[6d7ffa65]61 .mapping_insert = pt_mapping_insert,
[8f00329]62 .mapping_remove = pt_mapping_remove,
[c868e2d]63 .mapping_find = pt_mapping_find,
[346b12a2]64 .mapping_update = pt_mapping_update,
[c868e2d]65 .mapping_make_global = pt_mapping_make_global
[6d7ffa65]66};
67
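/*
 * Illustrative sketch (not part of this file's interface): on a 4-level
 * configuration with 512-entry tables and 4 KiB pages -- an amd64-like
 * layout, assumed here purely as an example -- the PTLx_INDEX() macros
 * decompose a virtual address as:
 *
 *    PTL0_INDEX(va)  ==  (va >> 39) & 0x1ff
 *    PTL1_INDEX(va)  ==  (va >> 30) & 0x1ff
 *    PTL2_INDEX(va)  ==  (va >> 21) & 0x1ff
 *    PTL3_INDEX(va)  ==  (va >> 12) & 0x1ff
 *
 * Architectures with fewer hardware levels define the unused middle levels
 * with zero entries, which collapses the corresponding steps of the walk
 * (see the #if (PTLx_ENTRIES != 0) guards below).
 */
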
/** Map page to frame using hierarchical page tables.
 *
 * Map virtual address page to physical address frame
 * using flags.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual address of the page to be mapped.
 * @param frame Physical address of memory frame to which the mapping is done.
 * @param flags Flags to be used for mapping.
 *
 */
void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
    unsigned int flags)
{
    pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);

    assert(page_table_locked(as));

    if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
        pte_t *newpt = (pte_t *)
            PA2KA(frame_alloc(PTL1_FRAMES, FRAME_LOWMEM, PTL1_SIZE - 1));
        memsetb(newpt, PTL1_SIZE, 0);
        SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
        SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page),
            PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
            PAGE_WRITE);
        /*
         * Make sure that a concurrent hardware page table walk or
         * pt_mapping_find() will see the new PTL1 only after it is
         * fully initialized.
         */
        write_barrier();
        SET_PTL1_PRESENT(ptl0, PTL0_INDEX(page));
    }

    pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));

    if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
        pte_t *newpt = (pte_t *)
            PA2KA(frame_alloc(PTL2_FRAMES, FRAME_LOWMEM, PTL2_SIZE - 1));
        memsetb(newpt, PTL2_SIZE, 0);
        SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
        SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page),
            PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
            PAGE_WRITE);
        /*
         * Make the new PTL2 visible only after it is fully initialized.
         */
        write_barrier();
        SET_PTL2_PRESENT(ptl1, PTL1_INDEX(page));
    }

    pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));

    if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
        pte_t *newpt = (pte_t *)
            PA2KA(frame_alloc(PTL3_FRAMES, FRAME_LOWMEM, PTL3_SIZE - 1));
        memsetb(newpt, PTL3_SIZE, 0);
        SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
        SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page),
            PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
            PAGE_WRITE);
        /*
         * Make the new PTL3 visible only after it is fully initialized.
         */
        write_barrier();
        SET_PTL3_PRESENT(ptl2, PTL2_INDEX(page));
    }

    pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

    SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame);
    SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags | PAGE_NOT_PRESENT);
    /*
     * Make the new mapping visible only after it is fully initialized.
     */
    write_barrier();
    SET_FRAME_PRESENT(ptl3, PTL3_INDEX(page));
}

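/*
 * Usage sketch (an assumed illustration, not code from this file): callers
 * do not invoke pt_mapping_insert() directly, but go through the generic
 * mapping interface with the page table lock held, e.g.:
 *
 *    page_table_lock(as, true);
 *    page_mapping_insert(as, virt, phys,
 *        PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE);
 *    page_table_unlock(as, true);
 *
 * page_mapping_insert() then dispatches here through pt_mapping_operations
 * whenever hierarchical page tables are in use.
 */
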
/** Remove mapping of page from hierarchical page tables.
 *
 * Remove any mapping of page within address space as.
 * TLB shootdown should follow in order to make the effects of
 * this call visible.
 *
 * Empty page tables except PTL0 are freed.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual address of the page to be demapped.
 *
 */
void pt_mapping_remove(as_t *as, uintptr_t page)
{
    assert(page_table_locked(as));

    /*
     * First, remove the mapping, if it exists.
     */

    pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
    if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
        return;

    pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
    if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
        return;

    pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
    if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
        return;

    pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

    /*
     * Destroy the mapping.
     * Setting the entry to PAGE_NOT_PRESENT alone is not sufficient:
     * we also need the SET_FRAME_FLAGS() write for possible page table
     * coherence maintenance, at least on ARM.
     */
    // TODO: Fix this inconsistency
    SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), PAGE_NOT_PRESENT);
    memsetb(&ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0);

    /*
     * Second, free all empty tables along the way from PTL3 down to PTL0
     * except those needed for sharing the kernel non-identity mappings.
     */

    /* Check PTL3 */
    bool empty = true;

    unsigned int i;
    for (i = 0; i < PTL3_ENTRIES; i++) {
        if (PTE_VALID(&ptl3[i])) {
            empty = false;
            break;
        }
    }

    if (empty) {
        /*
         * PTL3 is empty.
         * Release the frame and remove PTL3 pointer from the parent
         * table.
         */
#if (PTL2_ENTRIES != 0)
        memsetb(&ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0);
#elif (PTL1_ENTRIES != 0)
        memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
#else
        if (km_is_non_identity(page))
            return;

        memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
#endif
        frame_free(KA2PA((uintptr_t) ptl3), PTL3_FRAMES);
    } else {
        /*
         * PTL3 is not empty.
         * Therefore, there must be a path from PTL0 to PTL3 and
         * thus nothing to free in higher levels.
         */
        return;
    }

    /* Check PTL2, empty is still true */
#if (PTL2_ENTRIES != 0)
    for (i = 0; i < PTL2_ENTRIES; i++) {
        if (PTE_VALID(&ptl2[i])) {
            empty = false;
            break;
        }
    }

    if (empty) {
        /*
         * PTL2 is empty.
         * Release the frame and remove PTL2 pointer from the parent
         * table.
         */
#if (PTL1_ENTRIES != 0)
        memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
#else
        if (km_is_non_identity(page))
            return;

        memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
#endif
        frame_free(KA2PA((uintptr_t) ptl2), PTL2_FRAMES);
    } else {
        /*
         * PTL2 is not empty.
         * Therefore, there must be a path from PTL0 to PTL2 and
         * thus nothing to free in higher levels.
         */
        return;
    }
#endif /* PTL2_ENTRIES != 0 */

    /* Check PTL1, empty is still true */
#if (PTL1_ENTRIES != 0)
    for (i = 0; i < PTL1_ENTRIES; i++) {
        if (PTE_VALID(&ptl1[i])) {
            empty = false;
            break;
        }
    }

    if (empty) {
        /*
         * PTL1 is empty.
         * Release the frame and remove PTL1 pointer from the parent
         * table.
         */
        if (km_is_non_identity(page))
            return;

        memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
        frame_free(KA2PA((uintptr_t) ptl1), PTL1_FRAMES);
    }
#endif /* PTL1_ENTRIES != 0 */
}

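/*
 * Sketch of the caller-side TLB shootdown that the comment above asks for
 * (a hypothetical call site; the real sequences live in the address space
 * management code, not in this file):
 *
 *    ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, virt, 1);
 *    page_mapping_remove(as, virt);
 *    tlb_invalidate_pages(as->asid, virt, 1);
 *    tlb_shootdown_finalize(ipl);
 *
 * Without the shootdown, other processors could keep using stale TLB
 * entries for the demapped page.
 */
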
static pte_t *pt_mapping_find_internal(as_t *as, uintptr_t page, bool nolock)
{
    assert(nolock || page_table_locked(as));

    pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
    if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
        return NULL;

    read_barrier();

    pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
    if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
        return NULL;

#if (PTL1_ENTRIES != 0)
    /*
     * Always read ptl2 only after we are sure it is present.
     */
    read_barrier();
#endif

    pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
    if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
        return NULL;

#if (PTL2_ENTRIES != 0)
    /*
     * Always read ptl3 only after we are sure it is present.
     */
    read_barrier();
#endif

    pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

    return &ptl3[PTL3_INDEX(page)];
}

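/*
 * The read_barrier() calls in pt_mapping_find_internal() pair with the
 * write_barrier() calls in pt_mapping_insert(): the writer publishes a new
 * page table only after fully initializing it, and the reader must not
 * reorder loads from the table before the load of its present bit, lest it
 * observe uninitialized entries.
 */
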
/** Find mapping for virtual page in hierarchical page tables.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual page.
 * @param nolock True if the page tables need not be locked.
 * @param[out] pte Structure that will receive a copy of the found PTE.
 *
 * @return True if the mapping was found, false otherwise.
 */
bool pt_mapping_find(as_t *as, uintptr_t page, bool nolock, pte_t *pte)
{
    pte_t *t = pt_mapping_find_internal(as, page, nolock);
    if (t)
        *pte = *t;
    return t != NULL;
}

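/*
 * Design note (an editorial observation, not a claim from the original
 * authors): pt_mapping_find() hands out a copy of the PTE rather than a
 * pointer into the page table, so the caller never holds a reference into
 * a table frame that pt_mapping_remove() may free once the table becomes
 * empty.
 */
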
/** Update mapping for virtual page in hierarchical page tables.
 *
 * The mapping's validity, presence, frame address, writability and
 * executability must not change; only the remaining attribute bits
 * of the PTE may be updated.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual page.
 * @param nolock True if the page tables need not be locked.
 * @param[in] pte New PTE.
 */
void pt_mapping_update(as_t *as, uintptr_t page, bool nolock, pte_t *pte)
{
    pte_t *t = pt_mapping_find_internal(as, page, nolock);
    if (!t)
        panic("Updating non-existent PTE");

    assert(PTE_VALID(t) == PTE_VALID(pte));
    assert(PTE_PRESENT(t) == PTE_PRESENT(pte));
    assert(PTE_GET_FRAME(t) == PTE_GET_FRAME(pte));
    assert(PTE_WRITABLE(t) == PTE_WRITABLE(pte));
    assert(PTE_EXECUTABLE(t) == PTE_EXECUTABLE(pte));

    *t = *pte;
}

/** Return the size of the region mapped by a single PTL0 entry.
 *
 * @return Size of the region mapped by a single PTL0 entry.
 */
static uintptr_t ptl0_step_get(void)
{
    size_t va_bits;

    va_bits = fnzb(PTL0_ENTRIES) + fnzb(PTL1_ENTRIES) + fnzb(PTL2_ENTRIES) +
        fnzb(PTL3_ENTRIES) + PAGE_WIDTH;

    return 1UL << (va_bits - fnzb(PTL0_ENTRIES));
}

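/*
 * Worked example (assuming an amd64-like configuration where PTL0_ENTRIES
 * == PTL1_ENTRIES == PTL2_ENTRIES == PTL3_ENTRIES == 512 and PAGE_WIDTH ==
 * 12): fnzb(512) == 9, so va_bits == 9 + 9 + 9 + 9 + 12 == 48 and each
 * PTL0 entry maps 1UL << (48 - 9) bytes, i.e. 512 GiB of virtual address
 * space.
 */
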
/** Make the mappings in the given range global across all address spaces.
 *
 * All PTL0 entries in the given range will be mapped to a next level page
 * table. The next level page table will be allocated and cleared.
 *
 * pt_mapping_remove() will never deallocate these page tables even when there
 * are no PTEs in them.
 *
 * @param base Base address corresponding to the first PTL0 entry that will be
 * altered by this function.
 * @param size Size in bytes defining the range of PTL0 entries that will be
 * altered by this function.
 *
 */
void pt_mapping_make_global(uintptr_t base, size_t size)
{
    assert(size > 0);

    uintptr_t ptl0 = PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);
    uintptr_t ptl0_step = ptl0_step_get();
    size_t frames;

#if (PTL1_ENTRIES != 0)
    frames = PTL1_FRAMES;
#elif (PTL2_ENTRIES != 0)
    frames = PTL2_FRAMES;
#else
    frames = PTL3_FRAMES;
#endif

    for (uintptr_t addr = ALIGN_DOWN(base, ptl0_step);
        addr - 1 < base + size - 1;
        addr += ptl0_step) {
        if (GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(addr)) != 0) {
            assert(overlaps(addr, ptl0_step,
                config.identity_base, config.identity_size));

            /*
             * This PTL0 entry also maps the kernel identity region,
             * so it is already global and initialized.
             */
            continue;
        }

        uintptr_t l1 = PA2KA(frame_alloc(frames, FRAME_LOWMEM, 0));
        memsetb((void *) l1, FRAMES2SIZE(frames), 0);
        SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(addr), KA2PA(l1));
        SET_PTL1_FLAGS(ptl0, PTL0_INDEX(addr),
            PAGE_PRESENT | PAGE_USER | PAGE_CACHEABLE |
            PAGE_EXEC | PAGE_WRITE | PAGE_READ);
    }
}

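/*
 * Note on the loop condition "addr - 1 < base + size - 1" above: it is an
 * overflow-safe spelling of "addr < base + size" that still terminates
 * when the range extends to the very top of the address space and
 * base + size wraps around to zero.
 */
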
/** @}
 */