source: mainline/kernel/genarch/src/mm/page_pt.c @ 346b12a2

Last change on this file since 346b12a2 was 346b12a2, checked in by Jakub Jermar <jakub@…>, 9 years ago

Add page_mapping_update()

page_mapping_update() can be used to safely update the accessed and dirty
bits of a PTE in the actual page tables.

  • Property mode set to 100644
File size: 12.5 KB
/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genarchmm
 * @{
 */

/**
 * @file
 * @brief Virtual Address Translation for hierarchical 4-level page tables.
 */

#include <genarch/mm/page_pt.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/km.h>
#include <mm/as.h>
#include <arch/mm/page.h>
#include <arch/mm/as.h>
#include <arch/barrier.h>
#include <typedefs.h>
#include <arch/asm.h>
#include <memstr.h>
#include <align.h>
#include <macros.h>
#include <bitops.h>

static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
static void pt_mapping_remove(as_t *, uintptr_t);
static bool pt_mapping_find(as_t *, uintptr_t, bool, pte_t *pte);
static void pt_mapping_update(as_t *, uintptr_t, bool, pte_t *pte);
static void pt_mapping_make_global(uintptr_t, size_t);

page_mapping_operations_t pt_mapping_operations = {
	.mapping_insert = pt_mapping_insert,
	.mapping_remove = pt_mapping_remove,
	.mapping_find = pt_mapping_find,
	.mapping_update = pt_mapping_update,
	.mapping_make_global = pt_mapping_make_global
};
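
/*
 * The operations above are not called directly. The generic mapping layer
 * in mm/page.c dispatches through its page_mapping_operations pointer,
 * which points to pt_mapping_operations on ports that use hierarchical
 * page tables. A minimal sketch of that dispatch, assuming the generic
 * wrapper looks roughly like this:
 *
 *	void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
 *	    unsigned int flags)
 *	{
 *		ASSERT(page_mapping_operations);
 *		ASSERT(page_mapping_operations->mapping_insert);
 *		page_mapping_operations->mapping_insert(as, page, frame, flags);
 *	}
 */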

/** Map page to frame using hierarchical page tables.
 *
 * Map virtual address page to physical address frame
 * using flags.
 *
 * @param as    Address space to which page belongs.
 * @param page  Virtual address of the page to be mapped.
 * @param frame Physical address of memory frame to which the mapping is done.
 * @param flags Flags to be used for mapping.
 *
 */
void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
    unsigned int flags)
{
	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);

	ASSERT(page_table_locked(as));

	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
		pte_t *newpt = (pte_t *)
		    PA2KA(frame_alloc(PTL1_FRAMES, FRAME_LOWMEM, PTL1_SIZE - 1));
		memsetb(newpt, PTL1_SIZE, 0);
		SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
		SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page),
		    PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
		    PAGE_WRITE);
		/*
		 * Make sure that a concurrent hardware page table walk or
		 * pt_mapping_find() will see the new PTL1 only after it is
		 * fully initialized.
		 */
		write_barrier();
		SET_PTL1_PRESENT(ptl0, PTL0_INDEX(page));
	}

	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));

	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
		pte_t *newpt = (pte_t *)
		    PA2KA(frame_alloc(PTL2_FRAMES, FRAME_LOWMEM, PTL2_SIZE - 1));
		memsetb(newpt, PTL2_SIZE, 0);
		SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
		SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page),
		    PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
		    PAGE_WRITE);
		/*
		 * Make the new PTL2 visible only after it is fully initialized.
		 */
		write_barrier();
		SET_PTL2_PRESENT(ptl1, PTL1_INDEX(page));
	}

	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));

	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
		pte_t *newpt = (pte_t *)
		    PA2KA(frame_alloc(PTL3_FRAMES, FRAME_LOWMEM, PTL3_SIZE - 1));
		memsetb(newpt, PTL3_SIZE, 0);
		SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
		SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page),
		    PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
		    PAGE_WRITE);
		/*
		 * Make the new PTL3 visible only after it is fully initialized.
		 */
		write_barrier();
		SET_PTL3_PRESENT(ptl2, PTL2_INDEX(page));
	}

	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame);
	SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags | PAGE_NOT_PRESENT);
	/*
	 * Make the new mapping visible only after it is fully initialized.
	 */
	write_barrier();
	SET_FRAME_PRESENT(ptl3, PTL3_INDEX(page));
}
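
/*
 * Illustrative use (a hedged sketch, not part of this file): a caller is
 * expected to hold the page table lock and go through the generic wrapper,
 * e.g.:
 *
 *	page_table_lock(as, true);
 *	page_mapping_insert(as, virt, phys,
 *	    PAGE_PRESENT | PAGE_USER | PAGE_READ | PAGE_WRITE |
 *	    PAGE_CACHEABLE);
 *	page_table_unlock(as, true);
 *
 * Here virt and phys are placeholders for a page-aligned virtual address
 * and a frame-aligned physical address.
 */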

/** Remove mapping of page from hierarchical page tables.
 *
 * Remove any mapping of page within address space as.
 * TLB shootdown should follow in order to make effects of
 * this call visible.
 *
 * Empty page tables except PTL0 are freed.
 *
 * @param as   Address space to which page belongs.
 * @param page Virtual address of the page to be demapped.
 *
 */
void pt_mapping_remove(as_t *as, uintptr_t page)
{
	ASSERT(page_table_locked(as));

	/*
	 * First, remove the mapping, if it exists.
	 */

	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	/*
	 * Destroy the mapping.
	 * Merely setting the PTE to PAGE_NOT_PRESENT is not sufficient, but
	 * the SET_FRAME_FLAGS() call is still needed for possible page table
	 * coherence maintenance, at least on ARM.
	 */
	// TODO: Fix this inconsistency
	SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), PAGE_NOT_PRESENT);
	memsetb(&ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0);

	/*
	 * Second, free all empty tables along the way from PTL3 down to PTL0
	 * except those needed for sharing the kernel non-identity mappings.
	 */

	/* Check PTL3 */
	bool empty = true;

	unsigned int i;
	for (i = 0; i < PTL3_ENTRIES; i++) {
		if (PTE_VALID(&ptl3[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL3 is empty.
		 * Release the frame and remove PTL3 pointer from the parent
		 * table.
		 */
#if (PTL2_ENTRIES != 0)
		memsetb(&ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0);
#elif (PTL1_ENTRIES != 0)
		memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
#else
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
#endif
		frame_free(KA2PA((uintptr_t) ptl3), PTL3_FRAMES);
	} else {
		/*
		 * PTL3 is not empty.
		 * Therefore, there must be a path from PTL0 to PTL3 and
		 * thus nothing to free in higher levels.
		 */
		return;
	}

	/* Check PTL2, empty is still true */
#if (PTL2_ENTRIES != 0)
	for (i = 0; i < PTL2_ENTRIES; i++) {
		if (PTE_VALID(&ptl2[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL2 is empty.
		 * Release the frame and remove PTL2 pointer from the parent
		 * table.
		 */
#if (PTL1_ENTRIES != 0)
		memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
#else
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
#endif
		frame_free(KA2PA((uintptr_t) ptl2), PTL2_FRAMES);
	} else {
		/*
		 * PTL2 is not empty.
		 * Therefore, there must be a path from PTL0 to PTL2 and
		 * thus nothing to free in higher levels.
		 */
		return;
	}
#endif /* PTL2_ENTRIES != 0 */

	/* Check PTL1, empty is still true */
#if (PTL1_ENTRIES != 0)
	for (i = 0; i < PTL1_ENTRIES; i++) {
		if (PTE_VALID(&ptl1[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL1 is empty.
		 * Release the frame and remove PTL1 pointer from the parent
		 * table.
		 */
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
		frame_free(KA2PA((uintptr_t) ptl1), PTL1_FRAMES);
	}
#endif /* PTL1_ENTRIES != 0 */
}
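
/*
 * Illustrative removal sequence (a hedged sketch; the shootdown API lives
 * outside this file): to make the removal visible to all processors, a
 * caller proceeds roughly along these lines:
 *
 *	page_table_lock(as, true);
 *	ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, virt, 1);
 *	page_mapping_remove(as, virt);
 *	tlb_invalidate_pages(as->asid, virt, 1);
 *	tlb_shootdown_finalize(ipl);
 *	page_table_unlock(as, true);
 */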

static pte_t *pt_mapping_find_internal(as_t *as, uintptr_t page, bool nolock)
{
	ASSERT(nolock || page_table_locked(as));

	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

	read_barrier();

	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

#if (PTL1_ENTRIES != 0)
	/*
	 * Always read ptl2 only after we are sure it is present.
	 */
	read_barrier();
#endif

	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

#if (PTL2_ENTRIES != 0)
	/*
	 * Always read ptl3 only after we are sure it is present.
	 */
	read_barrier();
#endif

	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	return &ptl3[PTL3_INDEX(page)];
}
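
/*
 * The read_barrier() calls in pt_mapping_find_internal() pair with the
 * write_barrier() calls in pt_mapping_insert(): the inserting side makes
 * a fully initialized table visible before setting its present bit, so
 * the (possibly lock-free) lookup side must not read a table address
 * until after it has observed the present bit.
 */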

/** Find mapping for virtual page in hierarchical page tables.
 *
 * @param as        Address space to which page belongs.
 * @param page      Virtual page.
 * @param nolock    True if the page tables need not be locked.
 * @param[out] pte  Structure that will receive a copy of the found PTE.
 *
 * @return True if the mapping was found, false otherwise.
 */
bool pt_mapping_find(as_t *as, uintptr_t page, bool nolock, pte_t *pte)
{
	pte_t *t = pt_mapping_find_internal(as, page, nolock);
	if (t)
		*pte = *t;
	return t != NULL;
}
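
/*
 * Illustrative use (a hedged sketch): a page fault handler can take a
 * snapshot of a PTE without holding the page table lock, assuming the
 * generic page_mapping_find() wrapper with the same signature:
 *
 *	pte_t pte;
 *	if (page_mapping_find(AS, badvaddr, true, &pte) && PTE_PRESENT(&pte)) {
 *		(work with the snapshot; the in-table PTE may change meanwhile)
 *	}
 *
 * badvaddr is a placeholder for the faulting virtual address.
 */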

/** Update mapping for virtual page in hierarchical page tables.
 *
 * The new PTE may only differ from the current one in the accessed and
 * dirty bits; the frame address and protection bits must stay the same,
 * as checked by the assertions below.
 *
 * @param as       Address space to which page belongs.
 * @param page     Virtual page.
 * @param nolock   True if the page tables need not be locked.
 * @param[in] pte  New PTE.
 */
void pt_mapping_update(as_t *as, uintptr_t page, bool nolock, pte_t *pte)
{
	pte_t *t = pt_mapping_find_internal(as, page, nolock);
	if (!t)
		panic("Updating non-existent PTE");

	ASSERT(PTE_VALID(t) == PTE_VALID(pte));
	ASSERT(PTE_PRESENT(t) == PTE_PRESENT(pte));
	ASSERT(PTE_GET_FRAME(t) == PTE_GET_FRAME(pte));
	ASSERT(PTE_WRITABLE(t) == PTE_WRITABLE(pte));
	ASSERT(PTE_EXECUTABLE(t) == PTE_EXECUTABLE(pte));

	*t = *pte;
}
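
/*
 * Illustrative use (a hedged sketch, assuming an architecture whose pte_t
 * exposes software-visible accessed/dirty bits; the field name below is
 * assumed for illustration):
 *
 *	pte_t pte;
 *	if (page_mapping_find(as, page, true, &pte)) {
 *		pte.accessed = 1;
 *		page_mapping_update(as, page, true, &pte);
 *	}
 *
 * The assertions in pt_mapping_update() ensure that such a caller cannot
 * silently change the frame address or the protection bits.
 */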

/** Return the size of the region mapped by a single PTL0 entry.
 *
 * @return Size of the region mapped by a single PTL0 entry.
 */
static uintptr_t ptl0_step_get(void)
{
	size_t va_bits;

	va_bits = fnzb(PTL0_ENTRIES) + fnzb(PTL1_ENTRIES) + fnzb(PTL2_ENTRIES) +
	    fnzb(PTL3_ENTRIES) + PAGE_WIDTH;

	return 1UL << (va_bits - fnzb(PTL0_ENTRIES));
}
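
/*
 * Worked example (not specific to any single port): with four levels of
 * 512 entries each and 4 KiB pages, va_bits = 9 + 9 + 9 + 9 + 12 = 48,
 * so one PTL0 entry maps 1UL << (48 - 9) bytes, i.e. 512 GiB.
 */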

/** Make the mappings in the given range global across all address spaces.
 *
 * All PTL0 entries in the given range will be mapped to a next level page
 * table. The next level page table will be allocated and cleared.
 *
 * pt_mapping_remove() will never deallocate these page tables even when there
 * are no PTEs in them.
 *
 * @param base Base address corresponding to the first PTL0 entry that will be
 *             altered by this function.
 * @param size Size in bytes defining the range of PTL0 entries that will be
 *             altered by this function.
 *
 */
void pt_mapping_make_global(uintptr_t base, size_t size)
{
	ASSERT(size > 0);

	uintptr_t ptl0 = PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);
	uintptr_t ptl0_step = ptl0_step_get();
	size_t frames;

#if (PTL1_ENTRIES != 0)
	frames = PTL1_FRAMES;
#elif (PTL2_ENTRIES != 0)
	frames = PTL2_FRAMES;
#else
	frames = PTL3_FRAMES;
#endif

	/*
	 * The loop bound is written as addr - 1 < base + size - 1 so that the
	 * comparison stays correct even when base + size wraps around to zero
	 * at the very top of the address space.
	 */
	for (uintptr_t addr = ALIGN_DOWN(base, ptl0_step);
	    addr - 1 < base + size - 1;
	    addr += ptl0_step) {
		if (GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(addr))) {
			ASSERT(overlaps(addr, ptl0_step,
			    config.identity_base, config.identity_size));

			/*
			 * This PTL0 entry also maps the kernel identity region,
			 * so it is already global and initialized.
			 */
			continue;
		}

		uintptr_t l1 = PA2KA(frame_alloc(frames, FRAME_LOWMEM, 0));
		memsetb((void *) l1, FRAMES2SIZE(frames), 0);
		SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(addr), KA2PA(l1));
		SET_PTL1_FLAGS(ptl0, PTL0_INDEX(addr),
		    PAGE_PRESENT | PAGE_USER | PAGE_CACHEABLE |
		    PAGE_EXEC | PAGE_WRITE | PAGE_READ);
	}
}
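
/*
 * Illustrative use (a hedged sketch): this is expected to be reached via
 * the generic page_mapping_make_global() wrapper when the kernel sets up
 * a non-identity virtual address span, e.g.:
 *
 *	page_mapping_make_global(span_base, span_size);
 *
 * where span_base and span_size are placeholders for the span's bounds.
 * Because the second-level tables are pre-allocated in the kernel PTL0,
 * address spaces created later can share them by copying the kernel PTL0
 * entries, and pt_mapping_remove() will never free them.
 */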

/** @}
 */