source: mainline/kernel/genarch/src/mm/page_pt.c@ e6becb9

Last change on this file since e6becb9 was de73242, checked in by Jakub Jermar <jakub@…>, 13 years ago

Comment the purpose of the recently added barriers.

/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genarchmm
 * @{
 */

/**
 * @file
 * @brief Virtual Address Translation for hierarchical 4-level page tables.
 */

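/*
 * Naming convention used throughout this file: PTL0 is the root (topmost)
 * page table and PTL3 is the table whose entries describe the actual
 * page-to-frame mappings. On architectures with fewer than four real levels,
 * the unused middle levels are configured with zero entries and the
 * corresponding code is compiled out (see the PTL1_ENTRIES/PTL2_ENTRIES
 * conditionals below).
 */
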
#include <genarch/mm/page_pt.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/km.h>
#include <mm/as.h>
#include <arch/mm/page.h>
#include <arch/mm/as.h>
#include <arch/barrier.h>
#include <typedefs.h>
#include <arch/asm.h>
#include <memstr.h>
#include <align.h>
#include <macros.h>
#include <bitops.h>

static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
static void pt_mapping_remove(as_t *, uintptr_t);
static pte_t *pt_mapping_find(as_t *, uintptr_t, bool);
static void pt_mapping_make_global(uintptr_t, size_t);

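/*
 * Operations implementing the hierarchical page table backend of the generic
 * mapping interface. Architecture-specific initialization code typically
 * selects this backend by pointing the generic page_mapping_operations
 * pointer at this structure, roughly (illustrative sketch only):
 *
 *	page_mapping_operations = &pt_mapping_operations;
 *
 * after which the generic page mapping wrappers dispatch to the pt_*
 * functions defined below.
 */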
page_mapping_operations_t pt_mapping_operations = {
	.mapping_insert = pt_mapping_insert,
	.mapping_remove = pt_mapping_remove,
	.mapping_find = pt_mapping_find,
	.mapping_make_global = pt_mapping_make_global
};

/** Map page to frame using hierarchical page tables.
 *
 * Map virtual address page to physical address frame
 * using flags.
 *
 * @param as    Address space to which page belongs.
 * @param page  Virtual address of the page to be mapped.
 * @param frame Physical address of memory frame to which the mapping is done.
 * @param flags Flags to be used for mapping.
 *
 */
void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
    unsigned int flags)
{
	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);

	ASSERT(page_table_locked(as));

	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
		pte_t *newpt = (pte_t *) frame_alloc(PTL1_SIZE,
		    FRAME_LOWMEM | FRAME_KA);
		memsetb(newpt, FRAME_SIZE << PTL1_SIZE, 0);
		SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
		SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page),
		    PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
		    PAGE_WRITE);
		/*
		 * Make sure that a concurrent hardware page table walk or
		 * pt_mapping_find() will see the new PTL1 only after it is
		 * fully initialized.
		 */
		write_barrier();
		SET_PTL1_PRESENT(ptl0, PTL0_INDEX(page));
	}

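	/*
	 * The same allocate, initialize, write_barrier(), publish sequence is
	 * repeated below for PTL2 and PTL3. Each write_barrier() pairs with a
	 * read_barrier() in pt_mapping_find(), so a concurrent lookup never
	 * dereferences a table that is not yet fully initialized.
	 */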
	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));

	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
		pte_t *newpt = (pte_t *) frame_alloc(PTL2_SIZE,
		    FRAME_LOWMEM | FRAME_KA);
		memsetb(newpt, FRAME_SIZE << PTL2_SIZE, 0);
		SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
		SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page),
		    PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
		    PAGE_WRITE);
		/*
		 * Make the new PTL2 visible only after it is fully initialized.
		 */
		write_barrier();
		SET_PTL2_PRESENT(ptl1, PTL1_INDEX(page));
	}

	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));

	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
		pte_t *newpt = (pte_t *) frame_alloc(PTL3_SIZE,
		    FRAME_LOWMEM | FRAME_KA);
		memsetb(newpt, FRAME_SIZE << PTL3_SIZE, 0);
		SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
		SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page),
		    PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
		    PAGE_WRITE);
		/*
		 * Make the new PTL3 visible only after it is fully initialized.
		 */
		write_barrier();
		SET_PTL3_PRESENT(ptl2, PTL2_INDEX(page));
	}

	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame);
	SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags | PAGE_NOT_PRESENT);
	/*
	 * Make the new mapping visible only after it is fully initialized.
	 */
	write_barrier();
	SET_FRAME_PRESENT(ptl3, PTL3_INDEX(page));
}

/** Remove mapping of page from hierarchical page tables.
 *
 * Remove any mapping of page within address space as.
 * TLB shootdown should follow in order to make effects of
 * this call visible.
 *
 * Empty page tables except PTL0 are freed.
 *
 * @param as   Address space to which page belongs.
 * @param page Virtual address of the page to be demapped.
 *
 */
void pt_mapping_remove(as_t *as, uintptr_t page)
{
	ASSERT(page_table_locked(as));

	/*
	 * First, remove the mapping, if it exists.
	 */

	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	/*
	 * Destroy the mapping.
	 * Setting the entry to PAGE_NOT_PRESENT is not sufficient: the whole
	 * entry must be zeroed, otherwise PTE_VALID() in the emptiness checks
	 * below could still consider it a valid entry.
	 */
	memsetb(&ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0);

	/*
	 * Second, free all empty tables along the way from PTL3 down to PTL0
	 * except those needed for sharing the kernel non-identity mappings.
	 */

	/* Check PTL3 */
	bool empty = true;

	unsigned int i;
	for (i = 0; i < PTL3_ENTRIES; i++) {
		if (PTE_VALID(&ptl3[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL3 is empty.
		 * Release the frame and remove PTL3 pointer from the parent
		 * table.
		 */
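		/*
		 * Which table is the parent of PTL3 depends on how many of
		 * the four levels the architecture actually implements;
		 * levels configured with zero entries are compiled out, so
		 * the pointer to PTL3 may live in PTL2, PTL1 or directly in
		 * PTL0.
		 */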
#if (PTL2_ENTRIES != 0)
		memsetb(&ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0);
#elif (PTL1_ENTRIES != 0)
		memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
#else
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
#endif
		frame_free(KA2PA((uintptr_t) ptl3));
	} else {
		/*
		 * PTL3 is not empty.
		 * Therefore, there must be a path from PTL0 to PTL3 and
		 * thus nothing to free in higher levels.
		 */
		return;
	}

	/* Check PTL2, empty is still true */
#if (PTL2_ENTRIES != 0)
	for (i = 0; i < PTL2_ENTRIES; i++) {
		if (PTE_VALID(&ptl2[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL2 is empty.
		 * Release the frame and remove PTL2 pointer from the parent
		 * table.
		 */
#if (PTL1_ENTRIES != 0)
		memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
#else
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
#endif
		frame_free(KA2PA((uintptr_t) ptl2));
	} else {
		/*
		 * PTL2 is not empty.
		 * Therefore, there must be a path from PTL0 to PTL2 and
		 * thus nothing to free in higher levels.
		 */
		return;
	}
#endif /* PTL2_ENTRIES != 0 */

	/* Check PTL1, empty is still true */
#if (PTL1_ENTRIES != 0)
	for (i = 0; i < PTL1_ENTRIES; i++) {
		if (PTE_VALID(&ptl1[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL1 is empty.
		 * Release the frame and remove PTL1 pointer from the parent
		 * table.
		 */
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
		frame_free(KA2PA((uintptr_t) ptl1));
	}
#endif /* PTL1_ENTRIES != 0 */
}

/** Find mapping for virtual page in hierarchical page tables.
 *
 * @param as     Address space to which page belongs.
 * @param page   Virtual page.
 * @param nolock True if the page tables need not be locked.
 *
 * @return NULL if there is no such mapping; entry from PTL3 describing
 *         the mapping otherwise.
 *
 */
pte_t *pt_mapping_find(as_t *as, uintptr_t page, bool nolock)
{
	ASSERT(nolock || page_table_locked(as));

	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

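	/*
	 * Always read ptl1 only after we are sure it is present. This
	 * read_barrier() (and the ones below) pairs with the write_barrier()
	 * in pt_mapping_insert() that publishes a newly allocated table only
	 * after it has been fully initialized.
	 */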
	read_barrier();

	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

#if (PTL1_ENTRIES != 0)
	/*
	 * Always read ptl2 only after we are sure it is present.
	 */
	read_barrier();
#endif

	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

#if (PTL2_ENTRIES != 0)
	/*
	 * Always read ptl3 only after we are sure it is present.
	 */
	read_barrier();
#endif

	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	return &ptl3[PTL3_INDEX(page)];
}

/** Return the size of the region mapped by a single PTL0 entry.
 *
 * @return Size of the region mapped by a single PTL0 entry.
 */
static uintptr_t ptl0_step_get(void)
{
	size_t va_bits;

	va_bits = fnzb(PTL0_ENTRIES) + fnzb(PTL1_ENTRIES) + fnzb(PTL2_ENTRIES) +
	    fnzb(PTL3_ENTRIES) + PAGE_WIDTH;

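	/*
	 * va_bits is the total number of virtual address bits translated by
	 * the hierarchy; removing the PTL0 fan-out leaves the span covered by
	 * one PTL0 entry. Purely as an illustration (not tied to any concrete
	 * architecture): with 512 entries at each of the four levels and
	 * 4 KiB pages, va_bits = 9 + 9 + 9 + 9 + 12 = 48 and the step is
	 * 1 << 39 bytes, i.e. 512 GiB per PTL0 entry.
	 */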
	return 1UL << (va_bits - fnzb(PTL0_ENTRIES));
}

/** Make the mappings in the given range global across all address spaces.
 *
 * All PTL0 entries in the given range will be mapped to a next level page
 * table. The next level page table will be allocated and cleared.
 *
 * pt_mapping_remove() will never deallocate these page tables even when there
 * are no PTEs in them.
 *
 * @param base Base address corresponding to the first PTL0 entry that will be
 *             altered by this function.
 * @param size Size in bytes defining the range of PTL0 entries that will be
 *             altered by this function.
 */
void pt_mapping_make_global(uintptr_t base, size_t size)
{
	uintptr_t ptl0 = PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);
	uintptr_t ptl0_step = ptl0_step_get();
	size_t order;
	uintptr_t addr;

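	/*
	 * Pick the allocation order of whichever table level actually sits
	 * directly below PTL0 on this architecture; levels with zero entries
	 * do not exist here and are skipped.
	 */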
#if (PTL1_ENTRIES != 0)
	order = PTL1_SIZE;
#elif (PTL2_ENTRIES != 0)
	order = PTL2_SIZE;
#else
	order = PTL3_SIZE;
#endif

	ASSERT(size > 0);

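	/*
	 * Walk the range one PTL0 entry at a time. The starting address is
	 * aligned down to a PTL0 boundary, and the loop condition is written
	 * as (addr - 1 < base + size - 1) so that it also terminates correctly
	 * when base + size wraps around to zero at the very top of the address
	 * space.
	 */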
	for (addr = ALIGN_DOWN(base, ptl0_step); addr - 1 < base + size - 1;
	    addr += ptl0_step) {
		uintptr_t l1;

		l1 = (uintptr_t) frame_alloc(order, FRAME_KA | FRAME_LOWMEM);
		memsetb((void *) l1, FRAME_SIZE << order, 0);
		SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(addr), KA2PA(l1));
		SET_PTL1_FLAGS(ptl0, PTL0_INDEX(addr),
		    PAGE_PRESENT | PAGE_USER | PAGE_CACHEABLE |
		    PAGE_EXEC | PAGE_WRITE | PAGE_READ);
	}
}

/** @}
 */