source: mainline/kernel/genarch/src/mm/page_pt.c@ e6a78b9

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since e6a78b9 was 34ab31c0, checked in by Jakub Jermar <jakub@…>, 13 years ago

Cosmetic reordering of page protection bits.

  • Property mode set to 100644
File size: 10.1 KB
RevLine 
[6d7ffa65]1/*
[df4ed85]2 * Copyright (c) 2006 Jakub Jermar
[6d7ffa65]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[f47fd19]29/** @addtogroup genarchmm
[b45c443]30 * @{
31 */
32
[0f27b4c]33/**
[b45c443]34 * @file
[da1bafb]35 * @brief Virtual Address Translation for hierarchical 4-level page tables.
[0f27b4c]36 */
37
[6d7ffa65]38#include <genarch/mm/page_pt.h>
39#include <mm/page.h>
40#include <mm/frame.h>
[c72dc15]41#include <mm/km.h>
[ef67bab]42#include <mm/as.h>
[6d7ffa65]43#include <arch/mm/page.h>
[fc1e4f6]44#include <arch/mm/as.h>
[d99c1d2]45#include <typedefs.h>
[6d7ffa65]46#include <arch/asm.h>
47#include <memstr.h>
[c868e2d]48#include <align.h>
49#include <macros.h>
[caed0279]50#include <bitops.h>
[6d7ffa65]51
[da1bafb]52static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
53static void pt_mapping_remove(as_t *, uintptr_t);
[235e6c7]54static pte_t *pt_mapping_find(as_t *, uintptr_t, bool);
[c868e2d]55static void pt_mapping_make_global(uintptr_t, size_t);
[6d7ffa65]56
[f5935ed]57page_mapping_operations_t pt_mapping_operations = {
[6d7ffa65]58 .mapping_insert = pt_mapping_insert,
[8f00329]59 .mapping_remove = pt_mapping_remove,
[c868e2d]60 .mapping_find = pt_mapping_find,
61 .mapping_make_global = pt_mapping_make_global
[6d7ffa65]62};
63
64/** Map page to frame using hierarchical page tables.
65 *
[9179d0a]66 * Map virtual address page to physical address frame
67 * using flags.
[6d7ffa65]68 *
[da1bafb]69 * @param as Address space to wich page belongs.
70 * @param page Virtual address of the page to be mapped.
[6d7ffa65]71 * @param frame Physical address of memory frame to which the mapping is done.
72 * @param flags Flags to be used for mapping.
[da1bafb]73 *
[6d7ffa65]74 */
[da1bafb]75void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
76 unsigned int flags)
[6d7ffa65]77{
[da1bafb]78 pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
[1d432f9]79
80 ASSERT(page_table_locked(as));
[da1bafb]81
[6d7ffa65]82 if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
[6b326ea1]83 pte_t *newpt = (pte_t *) frame_alloc(PTL1_SIZE,
84 FRAME_LOWMEM | FRAME_KA);
[e32e092]85 memsetb(newpt, FRAME_SIZE << PTL1_SIZE, 0);
[6d7ffa65]86 SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
[6b326ea1]87 SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page),
88 PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
89 PAGE_WRITE);
[6d7ffa65]90 }
[da1bafb]91
92 pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
93
[6d7ffa65]94 if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
[6b326ea1]95 pte_t *newpt = (pte_t *) frame_alloc(PTL2_SIZE,
96 FRAME_LOWMEM | FRAME_KA);
[e32e092]97 memsetb(newpt, FRAME_SIZE << PTL2_SIZE, 0);
[6d7ffa65]98 SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
[6b326ea1]99 SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page),
100 PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
101 PAGE_WRITE);
[6d7ffa65]102 }
[da1bafb]103
104 pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
105
[6d7ffa65]106 if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
[6b326ea1]107 pte_t *newpt = (pte_t *) frame_alloc(PTL3_SIZE,
108 FRAME_LOWMEM | FRAME_KA);
[e32e092]109 memsetb(newpt, FRAME_SIZE << PTL3_SIZE, 0);
[6d7ffa65]110 SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
[6b326ea1]111 SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page),
112 PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
113 PAGE_WRITE);
[6d7ffa65]114 }
[da1bafb]115
116 pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));
117
[6d7ffa65]118 SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame);
119 SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags);
120}
121
/** Remove mapping of page from hierarchical page tables.
 *
 * Remove any mapping of page within address space as.
 * TLB shootdown should follow in order to make effects of
 * this call visible.
 *
 * Empty page tables except PTL0 are freed.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual address of the page to be demapped.
 *
 */
void pt_mapping_remove(as_t *as, uintptr_t page)
{
	ASSERT(page_table_locked(as));

	/*
	 * First, remove the mapping, if it exists.
	 */

	/* Walk the table hierarchy; bail out early if any level is absent. */
	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	/*
	 * Destroy the mapping.
	 * Setting to PAGE_NOT_PRESENT is not sufficient.
	 */
	memsetb(&ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0);

	/*
	 * Second, free all empty tables along the way from PTL3 down to PTL0
	 * except those needed for sharing the kernel non-identity mappings.
	 */

	/* Check PTL3 */
	bool empty = true;

	unsigned int i;
	for (i = 0; i < PTL3_ENTRIES; i++) {
		if (PTE_VALID(&ptl3[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL3 is empty.
		 * Release the frame and remove PTL3 pointer from the parent
		 * table.
		 *
		 * On architectures where an intermediate level collapses
		 * (its *_ENTRIES is 0), the parent entry lives in the next
		 * non-collapsed table above, hence the #if chain below.
		 */
#if (PTL2_ENTRIES != 0)
		memsetb(&ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0);
#elif (PTL1_ENTRIES != 0)
		memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
#else
		/*
		 * The parent is PTL0 itself; PTL0 entries backing the kernel
		 * non-identity mappings are shared and must never be freed
		 * (see pt_mapping_make_global()).
		 */
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
#endif
		frame_free(KA2PA((uintptr_t) ptl3));
	} else {
		/*
		 * PTL3 is not empty.
		 * Therefore, there must be a path from PTL0 to PTL3 and
		 * thus nothing to free in higher levels.
		 *
		 */
		return;
	}

	/* Check PTL2, empty is still true */
#if (PTL2_ENTRIES != 0)
	for (i = 0; i < PTL2_ENTRIES; i++) {
		if (PTE_VALID(&ptl2[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL2 is empty.
		 * Release the frame and remove PTL2 pointer from the parent
		 * table.
		 */
#if (PTL1_ENTRIES != 0)
		memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
#else
		/* Parent is PTL0; keep shared kernel non-identity entries. */
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
#endif
		frame_free(KA2PA((uintptr_t) ptl2));
	} else {
		/*
		 * PTL2 is not empty.
		 * Therefore, there must be a path from PTL0 to PTL2 and
		 * thus nothing to free in higher levels.
		 *
		 */
		return;
	}
#endif /* PTL2_ENTRIES != 0 */

	/* check PTL1, empty is still true */
#if (PTL1_ENTRIES != 0)
	for (i = 0; i < PTL1_ENTRIES; i++) {
		if (PTE_VALID(&ptl1[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL1 is empty.
		 * Release the frame and remove PTL1 pointer from the parent
		 * table.
		 */
		/* Parent is PTL0; keep shared kernel non-identity entries. */
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
		frame_free(KA2PA((uintptr_t) ptl1));
	}
#endif /* PTL1_ENTRIES != 0 */
}
263
[6d7ffa65]264/** Find mapping for virtual page in hierarchical page tables.
265 *
[235e6c7]266 * @param as Address space to which page belongs.
267 * @param page Virtual page.
268 * @param nolock True if the page tables need not be locked.
[6d7ffa65]269 *
[da1bafb]270 * @return NULL if there is no such mapping; entry from PTL3 describing
271 * the mapping otherwise.
272 *
[6d7ffa65]273 */
[235e6c7]274pte_t *pt_mapping_find(as_t *as, uintptr_t page, bool nolock)
[6d7ffa65]275{
[235e6c7]276 ASSERT(nolock || page_table_locked(as));
[1d432f9]277
[da1bafb]278 pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
[6d7ffa65]279 if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
280 return NULL;
[da1bafb]281
282 pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
[6d7ffa65]283 if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
284 return NULL;
[da1bafb]285
286 pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
[6d7ffa65]287 if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
288 return NULL;
[da1bafb]289
290 pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));
291
[6d7ffa65]292 return &ptl3[PTL3_INDEX(page)];
293}
[b45c443]294
[caed0279]295/** Return the size of the region mapped by a single PTL0 entry.
296 *
297 * @return Size of the region mapped by a single PTL0 entry.
298 */
299static uintptr_t ptl0_step_get(void)
300{
301 size_t va_bits;
302
303 va_bits = fnzb(PTL0_ENTRIES) + fnzb(PTL1_ENTRIES) + fnzb(PTL2_ENTRIES) +
304 fnzb(PTL3_ENTRIES) + PAGE_WIDTH;
305
306 return 1UL << (va_bits - fnzb(PTL0_ENTRIES));
307}
308
[c868e2d]309/** Make the mappings in the given range global accross all address spaces.
310 *
311 * All PTL0 entries in the given range will be mapped to a next level page
312 * table. The next level page table will be allocated and cleared.
313 *
314 * pt_mapping_remove() will never deallocate these page tables even when there
315 * are no PTEs in them.
316 *
317 * @param as Address space.
318 * @param base Base address corresponding to the first PTL0 entry that will be
319 * altered by this function.
320 * @param size Size in bytes defining the range of PTL0 entries that will be
321 * altered by this function.
322 */
323void pt_mapping_make_global(uintptr_t base, size_t size)
324{
325 uintptr_t ptl0 = PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);
[caed0279]326 uintptr_t ptl0_step = ptl0_step_get();
[c868e2d]327 size_t order;
328 uintptr_t addr;
329
330#if (PTL1_ENTRIES != 0)
331 order = PTL1_SIZE;
332#elif (PTL2_ENTRIES != 0)
333 order = PTL2_SIZE;
334#else
335 order = PTL3_SIZE;
336#endif
337
[a2789d2]338 ASSERT(size > 0);
[c868e2d]339
[caed0279]340 for (addr = ALIGN_DOWN(base, ptl0_step); addr - 1 < base + size - 1;
341 addr += ptl0_step) {
[c868e2d]342 uintptr_t l1;
343
344 l1 = (uintptr_t) frame_alloc(order, FRAME_KA | FRAME_LOWMEM);
345 memsetb((void *) l1, FRAME_SIZE << order, 0);
346 SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(addr), KA2PA(l1));
347 SET_PTL1_FLAGS(ptl0, PTL0_INDEX(addr),
[34ab31c0]348 PAGE_PRESENT | PAGE_USER | PAGE_CACHEABLE |
349 PAGE_EXEC | PAGE_WRITE | PAGE_READ);
[c868e2d]350 }
351}
352
[f47fd19]353/** @}
[b45c443]354 */
Note: See TracBrowser for help on using the repository browser.