source: mainline/kernel/genarch/src/mm/page_pt.c@0949b7a

Last change on this file since 0949b7a was caed0279, checked in by Jakub Jermar <jakub@…>, 14 years ago

Fix computation of the address increment between two PTL0 entries.

The previous computation divided the theoretical address space size
(either 2^32 or 2^64 bytes) by the number of PTL0 entries. This,
however, gives wrong results on some 64-bit architectures that use
smaller virtual address spaces. For example, amd64 has only a 48-bit
virtual address space, so the computation needs to take this into
account.
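
A minimal worked example of the fixed computation (assuming amd64's usual
layout of 512 entries at each of the four table levels and 4 KiB pages,
matching ptl0_step_get() below; the numbers are illustrative):

    old: step = 2^64 / 512             = 2^55  (wrong; amd64 maps only 2^48 bytes)
    new: va_bits = 9 + 9 + 9 + 9 + 12  = 48
         step    = 2^(48 - 9)          = 2^39  (512 GiB per PTL0 entry)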

/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genarchmm
 * @{
 */

/**
 * @file
 * @brief Virtual Address Translation for hierarchical 4-level page tables.
 */

#include <genarch/mm/page_pt.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/km.h>
#include <mm/as.h>
#include <arch/mm/page.h>
#include <arch/mm/as.h>
#include <typedefs.h>
#include <arch/asm.h>
#include <memstr.h>
#include <align.h>
#include <macros.h>
#include <bitops.h>

static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
static void pt_mapping_remove(as_t *, uintptr_t);
static pte_t *pt_mapping_find(as_t *, uintptr_t, bool);
static void pt_mapping_make_global(uintptr_t, size_t);

page_mapping_operations_t pt_mapping_operations = {
	.mapping_insert = pt_mapping_insert,
	.mapping_remove = pt_mapping_remove,
	.mapping_find = pt_mapping_find,
	.mapping_make_global = pt_mapping_make_global
};

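/*
 * Note: these functions are normally not called directly. The generic
 * page_mapping_insert(), page_mapping_remove() and page_mapping_find()
 * wrappers in the generic mm/page.c dispatch through the global
 * page_mapping_operations pointer, which architectures using hierarchical
 * page tables point at pt_mapping_operations above.
 */
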
/** Map page to frame using hierarchical page tables.
 *
 * Map virtual address page to physical address frame
 * using flags.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual address of the page to be mapped.
 * @param frame Physical address of memory frame to which the mapping is done.
 * @param flags Flags to be used for mapping.
 *
 */
void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
    unsigned int flags)
{
	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);

	ASSERT(page_table_locked(as));

	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
		/* Allocate and zero a missing PTL1 on demand. */
		pte_t *newpt = (pte_t *) frame_alloc(PTL1_SIZE,
		    FRAME_LOWMEM | FRAME_KA);
		memsetb(newpt, FRAME_SIZE << PTL1_SIZE, 0);
		SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
		SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page),
		    PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
		    PAGE_WRITE);
	}

	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));

	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
		/* Likewise for a missing PTL2. */
		pte_t *newpt = (pte_t *) frame_alloc(PTL2_SIZE,
		    FRAME_LOWMEM | FRAME_KA);
		memsetb(newpt, FRAME_SIZE << PTL2_SIZE, 0);
		SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
		SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page),
		    PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
		    PAGE_WRITE);
	}

	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));

	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
		/* Likewise for a missing PTL3. */
		pte_t *newpt = (pte_t *) frame_alloc(PTL3_SIZE,
		    FRAME_LOWMEM | FRAME_KA);
		memsetb(newpt, FRAME_SIZE << PTL3_SIZE, 0);
		SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
		SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page),
		    PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
		    PAGE_WRITE);
	}

	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame);
	SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags);
}
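
/*
 * A sketch of typical use through the generic layer (names from mm/page.h;
 * the flag combination is chosen purely for illustration):
 *
 *	page_table_lock(as, true);
 *	page_mapping_insert(as, page, frame,
 *	    PAGE_PRESENT | PAGE_USER | PAGE_CACHEABLE | PAGE_WRITE);
 *	page_table_unlock(as, true);
 */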

/** Remove mapping of page from hierarchical page tables.
 *
 * Remove any mapping of page within address space as.
 * TLB shootdown should follow in order to make effects of
 * this call visible.
 *
 * Empty page tables except PTL0 are freed.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual address of the page to be demapped.
 *
 */
void pt_mapping_remove(as_t *as, uintptr_t page)
{
	ASSERT(page_table_locked(as));

	/*
	 * First, remove the mapping, if it exists.
	 */

	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	/*
	 * Destroy the mapping.
	 * Setting to PAGE_NOT_PRESENT is not sufficient.
	 */
	memsetb(&ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0);

	/*
	 * Second, free all empty tables along the way from PTL3 down to PTL0
	 * except those needed for sharing the kernel non-identity mappings.
	 */

	/* Check PTL3. */
	bool empty = true;

	unsigned int i;
	for (i = 0; i < PTL3_ENTRIES; i++) {
		if (PTE_VALID(&ptl3[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL3 is empty.
		 * Release the frame and remove PTL3 pointer from the parent
		 * table.
		 */
#if (PTL2_ENTRIES != 0)
		memsetb(&ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0);
#elif (PTL1_ENTRIES != 0)
		memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
#else
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
#endif
		frame_free(KA2PA((uintptr_t) ptl3));
	} else {
		/*
		 * PTL3 is not empty.
		 * Therefore, there must be a path from PTL0 to PTL3 and
		 * thus nothing to free in higher levels.
		 */
		return;
	}

	/* Check PTL2; empty is still true. */
#if (PTL2_ENTRIES != 0)
	for (i = 0; i < PTL2_ENTRIES; i++) {
		if (PTE_VALID(&ptl2[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL2 is empty.
		 * Release the frame and remove PTL2 pointer from the parent
		 * table.
		 */
#if (PTL1_ENTRIES != 0)
		memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
#else
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
#endif
		frame_free(KA2PA((uintptr_t) ptl2));
	} else {
		/*
		 * PTL2 is not empty.
		 * Therefore, there must be a path from PTL0 to PTL2 and
		 * thus nothing to free in higher levels.
		 */
		return;
	}
#endif /* PTL2_ENTRIES != 0 */

	/* Check PTL1; empty is still true. */
#if (PTL1_ENTRIES != 0)
	for (i = 0; i < PTL1_ENTRIES; i++) {
		if (PTE_VALID(&ptl1[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL1 is empty.
		 * Release the frame and remove PTL1 pointer from the parent
		 * table.
		 */
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
		frame_free(KA2PA((uintptr_t) ptl1));
	}
#endif /* PTL1_ENTRIES != 0 */
}
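
/*
 * The TLB shootdown mentioned above is the caller's responsibility. A
 * hedged sketch of the usual sequence, assuming the shootdown API from
 * mm/tlb.h (exact signatures may differ between revisions):
 *
 *	ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, page, 1);
 *	page_mapping_remove(as, page);
 *	tlb_invalidate_pages(as->asid, page, 1);
 *	tlb_shootdown_finalize(ipl);
 */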

/** Find mapping for virtual page in hierarchical page tables.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual page.
 * @param nolock True if the page tables need not be locked.
 *
 * @return NULL if there is no such mapping; entry from PTL3 describing
 *         the mapping otherwise.
 *
 */
pte_t *pt_mapping_find(as_t *as, uintptr_t page, bool nolock)
{
	ASSERT(nolock || page_table_locked(as));

	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	return &ptl3[PTL3_INDEX(page)];
}
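
/*
 * A sketch of a lookup through the generic wrapper (the caller must still
 * test the returned entry, since the PTL3 slot itself may be invalid;
 * PTE_PRESENT() and PTE_GET_FRAME() come from the arch page table layer):
 *
 *	pte_t *pte = page_mapping_find(as, page, false);
 *	if ((pte != NULL) && PTE_PRESENT(pte))
 *		(void) PTE_GET_FRAME(pte);
 */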

/** Return the size of the region mapped by a single PTL0 entry.
 *
 * @return Size of the region mapped by a single PTL0 entry.
 */
static uintptr_t ptl0_step_get(void)
{
	size_t va_bits;

	/*
	 * Each PTLx_ENTRIES is a power of two, so fnzb() (index of the most
	 * significant set bit) gives the number of index bits consumed by
	 * that level; together with the page offset bits this is the width
	 * of the architecture's actual virtual address space.
	 */
	va_bits = fnzb(PTL0_ENTRIES) + fnzb(PTL1_ENTRIES) + fnzb(PTL2_ENTRIES) +
	    fnzb(PTL3_ENTRIES) + PAGE_WIDTH;

	return 1UL << (va_bits - fnzb(PTL0_ENTRIES));
}

/** Make the mappings in the given range global across all address spaces.
 *
 * All PTL0 entries in the given range will be mapped to a next level page
 * table. The next level page table will be allocated and cleared.
 *
 * pt_mapping_remove() will never deallocate these page tables even when there
 * are no PTEs in them.
 *
 * @param base Base address corresponding to the first PTL0 entry that will be
 *             altered by this function.
 * @param size Size in bytes defining the range of PTL0 entries that will be
 *             altered by this function.
 */
void pt_mapping_make_global(uintptr_t base, size_t size)
{
	uintptr_t ptl0 = PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);
	uintptr_t ptl0_step = ptl0_step_get();
	size_t order;
	uintptr_t addr;

#if (PTL1_ENTRIES != 0)
	order = PTL1_SIZE;
#elif (PTL2_ENTRIES != 0)
	order = PTL2_SIZE;
#else
	order = PTL3_SIZE;
#endif

	ASSERT(size > 0);

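	/*
	 * Note on the loop condition below: comparing (addr - 1) with
	 * (base + size - 1) instead of addr with (base + size) keeps the
	 * test correct even when base + size wraps around to zero at the
	 * very top of the address space.
	 */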
	for (addr = ALIGN_DOWN(base, ptl0_step); addr - 1 < base + size - 1;
	    addr += ptl0_step) {
		uintptr_t l1;

		l1 = (uintptr_t) frame_alloc(order, FRAME_KA | FRAME_LOWMEM);
		memsetb((void *) l1, FRAME_SIZE << order, 0);
		SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(addr), KA2PA(l1));
		SET_PTL1_FLAGS(ptl0, PTL0_INDEX(addr),
		    PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
		    PAGE_WRITE);
	}
}

/** @}
 */