source: mainline/kernel/genarch/src/mm/page_pt.c@ 5759975a

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 5759975a was 5df1963, checked in by Martin Decky <martin@…>, 12 years ago

The bitmap frame allocator does not keep track of the sizes of allocated frame blocks.
To avoid memory leaks, the number of allocated frames needs to be passed explicitly during deallocation.

  • Property mode set to 100644
File size: 11.3 KB
Line 
1/*
2 * Copyright (c) 2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup genarchmm
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Virtual Address Translation for hierarchical 4-level page tables.
36 */
37
38#include <genarch/mm/page_pt.h>
39#include <mm/page.h>
40#include <mm/frame.h>
41#include <mm/km.h>
42#include <mm/as.h>
43#include <arch/mm/page.h>
44#include <arch/mm/as.h>
45#include <arch/barrier.h>
46#include <typedefs.h>
47#include <arch/asm.h>
48#include <memstr.h>
49#include <align.h>
50#include <macros.h>
51#include <bitops.h>
52
53static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
54static void pt_mapping_remove(as_t *, uintptr_t);
55static pte_t *pt_mapping_find(as_t *, uintptr_t, bool);
56static void pt_mapping_make_global(uintptr_t, size_t);
57
58page_mapping_operations_t pt_mapping_operations = {
59 .mapping_insert = pt_mapping_insert,
60 .mapping_remove = pt_mapping_remove,
61 .mapping_find = pt_mapping_find,
62 .mapping_make_global = pt_mapping_make_global
63};
64
65/** Map page to frame using hierarchical page tables.
66 *
67 * Map virtual address page to physical address frame
68 * using flags.
69 *
70 * @param as Address space to wich page belongs.
71 * @param page Virtual address of the page to be mapped.
72 * @param frame Physical address of memory frame to which the mapping is done.
73 * @param flags Flags to be used for mapping.
74 *
75 */
76void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
77 unsigned int flags)
78{
79 pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
80
81 ASSERT(page_table_locked(as));
82
83 if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
84 pte_t *newpt = (pte_t *)
85 PA2KA(frame_alloc(PTL1_FRAMES, FRAME_LOWMEM, 0));
86 memsetb(newpt, FRAMES2SIZE(PTL1_FRAMES), 0);
87 SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
88 SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page),
89 PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
90 PAGE_WRITE);
91 /*
92 * Make sure that a concurrent hardware page table walk or
93 * pt_mapping_find() will see the new PTL1 only after it is
94 * fully initialized.
95 */
96 write_barrier();
97 SET_PTL1_PRESENT(ptl0, PTL0_INDEX(page));
98 }
99
100 pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
101
102 if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
103 pte_t *newpt = (pte_t *)
104 PA2KA(frame_alloc(PTL2_FRAMES, FRAME_LOWMEM, 0));
105 memsetb(newpt, FRAMES2SIZE(PTL2_FRAMES), 0);
106 SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
107 SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page),
108 PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
109 PAGE_WRITE);
110 /*
111 * Make the new PTL2 visible only after it is fully initialized.
112 */
113 write_barrier();
114 SET_PTL2_PRESENT(ptl1, PTL1_INDEX(page));
115 }
116
117 pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
118
119 if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
120 pte_t *newpt = (pte_t *)
121 PA2KA(frame_alloc(PTL3_FRAMES, FRAME_LOWMEM, 0));
122 memsetb(newpt, FRAMES2SIZE(PTL3_FRAMES), 0);
123 SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
124 SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page),
125 PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
126 PAGE_WRITE);
127 /*
128 * Make the new PTL3 visible only after it is fully initialized.
129 */
130 write_barrier();
131 SET_PTL3_PRESENT(ptl2, PTL2_INDEX(page));
132 }
133
134 pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));
135
136 SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame);
137 SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags | PAGE_NOT_PRESENT);
138 /*
139 * Make the new mapping visible only after it is fully initialized.
140 */
141 write_barrier();
142 SET_FRAME_PRESENT(ptl3, PTL3_INDEX(page));
143}
144
145/** Remove mapping of page from hierarchical page tables.
146 *
147 * Remove any mapping of page within address space as.
148 * TLB shootdown should follow in order to make effects of
149 * this call visible.
150 *
151 * Empty page tables except PTL0 are freed.
152 *
153 * @param as Address space to wich page belongs.
154 * @param page Virtual address of the page to be demapped.
155 *
156 */
157void pt_mapping_remove(as_t *as, uintptr_t page)
158{
159 ASSERT(page_table_locked(as));
160
161 /*
162 * First, remove the mapping, if it exists.
163 */
164
165 pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
166 if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
167 return;
168
169 pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
170 if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
171 return;
172
173 pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
174 if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
175 return;
176
177 pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));
178
179 /*
180 * Destroy the mapping.
181 * Setting to PAGE_NOT_PRESENT is not sufficient.
182 * But we need SET_FRAME for possible PT coherence maintenance.
183 * At least on ARM.
184 */
185 //TODO: Fix this inconsistency
186 SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), PAGE_NOT_PRESENT);
187 memsetb(&ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0);
188
189 /*
190 * Second, free all empty tables along the way from PTL3 down to PTL0
191 * except those needed for sharing the kernel non-identity mappings.
192 */
193
194 /* Check PTL3 */
195 bool empty = true;
196
197 unsigned int i;
198 for (i = 0; i < PTL3_ENTRIES; i++) {
199 if (PTE_VALID(&ptl3[i])) {
200 empty = false;
201 break;
202 }
203 }
204
205 if (empty) {
206 /*
207 * PTL3 is empty.
208 * Release the frame and remove PTL3 pointer from the parent
209 * table.
210 */
211#if (PTL2_ENTRIES != 0)
212 memsetb(&ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0);
213#elif (PTL1_ENTRIES != 0)
214 memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
215#else
216 if (km_is_non_identity(page))
217 return;
218
219 memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
220#endif
221 frame_free(KA2PA((uintptr_t) ptl3), PTL3_FRAMES);
222 } else {
223 /*
224 * PTL3 is not empty.
225 * Therefore, there must be a path from PTL0 to PTL3 and
226 * thus nothing to free in higher levels.
227 *
228 */
229 return;
230 }
231
232 /* Check PTL2, empty is still true */
233#if (PTL2_ENTRIES != 0)
234 for (i = 0; i < PTL2_ENTRIES; i++) {
235 if (PTE_VALID(&ptl2[i])) {
236 empty = false;
237 break;
238 }
239 }
240
241 if (empty) {
242 /*
243 * PTL2 is empty.
244 * Release the frame and remove PTL2 pointer from the parent
245 * table.
246 */
247#if (PTL1_ENTRIES != 0)
248 memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
249#else
250 if (km_is_non_identity(page))
251 return;
252
253 memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
254#endif
255 frame_free(KA2PA((uintptr_t) ptl2), PTL2_FRAMES);
256 } else {
257 /*
258 * PTL2 is not empty.
259 * Therefore, there must be a path from PTL0 to PTL2 and
260 * thus nothing to free in higher levels.
261 *
262 */
263 return;
264 }
265#endif /* PTL2_ENTRIES != 0 */
266
267 /* check PTL1, empty is still true */
268#if (PTL1_ENTRIES != 0)
269 for (i = 0; i < PTL1_ENTRIES; i++) {
270 if (PTE_VALID(&ptl1[i])) {
271 empty = false;
272 break;
273 }
274 }
275
276 if (empty) {
277 /*
278 * PTL1 is empty.
279 * Release the frame and remove PTL1 pointer from the parent
280 * table.
281 */
282 if (km_is_non_identity(page))
283 return;
284
285 memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
286 frame_free(KA2PA((uintptr_t) ptl1), PTL1_FRAMES);
287 }
288#endif /* PTL1_ENTRIES != 0 */
289}
290
291/** Find mapping for virtual page in hierarchical page tables.
292 *
293 * @param as Address space to which page belongs.
294 * @param page Virtual page.
295 * @param nolock True if the page tables need not be locked.
296 *
297 * @return NULL if there is no such mapping; entry from PTL3 describing
298 * the mapping otherwise.
299 *
300 */
301pte_t *pt_mapping_find(as_t *as, uintptr_t page, bool nolock)
302{
303 ASSERT(nolock || page_table_locked(as));
304
305 pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
306 if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
307 return NULL;
308
309 read_barrier();
310
311 pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
312 if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
313 return NULL;
314
315#if (PTL1_ENTRIES != 0)
316 /*
317 * Always read ptl2 only after we are sure it is present.
318 */
319 read_barrier();
320#endif
321
322 pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
323 if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
324 return NULL;
325
326#if (PTL2_ENTRIES != 0)
327 /*
328 * Always read ptl3 only after we are sure it is present.
329 */
330 read_barrier();
331#endif
332
333 pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));
334
335 return &ptl3[PTL3_INDEX(page)];
336}
337
338/** Return the size of the region mapped by a single PTL0 entry.
339 *
340 * @return Size of the region mapped by a single PTL0 entry.
341 */
342static uintptr_t ptl0_step_get(void)
343{
344 size_t va_bits;
345
346 va_bits = fnzb(PTL0_ENTRIES) + fnzb(PTL1_ENTRIES) + fnzb(PTL2_ENTRIES) +
347 fnzb(PTL3_ENTRIES) + PAGE_WIDTH;
348
349 return 1UL << (va_bits - fnzb(PTL0_ENTRIES));
350}
351
352/** Make the mappings in the given range global accross all address spaces.
353 *
354 * All PTL0 entries in the given range will be mapped to a next level page
355 * table. The next level page table will be allocated and cleared.
356 *
357 * pt_mapping_remove() will never deallocate these page tables even when there
358 * are no PTEs in them.
359 *
360 * @param as Address space.
361 * @param base Base address corresponding to the first PTL0 entry that will be
362 * altered by this function.
363 * @param size Size in bytes defining the range of PTL0 entries that will be
364 * altered by this function.
365 *
366 */
367void pt_mapping_make_global(uintptr_t base, size_t size)
368{
369 ASSERT(size > 0);
370
371 uintptr_t ptl0 = PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);
372 uintptr_t ptl0_step = ptl0_step_get();
373 size_t frames;
374
375#if (PTL1_ENTRIES != 0)
376 frames = PTL1_FRAMES;
377#elif (PTL2_ENTRIES != 0)
378 frames = PTL2_FRAMES;
379#else
380 frames = PTL3_FRAMES;
381#endif
382
383 for (uintptr_t addr = ALIGN_DOWN(base, ptl0_step);
384 addr - 1 < base + size - 1;
385 addr += ptl0_step) {
386 uintptr_t l1 = PA2KA(frame_alloc(frames, FRAME_LOWMEM, 0));
387 memsetb((void *) l1, FRAMES2SIZE(frames), 0);
388 SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(addr), KA2PA(l1));
389 SET_PTL1_FLAGS(ptl0, PTL0_INDEX(addr),
390 PAGE_PRESENT | PAGE_USER | PAGE_CACHEABLE |
391 PAGE_EXEC | PAGE_WRITE | PAGE_READ);
392 }
393}
394
395/** @}
396 */
Note: See TracBrowser for help on using the repository browser.