source: mainline/kernel/genarch/src/mm/page_pt.c@ 6ac3d27

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 6ac3d27 was f7bb6d1, checked in by Jakub Klama <jakub.klama@…>, 12 years ago

Merge from launchpad branch.

  • Property mode set to 100644
File size: 11.7 KB
Line 
1/*
2 * Copyright (c) 2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup genarchmm
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Virtual Address Translation for hierarchical 4-level page tables.
36 */
37
38#include <genarch/mm/page_pt.h>
39#include <mm/page.h>
40#include <mm/frame.h>
41#include <mm/km.h>
42#include <mm/as.h>
43#include <arch/mm/page.h>
44#include <arch/mm/as.h>
45#include <arch/barrier.h>
46#include <typedefs.h>
47#include <arch/asm.h>
48#include <memstr.h>
49#include <align.h>
50#include <macros.h>
51#include <bitops.h>
52
/* Forward declarations of the hierarchical page table operations. */
static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
static void pt_mapping_remove(as_t *, uintptr_t);
static pte_t *pt_mapping_find(as_t *, uintptr_t, bool);
static void pt_mapping_make_global(uintptr_t, size_t);

/** Operations vector plugging 4-level page tables into the generic
 *  page mapping interface (see mm/page.h). */
page_mapping_operations_t pt_mapping_operations = {
	.mapping_insert = pt_mapping_insert,
	.mapping_remove = pt_mapping_remove,
	.mapping_find = pt_mapping_find,
	.mapping_make_global = pt_mapping_make_global
};
64
65/** Map page to frame using hierarchical page tables.
66 *
67 * Map virtual address page to physical address frame
68 * using flags.
69 *
70 * @param as Address space to wich page belongs.
71 * @param page Virtual address of the page to be mapped.
72 * @param frame Physical address of memory frame to which the mapping is done.
73 * @param flags Flags to be used for mapping.
74 *
75 */
76void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
77 unsigned int flags)
78{
79 //printf("pt_mapping_insert: as=%p, page=0x%08x, frame=0x%08x\n", as, page, frame);
80
81 pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
82
83 //printf("ptl0 = %p\n", ptl0);
84
85 ASSERT(page_table_locked(as));
86
87 if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
88 // printf("allocating ptl1\n");
89
90 pte_t *newpt = (pte_t *) frame_alloc(PTL1_SIZE,
91 FRAME_LOWMEM | FRAME_KA);
92
93 // printf("newpt = %p, index = %d\n", newpt, PTL0_INDEX(page));
94
95 memsetb(newpt, FRAME_SIZE << PTL1_SIZE, 0);
96 SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
97 SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page),
98 PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
99 PAGE_WRITE);
100 /*
101 * Make sure that a concurrent hardware page table walk or
102 * pt_mapping_find() will see the new PTL1 only after it is
103 * fully initialized.
104 */
105 write_barrier();
106 SET_PTL1_PRESENT(ptl0, PTL0_INDEX(page));
107 }
108
109 pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
110
111// printf("ptl1 = %p\n", ptl1);
112
113 if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
114// printf("allocating ptl2\n");
115
116 pte_t *newpt = (pte_t *) frame_alloc(PTL2_SIZE,
117 FRAME_LOWMEM | FRAME_KA);
118
119// printf("newpt = %p, index = %d\n", newpt, PTL1_INDEX(page));
120
121 memsetb(newpt, FRAME_SIZE << PTL2_SIZE, 0);
122 SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
123 SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page),
124 PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
125 PAGE_WRITE);
126 /*
127 * Make the new PTL2 visible only after it is fully initialized.
128 */
129 write_barrier();
130 SET_PTL2_PRESENT(ptl1, PTL1_INDEX(page));
131 }
132
133 pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
134
135// printf("ptl2 = %p\n", ptl2);
136
137 if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
138// printf("allocating ptl3\n");
139
140 pte_t *newpt = (pte_t *) frame_alloc(PTL3_SIZE,
141 FRAME_LOWMEM | FRAME_KA);
142
143// printf("newpt = %p, index = %d\n", newpt, PTL2_INDEX(page));
144
145 memsetb(newpt, FRAME_SIZE << PTL3_SIZE, 0);
146 SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
147 SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page),
148 PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
149 PAGE_WRITE);
150 /*
151 * Make the new PTL3 visible only after it is fully initialized.
152 */
153 write_barrier();
154 SET_PTL3_PRESENT(ptl2, PTL2_INDEX(page));
155 }
156
157 pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));
158
159// printf("ptl3 = %p\n", ptl3);
160
161 SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame);
162 SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags | PAGE_NOT_PRESENT);
163 /*
164 * Make the new mapping visible only after it is fully initialized.
165 */
166 write_barrier();
167 SET_FRAME_PRESENT(ptl3, PTL3_INDEX(page));
168}
169
/** Remove mapping of page from hierarchical page tables.
 *
 * Remove any mapping of page within address space as.
 * TLB shootdown should follow in order to make effects of
 * this call visible.
 *
 * Empty page tables except PTL0 are freed.
 *
 * @param as   Address space to which page belongs.
 * @param page Virtual address of the page to be demapped.
 *
 */
void pt_mapping_remove(as_t *as, uintptr_t page)
{
	ASSERT(page_table_locked(as));

	/*
	 * First, remove the mapping, if it exists.
	 */

	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	/*
	 * Destroy the mapping.
	 * Setting to PAGE_NOT_PRESENT is not sufficient.
	 * But we need SET_FRAME for possible PT coherence maintenance.
	 * At least on ARM.
	 */
	//TODO: Fix this inconsistency
	SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), PAGE_NOT_PRESENT);
	memsetb(&ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0);

	/*
	 * Second, free all empty tables along the way from PTL3 down to PTL0
	 * except those needed for sharing the kernel non-identity mappings.
	 *
	 * NOTE(review): on architectures with fewer than four real levels,
	 * PTL1_ENTRIES and/or PTL2_ENTRIES appear to be 0 and the #if blocks
	 * below collapse accordingly — the parent of PTL3 is then the next
	 * level that actually exists.
	 */

	/* Check PTL3 */
	bool empty = true;

	unsigned int i;
	for (i = 0; i < PTL3_ENTRIES; i++) {
		if (PTE_VALID(&ptl3[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL3 is empty.
		 * Release the frame and remove PTL3 pointer from the parent
		 * table.
		 */
#if (PTL2_ENTRIES != 0)
		memsetb(&ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0);
#elif (PTL1_ENTRIES != 0)
		memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
#else
		/*
		 * PTL0 entries backing kernel non-identity mappings are
		 * shared between address spaces and must never be freed.
		 */
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
#endif
		frame_free(KA2PA((uintptr_t) ptl3));
	} else {
		/*
		 * PTL3 is not empty.
		 * Therefore, there must be a path from PTL0 to PTL3 and
		 * thus nothing to free in higher levels.
		 *
		 */
		return;
	}

	/* Check PTL2, empty is still true */
#if (PTL2_ENTRIES != 0)
	for (i = 0; i < PTL2_ENTRIES; i++) {
		if (PTE_VALID(&ptl2[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL2 is empty.
		 * Release the frame and remove PTL2 pointer from the parent
		 * table.
		 */
#if (PTL1_ENTRIES != 0)
		memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
#else
		/* Do not tear down shared kernel non-identity PTL0 entries. */
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
#endif
		frame_free(KA2PA((uintptr_t) ptl2));
	} else {
		/*
		 * PTL2 is not empty.
		 * Therefore, there must be a path from PTL0 to PTL2 and
		 * thus nothing to free in higher levels.
		 *
		 */
		return;
	}
#endif /* PTL2_ENTRIES != 0 */

	/* check PTL1, empty is still true */
#if (PTL1_ENTRIES != 0)
	for (i = 0; i < PTL1_ENTRIES; i++) {
		if (PTE_VALID(&ptl1[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL1 is empty.
		 * Release the frame and remove PTL1 pointer from the parent
		 * table.
		 */
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
		frame_free(KA2PA((uintptr_t) ptl1));
	}
#endif /* PTL1_ENTRIES != 0 */
}
315
/** Find mapping for virtual page in hierarchical page tables.
 *
 * This walk may run without the page table lock (nolock); correctness
 * then relies on the read barriers below, which pair with the
 * write_barrier() calls in pt_mapping_insert() so that a table's
 * contents are read only after its present bit has been observed.
 *
 * @param as     Address space to which page belongs.
 * @param page   Virtual page.
 * @param nolock True if the page tables need not be locked.
 *
 * @return NULL if there is no such mapping; entry from PTL3 describing
 *         the mapping otherwise.
 *
 */
pte_t *pt_mapping_find(as_t *as, uintptr_t page, bool nolock)
{
	ASSERT(nolock || page_table_locked(as));

	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

	/* Always read ptl1 only after we are sure it is present. */
	read_barrier();

	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

#if (PTL1_ENTRIES != 0)
	/*
	 * Always read ptl2 only after we are sure it is present.
	 */
	read_barrier();
#endif

	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

#if (PTL2_ENTRIES != 0)
	/*
	 * Always read ptl3 only after we are sure it is present.
	 */
	read_barrier();
#endif

	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	return &ptl3[PTL3_INDEX(page)];
}
362
363/** Return the size of the region mapped by a single PTL0 entry.
364 *
365 * @return Size of the region mapped by a single PTL0 entry.
366 */
367static uintptr_t ptl0_step_get(void)
368{
369 size_t va_bits;
370
371 va_bits = fnzb(PTL0_ENTRIES) + fnzb(PTL1_ENTRIES) + fnzb(PTL2_ENTRIES) +
372 fnzb(PTL3_ENTRIES) + PAGE_WIDTH;
373
374 return 1UL << (va_bits - fnzb(PTL0_ENTRIES));
375}
376
/** Make the mappings in the given range global across all address spaces.
 *
 * All PTL0 entries in the given range will be mapped to a next level page
 * table. The next level page table will be allocated and cleared.
 *
 * pt_mapping_remove() will never deallocate these page tables even when there
 * are no PTEs in them.
 *
 * @param base Base address corresponding to the first PTL0 entry that will be
 *             altered by this function.
 * @param size Size in bytes defining the range of PTL0 entries that will be
 *             altered by this function.
 */
void pt_mapping_make_global(uintptr_t base, size_t size)
{
	uintptr_t ptl0 = PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);
	uintptr_t ptl0_step = ptl0_step_get();
	size_t order;
	uintptr_t addr;

	/* Allocation order of the first lower level that actually exists. */
#if (PTL1_ENTRIES != 0)
	order = PTL1_SIZE;
#elif (PTL2_ENTRIES != 0)
	order = PTL2_SIZE;
#else
	order = PTL3_SIZE;
#endif

	ASSERT(size > 0);

	/*
	 * The '- 1' bias on both sides of the comparison keeps the loop
	 * correct even when base + size wraps around to zero at the very
	 * top of the address space.
	 */
	for (addr = ALIGN_DOWN(base, ptl0_step); addr - 1 < base + size - 1;
	    addr += ptl0_step) {
		uintptr_t l1;

		l1 = (uintptr_t) frame_alloc(order, FRAME_KA | FRAME_LOWMEM);
		memsetb((void *) l1, FRAME_SIZE << order, 0);
		SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(addr), KA2PA(l1));
		SET_PTL1_FLAGS(ptl0, PTL0_INDEX(addr),
		    PAGE_PRESENT | PAGE_USER | PAGE_CACHEABLE |
		    PAGE_EXEC | PAGE_WRITE | PAGE_READ);
	}
}
420
421/** @}
422 */
Note: See TracBrowser for help on using the repository browser.