source: mainline/kernel/genarch/src/mm/page_pt.c@ a7f7ed12

Last change on this file since a7f7ed12 was e943ecf, checked in by Jakub Jermar <jakub@…>, 13 years ago

Add read_barrier()s to pt_mapping_find().

This is to prevent a rather hypothetical scenario in which the
architecture uses a PTE format with the present bit in a different cache
line than that of the frame address field, and reorders the load of the
present bit after the load of the frame address despite the control
dependency between the two loads.

Most architectures are known not to reorder control-dependent loads, but
some newer variants of arm32 do.

Note that a read memory barrier would also have to be used in every
lock-free caller of page_mapping_find() to order the read of the present
bit before the read of the rest of the PTE content. These callers are
fortunately limited to architecture-dependent TLB-miss handlers, as generic
code always uses the locked variant. Since arm32 has hardware-walked page
tables, it does not call page_mapping_find() and thus nothing has to be
changed there.
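
To make the hazard concrete, here is a minimal sketch of the lock-free
reader pattern the barrier protects. The PTE layout and the names
example_pte_t and example_lookup are hypothetical, for illustration only;
read_barrier() stands for the kernel's own primitive:

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical PTE whose present bit and frame address end up in
 * different cache lines. The writer stores 'frame' first, issues a
 * write barrier, and only then sets 'present'.
 */
typedef struct {
	bool present;
	uintptr_t frame;
} example_pte_t;

extern void read_barrier(void);

uintptr_t example_lookup(volatile example_pte_t *pte)
{
	if (!pte->present)
		return 0;

	/*
	 * Without this barrier, a CPU that reorders control-dependent
	 * loads may have loaded pte->frame before pte->present and could
	 * return a stale frame address for an entry that was only just
	 * being published.
	 */
	read_barrier();

	return pte->frame;
}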

/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genarchmm
 * @{
 */

/**
 * @file
 * @brief Virtual Address Translation for hierarchical 4-level page tables.
 */

#include <genarch/mm/page_pt.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/km.h>
#include <mm/as.h>
#include <arch/mm/page.h>
#include <arch/mm/as.h>
#include <arch/barrier.h>
#include <typedefs.h>
#include <arch/asm.h>
#include <memstr.h>
#include <align.h>
#include <macros.h>
#include <bitops.h>

static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
static void pt_mapping_remove(as_t *, uintptr_t);
static pte_t *pt_mapping_find(as_t *, uintptr_t, bool);
static void pt_mapping_make_global(uintptr_t, size_t);

page_mapping_operations_t pt_mapping_operations = {
	.mapping_insert = pt_mapping_insert,
	.mapping_remove = pt_mapping_remove,
	.mapping_find = pt_mapping_find,
	.mapping_make_global = pt_mapping_make_global
};

/** Map page to frame using hierarchical page tables.
 *
 * Map virtual address page to physical address frame
 * using flags.
 *
 * @param as Address space to which the page belongs.
 * @param page Virtual address of the page to be mapped.
 * @param frame Physical address of memory frame to which the mapping is done.
 * @param flags Flags to be used for mapping.
 *
 */
void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
    unsigned int flags)
{
	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);

	ASSERT(page_table_locked(as));

	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
		pte_t *newpt = (pte_t *) frame_alloc(PTL1_SIZE,
		    FRAME_LOWMEM | FRAME_KA);
		memsetb(newpt, FRAME_SIZE << PTL1_SIZE, 0);
		SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
		SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page),
		    PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
		    PAGE_WRITE);
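		/*
		 * Make the new table's address and flags globally visible
		 * before the entry is marked present.
		 */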
		write_barrier();
		SET_PTL1_PRESENT(ptl0, PTL0_INDEX(page));
	}

	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));

	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
		pte_t *newpt = (pte_t *) frame_alloc(PTL2_SIZE,
		    FRAME_LOWMEM | FRAME_KA);
		memsetb(newpt, FRAME_SIZE << PTL2_SIZE, 0);
		SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
		SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page),
		    PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
		    PAGE_WRITE);
		write_barrier();
		SET_PTL2_PRESENT(ptl1, PTL1_INDEX(page));
	}

	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));

	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
		pte_t *newpt = (pte_t *) frame_alloc(PTL3_SIZE,
		    FRAME_LOWMEM | FRAME_KA);
		memsetb(newpt, FRAME_SIZE << PTL3_SIZE, 0);
		SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
		SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page),
		    PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
		    PAGE_WRITE);
		write_barrier();
		SET_PTL3_PRESENT(ptl2, PTL2_INDEX(page));
	}

	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

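	/*
	 * Publish the mapping: the frame address and flags must be visible
	 * before the present bit is set, so that a lock-free reader never
	 * sees a present PTE with a stale frame address.
	 */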
	SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame);
	SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags | PAGE_NOT_PRESENT);
	write_barrier();
	SET_FRAME_PRESENT(ptl3, PTL3_INDEX(page));
}

/** Remove mapping of page from hierarchical page tables.
 *
 * Remove any mapping of page within address space as.
 * TLB shootdown should follow in order to make the effects of
 * this call visible.
 *
 * Empty page tables except PTL0 are freed.
 *
 * @param as Address space to which the page belongs.
 * @param page Virtual address of the page to be demapped.
 *
 */
void pt_mapping_remove(as_t *as, uintptr_t page)
{
	ASSERT(page_table_locked(as));

	/*
	 * First, remove the mapping, if it exists.
	 */

	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	/*
	 * Destroy the mapping.
	 * Setting the PTE to PAGE_NOT_PRESENT is not sufficient.
	 */
	memsetb(&ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0);

	/*
	 * Second, free all empty tables along the way from PTL3 down to PTL0
	 * except those needed for sharing the kernel non-identity mappings.
	 */

	/* Check PTL3. */
	bool empty = true;

	unsigned int i;
	for (i = 0; i < PTL3_ENTRIES; i++) {
		if (PTE_VALID(&ptl3[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL3 is empty.
		 * Release the frame and remove the PTL3 pointer from the
		 * parent table.
		 */
#if (PTL2_ENTRIES != 0)
		memsetb(&ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0);
#elif (PTL1_ENTRIES != 0)
		memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
#else
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
#endif
		frame_free(KA2PA((uintptr_t) ptl3));
	} else {
		/*
		 * PTL3 is not empty.
		 * Therefore, there must be a path from PTL0 to PTL3 and
		 * thus nothing to free in higher levels.
		 */
		return;
	}

	/* Check PTL2; empty is still true. */
#if (PTL2_ENTRIES != 0)
	for (i = 0; i < PTL2_ENTRIES; i++) {
		if (PTE_VALID(&ptl2[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL2 is empty.
		 * Release the frame and remove the PTL2 pointer from the
		 * parent table.
		 */
#if (PTL1_ENTRIES != 0)
		memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
#else
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
#endif
		frame_free(KA2PA((uintptr_t) ptl2));
	} else {
		/*
		 * PTL2 is not empty.
		 * Therefore, there must be a path from PTL0 to PTL2 and
		 * thus nothing to free in higher levels.
		 */
		return;
	}
#endif /* PTL2_ENTRIES != 0 */

	/* Check PTL1; empty is still true. */
#if (PTL1_ENTRIES != 0)
	for (i = 0; i < PTL1_ENTRIES; i++) {
		if (PTE_VALID(&ptl1[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL1 is empty.
		 * Release the frame and remove the PTL1 pointer from the
		 * parent table.
		 */
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
		frame_free(KA2PA((uintptr_t) ptl1));
	}
#endif /* PTL1_ENTRIES != 0 */
}

/** Find mapping for virtual page in hierarchical page tables.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual page.
 * @param nolock True if the page tables need not be locked.
 *
 * @return NULL if there is no such mapping; entry from PTL3 describing
 * the mapping otherwise.
 *
 */
pte_t *pt_mapping_find(as_t *as, uintptr_t page, bool nolock)
{
	ASSERT(nolock || page_table_locked(as));

	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

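	/*
	 * Pairs with the write_barrier() in pt_mapping_insert(): do not let
	 * the load of the next-level table address be ordered before the
	 * load of the present bit.
	 */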
	read_barrier();

	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

#if (PTL1_ENTRIES != 0)
	read_barrier();
#endif

	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

#if (PTL2_ENTRIES != 0)
	read_barrier();
#endif

	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	return &ptl3[PTL3_INDEX(page)];
}

/** Return the size of the region mapped by a single PTL0 entry.
 *
 * @return Size of the region mapped by a single PTL0 entry.
 */
static uintptr_t ptl0_step_get(void)
{
	size_t va_bits;

	va_bits = fnzb(PTL0_ENTRIES) + fnzb(PTL1_ENTRIES) + fnzb(PTL2_ENTRIES) +
	    fnzb(PTL3_ENTRIES) + PAGE_WIDTH;

	return 1UL << (va_bits - fnzb(PTL0_ENTRIES));
}
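
/*
 * Worked example (assuming an amd64-style layout, for illustration only):
 * with four 512-entry table levels and PAGE_WIDTH == 12, va_bits is
 * 9 + 9 + 9 + 9 + 12 == 48, so a single PTL0 entry maps
 * 1UL << (48 - 9) bytes, i.e. 512 GiB.
 */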

/** Make the mappings in the given range global across all address spaces.
 *
 * All PTL0 entries in the given range will be mapped to a next level page
 * table. The next level page table will be allocated and cleared.
 *
 * pt_mapping_remove() will never deallocate these page tables even when there
 * are no PTEs in them.
 *
 * @param base Base address corresponding to the first PTL0 entry that will be
 * altered by this function.
 * @param size Size in bytes defining the range of PTL0 entries that will be
 * altered by this function.
 */
void pt_mapping_make_global(uintptr_t base, size_t size)
{
	uintptr_t ptl0 = PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);
	uintptr_t ptl0_step = ptl0_step_get();
	size_t order;
	uintptr_t addr;

#if (PTL1_ENTRIES != 0)
	order = PTL1_SIZE;
#elif (PTL2_ENTRIES != 0)
	order = PTL2_SIZE;
#else
	order = PTL3_SIZE;
#endif

	ASSERT(size > 0);

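	/*
	 * The loop condition is written with the -1 offsets, presumably so
	 * that the comparison stays correct even when base + size wraps
	 * around to zero at the very top of the address space.
	 */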
	for (addr = ALIGN_DOWN(base, ptl0_step); addr - 1 < base + size - 1;
	    addr += ptl0_step) {
		uintptr_t l1;

		l1 = (uintptr_t) frame_alloc(order, FRAME_KA | FRAME_LOWMEM);
		memsetb((void *) l1, FRAME_SIZE << order, 0);
		SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(addr), KA2PA(l1));
		SET_PTL1_FLAGS(ptl0, PTL0_INDEX(addr),
		    PAGE_PRESENT | PAGE_USER | PAGE_CACHEABLE |
		    PAGE_EXEC | PAGE_WRITE | PAGE_READ);
	}
}

/** @}
 */