source: mainline/kernel/genarch/src/mm/page_pt.c@ 5a6a42f

Last change on this file since 5a6a42f was a2789d2, checked in by Jakub Jermar <jakub@…>, 14 years ago

Avoid overflow in comparison when base + size == 0.

/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genarchmm
 * @{
 */

/**
 * @file
 * @brief Virtual Address Translation for hierarchical 4-level page tables.
 */

#include <genarch/mm/page_pt.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/km.h>
#include <mm/as.h>
#include <arch/mm/page.h>
#include <arch/mm/as.h>
#include <typedefs.h>
#include <arch/asm.h>
#include <memstr.h>
#include <align.h>
#include <macros.h>

static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
static void pt_mapping_remove(as_t *, uintptr_t);
static pte_t *pt_mapping_find(as_t *, uintptr_t, bool);
static void pt_mapping_make_global(uintptr_t, size_t);

page_mapping_operations_t pt_mapping_operations = {
	.mapping_insert = pt_mapping_insert,
	.mapping_remove = pt_mapping_remove,
	.mapping_find = pt_mapping_find,
	.mapping_make_global = pt_mapping_make_global
};
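
/*
 * Editor's example (not part of the original file): a minimal sketch of how
 * an architecture that uses hierarchical page tables is expected to select
 * this backend.  It assumes the global page_mapping_operations pointer from
 * <mm/page.h>; the real call site and its name are architecture specific.
 */
#if 0
static void example_select_pt_backend(void)
{
	/* Route generic page_mapping_*() calls to the pt_ implementation. */
	page_mapping_operations = &pt_mapping_operations;
}
#endif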

/** Map page to frame using hierarchical page tables.
 *
 * Map virtual address page to physical address frame
 * using flags.
 *
 * @param as    Address space to which page belongs.
 * @param page  Virtual address of the page to be mapped.
 * @param frame Physical address of memory frame to which the mapping is done.
 * @param flags Flags to be used for mapping.
 *
 */
void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
    unsigned int flags)
{
	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);

	ASSERT(page_table_locked(as));

	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
		pte_t *newpt = (pte_t *) frame_alloc(PTL1_SIZE,
		    FRAME_LOWMEM | FRAME_KA);
		memsetb(newpt, FRAME_SIZE << PTL1_SIZE, 0);
		SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
		SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page),
		    PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
		    PAGE_WRITE);
	}

	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));

	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
		pte_t *newpt = (pte_t *) frame_alloc(PTL2_SIZE,
		    FRAME_LOWMEM | FRAME_KA);
		memsetb(newpt, FRAME_SIZE << PTL2_SIZE, 0);
		SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
		SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page),
		    PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
		    PAGE_WRITE);
	}

	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));

	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
		pte_t *newpt = (pte_t *) frame_alloc(PTL3_SIZE,
		    FRAME_LOWMEM | FRAME_KA);
		memsetb(newpt, FRAME_SIZE << PTL3_SIZE, 0);
		SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
		SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page),
		    PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
		    PAGE_WRITE);
	}

	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame);
	SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags);
}
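
/*
 * Editor's example (not part of the original file): a minimal sketch of the
 * expected calling convention, assuming the generic page_mapping_insert()
 * wrapper and page_table_lock()/page_table_unlock() from the generic mm
 * headers.  The page table lock must be held, which is what the ASSERT in
 * pt_mapping_insert() checks; the flag combination is purely illustrative.
 */
#if 0
static void example_map_one_page(as_t *as, uintptr_t virt, uintptr_t phys)
{
	page_table_lock(as, true);
	page_mapping_insert(as, virt, phys,
	    PAGE_PRESENT | PAGE_USER | PAGE_CACHEABLE | PAGE_WRITE);
	page_table_unlock(as, true);
}
#endif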

/** Remove mapping of page from hierarchical page tables.
 *
 * Remove any mapping of page within address space as.
 * TLB shootdown should follow in order to make effects of
 * this call visible.
 *
 * Empty page tables except PTL0 are freed.
 *
 * @param as   Address space to which page belongs.
 * @param page Virtual address of the page to be demapped.
 *
 */
void pt_mapping_remove(as_t *as, uintptr_t page)
{
	ASSERT(page_table_locked(as));

	/*
	 * First, remove the mapping, if it exists.
	 */

	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	/*
	 * Destroy the mapping.
	 * Setting to PAGE_NOT_PRESENT is not sufficient.
	 */
	memsetb(&ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0);

	/*
	 * Second, free all empty tables along the way from PTL3 down to PTL0
	 * except those needed for sharing the kernel non-identity mappings.
	 */

	/* Check PTL3 */
	bool empty = true;

	unsigned int i;
	for (i = 0; i < PTL3_ENTRIES; i++) {
		if (PTE_VALID(&ptl3[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL3 is empty.
		 * Release the frame and remove PTL3 pointer from the parent
		 * table.
		 */
#if (PTL2_ENTRIES != 0)
		memsetb(&ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0);
#elif (PTL1_ENTRIES != 0)
		memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
#else
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
#endif
		frame_free(KA2PA((uintptr_t) ptl3));
	} else {
		/*
		 * PTL3 is not empty.
		 * Therefore, there must be a path from PTL0 to PTL3 and
		 * thus nothing to free in higher levels.
		 */
		return;
	}

	/* Check PTL2, empty is still true */
#if (PTL2_ENTRIES != 0)
	for (i = 0; i < PTL2_ENTRIES; i++) {
		if (PTE_VALID(&ptl2[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL2 is empty.
		 * Release the frame and remove PTL2 pointer from the parent
		 * table.
		 */
#if (PTL1_ENTRIES != 0)
		memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
#else
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
#endif
		frame_free(KA2PA((uintptr_t) ptl2));
	} else {
		/*
		 * PTL2 is not empty.
		 * Therefore, there must be a path from PTL0 to PTL2 and
		 * thus nothing to free in higher levels.
		 */
		return;
	}
#endif /* PTL2_ENTRIES != 0 */

	/* Check PTL1, empty is still true */
#if (PTL1_ENTRIES != 0)
	for (i = 0; i < PTL1_ENTRIES; i++) {
		if (PTE_VALID(&ptl1[i])) {
			empty = false;
			break;
		}
	}

	if (empty) {
		/*
		 * PTL1 is empty.
		 * Release the frame and remove PTL1 pointer from the parent
		 * table.
		 */
		if (km_is_non_identity(page))
			return;

		memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
		frame_free(KA2PA((uintptr_t) ptl1));
	}
#endif /* PTL1_ENTRIES != 0 */
}
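
/*
 * Editor's example (not part of the original file): as the comment above
 * notes, TLB maintenance must follow the removal before the unmapping is
 * guaranteed to be visible.  The sketch assumes the generic
 * page_mapping_remove() wrapper and tlb_invalidate_pages() from <mm/tlb.h>
 * (not included by this file); the full SMP shootdown protocol is omitted.
 */
#if 0
static void example_unmap_one_page(as_t *as, uintptr_t virt)
{
	page_table_lock(as, true);
	page_mapping_remove(as, virt);

	/* Invalidate the local TLB entry for the demapped page. */
	tlb_invalidate_pages(as->asid, virt, 1);

	page_table_unlock(as, true);
}
#endif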

/** Find mapping for virtual page in hierarchical page tables.
 *
 * @param as     Address space to which page belongs.
 * @param page   Virtual page.
 * @param nolock True if the page tables need not be locked.
 *
 * @return NULL if there is no such mapping; entry from PTL3 describing
 *         the mapping otherwise.
 *
 */
pte_t *pt_mapping_find(as_t *as, uintptr_t page, bool nolock)
{
	ASSERT(nolock || page_table_locked(as));

	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

	pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

	pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

	pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	return &ptl3[PTL3_INDEX(page)];
}
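
/*
 * Editor's example (not part of the original file): the returned pointer
 * refers to a live PTL3 entry, so callers normally test it before use.  The
 * sketch assumes the generic page_mapping_find() wrapper and the
 * architecture-defined PTE_PRESENT() and PTE_GET_FRAME() macros used by the
 * generic address space code.
 */
#if 0
static bool example_translate(as_t *as, uintptr_t page, uintptr_t *frame)
{
	page_table_lock(as, true);

	pte_t *pte = page_mapping_find(as, page, false);
	bool found = (pte != NULL) && PTE_PRESENT(pte);
	if (found)
		*frame = PTE_GET_FRAME(pte);

	page_table_unlock(as, true);
	return found;
}
#endif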

/** Make the mappings in the given range global across all address spaces.
 *
 * All PTL0 entries in the given range will be mapped to a next level page
 * table. The next level page table will be allocated and cleared.
 *
 * pt_mapping_remove() will never deallocate these page tables even when there
 * are no PTEs in them.
 *
 * @param base Base address corresponding to the first PTL0 entry that will be
 *             altered by this function.
 * @param size Size in bytes defining the range of PTL0 entries that will be
 *             altered by this function.
 */
void pt_mapping_make_global(uintptr_t base, size_t size)
{
	uintptr_t ptl0 = PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);
	uintptr_t ptl0step = (((uintptr_t) -1) / PTL0_ENTRIES) + 1;
	size_t order;
	uintptr_t addr;

#if (PTL1_ENTRIES != 0)
	order = PTL1_SIZE;
#elif (PTL2_ENTRIES != 0)
	order = PTL2_SIZE;
#else
	order = PTL3_SIZE;
#endif

	ASSERT(ispwr2(ptl0step));
	ASSERT(size > 0);

	for (addr = ALIGN_DOWN(base, ptl0step); addr - 1 < base + size - 1;
	    addr += ptl0step) {
		uintptr_t l1;

		l1 = (uintptr_t) frame_alloc(order, FRAME_KA | FRAME_LOWMEM);
		memsetb((void *) l1, FRAME_SIZE << order, 0);
		SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(addr), KA2PA(l1));
		SET_PTL1_FLAGS(ptl0, PTL0_INDEX(addr),
		    PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
		    PAGE_WRITE);
	}
}
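
/*
 * Editor's note (not part of the original file): ptl0step above is the span
 * of virtual address space covered by one PTL0 entry.  As a worked example,
 * with a 32-bit uintptr_t and PTL0_ENTRIES == 1024, ptl0step is
 * (0xffffffff / 1024) + 1 == 0x00400000, i.e. 4 MiB, so the loop touches one
 * PTL0 slot per 4 MiB of [base, base + size).  The comparison
 * addr - 1 < base + size - 1 is written that way so it still works when
 * base + size wraps around to 0 (a range ending at the very top of the
 * address space), which is the overflow mentioned in the changeset note at
 * the top of this page.
 */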

/** @}
 */