source: mainline/kernel/generic/src/mm/backend_elf.c@d965dc3

Last change on this file since d965dc3 was 59fb782, checked in by Jakub Jermar <jakub@…>, 12 years ago

Unify the use of virtual addresses and virtual page addresses in mm code.

  • as_page_fault() accepts faulting address (if available) and propagates the faulting page further along
  • backends' page_fault() handlers assume page fault address
  • page_mapping_create/destroy/find() accept addresses, but pass only page and frame addresses along
  • as_area_create(), as_area_resize() now test whether the address is page-aligned
  • renames of various variables to better fit their purpose (address vs. page)
  • no need to align the addresses in mips32 TLB exception handlers now
Property mode set to 100644
File size: 11.9 KB
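
The first two bullets describe a contract between the generic fault path and the backends: the raw faulting address is aligned once, and every backend page_fault() handler receives a page address. Below is a minimal sketch of that division of labour, with handle_fault() and backend_fault() as hypothetical stand-ins for as_page_fault() and the backend hook; PAGE_SIZE and ALIGN_DOWN only mirror the kernel's definitions:

    #include <stdint.h>

    #define PAGE_SIZE 4096
    #define ALIGN_DOWN(a, s) ((a) & ~((uintptr_t) (s) - 1))

    typedef enum { PF_ACCESS_READ, PF_ACCESS_WRITE, PF_ACCESS_EXEC } pf_access_t;

    /* Hypothetical backend hook: may assume 'upage' is page-aligned. */
    static int backend_fault(uintptr_t upage, pf_access_t access)
    {
            (void) access;
            /* e.g. look up 'upage' in a pagemap keyed by page address... */
            return 0;
    }

    /* Hypothetical generic fault path: aligns the raw address exactly once. */
    int handle_fault(uintptr_t address, pf_access_t access)
    {
            uintptr_t upage = ALIGN_DOWN(address, PAGE_SIZE);
            return backend_fault(upage, access);
    }
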
/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Backend for address space areas backed by an ELF image.
 */

#include <lib/elf.h>
#include <debug.h>
#include <typedefs.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/page.h>
#include <mm/reserve.h>
#include <mm/km.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <align.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <arch/barrier.h>

static bool elf_create(as_area_t *);
static bool elf_resize(as_area_t *, size_t);
static void elf_share(as_area_t *);
static void elf_destroy(as_area_t *);

static bool elf_is_resizable(as_area_t *);
static bool elf_is_shareable(as_area_t *);

static int elf_page_fault(as_area_t *, uintptr_t, pf_access_t);
static void elf_frame_free(as_area_t *, uintptr_t, uintptr_t);

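/** Operations vector of the ELF memory backend. */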
mem_backend_t elf_backend = {
	.create = elf_create,
	.resize = elf_resize,
	.share = elf_share,
	.destroy = elf_destroy,

	.is_resizable = elf_is_resizable,
	.is_shareable = elf_is_shareable,

	.page_fault = elf_page_fault,
	.frame_free = elf_frame_free,
};

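/** Compute the number of pages in the area that are backed by the ELF image.
 *
 * Pages of read-only segments that lie fully inside the initialized part of
 * the segment need no memory reservation. Writable segments are always
 * copied on fault, so all of their pages count as anonymous.
 *
 * @param area Address space area.
 *
 * @return Number of pages not requiring a memory reservation.
 */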
static size_t elf_nonanon_pages_get(as_area_t *area)
{
	elf_segment_header_t *entry = area->backend_data.segment;
	uintptr_t first = ALIGN_UP(entry->p_vaddr, PAGE_SIZE);
	uintptr_t last = ALIGN_DOWN(entry->p_vaddr + entry->p_filesz,
	    PAGE_SIZE);

	if (entry->p_flags & PF_W)
		return 0;

	if (last < first)
		return 0;

	/* Convert the byte span to a number of pages. */
	return (last - first) >> PAGE_WIDTH;
}

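/** Reserve memory for the anonymous (not ELF-backed) part of the new area. */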
bool elf_create(as_area_t *area)
{
	size_t nonanon_pages = elf_nonanon_pages_get(area);

	if (area->pages <= nonanon_pages)
		return true;

	return reserve_try_alloc(area->pages - nonanon_pages);
}

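/** Adjust the memory reservation to match the area's new size. */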
bool elf_resize(as_area_t *area, size_t new_pages)
{
	size_t nonanon_pages = elf_nonanon_pages_get(area);

	if (new_pages > area->pages) {
		/* The area is growing. */
		if (area->pages >= nonanon_pages)
			return reserve_try_alloc(new_pages - area->pages);
		else if (new_pages > nonanon_pages)
			return reserve_try_alloc(new_pages - nonanon_pages);
	} else if (new_pages < area->pages) {
		/* The area is shrinking. */
		if (new_pages >= nonanon_pages)
			reserve_free(area->pages - new_pages);
		else if (area->pages > nonanon_pages)
			reserve_free(area->pages - nonanon_pages);
	}

	return true;
}

/** Share ELF image backed address space area.
 *
 * If the area is writable, then all mapped pages are duplicated in the
 * pagemap. Otherwise only portions of the area that are not backed by the
 * ELF image are put into the pagemap.
 *
 * @param area Address space area.
 */
void elf_share(as_area_t *area)
{
	elf_segment_header_t *entry = area->backend_data.segment;
	link_t *cur;
	btree_node_t *leaf, *node;
	uintptr_t start_anon = entry->p_vaddr + entry->p_filesz;

	ASSERT(mutex_locked(&area->as->lock));
	ASSERT(mutex_locked(&area->lock));

	/*
	 * Find the node in which to start linear search.
	 */
	if (area->flags & AS_AREA_WRITE) {
		node = list_get_instance(list_first(&area->used_space.leaf_list),
		    btree_node_t, leaf_link);
	} else {
		(void) btree_search(&area->sh_info->pagemap, start_anon, &leaf);
		node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap,
		    leaf);
		if (!node)
			node = leaf;
	}

	/*
	 * Copy used anonymous portions of the area to sh_info's page map.
	 */
	mutex_lock(&area->sh_info->lock);
	for (cur = &node->leaf_link; cur != &area->used_space.leaf_list.head;
	    cur = cur->next) {
		unsigned int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);

		for (i = 0; i < node->keys; i++) {
			uintptr_t base = node->key[i];
			size_t count = (size_t) node->value[i];
			unsigned int j;

			/*
			 * Skip read-only areas of used space that are backed
			 * by the ELF image.
			 */
			if (!(area->flags & AS_AREA_WRITE))
				if (base >= entry->p_vaddr &&
				    base + P2SZ(count) <= start_anon)
					continue;

			for (j = 0; j < count; j++) {
				pte_t *pte;

				/*
				 * Skip read-only pages that are backed by the
				 * ELF image.
				 */
				if (!(area->flags & AS_AREA_WRITE))
					if (base >= entry->p_vaddr &&
					    base + P2SZ(j + 1) <= start_anon)
						continue;

				page_table_lock(area->as, false);
				pte = page_mapping_find(area->as,
				    base + P2SZ(j), false);
				ASSERT(pte && PTE_VALID(pte) &&
				    PTE_PRESENT(pte));
				btree_insert(&area->sh_info->pagemap,
				    (base + P2SZ(j)) - area->base,
				    (void *) PTE_GET_FRAME(pte), NULL);
				page_table_unlock(area->as, false);

				pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
				frame_reference_add(pfn);
			}
		}
	}
	mutex_unlock(&area->sh_info->lock);
}

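/** Release the memory reserved for the anonymous part of the area. */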
void elf_destroy(as_area_t *area)
{
	size_t nonanon_pages = elf_nonanon_pages_get(area);

	if (area->pages > nonanon_pages)
		reserve_free(area->pages - nonanon_pages);
}

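/** Areas backed by an ELF image can always be resized. */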
bool elf_is_resizable(as_area_t *area)
{
	return true;
}

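/** Areas backed by an ELF image can always be shared. */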
bool elf_is_shareable(as_area_t *area)
{
	return true;
}

/** Service a page fault in the ELF backend address space area.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area   Pointer to the address space area.
 * @param upage  Faulting virtual page.
 * @param access Access mode that caused the fault (i.e.
 *               read/write/exec).
 *
 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
 *         on success (i.e. serviced).
 */
int elf_page_fault(as_area_t *area, uintptr_t upage, pf_access_t access)
{
	elf_header_t *elf = area->backend_data.elf;
	elf_segment_header_t *entry = area->backend_data.segment;
	btree_node_t *leaf;
	uintptr_t base;
	uintptr_t frame;
	uintptr_t kpage;
	uintptr_t start_anon;
	size_t i;
	bool dirty = false;

	ASSERT(page_table_locked(AS));
	ASSERT(mutex_locked(&area->lock));
	ASSERT(IS_ALIGNED(upage, PAGE_SIZE));

	if (!as_area_check_access(area, access))
		return AS_PF_FAULT;

	if (upage < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
		return AS_PF_FAULT;

	if (upage >= entry->p_vaddr + entry->p_memsz)
		return AS_PF_FAULT;

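	/*
	 * Index of the faulting page within the segment and the kernel
	 * virtual address of the (page-aligned) start of the segment's
	 * file image.
	 */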
	i = (upage - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
	base = (uintptr_t)
	    (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));

	/* Virtual address of the end of the initialized part of the segment. */
	start_anon = entry->p_vaddr + entry->p_filesz;

	if (area->sh_info) {
		bool found = false;

		/*
		 * The address space area is shared.
		 */

		mutex_lock(&area->sh_info->lock);
		frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
		    upage - area->base, &leaf);
		if (!frame) {
			unsigned int i;

			/*
			 * Zero is a valid frame address, but it is also what
			 * btree_search() returns for a missing key. Search
			 * the leaf node explicitly to tell a genuine mapping
			 * to frame zero apart from a missing mapping.
			 */

			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == upage - area->base) {
					found = true;
					break;
				}
			}
		}
		if (frame || found) {
			frame_reference_add(ADDR2PFN(frame));
			page_mapping_insert(AS, upage, frame,
			    as_area_get_flags(area));
			if (!used_space_insert(area, upage, 1))
				panic("Cannot insert used space.");
			mutex_unlock(&area->sh_info->lock);
			return AS_PF_OK;
		}
	}

	/*
	 * The area is either not shared or the pagemap does not contain the
	 * mapping.
	 */
	if (upage >= entry->p_vaddr && upage + PAGE_SIZE <= start_anon) {
		/*
		 * Initialized portion of the segment. The memory is backed
		 * directly by the content of the ELF image. Pages are
		 * only copied if the segment is writable so that there
		 * can be more instantiations of the same ELF image in
		 * memory at a time. Note that this could be later done
		 * as COW.
		 */
		if (entry->p_flags & PF_W) {
			kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);
			memcpy((void *) kpage, (void *) (base + i * PAGE_SIZE),
			    PAGE_SIZE);
			if (entry->p_flags & PF_X) {
				smc_coherence_block((void *) kpage, PAGE_SIZE);
			}
			km_temporary_page_put(kpage);
			dirty = true;
		} else {
			pte_t *pte = page_mapping_find(AS_KERNEL,
			    base + i * FRAME_SIZE, true);

			ASSERT(pte);
			ASSERT(PTE_PRESENT(pte));

			frame = PTE_GET_FRAME(pte);
		}
	} else if (upage >= start_anon) {
		/*
		 * This is the uninitialized portion of the segment.
		 * It is not physically present in the ELF image.
		 * To resolve the situation, a frame must be allocated
		 * and cleared.
		 */
		kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);
		memsetb((void *) kpage, PAGE_SIZE, 0);
		km_temporary_page_put(kpage);
		dirty = true;
	} else {
		size_t pad_lo, pad_hi;
		/*
		 * The mixed case.
		 *
		 * The middle part is backed by the ELF image and the lower
		 * and upper parts are anonymous memory. (The segment can be,
		 * and often is, shorter than one page.)
		 */
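		/*
		 * Layout of the faulting page in the mixed case:
		 *
		 *   +--------+--------------------+--------+
		 *   | pad_lo | ELF-backed content | pad_hi |
		 *   +--------+--------------------+--------+
		 *   ^upage                         upage + PAGE_SIZE^
		 *
		 * Both pads are zero-filled anonymous memory; either may be
		 * empty.
		 */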
		if (upage < entry->p_vaddr)
			pad_lo = entry->p_vaddr - upage;
		else
			pad_lo = 0;

		if (start_anon < upage + PAGE_SIZE)
			pad_hi = upage + PAGE_SIZE - start_anon;
		else
			pad_hi = 0;

		kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);
		memcpy((void *) (kpage + pad_lo),
		    (void *) (base + i * PAGE_SIZE + pad_lo),
		    PAGE_SIZE - pad_lo - pad_hi);
		if (entry->p_flags & PF_X) {
			smc_coherence_block((void *) (kpage + pad_lo),
			    PAGE_SIZE - pad_lo - pad_hi);
		}
		memsetb((void *) kpage, pad_lo, 0);
		memsetb((void *) (kpage + PAGE_SIZE - pad_hi), pad_hi, 0);
		km_temporary_page_put(kpage);
		dirty = true;
	}

	if (dirty && area->sh_info) {
		frame_reference_add(ADDR2PFN(frame));
		btree_insert(&area->sh_info->pagemap, upage - area->base,
		    (void *) frame, leaf);
	}

	if (area->sh_info)
		mutex_unlock(&area->sh_info->lock);

	page_mapping_insert(AS, upage, frame, as_area_get_flags(area));
	if (!used_space_insert(area, upage, 1))
		panic("Cannot insert used space.");

	return AS_PF_OK;
}

/** Free a frame that is backed by the ELF backend.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area  Pointer to the address space area.
 * @param page  Page that is mapped to frame. Must be aligned to
 *              PAGE_SIZE.
 * @param frame Frame to be released.
 */
void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
{
	elf_segment_header_t *entry = area->backend_data.segment;
	uintptr_t start_anon;

	ASSERT(page_table_locked(area->as));
	ASSERT(mutex_locked(&area->lock));

	ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
	ASSERT(page < entry->p_vaddr + entry->p_memsz);

	start_anon = entry->p_vaddr + entry->p_filesz;

	if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
		if (entry->p_flags & PF_W) {
			/*
			 * Free the frame with the copy of writable segment
			 * data.
			 */
			frame_free_noreserve(frame);
		}
	} else {
		/*
		 * The frame is either anonymous memory or the mixed case
		 * (i.e. the lower part is backed by the ELF image and the
		 * upper part is anonymous). In either case, the frame needs
		 * to be freed.
		 */
		frame_free_noreserve(frame);
	}
}


/** @}
 */