source: mainline/kernel/generic/src/mm/backend_elf.c @ 2c86f81

Last change on this file was 2c86f81, checked in by Jakub Jermar <jakub@…>, 14 years ago:

Writable ELF segments need to be reserved memory regardless of whether
some pages are anonymous or not.

/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Backend for address space areas backed by an ELF image.
 */

#include <lib/elf.h>
#include <debug.h>
#include <typedefs.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/page.h>
#include <mm/reserve.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <align.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <arch/barrier.h>

static bool elf_create(as_area_t *);
static bool elf_resize(as_area_t *, size_t);
static void elf_share(as_area_t *);
static void elf_destroy(as_area_t *);

static int elf_page_fault(as_area_t *, uintptr_t, pf_access_t);
static void elf_frame_free(as_area_t *, uintptr_t, uintptr_t);

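/** Memory backend operations for address space areas backed by an ELF image. */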
mem_backend_t elf_backend = {
	.create = elf_create,
	.resize = elf_resize,
	.share = elf_share,
	.destroy = elf_destroy,

	.page_fault = elf_page_fault,
	.frame_free = elf_frame_free,
};

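/** Reserve memory for the anonymous pages of the area.
 *
 * Pages of a read-only segment that are fully backed by the ELF image
 * need no reservation. Writable segments are treated as fully anonymous,
 * since any of their pages may diverge from the image.
 *
 * @param area Address space area being created.
 *
 * @return True on success, false if the reservation failed.
 */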
bool elf_create(as_area_t *area)
{
	elf_segment_header_t *entry = area->backend_data.segment;
	/* Number of pages fully backed by the image (bytes -> pages). */
	size_t nonanon_pages =
	    ALIGN_DOWN(entry->p_filesz, PAGE_SIZE) >> PAGE_WIDTH;

	if (entry->p_flags & PF_W)
		nonanon_pages = 0;

	if (area->pages <= nonanon_pages)
		return true;

	return reserve_try_alloc(area->pages - nonanon_pages);
}

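/** Grow or shrink the memory reservation as the area is resized.
 *
 * Only the anonymous part of the area is subject to reservation, so the
 * reserve is adjusted relative to the image-backed page count.
 *
 * @param area      Address space area being resized.
 * @param new_pages New size of the area in pages.
 *
 * @return True on success, false if the reservation failed.
 */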
bool elf_resize(as_area_t *area, size_t new_pages)
{
	elf_segment_header_t *entry = area->backend_data.segment;
	/* Number of pages fully backed by the image (bytes -> pages). */
	size_t nonanon_pages =
	    ALIGN_DOWN(entry->p_filesz, PAGE_SIZE) >> PAGE_WIDTH;

	if (entry->p_flags & PF_W)
		nonanon_pages = 0;

	if (new_pages > area->pages) {
		/* The area is growing. */
		if (area->pages >= nonanon_pages)
			return reserve_try_alloc(new_pages - area->pages);
		else if (new_pages > nonanon_pages)
			return reserve_try_alloc(new_pages - nonanon_pages);
	} else if (new_pages < area->pages) {
		/* The area is shrinking. */
		if (new_pages >= nonanon_pages)
			reserve_free(area->pages - new_pages);
		else if (area->pages > nonanon_pages) {
			/*
			 * The area shrinks below the image-backed part, so
			 * all of its former anonymous pages go away; release
			 * the entire reservation.
			 */
			reserve_free(area->pages - nonanon_pages);
		}
	}

	return true;
}

/** Share an ELF image-backed address space area.
 *
 * If the area is writable, then all mapped pages are duplicated in the
 * pagemap. Otherwise only those portions of the area that are not backed
 * by the ELF image are put into the pagemap.
 *
 * @param area Address space area.
 */
void elf_share(as_area_t *area)
{
	elf_segment_header_t *entry = area->backend_data.segment;
	link_t *cur;
	btree_node_t *leaf, *node;
	uintptr_t start_anon = entry->p_vaddr + entry->p_filesz;

	ASSERT(mutex_locked(&area->as->lock));
	ASSERT(mutex_locked(&area->lock));

	/*
	 * Find the node in which to start the linear search.
	 */
	if (area->flags & AS_AREA_WRITE) {
		node = list_get_instance(area->used_space.leaf_head.next,
		    btree_node_t, leaf_link);
	} else {
		(void) btree_search(&area->sh_info->pagemap, start_anon, &leaf);
		node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap,
		    leaf);
		if (!node)
			node = leaf;
	}

	/*
	 * Copy used anonymous portions of the area to sh_info's page map.
	 */
	mutex_lock(&area->sh_info->lock);
	for (cur = &node->leaf_link; cur != &area->used_space.leaf_head;
	    cur = cur->next) {
		unsigned int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);

		for (i = 0; i < node->keys; i++) {
			uintptr_t base = node->key[i];
			size_t count = (size_t) node->value[i];
			unsigned int j;

			/*
			 * Skip read-only areas of used space that are backed
			 * by the ELF image.
			 */
			if (!(area->flags & AS_AREA_WRITE))
				if (base >= entry->p_vaddr &&
				    base + count * PAGE_SIZE <= start_anon)
					continue;

			for (j = 0; j < count; j++) {
				pte_t *pte;

				/*
				 * Skip read-only pages that are backed by the
				 * ELF image.
				 */
				if (!(area->flags & AS_AREA_WRITE))
					if (base >= entry->p_vaddr &&
					    base + (j + 1) * PAGE_SIZE <=
					    start_anon)
						continue;

				page_table_lock(area->as, false);
				pte = page_mapping_find(area->as,
				    base + j * PAGE_SIZE);
				ASSERT(pte && PTE_VALID(pte) &&
				    PTE_PRESENT(pte));
				btree_insert(&area->sh_info->pagemap,
				    (base + j * PAGE_SIZE) - area->base,
				    (void *) PTE_GET_FRAME(pte), NULL);
				page_table_unlock(area->as, false);

				pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
				frame_reference_add(pfn);
			}
		}
	}
	mutex_unlock(&area->sh_info->lock);
}

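/** Release the memory reservation held by the anonymous pages of the area.
 *
 * @param area Address space area being destroyed.
 */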
void elf_destroy(as_area_t *area)
{
	elf_segment_header_t *entry = area->backend_data.segment;
	/* Number of pages fully backed by the image (bytes -> pages). */
	size_t nonanon_pages =
	    ALIGN_DOWN(entry->p_filesz, PAGE_SIZE) >> PAGE_WIDTH;

	if (entry->p_flags & PF_W)
		nonanon_pages = 0;

	if (area->pages > nonanon_pages)
		reserve_free(area->pages - nonanon_pages);
}

/** Service a page fault in the ELF backend address space area.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area   Pointer to the address space area.
 * @param addr   Faulting virtual address.
 * @param access Access mode that caused the fault (i.e.
 *               read/write/exec).
 *
 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
 *         on success (i.e. serviced).
 */
int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
	elf_header_t *elf = area->backend_data.elf;
	elf_segment_header_t *entry = area->backend_data.segment;
	btree_node_t *leaf;
	uintptr_t base, frame, page, start_anon;
	size_t i;
	bool dirty = false;

	ASSERT(page_table_locked(AS));
	ASSERT(mutex_locked(&area->lock));

	if (!as_area_check_access(area, access))
		return AS_PF_FAULT;

	if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
		return AS_PF_FAULT;

	if (addr >= entry->p_vaddr + entry->p_memsz)
		return AS_PF_FAULT;

	i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
	base = (uintptr_t)
	    (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));

	/* Virtual address of the faulting page. */
	page = ALIGN_DOWN(addr, PAGE_SIZE);

	/* Virtual address of the end of the initialized part of the segment. */
	start_anon = entry->p_vaddr + entry->p_filesz;

	if (area->sh_info) {
		bool found = false;

		/*
		 * The address space area is shared.
		 */

		mutex_lock(&area->sh_info->lock);
		frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
		    page - area->base, &leaf);
		if (!frame) {
			unsigned int i;

			/*
			 * Workaround for a valid NULL address: frame 0 is a
			 * legitimate value in the pagemap, but btree_search()
			 * also returns NULL when the key is absent, so scan
			 * the leaf keys to tell the two cases apart.
			 */

			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == page - area->base) {
					found = true;
					break;
				}
			}
		}
		if (frame || found) {
			frame_reference_add(ADDR2PFN(frame));
			page_mapping_insert(AS, addr, frame,
			    as_area_get_flags(area));
			if (!used_space_insert(area, page, 1))
				panic("Cannot insert used space.");
			mutex_unlock(&area->sh_info->lock);
			return AS_PF_OK;
		}
	}

	/*
	 * The area is either not shared or the pagemap does not contain the
	 * mapping.
	 */
	if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
		/*
		 * Initialized portion of the segment. The memory is backed
		 * directly by the content of the ELF image. Pages are only
		 * copied if the segment is writable, so that multiple
		 * instantiations of the same ELF image can be in use at a
		 * time. Note that this could later be done as COW.
		 */
		if (entry->p_flags & PF_W) {
			frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
			memcpy((void *) PA2KA(frame),
			    (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
			if (entry->p_flags & PF_X) {
				smc_coherence_block((void *) PA2KA(frame),
				    FRAME_SIZE);
			}
			dirty = true;
		} else {
			frame = KA2PA(base + i * FRAME_SIZE);
		}
	} else if (page >= start_anon) {
		/*
		 * This is the uninitialized portion of the segment.
		 * It is not physically present in the ELF image.
		 * To resolve the situation, a frame must be allocated
		 * and cleared.
		 */
		frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
		memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
		dirty = true;
	} else {
		size_t pad_lo, pad_hi;
		/*
		 * The mixed case.
		 *
		 * The middle part is backed by the ELF image and
		 * the lower and upper parts are anonymous memory.
		 * (The segment can be and often is shorter than one page.)
		 */
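		/*
		 * Worked example with hypothetical numbers (4 KiB pages):
		 * if page == 0x1000 while entry->p_vaddr == 0x1200 and
		 * start_anon == 0x1a00, then pad_lo == 0x200 and
		 * pad_hi == 0x600, so only bytes 0x200..0x9ff of the frame
		 * are copied from the image and the padding is zeroed.
		 */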
		if (page < entry->p_vaddr)
			pad_lo = entry->p_vaddr - page;
		else
			pad_lo = 0;

		if (start_anon < page + PAGE_SIZE)
			pad_hi = page + PAGE_SIZE - start_anon;
		else
			pad_hi = 0;

		frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
		memcpy((void *) (PA2KA(frame) + pad_lo),
		    (void *) (base + i * FRAME_SIZE + pad_lo),
		    FRAME_SIZE - pad_lo - pad_hi);
		if (entry->p_flags & PF_X) {
			smc_coherence_block((void *) (PA2KA(frame) + pad_lo),
			    FRAME_SIZE - pad_lo - pad_hi);
		}
		memsetb((void *) PA2KA(frame), pad_lo, 0);
		memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
		    0);
		dirty = true;
	}

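	/*
	 * If a private copy was created and the area is shared, publish the
	 * new frame in the pagemap so that subsequent faults on this page
	 * find it there.
	 */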
	if (dirty && area->sh_info) {
		frame_reference_add(ADDR2PFN(frame));
		btree_insert(&area->sh_info->pagemap, page - area->base,
		    (void *) frame, leaf);
	}

	if (area->sh_info)
		mutex_unlock(&area->sh_info->lock);

	page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
	if (!used_space_insert(area, page, 1))
		panic("Cannot insert used space.");

	return AS_PF_OK;
}

/** Free a frame that is backed by the ELF backend.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area  Pointer to the address space area.
 * @param page  Page that is mapped to the frame. Must be aligned to
 *              PAGE_SIZE.
 * @param frame Frame to be released.
 */
void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
{
	elf_segment_header_t *entry = area->backend_data.segment;
	uintptr_t start_anon;

	ASSERT(page_table_locked(area->as));
	ASSERT(mutex_locked(&area->lock));

	ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
	ASSERT(page < entry->p_vaddr + entry->p_memsz);

	start_anon = entry->p_vaddr + entry->p_filesz;

	if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
		if (entry->p_flags & PF_W) {
			/*
			 * Free the frame with the copy of writable segment
			 * data.
			 */
			frame_free_noreserve(frame);
		}
	} else {
		/*
		 * The frame is either anonymous memory or the mixed case
		 * (i.e. the lower part is backed by the ELF image and the
		 * upper part is anonymous). In either case, the frame needs
		 * to be freed.
		 */
		frame_free_noreserve(frame);
	}
}

/** @}
 */