source: mainline/kernel/generic/src/mm/backend_elf.c@ 8a2474f

Last change on this file since 8a2474f was 8a2474f, checked in by Jakub Jermar <jakub@…>, 14 years ago

Remove comments.

/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Backend for address space areas backed by an ELF image.
 */

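/*
 * Usage sketch (illustrative only, not part of this file): a program loader
 * is expected to attach this backend to an address space area roughly as
 * follows. The exact as_area_create() signature, the attribute constant and
 * the variable names are assumptions and may differ between revisions.
 *
 *	mem_backend_data_t backend_data;
 *	backend_data.elf = elf;           (pointer to the ELF header)
 *	backend_data.segment = entry;     (loadable segment header)
 *	(void) as_area_create(as, flags, entry->p_memsz, entry->p_vaddr,
 *	    AS_AREA_ATTR_NONE, &elf_backend, &backend_data);
 */
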
#include <lib/elf.h>
#include <debug.h>
#include <typedefs.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/page.h>
#include <mm/reserve.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <align.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <arch/barrier.h>

static bool elf_create(as_area_t *);
static bool elf_resize(as_area_t *, size_t);
static void elf_share(as_area_t *);
static void elf_destroy(as_area_t *);

static int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
static void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);

mem_backend_t elf_backend = {
	.create = elf_create,
	.resize = elf_resize,
	.share = elf_share,
	.destroy = elf_destroy,

	.page_fault = elf_page_fault,
	.frame_free = elf_frame_free,
};

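/** Create an ELF image backed address space area.
 *
 * Reserve memory for the anonymous pages of the area, i.e. the pages that are
 * not backed by the initialized part of the ELF segment.
 *
 * @param area Address space area.
 *
 * @return True on success, false if the memory reservation failed.
 */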
bool elf_create(as_area_t *area)
{
	elf_segment_header_t *entry = area->backend_data.segment;
	size_t nonanon_pages = ALIGN_DOWN(entry->p_filesz, PAGE_SIZE);

	if (area->pages <= nonanon_pages)
		return true;

	return reserve_try_alloc(area->pages - nonanon_pages);
}

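/** Resize an ELF image backed address space area.
 *
 * When the area grows, reserve memory for the new anonymous pages; when it
 * shrinks, release the reservation of the anonymous pages that go away. Pages
 * backed by the initialized part of the segment are not accounted for.
 *
 * @param area      Address space area.
 * @param new_pages New size of the area.
 *
 * @return True on success, false if the memory reservation failed.
 */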
bool elf_resize(as_area_t *area, size_t new_pages)
{
	elf_segment_header_t *entry = area->backend_data.segment;
	size_t nonanon_pages = ALIGN_DOWN(entry->p_filesz, PAGE_SIZE);

	if (new_pages > area->pages) {
		/* The area is growing. */
		if (area->pages >= nonanon_pages)
			return reserve_try_alloc(new_pages - area->pages);
		else if (new_pages > nonanon_pages)
			return reserve_try_alloc(new_pages - nonanon_pages);
	} else if (new_pages < area->pages) {
		/* The area is shrinking. */
		if (new_pages >= nonanon_pages)
			reserve_free(area->pages - new_pages);
		else if (area->pages > nonanon_pages)
			reserve_free(nonanon_pages - new_pages);
	}

	return true;
}

/** Share ELF image backed address space area.
 *
 * If the area is writable, then all mapped pages are duplicated in the
 * pagemap. Otherwise only portions of the area that are not backed by the
 * ELF image are put into the pagemap.
 *
 * @param area Address space area.
 */
void elf_share(as_area_t *area)
{
	elf_segment_header_t *entry = area->backend_data.segment;
	link_t *cur;
	btree_node_t *leaf, *node;
	uintptr_t start_anon = entry->p_vaddr + entry->p_filesz;

	ASSERT(mutex_locked(&area->as->lock));
	ASSERT(mutex_locked(&area->lock));

	/*
	 * Find the node in which to start linear search.
	 */
	if (area->flags & AS_AREA_WRITE) {
		node = list_get_instance(area->used_space.leaf_head.next,
		    btree_node_t, leaf_link);
	} else {
		(void) btree_search(&area->sh_info->pagemap, start_anon, &leaf);
		node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap,
		    leaf);
		if (!node)
			node = leaf;
	}

	/*
	 * Copy used anonymous portions of the area to sh_info's page map.
	 */
	mutex_lock(&area->sh_info->lock);
	for (cur = &node->leaf_link; cur != &area->used_space.leaf_head;
	    cur = cur->next) {
		unsigned int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);

		for (i = 0; i < node->keys; i++) {
			uintptr_t base = node->key[i];
			size_t count = (size_t) node->value[i];
			unsigned int j;

			/*
			 * Skip read-only areas of used space that are backed
			 * by the ELF image.
			 */
			if (!(area->flags & AS_AREA_WRITE))
				if (base >= entry->p_vaddr &&
				    base + count * PAGE_SIZE <= start_anon)
					continue;

			for (j = 0; j < count; j++) {
				pte_t *pte;

				/*
				 * Skip read-only pages that are backed by the
				 * ELF image.
				 */
				if (!(area->flags & AS_AREA_WRITE))
					if (base >= entry->p_vaddr &&
					    base + (j + 1) * PAGE_SIZE <=
					    start_anon)
						continue;

				page_table_lock(area->as, false);
				pte = page_mapping_find(area->as,
				    base + j * PAGE_SIZE);
				ASSERT(pte && PTE_VALID(pte) &&
				    PTE_PRESENT(pte));
				btree_insert(&area->sh_info->pagemap,
				    (base + j * PAGE_SIZE) - area->base,
				    (void *) PTE_GET_FRAME(pte), NULL);
				page_table_unlock(area->as, false);

				pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
				frame_reference_add(pfn);
			}

		}
	}
	mutex_unlock(&area->sh_info->lock);
}

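/** Destroy an ELF image backed address space area.
 *
 * Release the memory reservation of the anonymous (not ELF image backed)
 * pages of the area.
 *
 * @param area Address space area.
 */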
void elf_destroy(as_area_t *area)
{
	elf_segment_header_t *entry = area->backend_data.segment;
	size_t nonanon_pages = ALIGN_DOWN(entry->p_filesz, PAGE_SIZE);

	if (area->pages > nonanon_pages)
		reserve_free(area->pages - nonanon_pages);
}

/** Service a page fault in the ELF backend address space area.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area		Pointer to the address space area.
 * @param addr		Faulting virtual address.
 * @param access	Access mode that caused the fault (i.e.
 *			read/write/exec).
 *
 * @return		AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
 *			on success (i.e. serviced).
 */
int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
	elf_header_t *elf = area->backend_data.elf;
	elf_segment_header_t *entry = area->backend_data.segment;
	btree_node_t *leaf;
	uintptr_t base, frame, page, start_anon;
	size_t i;
	bool dirty = false;

	ASSERT(page_table_locked(AS));
	ASSERT(mutex_locked(&area->lock));

	if (!as_area_check_access(area, access))
		return AS_PF_FAULT;

	if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
		return AS_PF_FAULT;

	if (addr >= entry->p_vaddr + entry->p_memsz)
		return AS_PF_FAULT;

	i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
	base = (uintptr_t)
	    (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));

	/* Virtual address of the faulting page. */
	page = ALIGN_DOWN(addr, PAGE_SIZE);

	/* Virtual address of the end of the initialized part of the segment. */
	start_anon = entry->p_vaddr + entry->p_filesz;

	if (area->sh_info) {
		bool found = false;

		/*
		 * The address space area is shared.
		 */

		mutex_lock(&area->sh_info->lock);
		frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
		    page - area->base, &leaf);
		if (!frame) {
			unsigned int i;

			/*
			 * Workaround for a valid zero frame address: the
			 * pagemap may map this page to frame zero, in which
			 * case btree_search() also returns NULL. Check the
			 * leaf keys explicitly to tell the two cases apart.
			 */

			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == page - area->base) {
					found = true;
					break;
				}
			}
		}
		if (frame || found) {
			frame_reference_add(ADDR2PFN(frame));
			page_mapping_insert(AS, addr, frame,
			    as_area_get_flags(area));
			if (!used_space_insert(area, page, 1))
				panic("Cannot insert used space.");
			mutex_unlock(&area->sh_info->lock);
			return AS_PF_OK;
		}
	}

	/*
	 * The area is either not shared or the pagemap does not contain the
	 * mapping.
	 */
	if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
		/*
		 * Initialized portion of the segment. The memory is backed
		 * directly by the content of the ELF image. Pages are
		 * only copied if the segment is writable, so that there
		 * can be more instantiations of the same ELF image in
		 * memory at a time. Note that this could be later done
		 * as COW.
		 */
		if (entry->p_flags & PF_W) {
			frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
			memcpy((void *) PA2KA(frame),
			    (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
			if (entry->p_flags & PF_X) {
				smc_coherence_block((void *) PA2KA(frame),
				    FRAME_SIZE);
			}
			dirty = true;
		} else {
			frame = KA2PA(base + i * FRAME_SIZE);
		}
	} else if (page >= start_anon) {
		/*
		 * This is the uninitialized portion of the segment.
		 * It is not physically present in the ELF image.
		 * To resolve the situation, a frame must be allocated
		 * and cleared.
		 */
		frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
		memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
		dirty = true;
	} else {
		size_t pad_lo, pad_hi;
		/*
		 * The mixed case.
		 *
		 * The middle part is backed by the ELF image and
		 * the lower and upper parts are anonymous memory.
		 * (The segment can be and often is shorter than 1 page.)
		 */
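		/*
		 * Illustrative example (hypothetical numbers): with 4 KiB
		 * pages, p_vaddr == 0x10080 and start_anon == 0x10f00, a
		 * fault on the page at 0x10000 gives pad_lo == 0x80 and
		 * pad_hi == 0x100, so only the middle 0xe80 bytes are copied
		 * from the image and both paddings are zeroed out below.
		 */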
		if (page < entry->p_vaddr)
			pad_lo = entry->p_vaddr - page;
		else
			pad_lo = 0;

		if (start_anon < page + PAGE_SIZE)
			pad_hi = page + PAGE_SIZE - start_anon;
		else
			pad_hi = 0;

		frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
		memcpy((void *) (PA2KA(frame) + pad_lo),
		    (void *) (base + i * FRAME_SIZE + pad_lo),
		    FRAME_SIZE - pad_lo - pad_hi);
		if (entry->p_flags & PF_X) {
			smc_coherence_block((void *) (PA2KA(frame) + pad_lo),
			    FRAME_SIZE - pad_lo - pad_hi);
		}
		memsetb((void *) PA2KA(frame), pad_lo, 0);
		memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
		    0);
		dirty = true;
	}

	if (dirty && area->sh_info) {
		frame_reference_add(ADDR2PFN(frame));
		btree_insert(&area->sh_info->pagemap, page - area->base,
		    (void *) frame, leaf);
	}

	if (area->sh_info)
		mutex_unlock(&area->sh_info->lock);

	page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
	if (!used_space_insert(area, page, 1))
		panic("Cannot insert used space.");

	return AS_PF_OK;
}

/** Free a frame that is backed by the ELF backend.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area		Pointer to the address space area.
 * @param page		Page that is mapped to frame. Must be aligned to
 *			PAGE_SIZE.
 * @param frame		Frame to be released.
 *
 */
void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
{
	elf_segment_header_t *entry = area->backend_data.segment;
	uintptr_t start_anon;

	ASSERT(page_table_locked(area->as));
	ASSERT(mutex_locked(&area->lock));

	ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
	ASSERT(page < entry->p_vaddr + entry->p_memsz);

	start_anon = entry->p_vaddr + entry->p_filesz;

	if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
		if (entry->p_flags & PF_W) {
			/*
			 * Free the frame with the copy of writable segment
			 * data.
			 */
			frame_free_noreserve(frame);
		}
	} else {
		/*
		 * The frame holds either anonymous memory or the mixed case
		 * (i.e. the lower part is backed by the ELF image and the
		 * upper part is anonymous). In either case, the frame needs
		 * to be freed.
		 */
		frame_free_noreserve(frame);
	}
}

/** @}
 */