source: mainline/kernel/generic/src/mm/backend_elf.c@f1380b7

Last change on this file since f1380b7 was a35b458, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

style: Remove trailing whitespace on _all_ lines, including empty ones, for particular file types.

Command used: tools/srepl '\s\+$' '' -- *.c *.h *.py *.sh *.s *.S *.ag

Currently, whitespace on empty lines is very inconsistent.
There are two basic choices: Either remove the whitespace, or keep empty lines
indented to the level of surrounding code. The former is AFAICT more common,
and also much easier to do automatically.

Alternatively, we could write a script for automatic indentation, and use that
instead. However, if such a script exists, it's possible to use the indented
style locally, by having the editor apply the relevant conversions on load/save,
without affecting the remote repository. IMO, it makes more sense to adopt
the simpler rule.

/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Backend for address space areas backed by an ELF image.
 */

#include <lib/elf.h>
#include <assert.h>
#include <typedefs.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/page.h>
#include <mm/reserve.h>
#include <mm/km.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <align.h>
#include <mem.h>
#include <macros.h>
#include <arch.h>
#include <arch/barrier.h>

static bool elf_create(as_area_t *);
static bool elf_resize(as_area_t *, size_t);
static void elf_share(as_area_t *);
static void elf_destroy(as_area_t *);

static bool elf_is_resizable(as_area_t *);
static bool elf_is_shareable(as_area_t *);

static int elf_page_fault(as_area_t *, uintptr_t, pf_access_t);
static void elf_frame_free(as_area_t *, uintptr_t, uintptr_t);

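/*
 * Operations table through which the generic address space code calls into
 * this backend. A minimal sketch of how a loader could attach it when
 * creating an ELF-backed area (hypothetical call; the exact as_area_create()
 * signature and flags may differ between revisions):
 *
 *	mem_backend_data_t backend_data;
 *	backend_data.elf = elf;
 *	backend_data.segment = entry;
 *	as_area_create(as, flags, entry->p_memsz, AS_AREA_ATTR_NONE,
 *	    &elf_backend, &backend_data, &base, 0);
 */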
mem_backend_t elf_backend = {
	.create = elf_create,
	.resize = elf_resize,
	.share = elf_share,
	.destroy = elf_destroy,

	.is_resizable = elf_is_resizable,
	.is_shareable = elf_is_shareable,

	.page_fault = elf_page_fault,
	.frame_free = elf_frame_free,

	.create_shared_data = NULL,
	.destroy_shared_data = NULL
};

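/** Compute the size of the part of the area that is backed directly by the
 * read-only ELF image, i.e. not by anonymous memory.
 *
 * Writable segments are treated as fully anonymous, because their pages are
 * always backed by private copies.
 */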
static size_t elf_nonanon_pages_get(as_area_t *area)
{
	elf_segment_header_t *entry = area->backend_data.segment;
	uintptr_t first = ALIGN_UP(entry->p_vaddr, PAGE_SIZE);
	uintptr_t last = ALIGN_DOWN(entry->p_vaddr + entry->p_filesz,
	    PAGE_SIZE);

	if (entry->p_flags & PF_W)
		return 0;

	if (last < first)
		return 0;

	return last - first;
}

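/** Reserve physical memory for the anonymous part of a newly created area.
 *
 * @param area Address space area being created.
 *
 * @return True on success, false if the reservation could not be made.
 */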
bool elf_create(as_area_t *area)
{
	size_t nonanon_pages = elf_nonanon_pages_get(area);

	if (area->pages <= nonanon_pages)
		return true;

	return reserve_try_alloc(area->pages - nonanon_pages);
}

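/** Adjust the physical memory reservation when the area is resized.
 *
 * Only the anonymous part of the area is accounted against the reserve.
 *
 * @param area      Address space area being resized.
 * @param new_pages New size of the area in pages.
 *
 * @return True on success, false if additional memory could not be reserved.
 */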
bool elf_resize(as_area_t *area, size_t new_pages)
{
	size_t nonanon_pages = elf_nonanon_pages_get(area);

	if (new_pages > area->pages) {
		/* The area is growing. */
		if (area->pages >= nonanon_pages)
			return reserve_try_alloc(new_pages - area->pages);
		else if (new_pages > nonanon_pages)
			return reserve_try_alloc(new_pages - nonanon_pages);
	} else if (new_pages < area->pages) {
		/* The area is shrinking. */
		if (new_pages >= nonanon_pages)
			reserve_free(area->pages - new_pages);
		else if (area->pages > nonanon_pages)
			reserve_free(nonanon_pages - new_pages);
	}

	return true;
}

/** Share ELF image backed address space area.
 *
 * If the area is writable, then all mapped pages are duplicated in the pagemap.
 * Otherwise only portions of the area that are not backed by the ELF image
 * are put into the pagemap.
 *
 * @param area Address space area.
 */
void elf_share(as_area_t *area)
{
	elf_segment_header_t *entry = area->backend_data.segment;
	link_t *cur;
	btree_node_t *leaf, *node;
	uintptr_t start_anon = entry->p_vaddr + entry->p_filesz;

	assert(mutex_locked(&area->as->lock));
	assert(mutex_locked(&area->lock));

	/*
	 * Find the node in which to start linear search.
	 */
	if (area->flags & AS_AREA_WRITE) {
		node = list_get_instance(list_first(&area->used_space.leaf_list),
		    btree_node_t, leaf_link);
	} else {
		(void) btree_search(&area->sh_info->pagemap, start_anon, &leaf);
		node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap,
		    leaf);
		if (!node)
			node = leaf;
	}

	/*
	 * Copy used anonymous portions of the area to sh_info's page map.
	 */
	mutex_lock(&area->sh_info->lock);
	for (cur = &node->leaf_link; cur != &area->used_space.leaf_list.head;
	    cur = cur->next) {
		unsigned int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);

		for (i = 0; i < node->keys; i++) {
			uintptr_t base = node->key[i];
			size_t count = (size_t) node->value[i];
			unsigned int j;

			/*
			 * Skip read-only areas of used space that are backed
			 * by the ELF image.
			 */
			if (!(area->flags & AS_AREA_WRITE))
				if (base >= entry->p_vaddr &&
				    base + P2SZ(count) <= start_anon)
					continue;

			for (j = 0; j < count; j++) {
				pte_t pte;
				bool found;

				/*
				 * Skip read-only pages that are backed by the
				 * ELF image.
				 */
				if (!(area->flags & AS_AREA_WRITE))
					if (base >= entry->p_vaddr &&
					    base + P2SZ(j + 1) <= start_anon)
						continue;

				page_table_lock(area->as, false);
				found = page_mapping_find(area->as,
				    base + P2SZ(j), false, &pte);

				assert(found);
				assert(PTE_VALID(&pte));
				assert(PTE_PRESENT(&pte));

				btree_insert(&area->sh_info->pagemap,
				    (base + P2SZ(j)) - area->base,
				    (void *) PTE_GET_FRAME(&pte), NULL);
				page_table_unlock(area->as, false);

				pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(&pte));
				frame_reference_add(pfn);
			}
		}
	}
	mutex_unlock(&area->sh_info->lock);
}

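/** Release the physical memory reservation held for the anonymous part of
 * the area.
 *
 * @param area Address space area being destroyed.
 */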
void elf_destroy(as_area_t *area)
{
	size_t nonanon_pages = elf_nonanon_pages_get(area);

	if (area->pages > nonanon_pages)
		reserve_free(area->pages - nonanon_pages);
}

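/** ELF image backed areas can always be resized. */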
bool elf_is_resizable(as_area_t *area)
{
	return true;
}

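/** ELF image backed areas can always be shared. */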
bool elf_is_shareable(as_area_t *area)
{
	return true;
}

/** Service a page fault in the ELF backend address space area.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area   Pointer to the address space area.
 * @param upage  Faulting virtual page.
 * @param access Access mode that caused the fault (i.e.
 *               read/write/exec).
 *
 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
 *         on success (i.e. serviced).
 */
int elf_page_fault(as_area_t *area, uintptr_t upage, pf_access_t access)
{
	elf_header_t *elf = area->backend_data.elf;
	elf_segment_header_t *entry = area->backend_data.segment;
	btree_node_t *leaf;
	uintptr_t base;
	uintptr_t frame;
	uintptr_t kpage;
	uintptr_t start_anon;
	size_t i;
	bool dirty = false;

	assert(page_table_locked(AS));
	assert(mutex_locked(&area->lock));
	assert(IS_ALIGNED(upage, PAGE_SIZE));

	if (!as_area_check_access(area, access))
		return AS_PF_FAULT;

	if (upage < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
		return AS_PF_FAULT;

	if (upage >= entry->p_vaddr + entry->p_memsz)
		return AS_PF_FAULT;

	i = (upage - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
	base = (uintptr_t)
	    (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));

	/* Virtual address of the end of the initialized part of the segment */
	start_anon = entry->p_vaddr + entry->p_filesz;

	mutex_lock(&area->sh_info->lock);
	if (area->sh_info->shared) {
		bool found = false;

		/*
		 * The address space area is shared.
		 */

		frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
		    upage - area->base, &leaf);
		if (!frame) {
			unsigned int i;

			/*
			 * Workaround for valid NULL address. A NULL result can
			 * mean either that the key is not present or that the
			 * stored frame address is 0, so check the leaf keys
			 * explicitly.
			 */

			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == upage - area->base) {
					found = true;
					break;
				}
			}
		}
		if (frame || found) {
			frame_reference_add(ADDR2PFN(frame));
			page_mapping_insert(AS, upage, frame,
			    as_area_get_flags(area));
			if (!used_space_insert(area, upage, 1))
				panic("Cannot insert used space.");
			mutex_unlock(&area->sh_info->lock);
			return AS_PF_OK;
		}
	}

	/*
	 * The area is either not shared or the pagemap does not contain the
	 * mapping.
	 */
	if (upage >= entry->p_vaddr && upage + PAGE_SIZE <= start_anon) {
		/*
		 * Initialized portion of the segment. The memory is backed
		 * directly by the content of the ELF image. Pages are
		 * only copied if the segment is writable so that there
		 * can be multiple instantiations of the same ELF image
		 * in memory at a time. Note that this could be later done
		 * as COW.
		 */
		if (entry->p_flags & PF_W) {
			kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);
			memcpy((void *) kpage, (void *) (base + i * PAGE_SIZE),
			    PAGE_SIZE);
			if (entry->p_flags & PF_X) {
				smc_coherence_block((void *) kpage, PAGE_SIZE);
			}
			km_temporary_page_put(kpage);
			dirty = true;
		} else {
			pte_t pte;
			bool found;

			found = page_mapping_find(AS_KERNEL,
			    base + i * FRAME_SIZE, true, &pte);

			assert(found);
			assert(PTE_PRESENT(&pte));

			frame = PTE_GET_FRAME(&pte);
		}
	} else if (upage >= start_anon) {
		/*
		 * This is the uninitialized portion of the segment.
		 * It is not physically present in the ELF image.
		 * To resolve the situation, a frame must be allocated
		 * and cleared.
		 */
		kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);
		memsetb((void *) kpage, PAGE_SIZE, 0);
		km_temporary_page_put(kpage);
		dirty = true;
	} else {
		size_t pad_lo, pad_hi;
		/*
		 * The mixed case.
		 *
		 * The middle part is backed by the ELF image and
		 * the lower and upper parts are anonymous memory.
		 * (The segment can be and often is shorter than 1 page).
		 */
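		/*
		 * Worked example (hypothetical numbers, 4 KiB pages):
		 * if upage is 0x10000, p_vaddr is 0x10200 and start_anon
		 * is 0x10e00, then pad_lo = 0x200 and pad_hi = 0x200, so
		 * only the middle 0xc00 bytes are copied from the image
		 * and the rest of the page is zeroed.
		 */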
		if (upage < entry->p_vaddr)
			pad_lo = entry->p_vaddr - upage;
		else
			pad_lo = 0;

		if (start_anon < upage + PAGE_SIZE)
			pad_hi = upage + PAGE_SIZE - start_anon;
		else
			pad_hi = 0;

		kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);
		memcpy((void *) (kpage + pad_lo),
		    (void *) (base + i * PAGE_SIZE + pad_lo),
		    PAGE_SIZE - pad_lo - pad_hi);
		if (entry->p_flags & PF_X) {
			smc_coherence_block((void *) (kpage + pad_lo),
			    PAGE_SIZE - pad_lo - pad_hi);
		}
		memsetb((void *) kpage, pad_lo, 0);
		memsetb((void *) (kpage + PAGE_SIZE - pad_hi), pad_hi, 0);
		km_temporary_page_put(kpage);
		dirty = true;
	}

	if (dirty && area->sh_info->shared) {
		frame_reference_add(ADDR2PFN(frame));
		btree_insert(&area->sh_info->pagemap, upage - area->base,
		    (void *) frame, leaf);
	}

	mutex_unlock(&area->sh_info->lock);

	page_mapping_insert(AS, upage, frame, as_area_get_flags(area));
	if (!used_space_insert(area, upage, 1))
		panic("Cannot insert used space.");

	return AS_PF_OK;
}

/** Free a frame that is backed by the ELF backend.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area  Pointer to the address space area.
 * @param page  Page that is mapped to frame. Must be aligned to
 *              PAGE_SIZE.
 * @param frame Frame to be released.
 *
 */
void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
{
	elf_segment_header_t *entry = area->backend_data.segment;
	uintptr_t start_anon;

	assert(page_table_locked(area->as));
	assert(mutex_locked(&area->lock));

	assert(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
	assert(page < entry->p_vaddr + entry->p_memsz);

	start_anon = entry->p_vaddr + entry->p_filesz;

	if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
		if (entry->p_flags & PF_W) {
			/*
			 * Free the frame with the copy of writable segment
			 * data.
			 */
			frame_free_noreserve(frame, 1);
		}
	} else {
		/*
		 * The frame is either anonymous memory or the mixed case (i.e.
		 * lower part is backed by the ELF image and the upper is
		 * anonymous). In any case, a frame needs to be freed.
		 */
		frame_free_noreserve(frame, 1);
	}
}

/** @}
 */