source: mainline/kernel/generic/src/mm/backend_elf.c@ 6b9e85b

Last changed in 6b9e85b by Jakub Jermar <jakub@…>, 14 years ago:

Remove sparc64's cache.h and all references to it.

/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Backend for address space areas backed by an ELF image.
 */

#include <lib/elf.h>
#include <debug.h>
#include <typedefs.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/page.h>
#include <mm/reserve.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <align.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <arch/barrier.h>

static bool elf_create(as_area_t *);
static bool elf_resize(as_area_t *, size_t);
static void elf_share(as_area_t *);
static void elf_destroy(as_area_t *);

static int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
static void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);

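/** Memory backend operations for ELF-backed address space areas. */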
mem_backend_t elf_backend = {
    .create = elf_create,
    .resize = elf_resize,
    .share = elf_share,
    .destroy = elf_destroy,

    .page_fault = elf_page_fault,
    .frame_free = elf_frame_free,
};

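/** Create the memory reservation for an ELF-backed address space area.
 *
 * @param area Address space area.
 *
 * @return True on success, false on reservation failure.
 */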
bool elf_create(as_area_t *area)
{
    /**
     * @todo:
     * Reserve only how much is necessary for anonymous pages plus the
     * supporting structures allocated during the page fault.
     */
    return reserve_try_alloc(area->pages);
}

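/** Resize an ELF-backed address space area.
 *
 * Reserves additional memory when the area grows and returns the
 * difference to the reserve when it shrinks.
 *
 * @param area      Address space area.
 * @param new_pages New size of the area in pages.
 *
 * @return True on success, false on reservation failure.
 */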
bool elf_resize(as_area_t *area, size_t new_pages)
{
    if (new_pages > area->pages)
        return reserve_try_alloc(new_pages - area->pages);
    else if (new_pages < area->pages)
        reserve_free(area->pages - new_pages);

    return true;
}

/** Share ELF image backed address space area.
 *
 * If the area is writable, then all mapped pages are duplicated in the pagemap.
 * Otherwise only portions of the area that are not backed by the ELF image
 * are put into the pagemap.
 *
 * @param area Address space area.
 */
void elf_share(as_area_t *area)
{
    elf_segment_header_t *entry = area->backend_data.segment;
    link_t *cur;
    btree_node_t *leaf, *node;
    uintptr_t start_anon = entry->p_vaddr + entry->p_filesz;

    ASSERT(mutex_locked(&area->as->lock));
    ASSERT(mutex_locked(&area->lock));

    /*
     * Find the node in which to start linear search.
     */
    if (area->flags & AS_AREA_WRITE) {
        node = list_get_instance(area->used_space.leaf_head.next,
            btree_node_t, leaf_link);
    } else {
        (void) btree_search(&area->sh_info->pagemap, start_anon, &leaf);
        node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap,
            leaf);
        if (!node)
            node = leaf;
    }

    /*
     * Copy used anonymous portions of the area to sh_info's page map.
     */
    mutex_lock(&area->sh_info->lock);
    for (cur = &node->leaf_link; cur != &area->used_space.leaf_head;
        cur = cur->next) {
        unsigned int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);

        for (i = 0; i < node->keys; i++) {
            uintptr_t base = node->key[i];
            size_t count = (size_t) node->value[i];
            unsigned int j;

            /*
             * Skip read-only areas of used space that are backed
             * by the ELF image.
             */
            if (!(area->flags & AS_AREA_WRITE))
                if (base >= entry->p_vaddr &&
                    base + count * PAGE_SIZE <= start_anon)
                    continue;

            for (j = 0; j < count; j++) {
                pte_t *pte;

                /*
                 * Skip read-only pages that are backed by the
                 * ELF image.
                 */
                if (!(area->flags & AS_AREA_WRITE))
                    if (base >= entry->p_vaddr &&
                        base + (j + 1) * PAGE_SIZE <=
                        start_anon)
                        continue;

                page_table_lock(area->as, false);
                pte = page_mapping_find(area->as,
                    base + j * PAGE_SIZE);
                ASSERT(pte && PTE_VALID(pte) &&
                    PTE_PRESENT(pte));
                btree_insert(&area->sh_info->pagemap,
                    (base + j * PAGE_SIZE) - area->base,
                    (void *) PTE_GET_FRAME(pte), NULL);
                page_table_unlock(area->as, false);

                pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
                frame_reference_add(pfn);
            }
        }
    }
    mutex_unlock(&area->sh_info->lock);
}

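/** Destroy an ELF-backed address space area.
 *
 * Returns the area's memory reservation back to the system.
 *
 * @param area Address space area.
 */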
void elf_destroy(as_area_t *area)
{
    /**
     * @todo:
     * Unreserve only how much was really reserved.
     */
    reserve_free(area->pages);
}

/** Service a page fault in the ELF backend address space area.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area   Pointer to the address space area.
 * @param addr   Faulting virtual address.
 * @param access Access mode that caused the fault (i.e.
 *               read/write/exec).
 *
 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
 *         on success (i.e. serviced).
 */
int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
    elf_header_t *elf = area->backend_data.elf;
    elf_segment_header_t *entry = area->backend_data.segment;
    btree_node_t *leaf;
    uintptr_t base, frame, page, start_anon;
    size_t i;
    bool dirty = false;

    ASSERT(page_table_locked(AS));
    ASSERT(mutex_locked(&area->lock));

    if (!as_area_check_access(area, access))
        return AS_PF_FAULT;

    ASSERT((addr >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) &&
        (addr < entry->p_vaddr + entry->p_memsz));
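    /*
     * Index of the faulting page within the segment and the kernel
     * address of the page-aligned start of the segment's image.
     */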
    i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
    base = (uintptr_t)
        (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));

    /* Virtual address of the faulting page. */
    page = ALIGN_DOWN(addr, PAGE_SIZE);

    /* Virtual address of the end of the initialized part of the segment. */
    start_anon = entry->p_vaddr + entry->p_filesz;

    if (area->sh_info) {
        bool found = false;

        /*
         * The address space area is shared.
         */

        mutex_lock(&area->sh_info->lock);
        frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
            page - area->base, &leaf);
        if (!frame) {
            unsigned int i;

            /*
             * Workaround for valid NULL address.
             */

            for (i = 0; i < leaf->keys; i++) {
                if (leaf->key[i] == page - area->base) {
                    found = true;
                    break;
                }
            }
        }
        if (frame || found) {
            frame_reference_add(ADDR2PFN(frame));
            page_mapping_insert(AS, addr, frame,
                as_area_get_flags(area));
            if (!used_space_insert(area, page, 1))
                panic("Cannot insert used space.");
            mutex_unlock(&area->sh_info->lock);
            return AS_PF_OK;
        }
    }

    /*
     * The area is either not shared or the pagemap does not contain the
     * mapping.
     */
    if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
        /*
         * Initialized portion of the segment. The memory is backed
         * directly by the content of the ELF image. Pages are
         * only copied if the segment is writable so that there
         * can be more instantiations of the same ELF image in
         * memory at a time. Note that this could be later done
         * as COW.
         */
        if (entry->p_flags & PF_W) {
            frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
            memcpy((void *) PA2KA(frame),
                (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
            if (entry->p_flags & PF_X) {
                smc_coherence_block((void *) PA2KA(frame),
                    FRAME_SIZE);
            }
            dirty = true;
        } else {
            frame = KA2PA(base + i * FRAME_SIZE);
        }
    } else if (page >= start_anon) {
        /*
         * This is the uninitialized portion of the segment.
         * It is not physically present in the ELF image.
         * To resolve the situation, a frame must be allocated
         * and cleared.
         */
        frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
        memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
        dirty = true;
    } else {
        size_t pad_lo, pad_hi;
        /*
         * The mixed case.
         *
         * The middle part is backed by the ELF image and
         * the lower and upper parts are anonymous memory.
         * (The segment can be and often is shorter than 1 page).
         */
        if (page < entry->p_vaddr)
            pad_lo = entry->p_vaddr - page;
        else
            pad_lo = 0;

        if (start_anon < page + PAGE_SIZE)
            pad_hi = page + PAGE_SIZE - start_anon;
        else
            pad_hi = 0;

        frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
        memcpy((void *) (PA2KA(frame) + pad_lo),
            (void *) (base + i * FRAME_SIZE + pad_lo),
            FRAME_SIZE - pad_lo - pad_hi);
        if (entry->p_flags & PF_X) {
            smc_coherence_block((void *) (PA2KA(frame) + pad_lo),
                FRAME_SIZE - pad_lo - pad_hi);
        }
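        /* Zero out the anonymous padding below and above the image data. */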
        memsetb((void *) PA2KA(frame), pad_lo, 0);
        memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
            0);
        dirty = true;
    }

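    /*
     * Record the newly instantiated private frame in the pagemap so that
     * other tasks sharing this area can find and reuse it.
     */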
    if (dirty && area->sh_info) {
        frame_reference_add(ADDR2PFN(frame));
        btree_insert(&area->sh_info->pagemap, page - area->base,
            (void *) frame, leaf);
    }

    if (area->sh_info)
        mutex_unlock(&area->sh_info->lock);

    page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
    if (!used_space_insert(area, page, 1))
        panic("Cannot insert used space.");

    return AS_PF_OK;
}

/** Free a frame that is backed by the ELF backend.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area  Pointer to the address space area.
 * @param page  Page that is mapped to frame. Must be aligned to
 *              PAGE_SIZE.
 * @param frame Frame to be released.
 */
void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
{
    elf_segment_header_t *entry = area->backend_data.segment;
    uintptr_t start_anon;

    ASSERT(page_table_locked(area->as));
    ASSERT(mutex_locked(&area->lock));

    ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
    ASSERT(page < entry->p_vaddr + entry->p_memsz);

    start_anon = entry->p_vaddr + entry->p_filesz;

    if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
        if (entry->p_flags & PF_W) {
            /*
             * Free the frame with the copy of writable segment
             * data.
             */
            frame_free_noreserve(frame);
        }
    } else {
        /*
         * The frame is either anonymous memory or the mixed case (i.e.
         * lower part is backed by the ELF image and the upper is
         * anonymous). In any case, a frame needs to be freed.
         */
        frame_free_noreserve(frame);
    }
}

/** @}
 */