source: mainline/kernel/generic/src/mm/backend_elf.c@ 6b781c0

Last change on this file since 6b781c0 was 2057572, checked in by Jakub Jermar <jakub@…>, 18 years ago

The Ultimate Solution To Illegal Virtual Aliases.
It is better to avoid them completely than to fight them.
Switch the sparc64 port to 16K pages. The TLBs and TSBs
continue to operate with 8K pages only. Page tables and
other generic parts operate with 16K pages.

Because the MMU doesn't support 16K directly, each 16K
page is emulated by a pair of 8K pages. With 16K pages,
illegal aliases cannot be created in 16K D-cache.
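
The fix can be pictured with a minimal sketch (hypothetical helper names, not code from this changeset). A 16K virtually-indexed D-cache can only hold an illegal alias if two virtual mappings of the same frame differ in the index bits above the page offset; with 16K pages the index bits are entirely page-offset bits, so no alias is possible. The generic layers then only need the low-level handler to install a pair of 8K translations per 16K page:

	/* Hypothetical sketch: back one 16K page with a pair of 8K TLB entries. */
	#define SUBPAGE_SIZE	(8 * 1024)

	static void dtlb_map_16k_page(uintptr_t page, uintptr_t frame, bool cacheable)
	{
		unsigned int i;

		/* Each half of the 16K page gets its own 8K entry. */
		for (i = 0; i < 2; i++)
			dtlb_insert_8k(page + i * SUBPAGE_SIZE,
			    frame + i * SUBPAGE_SIZE, cacheable);
	}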

/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Backend for address space areas backed by an ELF image.
 */

#include <lib/elf.h>
#include <debug.h>
#include <arch/types.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <align.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>

#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif

static int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
static void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
static void elf_share(as_area_t *area);

mem_backend_t elf_backend = {
	.page_fault = elf_page_fault,
	.frame_free = elf_frame_free,
	.share = elf_share
};

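/*
 * Illustrative sketch only (not code from this file): the loader creates an
 * ELF-backed address space area by passing elf_backend together with the
 * image and segment headers to as_area_create(). The flags value would be
 * derived from entry->p_flags; that translation is elided here.
 *
 *	mem_backend_data_t backend_data;
 *
 *	backend_data.elf = elf;
 *	backend_data.segment = entry;
 *	(void) as_area_create(AS, flags, entry->p_memsz, entry->p_vaddr,
 *	    AS_AREA_ATTR_NONE, &elf_backend, &backend_data);
 *
 * Page faults in such an area are then dispatched by the address space code
 * to elf_page_fault() below.
 */
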
/** Service a page fault in the ELF backend address space area.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area Pointer to the address space area.
 * @param addr Faulting virtual address.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 *
 * @return AS_PF_FAULT on failure (i.e. the fault was not serviced) or
 *     AS_PF_OK on success (i.e. serviced).
 */
int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
	elf_header_t *elf = area->backend_data.elf;
	elf_segment_header_t *entry = area->backend_data.segment;
	btree_node_t *leaf;
	uintptr_t base, frame;
	index_t i;
	bool dirty = false;

	if (!as_area_check_access(area, access))
		return AS_PF_FAULT;

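	/*
	 * Compute the index of the faulting page within the segment and the
	 * kernel virtual address at which the segment's data start inside
	 * the ELF image.
	 */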
	ASSERT((addr >= entry->p_vaddr) &&
	    (addr < entry->p_vaddr + entry->p_memsz));
	i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
	base = (uintptr_t) (((void *) elf) + entry->p_offset);
	ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);

	if (area->sh_info) {
		bool found = false;

		/*
		 * The address space area is shared.
		 */

		mutex_lock(&area->sh_info->lock);
		frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
		    ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
		if (!frame) {
			int i;

			/*
			 * Workaround for valid NULL address. The pagemap
			 * cannot distinguish a missing mapping from a
			 * mapping to frame zero, so scan the leaf keys to
			 * tell the two cases apart.
			 */

			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] ==
				    ALIGN_DOWN(addr, PAGE_SIZE) - area->base) {
					found = true;
					break;
				}
			}
		}
		if (frame || found) {
			frame_reference_add(ADDR2PFN(frame));
			page_mapping_insert(AS, addr, frame,
			    as_area_get_flags(area));
			if (!used_space_insert(area,
			    ALIGN_DOWN(addr, PAGE_SIZE), 1))
				panic("Could not insert used space.\n");
			mutex_unlock(&area->sh_info->lock);
			return AS_PF_OK;
		}
	}

	/*
	 * The area is either not shared or the pagemap does not contain the
	 * mapping.
	 */

	if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE <
	    entry->p_vaddr + entry->p_filesz) {
		/*
		 * Initialized portion of the segment. The memory is backed
		 * directly by the content of the ELF image. Pages are
		 * only copied if the segment is writable so that several
		 * instantiations of the same ELF image can be in use
		 * at a time. Note that this could later be done as COW.
		 */
		if (entry->p_flags & PF_W) {
			frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
			memcpy((void *) PA2KA(frame),
			    (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
			dirty = true;

			if (area->sh_info) {
				frame_reference_add(ADDR2PFN(frame));
				btree_insert(&area->sh_info->pagemap,
				    ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
				    (void *) frame, leaf);
			}

		} else {
			frame = KA2PA(base + i * FRAME_SIZE);
		}
	} else if (ALIGN_DOWN(addr, PAGE_SIZE) >=
	    ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
		/*
		 * This is the uninitialized portion of the segment.
		 * It is not physically present in the ELF image.
		 * To resolve the situation, a frame must be allocated
		 * and cleared.
		 */
		frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
		memsetb(PA2KA(frame), FRAME_SIZE, 0);
		dirty = true;

		if (area->sh_info) {
			frame_reference_add(ADDR2PFN(frame));
			btree_insert(&area->sh_info->pagemap,
			    ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
			    (void *) frame, leaf);
		}

	} else {
		size_t size;

		/*
		 * The mixed case.
		 * The lower part is backed by the ELF image and
		 * the upper part is anonymous memory.
		 */
		size = entry->p_filesz - (i << PAGE_WIDTH);
		frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
		memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
		memcpy((void *) PA2KA(frame), (void *) (base + i * FRAME_SIZE),
		    size);
		dirty = true;

		if (area->sh_info) {
			frame_reference_add(ADDR2PFN(frame));
			btree_insert(&area->sh_info->pagemap,
			    ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
			    (void *) frame, leaf);
		}

	}

	if (area->sh_info)
		mutex_unlock(&area->sh_info->lock);

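	/*
	 * Finally, map the page to the resolved frame and account for the
	 * newly used page.
	 */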
	page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
	if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
		panic("Could not insert used space.\n");

	return AS_PF_OK;
}

/** Free a frame that is backed by the ELF backend.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area Pointer to the address space area.
 * @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE.
 * @param frame Frame to be released.
 */
void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
{
	elf_header_t *elf = area->backend_data.elf;
	elf_segment_header_t *entry = area->backend_data.segment;
	uintptr_t base;
	index_t i;

	ASSERT((page >= entry->p_vaddr) &&
	    (page < entry->p_vaddr + entry->p_memsz));
	i = (page - entry->p_vaddr) >> PAGE_WIDTH;
	base = (uintptr_t) (((void *) elf) + entry->p_offset);
	ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);

	if (page + PAGE_SIZE <
	    ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
		if (entry->p_flags & PF_W) {
			/*
			 * Free the frame with the copy of writable segment
			 * data.
			 */
			frame_free(frame);
		}
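		/*
		 * If the segment is not writable, the frame is backed
		 * directly by the ELF image and must not be freed here.
		 */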
	} else {
		/*
		 * The frame is either anonymous memory or the mixed case (i.e.
		 * lower part is backed by the ELF image and the upper is
		 * anonymous). In any case, a frame needs to be freed.
		 */
		frame_free(frame);
	}
}

/** Share ELF image backed address space area.
 *
 * If the area is writable, then all mapped pages are duplicated in the
 * pagemap. Otherwise only portions of the area that are not backed by the
 * ELF image are put into the pagemap.
 *
 * The address space and address space area must be locked prior to the call.
 *
 * @param area Address space area.
 */
void elf_share(as_area_t *area)
{
	elf_segment_header_t *entry = area->backend_data.segment;
	link_t *cur;
	btree_node_t *leaf, *node;
	uintptr_t start_anon = entry->p_vaddr + entry->p_filesz;

	/*
	 * Find the node in which to start the linear search. For writable
	 * areas, every used page needs to be examined, so start at the
	 * first leaf of used_space. For read-only areas, only pages at or
	 * after start_anon are interesting, so start at the used_space leaf
	 * covering start_anon.
	 */
	if (area->flags & AS_AREA_WRITE) {
		node = list_get_instance(area->used_space.leaf_head.next,
		    btree_node_t, leaf_link);
	} else {
		(void) btree_search(&area->used_space, start_anon, &leaf);
		node = btree_leaf_node_left_neighbour(&area->used_space,
		    leaf);
		if (!node)
			node = leaf;
	}

	/*
	 * Copy used anonymous portions of the area to sh_info's page map.
	 */
	mutex_lock(&area->sh_info->lock);
	for (cur = &node->leaf_link; cur != &area->used_space.leaf_head;
	    cur = cur->next) {
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);

		for (i = 0; i < node->keys; i++) {
			uintptr_t base = node->key[i];
			count_t count = (count_t) node->value[i];
			int j;

			/*
			 * Skip read-only areas of used space that are backed
			 * by the ELF image.
			 */
			if (!(area->flags & AS_AREA_WRITE))
				if (base + count * PAGE_SIZE <= start_anon)
					continue;

			for (j = 0; j < count; j++) {
				pte_t *pte;

				/*
				 * Skip read-only pages that are backed by the
				 * ELF image.
				 */
				if (!(area->flags & AS_AREA_WRITE))
					if (base + (j + 1) * PAGE_SIZE <=
					    start_anon)
						continue;

				page_table_lock(area->as, false);
				pte = page_mapping_find(area->as,
				    base + j * PAGE_SIZE);
				ASSERT(pte && PTE_VALID(pte) &&
				    PTE_PRESENT(pte));
				btree_insert(&area->sh_info->pagemap,
				    (base + j * PAGE_SIZE) - area->base,
				    (void *) PTE_GET_FRAME(pte), NULL);

				/*
				 * Read the frame number while the page tables
				 * are still locked; the PTE must not be
				 * accessed after the unlock.
				 */
				pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
				page_table_unlock(area->as, false);

				frame_reference_add(pfn);
			}

		}
	}
	mutex_unlock(&area->sh_info->lock);
}

/** @}
 */