Changeset 8182031 in mainline for generic/src/lib/elf.c
- Timestamp:
- 2006-05-23T23:09:13Z (19 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 82da5f5
- Parents:
- 56789125
- Files:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
generic/src/lib/elf.c
r56789125 r8182031 42 42 #include <memstr.h> 43 43 #include <macros.h> 44 #include <arch.h> 44 45 45 46 static char *error_codes[] = { … … 55 56 static int section_header(elf_section_header_t *entry, elf_header_t *elf, as_t *as); 56 57 static int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as); 58 59 static int elf_page_fault(as_area_t *area, __address addr); 60 static void elf_frame_free(as_area_t *area, __address page, __address frame); 61 62 mem_backend_t elf_backend = { 63 .backend_page_fault = elf_page_fault, 64 .backend_frame_free = elf_frame_free 65 }; 57 66 58 67 /** ELF loader … … 160 169 { 161 170 as_area_t *a; 162 int i, flags = 0; 163 size_t segment_size; 164 __u8 *segment; 171 int flags = 0; 172 void *backend_data[2] = { elf, entry }; 165 173 166 174 if (entry->p_align > 1) { … … 183 191 return EE_UNSUPPORTED; 184 192 185 segment_size = ALIGN_UP(max(entry->p_filesz, entry->p_memsz), PAGE_SIZE); 186 if ((entry->p_flags & PF_W)) { 187 /* If writable, copy data (should be COW in the future) */ 188 segment = malloc(segment_size, 0); 189 memsetb((__address) (segment + entry->p_filesz), segment_size - entry->p_filesz, 0); 190 memcpy(segment, (void *) (((__address) elf) + entry->p_offset), entry->p_filesz); 191 } else /* Map identically original data */ 192 segment = ((void *) elf) + entry->p_offset; 193 194 a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE); 193 a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE, &elf_backend, backend_data); 195 194 if (!a) 196 195 return EE_MEMORY; 197 196 198 for (i = 0; i < SIZE2FRAMES(entry->p_filesz); i++) {199 as_set_mapping(as, entry->p_vaddr + i*PAGE_SIZE, KA2PA(((__address) segment) + i*PAGE_SIZE));200 }201 197 /* 198 * The segment will be mapped on demand by elf_page_fault(). 199 */ 200 202 201 return EE_OK; 203 202 } … … 220 219 return EE_OK; 221 220 } 221 222 /** Service a page fault in the ELF backend address space area. 
223 * 224 * The address space area and page tables must be already locked. 225 * 226 * @param area Pointer to the address space area. 227 * @param addr Faulting virtual address. 228 * 229 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced). 230 */ 231 int elf_page_fault(as_area_t *area, __address addr) 232 { 233 elf_header_t *elf = (elf_header_t *) area->backend_data[0]; 234 elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1]; 235 __address base, frame; 236 index_t i; 237 238 ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz)); 239 i = (addr - entry->p_vaddr) >> PAGE_WIDTH; 240 base = (__address) (((void *) elf) + entry->p_offset); 241 ASSERT(ALIGN_UP(base, FRAME_SIZE) == base); 242 243 if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) { 244 /* 245 * Initialized portion of the segment. The memory is backed 246 * directly by the content of the ELF image. Pages are 247 * only copied if the segment is writable so that there 248 * can be more instantions of the same memory ELF image 249 * used at a time. Note that this could be later done 250 * as COW. 251 */ 252 if (entry->p_flags & PF_W) { 253 frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0)); 254 memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE); 255 } else { 256 frame = KA2PA(base + i*FRAME_SIZE); 257 } 258 } else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) { 259 /* 260 * This is the uninitialized portion of the segment. 261 * It is not physically present in the ELF image. 262 * To resolve the situation, a frame must be allocated 263 * and cleared. 264 */ 265 frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0)); 266 memsetb(PA2KA(frame), FRAME_SIZE, 0); 267 } else { 268 size_t size; 269 /* 270 * The mixed case. 271 * The lower part is backed by the ELF image and 272 * the upper part is anonymous memory. 
273 */ 274 size = entry->p_filesz - (i<<PAGE_WIDTH); 275 frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0)); 276 memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0); 277 memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size); 278 } 279 280 page_mapping_insert(AS, addr, frame, as_area_get_flags(area)); 281 if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1)) 282 panic("Could not insert used space.\n"); 283 284 return AS_PF_OK; 285 } 286 287 /** Free a frame that is backed by the ELF backend. 288 * 289 * The address space area and page tables must be already locked. 290 * 291 * @param area Pointer to the address space area. 292 * @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE. 293 * @param frame Frame to be released. 294 * 295 */ 296 void elf_frame_free(as_area_t *area, __address page, __address frame) 297 { 298 elf_header_t *elf = (elf_header_t *) area->backend_data[0]; 299 elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1]; 300 __address base; 301 index_t i; 302 303 ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz)); 304 i = (page - entry->p_vaddr) >> PAGE_WIDTH; 305 base = (__address) (((void *) elf) + entry->p_offset); 306 ASSERT(ALIGN_UP(base, FRAME_SIZE) == base); 307 308 if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) { 309 if (entry->p_flags & PF_W) { 310 /* 311 * Free the frame with the copy of writable segment data. 312 */ 313 frame_free(ADDR2PFN(frame)); 314 } 315 } else { 316 /* 317 * The frame is either anonymous memory or the mixed case (i.e. lower 318 * part is backed by the ELF image and the upper is anonymous). 319 * In any case, a frame needs to be freed. 320 */ 321 frame_free(ADDR2PFN(frame)); 322 } 323 }
Note:
See TracChangeset
for help on using the changeset viewer.