Changeset 0ee077ee in mainline for generic/src/lib/elf.c


Ignore:
Timestamp:
2006-05-27T17:50:30Z (19 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
127c957b
Parents:
fb84455
Message:

Move the sharing functionality to address space area backends.
Add backend for contiguous regions of physical memory.
Sharing for these areas works automagically now.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • generic/src/lib/elf.c

    rfb84455 r0ee077ee  
    11/*
    22 * Copyright (C) 2006 Sergey Bondari
     3 * Copyright (C) 2006 Jakub Jermar
    34 * All rights reserved.
    45 *
     
    5758static int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as);
    5859
    59 static int elf_page_fault(as_area_t *area, __address addr, pf_access_t access);
    60 static void elf_frame_free(as_area_t *area, __address page, __address frame);
    61 
/* Address space area backend for ELF-image-backed memory: pages are
 * materialized lazily on fault and released via the frame-free hook. */
mem_backend_t elf_backend = {
	.backend_page_fault = elf_page_fault,
	.backend_frame_free = elf_frame_free
};
    66 
    6760/** ELF loader
    6861 *
     
    170163        as_area_t *a;
    171164        int flags = 0;
    172         void *backend_data[2] = { elf, entry };
     165        mem_backend_data_t backend_data = { .d1 = (__native) elf, .d2 = (__native) entry };
    173166
    174167        if (entry->p_align > 1) {
     
    184177        if (entry->p_flags & PF_R)
    185178                flags |= AS_AREA_READ;
     179        flags |= AS_AREA_CACHEABLE;
    186180
    187181        /*
     
    191185                return EE_UNSUPPORTED;
    192186
    193         a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE, &elf_backend, backend_data);
     187        a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE,
     188                &elf_backend, &backend_data);
    194189        if (!a)
    195190                return EE_MEMORY;
     
    219214        return EE_OK;
    220215}
    221 
/** Service a page fault in the ELF backend address space area.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area Pointer to the address space area.
 * @param addr Faulting virtual address.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 *
 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
 */
int elf_page_fault(as_area_t *area, __address addr, pf_access_t access)
{
	/* The ELF header and the segment header describing this area were
	 * stashed in the area's backend data at as_area_create() time. */
	elf_header_t *elf = (elf_header_t *) area->backend_data[0];
	elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
	__address base, frame;
	index_t i;

	if (!as_area_check_access(area, access))
		return AS_PF_FAULT;

	ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
	/* Page index of the faulting address within the segment. */
	i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
	/* Kernel virtual address of the segment's data inside the ELF image. */
	base = (__address) (((void *) elf) + entry->p_offset);
	/* Segment data is expected to be frame-aligned within the image. */
	ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
	
	if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) {
		/*
		 * Initialized portion of the segment. The memory is backed
		 * directly by the content of the ELF image. Pages are
		 * only copied if the segment is writable so that there
		 * can be more instantiations of the same memory ELF image
		 * used at a time. Note that this could be later done
		 * as COW.
		 */
		if (entry->p_flags & PF_W) {
			/* Writable: give the task a private copy of the image page. */
			frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
			memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);
		} else {
			/* Read-only: map the image frame itself, no copy needed. */
			frame = KA2PA(base + i*FRAME_SIZE);
		}	
	} else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
		/*
		 * This is the uninitialized portion of the segment.
		 * It is not physically present in the ELF image.
		 * To resolve the situation, a frame must be allocated
		 * and cleared.
		 */
		frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
		memsetb(PA2KA(frame), FRAME_SIZE, 0);
	} else {
		size_t size;
		/*
		 * The mixed case.
		 * The lower part is backed by the ELF image and
		 * the upper part is anonymous memory.
		 */
		/* Bytes of this page still covered by the file (p_filesz). */
		size = entry->p_filesz - (i<<PAGE_WIDTH);
		frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
		/* Zero the anonymous tail first, then copy the file-backed head. */
		memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
		memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);
	}
	
	/* Install the mapping and account the page as used space in the area. */
	page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
	if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
		panic("Could not insert used space.\n");

	return AS_PF_OK;
}
    290 
/** Free a frame that is backed by the ELF backend.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area Pointer to the address space area.
 * @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE.
 * @param frame Frame to be released.
 *
 */
void elf_frame_free(as_area_t *area, __address page, __address frame)
{
	/* Recover the ELF header and segment header from the backend data. */
	elf_header_t *elf = (elf_header_t *) area->backend_data[0];
	elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
	__address base;
	index_t i;
	
	ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
	/* Page index of the mapping within the segment. */
	i = (page - entry->p_vaddr) >> PAGE_WIDTH;
	/* Kernel virtual address of the segment's data inside the ELF image. */
	base = (__address) (((void *) elf) + entry->p_offset);
	ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
	
	if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
		if (entry->p_flags & PF_W) {
			/*
			 * Free the frame with the copy of writable segment data.
			 */
			frame_free(ADDR2PFN(frame));
		}
		/* Read-only pages map the ELF image itself — never freed here. */
	} else {
		/*
		 * The frame is either anonymous memory or the mixed case (i.e. lower
		 * part is backed by the ELF image and the upper is anonymous).
		 * In any case, a frame needs to be freed.
		 */
		frame_free(ADDR2PFN(frame));
	}
}
Note: See TracChangeset for help on using the changeset viewer.