Changes in kernel/generic/src/mm/backend_elf.c [b4ffe5bc:917a8c8] in mainline
Files: 1 edited
Legend:
  (no prefix)  Unmodified
  +            Added
  -            Removed
kernel/generic/src/mm/backend_elf.c
rb4ffe5bc → r917a8c8

  #include <mm/slab.h>
  #include <mm/page.h>
- #include <mm/reserve.h>
  #include <genarch/mm/page_pt.h>
  #include <genarch/mm/page_ht.h>
…
  #include <arch/barrier.h>

- static bool elf_create(as_area_t *);
- static bool elf_resize(as_area_t *, size_t);
- static void elf_share(as_area_t *);
- static void elf_destroy(as_area_t *);
-
- static int elf_page_fault(as_area_t *, uintptr_t, pf_access_t);
- static void elf_frame_free(as_area_t *, uintptr_t, uintptr_t);
+ #ifdef CONFIG_VIRT_IDX_DCACHE
+ #include <arch/mm/cache.h>
+ #endif
+
+ static int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
+ static void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
+ static void elf_share(as_area_t *area);

  mem_backend_t elf_backend = {
-     .create = elf_create,
-     .resize = elf_resize,
-     .share = elf_share,
-     .destroy = elf_destroy,
-
      .page_fault = elf_page_fault,
      .frame_free = elf_frame_free,
+     .share = elf_share
  };

- static size_t elf_nonanon_pages_get(as_area_t *area)
+ /** Service a page fault in the ELF backend address space area.
+  *
+  * The address space area and page tables must be already locked.
+  *
+  * @param area Pointer to the address space area.
+  * @param addr Faulting virtual address.
+  * @param access Access mode that caused the fault (i.e.
+  * read/write/exec).
+  *
+  * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
+  * on success (i.e. serviced).
+  */
+ int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
+ {
+     elf_header_t *elf = area->backend_data.elf;
+     elf_segment_header_t *entry = area->backend_data.segment;
+     btree_node_t *leaf;
+     uintptr_t base, frame, page, start_anon;
+     size_t i;
+     bool dirty = false;
+
+     ASSERT(page_table_locked(AS));
+     ASSERT(mutex_locked(&area->lock));
+
+     if (!as_area_check_access(area, access))
+         return AS_PF_FAULT;
+
+     if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
+         return AS_PF_FAULT;
+
+     if (addr >= entry->p_vaddr + entry->p_memsz)
+         return AS_PF_FAULT;
+
+     i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
+     base = (uintptr_t)
+         (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));
+
+     /* Virtual address of faulting page */
+     page = ALIGN_DOWN(addr, PAGE_SIZE);
+
+     /* Virtual address of the end of initialized part of segment */
+     start_anon = entry->p_vaddr + entry->p_filesz;
+
+     if (area->sh_info) {
+         bool found = false;
+
+         /*
+          * The address space area is shared.
+          */
+
+         mutex_lock(&area->sh_info->lock);
+         frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
+             page - area->base, &leaf);
+         if (!frame) {
+             unsigned int i;
+
+             /*
+              * Workaround for valid NULL address.
+              */
+
+             for (i = 0; i < leaf->keys; i++) {
+                 if (leaf->key[i] == page - area->base) {
+                     found = true;
+                     break;
+                 }
+             }
+         }
+         if (frame || found) {
+             frame_reference_add(ADDR2PFN(frame));
+             page_mapping_insert(AS, addr, frame,
+                 as_area_get_flags(area));
+             if (!used_space_insert(area, page, 1))
+                 panic("Cannot insert used space.");
+             mutex_unlock(&area->sh_info->lock);
+             return AS_PF_OK;
+         }
+     }
+
+     /*
+      * The area is either not shared or the pagemap does not contain the
+      * mapping.
+      */
+     if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
+         /*
+          * Initialized portion of the segment. The memory is backed
+          * directly by the content of the ELF image. Pages are
+          * only copied if the segment is writable so that there
+          * can be more instantions of the same memory ELF image
+          * used at a time. Note that this could be later done
+          * as COW.
+          */
+         if (entry->p_flags & PF_W) {
+             frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
+             memcpy((void *) PA2KA(frame),
+                 (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
+             if (entry->p_flags & PF_X) {
+                 smc_coherence_block((void *) PA2KA(frame),
+                     FRAME_SIZE);
+             }
+             dirty = true;
+         } else {
+             frame = KA2PA(base + i * FRAME_SIZE);
+         }
+     } else if (page >= start_anon) {
+         /*
+          * This is the uninitialized portion of the segment.
+          * It is not physically present in the ELF image.
+          * To resolve the situation, a frame must be allocated
+          * and cleared.
+          */
+         frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
+         memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
+         dirty = true;
+     } else {
+         size_t pad_lo, pad_hi;
+         /*
+          * The mixed case.
+          *
+          * The middle part is backed by the ELF image and
+          * the lower and upper parts are anonymous memory.
+          * (The segment can be and often is shorter than 1 page).
+          */
+         if (page < entry->p_vaddr)
+             pad_lo = entry->p_vaddr - page;
+         else
+             pad_lo = 0;
+
+         if (start_anon < page + PAGE_SIZE)
+             pad_hi = page + PAGE_SIZE - start_anon;
+         else
+             pad_hi = 0;
+
+         frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
+         memcpy((void *) (PA2KA(frame) + pad_lo),
+             (void *) (base + i * FRAME_SIZE + pad_lo),
+             FRAME_SIZE - pad_lo - pad_hi);
+         if (entry->p_flags & PF_X) {
+             smc_coherence_block((void *) (PA2KA(frame) + pad_lo),
+                 FRAME_SIZE - pad_lo - pad_hi);
+         }
+         memsetb((void *) PA2KA(frame), pad_lo, 0);
+         memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
+             0);
+         dirty = true;
+     }
+
+     if (dirty && area->sh_info) {
+         frame_reference_add(ADDR2PFN(frame));
+         btree_insert(&area->sh_info->pagemap, page - area->base,
+             (void *) frame, leaf);
+     }
+
+     if (area->sh_info)
+         mutex_unlock(&area->sh_info->lock);
+
+     page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
+     if (!used_space_insert(area, page, 1))
+         panic("Cannot insert used space.");
+
+     return AS_PF_OK;
+ }
+
+ /** Free a frame that is backed by the ELF backend.
+  *
+  * The address space area and page tables must be already locked.
+  *
+  * @param area Pointer to the address space area.
+  * @param page Page that is mapped to frame. Must be aligned to
+  * PAGE_SIZE.
+  * @param frame Frame to be released.
+  *
+  */
+ void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
  {
      elf_segment_header_t *entry = area->backend_data.segment;
-     uintptr_t first = ALIGN_UP(entry->p_vaddr, PAGE_SIZE);
-     uintptr_t last = ALIGN_DOWN(entry->p_vaddr + entry->p_filesz,
-         PAGE_SIZE);
-
-     if (entry->p_flags & PF_W)
-         return 0;
-
-     if (last < first)
-         return 0;
-
-     return last - first;
- }
-
- bool elf_create(as_area_t *area)
- {
-     size_t nonanon_pages = elf_nonanon_pages_get(area);
-
-     if (area->pages <= nonanon_pages)
-         return true;
-
-     return reserve_try_alloc(area->pages - nonanon_pages);
- }
-
- bool elf_resize(as_area_t *area, size_t new_pages)
- {
-     size_t nonanon_pages = elf_nonanon_pages_get(area);
-
-     if (new_pages > area->pages) {
-         /* The area is growing. */
-         if (area->pages >= nonanon_pages)
-             return reserve_try_alloc(new_pages - area->pages);
-         else if (new_pages > nonanon_pages)
-             return reserve_try_alloc(new_pages - nonanon_pages);
-     } else if (new_pages < area->pages) {
-         /* The area is shrinking. */
-         if (new_pages >= nonanon_pages)
-             reserve_free(area->pages - new_pages);
-         else if (area->pages > nonanon_pages)
-             reserve_free(nonanon_pages - new_pages);
-     }
-
-     return true;
+     uintptr_t start_anon;
+
+     ASSERT(page_table_locked(area->as));
+     ASSERT(mutex_locked(&area->lock));
+
+     ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
+     ASSERT(page < entry->p_vaddr + entry->p_memsz);
+
+     start_anon = entry->p_vaddr + entry->p_filesz;
+
+     if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
+         if (entry->p_flags & PF_W) {
+             /*
+              * Free the frame with the copy of writable segment
+              * data.
+              */
+             frame_free(frame);
+         }
+     } else {
+         /*
+          * The frame is either anonymous memory or the mixed case (i.e.
+          * lower part is backed by the ELF image and the upper is
+          * anonymous). In any case, a frame needs to be freed.
+          */
+         frame_free(frame);
+     }
  }

…
      if (!(area->flags & AS_AREA_WRITE))
          if (base >= entry->p_vaddr &&
-             base + P2SZ(count) <= start_anon)
+             base + count * PAGE_SIZE <= start_anon)
              continue;

…
          if (!(area->flags & AS_AREA_WRITE))
              if (base >= entry->p_vaddr &&
-                 base + P2SZ(j + 1) <= start_anon)
+                 base + (j + 1) * PAGE_SIZE <=
+                 start_anon)
                  continue;

          page_table_lock(area->as, false);
          pte = page_mapping_find(area->as,
-             base + P2SZ(j), false);
+             base + j * PAGE_SIZE);
          ASSERT(pte && PTE_VALID(pte) &&
              PTE_PRESENT(pte));
          btree_insert(&area->sh_info->pagemap,
-             (base + P2SZ(j)) - area->base,
+             (base + j * PAGE_SIZE) - area->base,
              (void *) PTE_GET_FRAME(pte), NULL);
          page_table_unlock(area->as, false);
…
  }

- void elf_destroy(as_area_t *area)
- {
-     size_t nonanon_pages = elf_nonanon_pages_get(area);
-
-     if (area->pages > nonanon_pages)
-         reserve_free(area->pages - nonanon_pages);
- }
-
- /** Service a page fault in the ELF backend address space area.
-  *
-  * The address space area and page tables must be already locked.
-  *
-  * @param area Pointer to the address space area.
-  * @param addr Faulting virtual address.
-  * @param access Access mode that caused the fault (i.e.
-  * read/write/exec).
-  *
-  * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
-  * on success (i.e. serviced).
-  */
- int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
- {
-     elf_header_t *elf = area->backend_data.elf;
-     elf_segment_header_t *entry = area->backend_data.segment;
-     btree_node_t *leaf;
-     uintptr_t base, frame, page, start_anon;
-     size_t i;
-     bool dirty = false;
-
-     ASSERT(page_table_locked(AS));
-     ASSERT(mutex_locked(&area->lock));
-
-     if (!as_area_check_access(area, access))
-         return AS_PF_FAULT;
-
-     if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
-         return AS_PF_FAULT;
-
-     if (addr >= entry->p_vaddr + entry->p_memsz)
-         return AS_PF_FAULT;
-
-     i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
-     base = (uintptr_t)
-         (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));
-
-     /* Virtual address of faulting page */
-     page = ALIGN_DOWN(addr, PAGE_SIZE);
-
-     /* Virtual address of the end of initialized part of segment */
-     start_anon = entry->p_vaddr + entry->p_filesz;
-
-     if (area->sh_info) {
-         bool found = false;
-
-         /*
-          * The address space area is shared.
-          */
-
-         mutex_lock(&area->sh_info->lock);
-         frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
-             page - area->base, &leaf);
-         if (!frame) {
-             unsigned int i;
-
-             /*
-              * Workaround for valid NULL address.
-              */
-
-             for (i = 0; i < leaf->keys; i++) {
-                 if (leaf->key[i] == page - area->base) {
-                     found = true;
-                     break;
-                 }
-             }
-         }
-         if (frame || found) {
-             frame_reference_add(ADDR2PFN(frame));
-             page_mapping_insert(AS, addr, frame,
-                 as_area_get_flags(area));
-             if (!used_space_insert(area, page, 1))
-                 panic("Cannot insert used space.");
-             mutex_unlock(&area->sh_info->lock);
-             return AS_PF_OK;
-         }
-     }
-
-     /*
-      * The area is either not shared or the pagemap does not contain the
-      * mapping.
-      */
-     if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
-         /*
-          * Initialized portion of the segment. The memory is backed
-          * directly by the content of the ELF image. Pages are
-          * only copied if the segment is writable so that there
-          * can be more instantions of the same memory ELF image
-          * used at a time. Note that this could be later done
-          * as COW.
-          */
-         if (entry->p_flags & PF_W) {
-             frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
-             memcpy((void *) PA2KA(frame),
-                 (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
-             if (entry->p_flags & PF_X) {
-                 smc_coherence_block((void *) PA2KA(frame),
-                     FRAME_SIZE);
-             }
-             dirty = true;
-         } else {
-             frame = KA2PA(base + i * FRAME_SIZE);
-         }
-     } else if (page >= start_anon) {
-         /*
-          * This is the uninitialized portion of the segment.
-          * It is not physically present in the ELF image.
-          * To resolve the situation, a frame must be allocated
-          * and cleared.
-          */
-         frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
-         memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
-         dirty = true;
-     } else {
-         size_t pad_lo, pad_hi;
-         /*
-          * The mixed case.
-          *
-          * The middle part is backed by the ELF image and
-          * the lower and upper parts are anonymous memory.
-          * (The segment can be and often is shorter than 1 page).
-          */
-         if (page < entry->p_vaddr)
-             pad_lo = entry->p_vaddr - page;
-         else
-             pad_lo = 0;
-
-         if (start_anon < page + PAGE_SIZE)
-             pad_hi = page + PAGE_SIZE - start_anon;
-         else
-             pad_hi = 0;
-
-         frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
-         memcpy((void *) (PA2KA(frame) + pad_lo),
-             (void *) (base + i * FRAME_SIZE + pad_lo),
-             FRAME_SIZE - pad_lo - pad_hi);
-         if (entry->p_flags & PF_X) {
-             smc_coherence_block((void *) (PA2KA(frame) + pad_lo),
-                 FRAME_SIZE - pad_lo - pad_hi);
-         }
-         memsetb((void *) PA2KA(frame), pad_lo, 0);
-         memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
-             0);
-         dirty = true;
-     }
-
-     if (dirty && area->sh_info) {
-         frame_reference_add(ADDR2PFN(frame));
-         btree_insert(&area->sh_info->pagemap, page - area->base,
-             (void *) frame, leaf);
-     }
-
-     if (area->sh_info)
-         mutex_unlock(&area->sh_info->lock);
-
-     page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
-     if (!used_space_insert(area, page, 1))
-         panic("Cannot insert used space.");
-
-     return AS_PF_OK;
- }
-
- /** Free a frame that is backed by the ELF backend.
-  *
-  * The address space area and page tables must be already locked.
-  *
-  * @param area Pointer to the address space area.
-  * @param page Page that is mapped to frame. Must be aligned to
-  * PAGE_SIZE.
-  * @param frame Frame to be released.
-  *
-  */
- void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
- {
-     elf_segment_header_t *entry = area->backend_data.segment;
-     uintptr_t start_anon;
-
-     ASSERT(page_table_locked(area->as));
-     ASSERT(mutex_locked(&area->lock));
-
-     ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
-     ASSERT(page < entry->p_vaddr + entry->p_memsz);
-
-     start_anon = entry->p_vaddr + entry->p_filesz;
-
-     if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
-         if (entry->p_flags & PF_W) {
-             /*
-              * Free the frame with the copy of writable segment
-              * data.
-              */
-             frame_free_noreserve(frame);
-         }
-     } else {
-         /*
-          * The frame is either anonymous memory or the mixed case (i.e.
-          * lower part is backed by the ELF image and the upper is
-          * anonymous). In any case, a frame needs to be freed.
-          */
-         frame_free_noreserve(frame);
-     }
- }
-
  /** @}
   */
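For orientation: the removed reserve accounting hinges on elf_nonanon_pages_get(), which counts only memory that can stay backed by the read-only ELF image; writable segments contribute nothing, since every faulted page receives a private (and therefore reservable) copy. Below is a hypothetical worked example of the alignment arithmetic, with made-up segment values that are not taken from this changeset:

    /* Hypothetical values, assuming PAGE_SIZE = 0x1000 and a read-only
     * (PF_W clear) segment:
     *
     *   p_vaddr  = 0x11080,  p_filesz = 0x2000
     *   first    = ALIGN_UP(0x11080, 0x1000)            = 0x12000
     *   last     = ALIGN_DOWN(0x11080 + 0x2000, 0x1000) = 0x13000
     *   result   = last - first                         = 0x1000
     *
     * Only the span [0x12000, 0x13000) lies entirely inside the initialized
     * part of the segment, so only that one page's worth of memory can stay
     * backed by the image; the partial pages at either end need private
     * frames and are charged to the reserve by elf_create()/elf_resize(). */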
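In the elf_share() hunks, both sides compute the same addresses; the b4ffe5bc side goes through the P2SZ() page-count-to-size helper, while the 917a8c8 side multiplies by PAGE_SIZE explicitly. A minimal sketch of the assumed relationship (the definitions below are illustrative, not quoted from the HelenOS headers):

    /* Assumed definitions, for illustration only. */
    #define PAGE_WIDTH  12                          /* assuming 4 KiB pages */
    #define PAGE_SIZE   (1UL << PAGE_WIDTH)
    #define P2SZ(pages) ((pages) << PAGE_WIDTH)     /* page count -> byte size */

    /* Hence base + P2SZ(j) and base + j * PAGE_SIZE name the same address. */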