Changeset 03523dc in mainline for kernel/generic/src/mm/backend_elf.c
- Timestamp: 2011-01-15T16:12:46Z (14 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 6b9e85b
- Parents: 630a8ef
- File: 1 edited
Legend:
- Unmodified: no prefix
- Added: prefixed with "+"
- Removed: prefixed with "-"
kernel/generic/src/mm/backend_elf.c
--- kernel/generic/src/mm/backend_elf.c (r630a8ef)
+++ kernel/generic/src/mm/backend_elf.c (r03523dc)
@@ -43,4 +43,5 @@
 #include <mm/slab.h>
 #include <mm/page.h>
+#include <mm/reserve.h>
 #include <genarch/mm/page_pt.h>
 #include <genarch/mm/page_ht.h>
@@ -55,211 +56,40 @@
 #endif
 
+static bool elf_create(as_area_t *);
+static bool elf_resize(as_area_t *, size_t);
+static void elf_share(as_area_t *);
+static void elf_destroy(as_area_t *);
+
 static int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
 static void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
-static void elf_share(as_area_t *area);
 
 mem_backend_t elf_backend = {
+    .create = elf_create,
+    .resize = elf_resize,
+    .share = elf_share,
+    .destroy = elf_destroy,
+
     .page_fault = elf_page_fault,
     .frame_free = elf_frame_free,
-    .share = elf_share
 };
 
-/** Service a page fault in the ELF backend address space area.
- *
- * The address space area and page tables must be already locked.
- *
- * @param area   Pointer to the address space area.
- * @param addr   Faulting virtual address.
- * @param access Access mode that caused the fault (i.e.
- *               read/write/exec).
- *
- * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
- *         on success (i.e. serviced).
- */
-int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
-{
-    elf_header_t *elf = area->backend_data.elf;
-    elf_segment_header_t *entry = area->backend_data.segment;
-    btree_node_t *leaf;
-    uintptr_t base, frame, page, start_anon;
-    size_t i;
-    bool dirty = false;
-
-    ASSERT(page_table_locked(AS));
-    ASSERT(mutex_locked(&area->lock));
-
-    if (!as_area_check_access(area, access))
-        return AS_PF_FAULT;
-
-    ASSERT((addr >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) &&
-        (addr < entry->p_vaddr + entry->p_memsz));
-    i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
-    base = (uintptr_t)
-        (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));
-
-    /* Virtual address of faulting page*/
-    page = ALIGN_DOWN(addr, PAGE_SIZE);
-
-    /* Virtual address of the end of initialized part of segment */
-    start_anon = entry->p_vaddr + entry->p_filesz;
-
-    if (area->sh_info) {
-        bool found = false;
-
-        /*
-         * The address space area is shared.
-         */
-
-        mutex_lock(&area->sh_info->lock);
-        frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
-            page - area->base, &leaf);
-        if (!frame) {
-            unsigned int i;
-
-            /*
-             * Workaround for valid NULL address.
-             */
-
-            for (i = 0; i < leaf->keys; i++) {
-                if (leaf->key[i] == page - area->base) {
-                    found = true;
-                    break;
-                }
-            }
-        }
-        if (frame || found) {
-            frame_reference_add(ADDR2PFN(frame));
-            page_mapping_insert(AS, addr, frame,
-                as_area_get_flags(area));
-            if (!used_space_insert(area, page, 1))
-                panic("Cannot insert used space.");
-            mutex_unlock(&area->sh_info->lock);
-            return AS_PF_OK;
-        }
-    }
-
-    /*
-     * The area is either not shared or the pagemap does not contain the
-     * mapping.
-     */
-    if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
-        /*
-         * Initialized portion of the segment. The memory is backed
-         * directly by the content of the ELF image. Pages are
-         * only copied if the segment is writable so that there
-         * can be more instantions of the same memory ELF image
-         * used at a time. Note that this could be later done
-         * as COW.
-         */
-        if (entry->p_flags & PF_W) {
-            frame = (uintptr_t)frame_alloc_noreserve(ONE_FRAME, 0);
-            memcpy((void *) PA2KA(frame),
-                (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
-            if (entry->p_flags & PF_X) {
-                smc_coherence_block((void *) PA2KA(frame),
-                    FRAME_SIZE);
-            }
-            dirty = true;
-        } else {
-            frame = KA2PA(base + i * FRAME_SIZE);
-        }
-    } else if (page >= start_anon) {
-        /*
-         * This is the uninitialized portion of the segment.
-         * It is not physically present in the ELF image.
-         * To resolve the situation, a frame must be allocated
-         * and cleared.
-         */
-        frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
-        memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
-        dirty = true;
-    } else {
-        size_t pad_lo, pad_hi;
-        /*
-         * The mixed case.
-         *
-         * The middle part is backed by the ELF image and
-         * the lower and upper parts are anonymous memory.
-         * (The segment can be and often is shorter than 1 page).
-         */
-        if (page < entry->p_vaddr)
-            pad_lo = entry->p_vaddr - page;
-        else
-            pad_lo = 0;
-
-        if (start_anon < page + PAGE_SIZE)
-            pad_hi = page + PAGE_SIZE - start_anon;
-        else
-            pad_hi = 0;
-
-        frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
-        memcpy((void *) (PA2KA(frame) + pad_lo),
-            (void *) (base + i * FRAME_SIZE + pad_lo),
-            FRAME_SIZE - pad_lo - pad_hi);
-        if (entry->p_flags & PF_X) {
-            smc_coherence_block((void *) (PA2KA(frame) + pad_lo),
-                FRAME_SIZE - pad_lo - pad_hi);
-        }
-        memsetb((void *) PA2KA(frame), pad_lo, 0);
-        memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
-            0);
-        dirty = true;
-    }
-
-    if (dirty && area->sh_info) {
-        frame_reference_add(ADDR2PFN(frame));
-        btree_insert(&area->sh_info->pagemap, page - area->base,
-            (void *) frame, leaf);
-    }
-
-    if (area->sh_info)
-        mutex_unlock(&area->sh_info->lock);
-
-    page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
-    if (!used_space_insert(area, page, 1))
-        panic("Cannot insert used space.");
-
-    return AS_PF_OK;
-}
-
-/** Free a frame that is backed by the ELF backend.
- *
- * The address space area and page tables must be already locked.
- *
- * @param area  Pointer to the address space area.
- * @param page  Page that is mapped to frame. Must be aligned to
- *              PAGE_SIZE.
- * @param frame Frame to be released.
- *
- */
-void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
-{
-    elf_segment_header_t *entry = area->backend_data.segment;
-    uintptr_t start_anon;
-
-    ASSERT(page_table_locked(area->as));
-    ASSERT(mutex_locked(&area->lock));
-
-    ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
-    ASSERT(page < entry->p_vaddr + entry->p_memsz);
-
-    start_anon = entry->p_vaddr + entry->p_filesz;
-
-    if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
-        if (entry->p_flags & PF_W) {
-            /*
-             * Free the frame with the copy of writable segment
-             * data.
-             */
-            frame_free_noreserve(frame);
-        }
-    } else {
-        /*
-         * The frame is either anonymous memory or the mixed case (i.e.
-         * lower part is backed by the ELF image and the upper is
-         * anonymous). In any case, a frame needs to be freed.
-         */
-        frame_free_noreserve(frame);
-    }
+bool elf_create(as_area_t *area)
+{
+    /**
+     * @todo:
+     * Reserve only how much is necessary for anonymous pages plus the
+     * supporting structures allocated during the page fault.
+     */
+    return reserve_try_alloc(area->pages);
+}
+
+bool elf_resize(as_area_t *area, size_t new_pages)
+{
+    if (new_pages > area->pages)
+        return reserve_try_alloc(new_pages - area->pages);
+    else if (new_pages < area->pages)
+        reserve_free(area->pages - new_pages);
+
+    return true;
 }
 
@@ -352,4 +182,212 @@
 }
 
+void elf_destroy(as_area_t *area)
+{
+    /**
+     * @todo:
+     * Unreserve only how much was really reserved.
+     */
+    reserve_free(area->pages);
+}
+
+/** Service a page fault in the ELF backend address space area.
+ *
+ * The address space area and page tables must be already locked.
+ *
+ * @param area   Pointer to the address space area.
+ * @param addr   Faulting virtual address.
+ * @param access Access mode that caused the fault (i.e.
+ *               read/write/exec).
+ *
+ * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
+ *         on success (i.e. serviced).
+ */
+int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
+{
+    elf_header_t *elf = area->backend_data.elf;
+    elf_segment_header_t *entry = area->backend_data.segment;
+    btree_node_t *leaf;
+    uintptr_t base, frame, page, start_anon;
+    size_t i;
+    bool dirty = false;
+
+    ASSERT(page_table_locked(AS));
+    ASSERT(mutex_locked(&area->lock));
+
+    if (!as_area_check_access(area, access))
+        return AS_PF_FAULT;
+
+    ASSERT((addr >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) &&
+        (addr < entry->p_vaddr + entry->p_memsz));
+    i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
+    base = (uintptr_t)
+        (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));
+
+    /* Virtual address of faulting page*/
+    page = ALIGN_DOWN(addr, PAGE_SIZE);
+
+    /* Virtual address of the end of initialized part of segment */
+    start_anon = entry->p_vaddr + entry->p_filesz;
+
+    if (area->sh_info) {
+        bool found = false;
+
+        /*
+         * The address space area is shared.
+         */
+
+        mutex_lock(&area->sh_info->lock);
+        frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
+            page - area->base, &leaf);
+        if (!frame) {
+            unsigned int i;
+
+            /*
+             * Workaround for valid NULL address.
+             */
+
+            for (i = 0; i < leaf->keys; i++) {
+                if (leaf->key[i] == page - area->base) {
+                    found = true;
+                    break;
+                }
+            }
+        }
+        if (frame || found) {
+            frame_reference_add(ADDR2PFN(frame));
+            page_mapping_insert(AS, addr, frame,
+                as_area_get_flags(area));
+            if (!used_space_insert(area, page, 1))
+                panic("Cannot insert used space.");
+            mutex_unlock(&area->sh_info->lock);
+            return AS_PF_OK;
+        }
+    }
+
+    /*
+     * The area is either not shared or the pagemap does not contain the
+     * mapping.
+     */
+    if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
+        /*
+         * Initialized portion of the segment. The memory is backed
+         * directly by the content of the ELF image. Pages are
+         * only copied if the segment is writable so that there
+         * can be more instantions of the same memory ELF image
+         * used at a time. Note that this could be later done
+         * as COW.
+         */
+        if (entry->p_flags & PF_W) {
+            frame = (uintptr_t)frame_alloc_noreserve(ONE_FRAME, 0);
+            memcpy((void *) PA2KA(frame),
+                (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
+            if (entry->p_flags & PF_X) {
+                smc_coherence_block((void *) PA2KA(frame),
+                    FRAME_SIZE);
+            }
+            dirty = true;
+        } else {
+            frame = KA2PA(base + i * FRAME_SIZE);
+        }
+    } else if (page >= start_anon) {
+        /*
+         * This is the uninitialized portion of the segment.
+         * It is not physically present in the ELF image.
+         * To resolve the situation, a frame must be allocated
+         * and cleared.
+         */
+        frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
+        memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
+        dirty = true;
+    } else {
+        size_t pad_lo, pad_hi;
+        /*
+         * The mixed case.
+         *
+         * The middle part is backed by the ELF image and
+         * the lower and upper parts are anonymous memory.
+         * (The segment can be and often is shorter than 1 page).
+         */
+        if (page < entry->p_vaddr)
+            pad_lo = entry->p_vaddr - page;
+        else
+            pad_lo = 0;
+
+        if (start_anon < page + PAGE_SIZE)
+            pad_hi = page + PAGE_SIZE - start_anon;
+        else
+            pad_hi = 0;
+
+        frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
+        memcpy((void *) (PA2KA(frame) + pad_lo),
+            (void *) (base + i * FRAME_SIZE + pad_lo),
+            FRAME_SIZE - pad_lo - pad_hi);
+        if (entry->p_flags & PF_X) {
+            smc_coherence_block((void *) (PA2KA(frame) + pad_lo),
+                FRAME_SIZE - pad_lo - pad_hi);
+        }
+        memsetb((void *) PA2KA(frame), pad_lo, 0);
+        memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
+            0);
+        dirty = true;
+    }
+
+    if (dirty && area->sh_info) {
+        frame_reference_add(ADDR2PFN(frame));
+        btree_insert(&area->sh_info->pagemap, page - area->base,
+            (void *) frame, leaf);
+    }
+
+    if (area->sh_info)
+        mutex_unlock(&area->sh_info->lock);
+
+    page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
+    if (!used_space_insert(area, page, 1))
+        panic("Cannot insert used space.");
+
+    return AS_PF_OK;
+}
+
+/** Free a frame that is backed by the ELF backend.
+ *
+ * The address space area and page tables must be already locked.
+ *
+ * @param area  Pointer to the address space area.
+ * @param page  Page that is mapped to frame. Must be aligned to
+ *              PAGE_SIZE.
+ * @param frame Frame to be released.
+ *
+ */
+void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
+{
+    elf_segment_header_t *entry = area->backend_data.segment;
+    uintptr_t start_anon;
+
+    ASSERT(page_table_locked(area->as));
+    ASSERT(mutex_locked(&area->lock));
+
+    ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
+    ASSERT(page < entry->p_vaddr + entry->p_memsz);
+
+    start_anon = entry->p_vaddr + entry->p_filesz;
+
+    if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
+        if (entry->p_flags & PF_W) {
+            /*
+             * Free the frame with the copy of writable segment
+             * data.
+             */
+            frame_free_noreserve(frame);
+        }
+    } else {
+        /*
+         * The frame is either anonymous memory or the mixed case (i.e.
+         * lower part is backed by the ELF image and the upper is
+         * anonymous). In any case, a frame needs to be freed.
+         */
+        frame_free_noreserve(frame);
+    }
+}
+
 /** @}
  */
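The substance of this changeset is the reservation accounting wired into the backend's new lifecycle hooks: elf_create() reserves one unit of backing store per page of the area, elf_resize() reserves or releases only the delta, and elf_destroy() returns the whole reservation (the @todo comments note that create and destroy currently over-account, since read-only file-backed pages need no reservation). The following standalone sketch, which is not part of the changeset, shows the arithmetic in isolation: the reserve counter, the reserve_try_alloc()/reserve_free() bodies, and the as_area_t stub are simplified stand-ins for <mm/reserve.h> and the real kernel types, and only the elf_resize() logic mirrors the diff above.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical pool of reservable pages; a stand-in for the kernel's
   global memory reserve. */
static size_t reserve = 64;

static bool reserve_try_alloc(size_t pages)
{
    if (reserve < pages)
        return false;   /* not enough memory to back the request */
    reserve -= pages;
    return true;
}

static void reserve_free(size_t pages)
{
    reserve += pages;
}

/* Stub of the kernel's address space area type. */
typedef struct {
    size_t pages;       /* current size of the area in pages */
} as_area_t;

/* Mirrors elf_resize() above: reserve on growth, release on shrinking. */
static bool elf_resize(as_area_t *area, size_t new_pages)
{
    if (new_pages > area->pages)
        return reserve_try_alloc(new_pages - area->pages);
    else if (new_pages < area->pages)
        reserve_free(area->pages - new_pages);

    return true;
}

int main(void)
{
    as_area_t area = { .pages = 16 };

    /* elf_create() equivalent: reserve the whole area up front. */
    if (!reserve_try_alloc(area.pages))
        return 1;

    printf("grow to 32: %s\n", elf_resize(&area, 32) ? "ok" : "fail");
    area.pages = 32;
    printf("shrink to 8: %s\n", elf_resize(&area, 8) ? "ok" : "fail");
    area.pages = 8;

    /* elf_destroy() equivalent: return the remaining reservation. */
    reserve_free(area.pages);
    printf("reserve back to %zu pages\n", reserve);
    return 0;
}

In the kernel, the generic address space code updates area->pages after a successful resize; the sketch performs that assignment by hand after each call.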
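Similarly, the mixed case in elf_page_fault() handles a faulting page that straddles the initialized part of the segment: only the bytes in [p_vaddr, p_vaddr + p_filesz) are copied from the ELF image, while pad_lo bytes at the head and pad_hi bytes at the tail of the page are zero-filled. Below is a standalone sketch of just that computation, using made-up segment values; it shares nothing with the kernel but the arithmetic.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096

static uintptr_t align_down(uintptr_t addr)
{
    return addr & ~((uintptr_t) PAGE_SIZE - 1);
}

int main(void)
{
    /* Made-up segment: starts mid-page, 0x200 initialized bytes. */
    uintptr_t p_vaddr = 0x10000c00;
    uintptr_t p_filesz = 0x200;
    uintptr_t start_anon = p_vaddr + p_filesz;

    /* Base of the faulting page. */
    uintptr_t page = align_down(p_vaddr);

    /* Head of the page below the segment start: zero-filled. */
    uintptr_t pad_lo = (page < p_vaddr) ? (p_vaddr - page) : 0;

    /* Tail of the page past the initialized part: zero-filled. */
    uintptr_t pad_hi = (start_anon < page + PAGE_SIZE) ?
        (page + PAGE_SIZE - start_anon) : 0;

    /* The middle, PAGE_SIZE - pad_lo - pad_hi bytes, is copied from
       the image; for this sample segment that equals p_filesz. */
    printf("pad_lo=%#lx pad_hi=%#lx copied=%#lx\n",
        (unsigned long) pad_lo, (unsigned long) pad_hi,
        (unsigned long) (PAGE_SIZE - pad_lo - pad_hi));
    return 0;
}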