Changeset 03523dc in mainline
- Timestamp: 2011-01-15T16:12:46Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 6b9e85b
- Parents: 630a8ef
- Location: kernel/generic
- Files: 4 edited
kernel/generic/include/mm/as.h
r630a8ef → r03523dc

     /** Address space area backend structure. */
     typedef struct mem_backend {
    +    bool (* create)(as_area_t *);
    +    bool (* resize)(as_area_t *, size_t);
    +    void (* share)(as_area_t *);
    +    void (* destroy)(as_area_t *);
    +
         int (* page_fault)(as_area_t *, uintptr_t, pf_access_t);
         void (* frame_free)(as_area_t *, uintptr_t, uintptr_t);
    -    void (* share)(as_area_t *);
     } mem_backend_t;
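This changeset extends the address space area backend interface with explicit lifecycle hooks: besides the existing page_fault and frame_free callbacks, every mem_backend_t now carries create, resize, share and destroy entries, which the backends below use to reserve and release physical memory for their areas. The fragment that follows is a hypothetical caller-side sketch, not part of this changeset: it only illustrates how code managing address space areas might drive the new hooks, assuming the area's backend pointer and treating the hooks as optional (phys_backend, for example, leaves .resize unset). The helper names are illustrative.

    /* Hypothetical caller-side sketch (not from this changeset), assuming
     * <mm/as.h> is included and as_area_t carries a mem_backend_t *backend. */
    static bool area_backend_create(as_area_t *area)
    {
        /* Backends without a create hook are assumed to need no reservation. */
        if (area->backend && area->backend->create)
            return area->backend->create(area);
        return true;
    }

    static bool area_backend_resize(as_area_t *area, size_t new_pages)
    {
        /* phys_backend sets .resize = NULL, so the hook must stay optional. */
        if (area->backend && area->backend->resize)
            return area->backend->resize(area, new_pages);
        return true;
    }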
kernel/generic/src/mm/backend_anon.c
r630a8ef → r03523dc

     #include <mm/as.h>
     #include <mm/page.h>
    +#include <mm/reserve.h>
     #include <genarch/mm/page_pt.h>
     #include <genarch/mm/page_ht.h>
    …
     #endif
     
    +static bool anon_create(as_area_t *);
    +static bool anon_resize(as_area_t *, size_t);
    +static void anon_share(as_area_t *area);
    +static void anon_destroy(as_area_t *);
    +
     static int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
     static void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
    -static void anon_share(as_area_t *area);
     
     mem_backend_t anon_backend = {
    +    .create = anon_create,
    +    .resize = anon_resize,
    +    .share = anon_share,
    +    .destroy = anon_destroy,
    +
         .page_fault = anon_page_fault,
         .frame_free = anon_frame_free,
    -    .share = anon_share
     };
    +
    +bool anon_create(as_area_t *area)
    +{
    +    return reserve_try_alloc(area->pages);
    +}
    +
    +bool anon_resize(as_area_t *area, size_t new_pages)
    +{
    +    /**
    +     * @todo
    +     * Reserve also space needed for the supporting strutures allocated
    +     * during page fault.
    +     */
    +
    +    if (new_pages > area->pages)
    +        return reserve_try_alloc(new_pages - area->pages);
    +    else if (new_pages < area->pages)
    +        reserve_free(area->pages - new_pages);
    +
    +    return true;
    +}
    +
    +/** Share the anonymous address space area.
    + *
    + * Sharing of anonymous area is done by duplicating its entire mapping
    + * to the pagemap. Page faults will primarily search for frames there.
    + *
    + * The address space and address space area must be already locked.
    + *
    + * @param area Address space area to be shared.
    + */
    +void anon_share(as_area_t *area)
    +{
    +    link_t *cur;
    +
    +    ASSERT(mutex_locked(&area->as->lock));
    +    ASSERT(mutex_locked(&area->lock));
    +
    +    /*
    +     * Copy used portions of the area to sh_info's page map.
    +     */
    +    mutex_lock(&area->sh_info->lock);
    +    for (cur = area->used_space.leaf_head.next;
    +        cur != &area->used_space.leaf_head; cur = cur->next) {
    +        btree_node_t *node;
    +        unsigned int i;
    +
    +        node = list_get_instance(cur, btree_node_t, leaf_link);
    +        for (i = 0; i < node->keys; i++) {
    +            uintptr_t base = node->key[i];
    +            size_t count = (size_t) node->value[i];
    +            unsigned int j;
    +
    +            for (j = 0; j < count; j++) {
    +                pte_t *pte;
    +
    +                page_table_lock(area->as, false);
    +                pte = page_mapping_find(area->as,
    +                    base + j * PAGE_SIZE);
    +                ASSERT(pte && PTE_VALID(pte) &&
    +                    PTE_PRESENT(pte));
    +                btree_insert(&area->sh_info->pagemap,
    +                    (base + j * PAGE_SIZE) - area->base,
    +                    (void *) PTE_GET_FRAME(pte), NULL);
    +                page_table_unlock(area->as, false);
    +
    +                pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
    +                frame_reference_add(pfn);
    +            }
    +
    +        }
    +    }
    +    mutex_unlock(&area->sh_info->lock);
    +}
    +
    +void anon_destroy(as_area_t *area)
    +{
    +    reserve_free(area->pages);
    +}
    +
     
     /** Service a page fault in the anonymous memory address space area.
    …
     }
     
     /** @}
      */

The original definition of anon_share() and its comment (old lines 180–232, between anon_frame_free() and the closing doxygen group marker) are removed from their previous location; the body is identical to the copy added above, so the function is only moved within the file, not modified.
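Taken together, anon_create(), anon_resize() and anon_destroy() keep the backend's reservation equal to the area's current page count: the full size is reserved up front, only the delta is reserved or released on resize, and everything is returned on destroy. The standalone model below (plain C, with a stub counter standing in for the kernel's reserve_try_alloc()/reserve_free()) merely illustrates that arithmetic; it is not HelenOS code, and unlike the real hook it updates the page count itself, which the kernel leaves to the caller of anon_resize().

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Stand-ins for the kernel's reservation primitives (always succeed here). */
    static size_t reserved;
    static bool reserve_try_alloc(size_t pages) { reserved += pages; return true; }
    static void reserve_free(size_t pages) { reserved -= pages; }

    /* Mirrors the delta logic of anon_resize()/elf_resize(). */
    static bool model_resize(size_t *cur_pages, size_t new_pages)
    {
        if (new_pages > *cur_pages) {
            if (!reserve_try_alloc(new_pages - *cur_pages))
                return false;
        } else if (new_pages < *cur_pages) {
            reserve_free(*cur_pages - new_pages);
        }
        *cur_pages = new_pages;
        return true;
    }

    int main(void)
    {
        size_t pages = 0;
        reserve_try_alloc(16); pages = 16;   /* create: reserve the full size */
        model_resize(&pages, 24);            /* grow: reserve only the delta */
        model_resize(&pages, 8);             /* shrink: release the delta */
        reserve_free(pages);                 /* destroy: release what is left */
        printf("reserved after destroy: %zu\n", reserved);   /* prints 0 */
        return 0;
    }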
kernel/generic/src/mm/backend_elf.c
r630a8ef → r03523dc

     #include <mm/slab.h>
     #include <mm/page.h>
    +#include <mm/reserve.h>
     #include <genarch/mm/page_pt.h>
     #include <genarch/mm/page_ht.h>
    …
     #endif
     
    +static bool elf_create(as_area_t *);
    +static bool elf_resize(as_area_t *, size_t);
    +static void elf_share(as_area_t *);
    +static void elf_destroy(as_area_t *);
    +
     static int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
     static void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
    -static void elf_share(as_area_t *area);
     
     mem_backend_t elf_backend = {
    +    .create = elf_create,
    +    .resize = elf_resize,
    +    .share = elf_share,
    +    .destroy = elf_destroy,
    +
         .page_fault = elf_page_fault,
         .frame_free = elf_frame_free,
    -    .share = elf_share
     };
     
    +bool elf_create(as_area_t *area)
    +{
    +    /**
    +     * @todo:
    +     * Reserve only how much is necessary for anonymous pages plus the
    +     * supporting structures allocated during the page fault.
    +     */
    +    return reserve_try_alloc(area->pages);
    +}
    +
    +bool elf_resize(as_area_t *area, size_t new_pages)
    +{
    +    if (new_pages > area->pages)
    +        return reserve_try_alloc(new_pages - area->pages);
    +    else if (new_pages < area->pages)
    +        reserve_free(area->pages - new_pages);
    +
    +    return true;
    +}
    …
     }
     
    +void elf_destroy(as_area_t *area)
    +{
    +    /**
    +     * @todo:
    +     * Unreserve only how much was really reserved.
    +     */
    +    reserve_free(area->pages);
    +}
    +
    +/** Service a page fault in the ELF backend address space area.
    + *
    + * The address space area and page tables must be already locked.
    + *
    + * @param area   Pointer to the address space area.
    + * @param addr   Faulting virtual address.
    + * @param access Access mode that caused the fault (i.e.
    + *               read/write/exec).
    + *
    + * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
    + *         on success (i.e. serviced).
    + */
    +int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
    +{
    +    elf_header_t *elf = area->backend_data.elf;
    +    elf_segment_header_t *entry = area->backend_data.segment;
    +    btree_node_t *leaf;
    +    uintptr_t base, frame, page, start_anon;
    +    size_t i;
    +    bool dirty = false;
    +
    +    ASSERT(page_table_locked(AS));
    +    ASSERT(mutex_locked(&area->lock));
    +
    +    if (!as_area_check_access(area, access))
    +        return AS_PF_FAULT;
    +
    +    ASSERT((addr >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) &&
    +        (addr < entry->p_vaddr + entry->p_memsz));
    +    i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
    +    base = (uintptr_t)
    +        (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));
    +
    +    /* Virtual address of faulting page */
    +    page = ALIGN_DOWN(addr, PAGE_SIZE);
    +
    +    /* Virtual address of the end of initialized part of segment */
    +    start_anon = entry->p_vaddr + entry->p_filesz;
    +
    +    if (area->sh_info) {
    +        bool found = false;
    +
    +        /*
    +         * The address space area is shared.
    +         */
    +
    +        mutex_lock(&area->sh_info->lock);
    +        frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
    +            page - area->base, &leaf);
    +        if (!frame) {
    +            unsigned int i;
    +
    +            /*
    +             * Workaround for valid NULL address.
    +             */
    +
    +            for (i = 0; i < leaf->keys; i++) {
    +                if (leaf->key[i] == page - area->base) {
    +                    found = true;
    +                    break;
    +                }
    +            }
    +        }
    +        if (frame || found) {
    +            frame_reference_add(ADDR2PFN(frame));
    +            page_mapping_insert(AS, addr, frame,
    +                as_area_get_flags(area));
    +            if (!used_space_insert(area, page, 1))
    +                panic("Cannot insert used space.");
    +            mutex_unlock(&area->sh_info->lock);
    +            return AS_PF_OK;
    +        }
    +    }
    +
    +    /*
    +     * The area is either not shared or the pagemap does not contain the
    +     * mapping.
    +     */
    +    if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
    +        /*
    +         * Initialized portion of the segment. The memory is backed
    +         * directly by the content of the ELF image. Pages are
    +         * only copied if the segment is writable so that there
    +         * can be more instantions of the same memory ELF image
    +         * used at a time. Note that this could be later done
    +         * as COW.
    +         */
    +        if (entry->p_flags & PF_W) {
    +            frame = (uintptr_t)frame_alloc_noreserve(ONE_FRAME, 0);
    +            memcpy((void *) PA2KA(frame),
    +                (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
    +            if (entry->p_flags & PF_X) {
    +                smc_coherence_block((void *) PA2KA(frame),
    +                    FRAME_SIZE);
    +            }
    +            dirty = true;
    +        } else {
    +            frame = KA2PA(base + i * FRAME_SIZE);
    +        }
    +    } else if (page >= start_anon) {
    +        /*
    +         * This is the uninitialized portion of the segment.
    +         * It is not physically present in the ELF image.
    +         * To resolve the situation, a frame must be allocated
    +         * and cleared.
    +         */
    +        frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
    +        memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
    +        dirty = true;
    +    } else {
    +        size_t pad_lo, pad_hi;
    +        /*
    +         * The mixed case.
    +         *
    +         * The middle part is backed by the ELF image and
    +         * the lower and upper parts are anonymous memory.
    +         * (The segment can be and often is shorter than 1 page).
    +         */
    +        if (page < entry->p_vaddr)
    +            pad_lo = entry->p_vaddr - page;
    +        else
    +            pad_lo = 0;
    +
    +        if (start_anon < page + PAGE_SIZE)
    +            pad_hi = page + PAGE_SIZE - start_anon;
    +        else
    +            pad_hi = 0;
    +
    +        frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
    +        memcpy((void *) (PA2KA(frame) + pad_lo),
    +            (void *) (base + i * FRAME_SIZE + pad_lo),
    +            FRAME_SIZE - pad_lo - pad_hi);
    +        if (entry->p_flags & PF_X) {
    +            smc_coherence_block((void *) (PA2KA(frame) + pad_lo),
    +                FRAME_SIZE - pad_lo - pad_hi);
    +        }
    +        memsetb((void *) PA2KA(frame), pad_lo, 0);
    +        memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
    +            0);
    +        dirty = true;
    +    }
    +
    +    if (dirty && area->sh_info) {
    +        frame_reference_add(ADDR2PFN(frame));
    +        btree_insert(&area->sh_info->pagemap, page - area->base,
    +            (void *) frame, leaf);
    +    }
    +
    +    if (area->sh_info)
    +        mutex_unlock(&area->sh_info->lock);
    +
    +    page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
    +    if (!used_space_insert(area, page, 1))
    +        panic("Cannot insert used space.");
    +
    +    return AS_PF_OK;
    +}
    +
    +/** Free a frame that is backed by the ELF backend.
    + *
    + * The address space area and page tables must be already locked.
    + *
    + * @param area  Pointer to the address space area.
    + * @param page  Page that is mapped to frame. Must be aligned to
    + *              PAGE_SIZE.
    + * @param frame Frame to be released.
    + *
    + */
    +void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
    +{
    +    elf_segment_header_t *entry = area->backend_data.segment;
    +    uintptr_t start_anon;
    +
    +    ASSERT(page_table_locked(area->as));
    +    ASSERT(mutex_locked(&area->lock));
    +
    +    ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
    +    ASSERT(page < entry->p_vaddr + entry->p_memsz);
    +
    +    start_anon = entry->p_vaddr + entry->p_filesz;
    +
    +    if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
    +        if (entry->p_flags & PF_W) {
    +            /*
    +             * Free the frame with the copy of writable segment
    +             * data.
    +             */
    +            frame_free_noreserve(frame);
    +        }
    +    } else {
    +        /*
    +         * The frame is either anonymous memory or the mixed case (i.e.
    +         * lower part is backed by the ELF image and the upper is
    +         * anonymous). In any case, a frame needs to be freed.
    +         */
    +        frame_free_noreserve(frame);
    +    }
    +}
    +
     /** @}
      */

elf_page_fault() and elf_frame_free(), shown above as additions at the end of the file, were previously defined directly below the elf_backend initializer (old lines 67–263) and are removed from that location; their bodies are unchanged, the changeset only moves them below the new lifecycle functions.
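The @todo comments in elf_create() and elf_destroy() note that reserving area->pages is an over-approximation: for a non-writable segment, pages lying entirely inside the initialized (file-backed) part are mapped straight from the ELF image by elf_page_fault() and never consume a private frame. The sketch below shows one way such a tighter bound could be computed for that non-writable, non-shared case; it is purely illustrative, uses a hypothetical helper name, and ignores the supporting structures the comment also mentions.

    /* Illustrative only: pages that may need private frames for a non-writable,
     * non-shared ELF segment. Pages wholly inside [p_vaddr, p_vaddr + p_filesz)
     * are backed by the image itself; only pages overlapping the anonymous tail
     * (p_filesz..p_memsz) get frames from the fault handler.
     * Assumes page_size is a power of two. */
    static size_t elf_private_pages(uintptr_t p_vaddr, size_t p_filesz,
        size_t p_memsz, size_t page_size)
    {
        uintptr_t area_start = p_vaddr & ~(page_size - 1);              /* ALIGN_DOWN */
        uintptr_t start_anon = p_vaddr + p_filesz;
        uintptr_t area_end = (p_vaddr + p_memsz + page_size - 1) &
            ~(page_size - 1);                                           /* ALIGN_UP */

        /* Pages that lie completely inside the initialized portion. */
        size_t file_only = (start_anon > area_start) ?
            (start_anon - area_start) / page_size : 0;

        return (area_end - area_start) / page_size - file_only;
    }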
kernel/generic/src/mm/backend_phys.c
r630a8ef → r03523dc

     #include <align.h>
     
    +static bool phys_create(as_area_t *);
    +static void phys_share(as_area_t *area);
    +static void phys_destroy(as_area_t *);
    +
     static int phys_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
    -static void phys_share(as_area_t *area);
     
     mem_backend_t phys_backend = {
    +    .create = phys_create,
    +    .resize = NULL,
    +    .share = phys_share,
    +    .destroy = phys_destroy,
    +
         .page_fault = phys_page_fault,
         .frame_free = NULL,
    -    .share = phys_share
     };
    +
    +bool phys_create(as_area_t *area)
    +{
    +    return true;
    +}
    +
    +/** Share address space area backed by physical memory.
    + *
    + * Do actually nothing as sharing of address space areas
    + * that are backed up by physical memory is very easy.
    + * Note that the function must be defined so that
    + * as_area_share() will succeed.
    + */
    +void phys_share(as_area_t *area)
    +{
    +    ASSERT(mutex_locked(&area->as->lock));
    +    ASSERT(mutex_locked(&area->lock));
    +}
    +
    +
    +void phys_destroy(as_area_t *area)
    +{
    +    /* Nothing to do. */
    +}
     
     /** Service a page fault in the address space area backed by physical memory.
    …
     }
     
     /** @}
      */

The original phys_share() definition and its comment (old lines 89–101, below phys_page_fault()) are removed; the body is identical to the copy added above, so the function is only moved within the file.
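phys_backend also demonstrates that not every hook has to be provided: .resize stays NULL (alongside the already-NULL .frame_free), and .create and .destroy are trivial because no memory has to be reserved for areas that map existing physical frames. A hypothetical minimal backend written against the extended mem_backend_t could therefore look like the sketch below; the names are illustrative and not part of HelenOS, and a real backend would need a working page_fault handler.

    #include <mm/as.h>

    /* Hypothetical no-op hooks for a backend that needs no reservation. */
    static bool noop_create(as_area_t *area) { return true; }
    static void noop_destroy(as_area_t *area) { /* nothing was reserved */ }
    static int noop_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
    {
        return AS_PF_FAULT;   /* this sketch cannot service faults */
    }

    mem_backend_t noop_backend = {
        .create = noop_create,
        .resize = NULL,        /* optional, as in phys_backend */
        .share = NULL,         /* only needed if as_area_share() is to succeed */
        .destroy = noop_destroy,

        .page_fault = noop_page_fault,
        .frame_free = NULL,
    };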