Changeset 3375bd4 in mainline for kernel/generic/src/mm
- Timestamp: 2011-05-17T07:44:17Z (14 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 04c418d, 2586860, 5e6e50b
- Parents: 72cd53d (diff), 0d8a304 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel/generic/src/mm
- Files: 1 added, 5 edited
kernel/generic/src/mm/as.c
r72cd53d r3375bd4 80 80 #include <arch/interrupt.h> 81 81 82 #ifdef CONFIG_VIRT_IDX_DCACHE83 #include <arch/mm/cache.h>84 #endif /* CONFIG_VIRT_IDX_DCACHE */85 86 82 /** 87 83 * Each architecture decides what functions will be used to carry out … … 447 443 else 448 444 memsetb(&area->backend_data, sizeof(area->backend_data), 0); 445 446 if (area->backend && area->backend->create) { 447 if (!area->backend->create(area)) { 448 free(area); 449 mutex_unlock(&as->lock); 450 return NULL; 451 } 452 } 449 453 450 454 btree_create(&area->used_space); … … 690 694 } 691 695 696 if (area->backend && area->backend->resize) { 697 if (!area->backend->resize(area, pages)) { 698 mutex_unlock(&area->lock); 699 mutex_unlock(&as->lock); 700 return ENOMEM; 701 } 702 } 703 692 704 area->pages = pages; 693 705 … … 756 768 return ENOENT; 757 769 } 770 771 if (area->backend && area->backend->destroy) 772 area->backend->destroy(area); 758 773 759 774 uintptr_t base = area->base; -
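The as.c hunks above call new optional create/resize/destroy hooks (alongside the existing share and fault handlers) through area->backend. The mem_backend_t declaration itself lives in a header that is not part of this changeset; judging from the prototypes and designated initializers in the backend files below, it presumably now has roughly this shape (an inferred sketch, not the actual header):

/* Inferred layout of mem_backend_t after this merge; the real declaration
 * is in a header outside this diff, so field order and types are deduced
 * from the backend initializers below. */
typedef struct mem_backend {
	bool (*create)(as_area_t *);
	bool (*resize)(as_area_t *, size_t);
	void (*share)(as_area_t *);
	void (*destroy)(as_area_t *);

	int (*page_fault)(as_area_t *, uintptr_t, pf_access_t);
	void (*frame_free)(as_area_t *, uintptr_t, uintptr_t);
} mem_backend_t;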
kernel/generic/src/mm/backend_anon.c
r72cd53d r3375bd4 39 39 #include <mm/as.h> 40 40 #include <mm/page.h> 41 #include <mm/reserve.h> 41 42 #include <genarch/mm/page_pt.h> 42 43 #include <genarch/mm/page_ht.h> … … 51 52 #include <arch.h> 52 53 53 #ifdef CONFIG_VIRT_IDX_DCACHE 54 #include <arch/mm/cache.h> 55 #endif 56 57 static int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access); 58 static void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);59 static void anon_ share(as_area_t *area);54 static bool anon_create(as_area_t *); 55 static bool anon_resize(as_area_t *, size_t); 56 static void anon_share(as_area_t *); 57 static void anon_destroy(as_area_t *); 58 59 static int anon_page_fault(as_area_t *, uintptr_t, pf_access_t); 60 static void anon_frame_free(as_area_t *, uintptr_t, uintptr_t); 60 61 61 62 mem_backend_t anon_backend = { 63 .create = anon_create, 64 .resize = anon_resize, 65 .share = anon_share, 66 .destroy = anon_destroy, 67 62 68 .page_fault = anon_page_fault, 63 69 .frame_free = anon_frame_free, 64 .share = anon_share65 70 }; 71 72 bool anon_create(as_area_t *area) 73 { 74 return reserve_try_alloc(area->pages); 75 } 76 77 bool anon_resize(as_area_t *area, size_t new_pages) 78 { 79 if (new_pages > area->pages) 80 return reserve_try_alloc(new_pages - area->pages); 81 else if (new_pages < area->pages) 82 reserve_free(area->pages - new_pages); 83 84 return true; 85 } 86 87 /** Share the anonymous address space area. 88 * 89 * Sharing of anonymous area is done by duplicating its entire mapping 90 * to the pagemap. Page faults will primarily search for frames there. 91 * 92 * The address space and address space area must be already locked. 93 * 94 * @param area Address space area to be shared. 95 */ 96 void anon_share(as_area_t *area) 97 { 98 link_t *cur; 99 100 ASSERT(mutex_locked(&area->as->lock)); 101 ASSERT(mutex_locked(&area->lock)); 102 103 /* 104 * Copy used portions of the area to sh_info's page map. 105 */ 106 mutex_lock(&area->sh_info->lock); 107 for (cur = area->used_space.leaf_head.next; 108 cur != &area->used_space.leaf_head; cur = cur->next) { 109 btree_node_t *node; 110 unsigned int i; 111 112 node = list_get_instance(cur, btree_node_t, leaf_link); 113 for (i = 0; i < node->keys; i++) { 114 uintptr_t base = node->key[i]; 115 size_t count = (size_t) node->value[i]; 116 unsigned int j; 117 118 for (j = 0; j < count; j++) { 119 pte_t *pte; 120 121 page_table_lock(area->as, false); 122 pte = page_mapping_find(area->as, 123 base + j * PAGE_SIZE); 124 ASSERT(pte && PTE_VALID(pte) && 125 PTE_PRESENT(pte)); 126 btree_insert(&area->sh_info->pagemap, 127 (base + j * PAGE_SIZE) - area->base, 128 (void *) PTE_GET_FRAME(pte), NULL); 129 page_table_unlock(area->as, false); 130 131 pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte)); 132 frame_reference_add(pfn); 133 } 134 135 } 136 } 137 mutex_unlock(&area->sh_info->lock); 138 } 139 140 void anon_destroy(as_area_t *area) 141 { 142 reserve_free(area->pages); 143 } 144 66 145 67 146 /** Service a page fault in the anonymous memory address space area. 
… … 115 194 } 116 195 if (allocate) { 117 frame = (uintptr_t) frame_alloc(ONE_FRAME, 0); 196 frame = (uintptr_t) frame_alloc_noreserve( 197 ONE_FRAME, 0); 118 198 memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); 119 199 … … 145 225 * the different causes 146 226 */ 147 frame = (uintptr_t) frame_alloc (ONE_FRAME, 0);227 frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0); 148 228 memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); 149 229 } … … 174 254 ASSERT(mutex_locked(&area->lock)); 175 255 176 frame_free(frame); 177 } 178 179 /** Share the anonymous address space area. 180 * 181 * Sharing of anonymous area is done by duplicating its entire mapping 182 * to the pagemap. Page faults will primarily search for frames there. 183 * 184 * The address space and address space area must be already locked. 185 * 186 * @param area Address space area to be shared. 187 */ 188 void anon_share(as_area_t *area) 189 { 190 link_t *cur; 191 192 ASSERT(mutex_locked(&area->as->lock)); 193 ASSERT(mutex_locked(&area->lock)); 194 195 /* 196 * Copy used portions of the area to sh_info's page map. 197 */ 198 mutex_lock(&area->sh_info->lock); 199 for (cur = area->used_space.leaf_head.next; 200 cur != &area->used_space.leaf_head; cur = cur->next) { 201 btree_node_t *node; 202 unsigned int i; 203 204 node = list_get_instance(cur, btree_node_t, leaf_link); 205 for (i = 0; i < node->keys; i++) { 206 uintptr_t base = node->key[i]; 207 size_t count = (size_t) node->value[i]; 208 unsigned int j; 209 210 for (j = 0; j < count; j++) { 211 pte_t *pte; 212 213 page_table_lock(area->as, false); 214 pte = page_mapping_find(area->as, 215 base + j * PAGE_SIZE); 216 ASSERT(pte && PTE_VALID(pte) && 217 PTE_PRESENT(pte)); 218 btree_insert(&area->sh_info->pagemap, 219 (base + j * PAGE_SIZE) - area->base, 220 (void *) PTE_GET_FRAME(pte), NULL); 221 page_table_unlock(area->as, false); 222 223 pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte)); 224 frame_reference_add(pfn); 225 } 226 227 } 228 } 229 mutex_unlock(&area->sh_info->lock); 256 frame_free_noreserve(frame); 230 257 } 231 258 -
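With this change the anonymous backend reserves its whole size when the area is created (anon_create), releases it on destruction (anon_destroy), and on resize only charges or refunds the difference, while its fault handler switches to the no-reserve frame allocator. A compilable toy illustration of that delta accounting, with reserve_try_alloc()/reserve_free() simulated by a plain counter (user-space sketch, not kernel code):

#include <stdbool.h>
#include <stddef.h>

static size_t reserve_pages = 1024;	/* pretend this many pages are reservable */

static bool reserve_try_alloc_sim(size_t pages)
{
	if (pages > reserve_pages)
		return false;
	reserve_pages -= pages;
	return true;
}

static void reserve_free_sim(size_t pages)
{
	reserve_pages += pages;
}

/* Same logic as anon_resize() above: only the difference between the old
 * and the new size is charged against, or returned to, the reserve. */
static bool anon_resize_sim(size_t old_pages, size_t new_pages)
{
	if (new_pages > old_pages)
		return reserve_try_alloc_sim(new_pages - old_pages);
	else if (new_pages < old_pages)
		reserve_free_sim(old_pages - new_pages);

	return true;
}

int main(void)
{
	bool ok = anon_resize_sim(16, 24);	/* grow: tries to reserve 8 pages */
	if (ok)
		(void) anon_resize_sim(24, 10);	/* shrink: returns 14 pages */
	return 0;
}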
kernel/generic/src/mm/backend_elf.c
r72cd53d r3375bd4 43 43 #include <mm/slab.h> 44 44 #include <mm/page.h> 45 #include <mm/reserve.h> 45 46 #include <genarch/mm/page_pt.h> 46 47 #include <genarch/mm/page_ht.h> … … 51 52 #include <arch/barrier.h> 52 53 53 #ifdef CONFIG_VIRT_IDX_DCACHE 54 #include <arch/mm/cache.h> 55 #endif 56 57 static int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access); 58 static void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);59 static void elf_ share(as_area_t *area);54 static bool elf_create(as_area_t *); 55 static bool elf_resize(as_area_t *, size_t); 56 static void elf_share(as_area_t *); 57 static void elf_destroy(as_area_t *); 58 59 static int elf_page_fault(as_area_t *, uintptr_t, pf_access_t); 60 static void elf_frame_free(as_area_t *, uintptr_t, uintptr_t); 60 61 61 62 mem_backend_t elf_backend = { 63 .create = elf_create, 64 .resize = elf_resize, 65 .share = elf_share, 66 .destroy = elf_destroy, 67 62 68 .page_fault = elf_page_fault, 63 69 .frame_free = elf_frame_free, 64 .share = elf_share65 70 }; 66 71 67 /** Service a page fault in the ELF backend address space area. 68 * 69 * The address space area and page tables must be already locked. 70 * 71 * @param area Pointer to the address space area. 72 * @param addr Faulting virtual address. 73 * @param access Access mode that caused the fault (i.e. 74 * read/write/exec). 75 * 76 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK 77 * on success (i.e. serviced). 78 */ 79 int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access) 80 { 81 elf_header_t *elf = area->backend_data.elf; 72 static size_t elf_nonanon_pages_get(as_area_t *area) 73 { 82 74 elf_segment_header_t *entry = area->backend_data.segment; 83 btree_node_t *leaf; 84 uintptr_t base, frame, page, start_anon; 85 size_t i; 86 bool dirty = false; 87 88 ASSERT(page_table_locked(AS)); 89 ASSERT(mutex_locked(&area->lock)); 90 91 if (!as_area_check_access(area, access)) 92 return AS_PF_FAULT; 75 uintptr_t first = ALIGN_UP(entry->p_vaddr, PAGE_SIZE); 76 uintptr_t last = ALIGN_DOWN(entry->p_vaddr + entry->p_filesz, 77 PAGE_SIZE); 78 79 if (entry->p_flags & PF_W) 80 return 0; 81 82 if (last < first) 83 return 0; 84 85 return last - first; 86 } 87 88 bool elf_create(as_area_t *area) 89 { 90 size_t nonanon_pages = elf_nonanon_pages_get(area); 91 92 if (area->pages <= nonanon_pages) 93 return true; 93 94 94 if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) 95 return AS_PF_FAULT; 95 return reserve_try_alloc(area->pages - nonanon_pages); 96 } 97 98 bool elf_resize(as_area_t *area, size_t new_pages) 99 { 100 size_t nonanon_pages = elf_nonanon_pages_get(area); 101 102 if (new_pages > area->pages) { 103 /* The area is growing. */ 104 if (area->pages >= nonanon_pages) 105 return reserve_try_alloc(new_pages - area->pages); 106 else if (new_pages > nonanon_pages) 107 return reserve_try_alloc(new_pages - nonanon_pages); 108 } else if (new_pages < area->pages) { 109 /* The area is shrinking. 
*/ 110 if (new_pages >= nonanon_pages) 111 reserve_free(area->pages - new_pages); 112 else if (area->pages > nonanon_pages) 113 reserve_free(nonanon_pages - new_pages); 114 } 96 115 97 if (addr >= entry->p_vaddr + entry->p_memsz) 98 return AS_PF_FAULT; 99 100 i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH; 101 base = (uintptr_t) 102 (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE)); 103 104 /* Virtual address of faulting page*/ 105 page = ALIGN_DOWN(addr, PAGE_SIZE); 106 107 /* Virtual address of the end of initialized part of segment */ 108 start_anon = entry->p_vaddr + entry->p_filesz; 109 110 if (area->sh_info) { 111 bool found = false; 112 113 /* 114 * The address space area is shared. 115 */ 116 117 mutex_lock(&area->sh_info->lock); 118 frame = (uintptr_t) btree_search(&area->sh_info->pagemap, 119 page - area->base, &leaf); 120 if (!frame) { 121 unsigned int i; 122 123 /* 124 * Workaround for valid NULL address. 125 */ 126 127 for (i = 0; i < leaf->keys; i++) { 128 if (leaf->key[i] == page - area->base) { 129 found = true; 130 break; 131 } 132 } 133 } 134 if (frame || found) { 135 frame_reference_add(ADDR2PFN(frame)); 136 page_mapping_insert(AS, addr, frame, 137 as_area_get_flags(area)); 138 if (!used_space_insert(area, page, 1)) 139 panic("Cannot insert used space."); 140 mutex_unlock(&area->sh_info->lock); 141 return AS_PF_OK; 142 } 143 } 144 145 /* 146 * The area is either not shared or the pagemap does not contain the 147 * mapping. 148 */ 149 if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) { 150 /* 151 * Initialized portion of the segment. The memory is backed 152 * directly by the content of the ELF image. Pages are 153 * only copied if the segment is writable so that there 154 * can be more instantions of the same memory ELF image 155 * used at a time. Note that this could be later done 156 * as COW. 157 */ 158 if (entry->p_flags & PF_W) { 159 frame = (uintptr_t)frame_alloc(ONE_FRAME, 0); 160 memcpy((void *) PA2KA(frame), 161 (void *) (base + i * FRAME_SIZE), FRAME_SIZE); 162 if (entry->p_flags & PF_X) { 163 smc_coherence_block((void *) PA2KA(frame), 164 FRAME_SIZE); 165 } 166 dirty = true; 167 } else { 168 frame = KA2PA(base + i * FRAME_SIZE); 169 } 170 } else if (page >= start_anon) { 171 /* 172 * This is the uninitialized portion of the segment. 173 * It is not physically present in the ELF image. 174 * To resolve the situation, a frame must be allocated 175 * and cleared. 176 */ 177 frame = (uintptr_t)frame_alloc(ONE_FRAME, 0); 178 memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); 179 dirty = true; 180 } else { 181 size_t pad_lo, pad_hi; 182 /* 183 * The mixed case. 184 * 185 * The middle part is backed by the ELF image and 186 * the lower and upper parts are anonymous memory. 187 * (The segment can be and often is shorter than 1 page). 
188 */ 189 if (page < entry->p_vaddr) 190 pad_lo = entry->p_vaddr - page; 191 else 192 pad_lo = 0; 193 194 if (start_anon < page + PAGE_SIZE) 195 pad_hi = page + PAGE_SIZE - start_anon; 196 else 197 pad_hi = 0; 198 199 frame = (uintptr_t)frame_alloc(ONE_FRAME, 0); 200 memcpy((void *) (PA2KA(frame) + pad_lo), 201 (void *) (base + i * FRAME_SIZE + pad_lo), 202 FRAME_SIZE - pad_lo - pad_hi); 203 if (entry->p_flags & PF_X) { 204 smc_coherence_block((void *) (PA2KA(frame) + pad_lo), 205 FRAME_SIZE - pad_lo - pad_hi); 206 } 207 memsetb((void *) PA2KA(frame), pad_lo, 0); 208 memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi, 209 0); 210 dirty = true; 211 } 212 213 if (dirty && area->sh_info) { 214 frame_reference_add(ADDR2PFN(frame)); 215 btree_insert(&area->sh_info->pagemap, page - area->base, 216 (void *) frame, leaf); 217 } 218 219 if (area->sh_info) 220 mutex_unlock(&area->sh_info->lock); 221 222 page_mapping_insert(AS, addr, frame, as_area_get_flags(area)); 223 if (!used_space_insert(area, page, 1)) 224 panic("Cannot insert used space."); 225 226 return AS_PF_OK; 227 } 228 229 /** Free a frame that is backed by the ELF backend. 230 * 231 * The address space area and page tables must be already locked. 232 * 233 * @param area Pointer to the address space area. 234 * @param page Page that is mapped to frame. Must be aligned to 235 * PAGE_SIZE. 236 * @param frame Frame to be released. 237 * 238 */ 239 void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame) 240 { 241 elf_segment_header_t *entry = area->backend_data.segment; 242 uintptr_t start_anon; 243 244 ASSERT(page_table_locked(area->as)); 245 ASSERT(mutex_locked(&area->lock)); 246 247 ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)); 248 ASSERT(page < entry->p_vaddr + entry->p_memsz); 249 250 start_anon = entry->p_vaddr + entry->p_filesz; 251 252 if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) { 253 if (entry->p_flags & PF_W) { 254 /* 255 * Free the frame with the copy of writable segment 256 * data. 257 */ 258 frame_free(frame); 259 } 260 } else { 261 /* 262 * The frame is either anonymous memory or the mixed case (i.e. 263 * lower part is backed by the ELF image and the upper is 264 * anonymous). In any case, a frame needs to be freed. 265 */ 266 frame_free(frame); 267 } 116 return true; 268 117 } 269 118 … … 356 205 } 357 206 207 void elf_destroy(as_area_t *area) 208 { 209 size_t nonanon_pages = elf_nonanon_pages_get(area); 210 211 if (area->pages > nonanon_pages) 212 reserve_free(area->pages - nonanon_pages); 213 } 214 215 /** Service a page fault in the ELF backend address space area. 216 * 217 * The address space area and page tables must be already locked. 218 * 219 * @param area Pointer to the address space area. 220 * @param addr Faulting virtual address. 221 * @param access Access mode that caused the fault (i.e. 222 * read/write/exec). 223 * 224 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK 225 * on success (i.e. serviced). 
226 */ 227 int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access) 228 { 229 elf_header_t *elf = area->backend_data.elf; 230 elf_segment_header_t *entry = area->backend_data.segment; 231 btree_node_t *leaf; 232 uintptr_t base, frame, page, start_anon; 233 size_t i; 234 bool dirty = false; 235 236 ASSERT(page_table_locked(AS)); 237 ASSERT(mutex_locked(&area->lock)); 238 239 if (!as_area_check_access(area, access)) 240 return AS_PF_FAULT; 241 242 if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) 243 return AS_PF_FAULT; 244 245 if (addr >= entry->p_vaddr + entry->p_memsz) 246 return AS_PF_FAULT; 247 248 i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH; 249 base = (uintptr_t) 250 (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE)); 251 252 /* Virtual address of faulting page*/ 253 page = ALIGN_DOWN(addr, PAGE_SIZE); 254 255 /* Virtual address of the end of initialized part of segment */ 256 start_anon = entry->p_vaddr + entry->p_filesz; 257 258 if (area->sh_info) { 259 bool found = false; 260 261 /* 262 * The address space area is shared. 263 */ 264 265 mutex_lock(&area->sh_info->lock); 266 frame = (uintptr_t) btree_search(&area->sh_info->pagemap, 267 page - area->base, &leaf); 268 if (!frame) { 269 unsigned int i; 270 271 /* 272 * Workaround for valid NULL address. 273 */ 274 275 for (i = 0; i < leaf->keys; i++) { 276 if (leaf->key[i] == page - area->base) { 277 found = true; 278 break; 279 } 280 } 281 } 282 if (frame || found) { 283 frame_reference_add(ADDR2PFN(frame)); 284 page_mapping_insert(AS, addr, frame, 285 as_area_get_flags(area)); 286 if (!used_space_insert(area, page, 1)) 287 panic("Cannot insert used space."); 288 mutex_unlock(&area->sh_info->lock); 289 return AS_PF_OK; 290 } 291 } 292 293 /* 294 * The area is either not shared or the pagemap does not contain the 295 * mapping. 296 */ 297 if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) { 298 /* 299 * Initialized portion of the segment. The memory is backed 300 * directly by the content of the ELF image. Pages are 301 * only copied if the segment is writable so that there 302 * can be more instantions of the same memory ELF image 303 * used at a time. Note that this could be later done 304 * as COW. 305 */ 306 if (entry->p_flags & PF_W) { 307 frame = (uintptr_t)frame_alloc_noreserve(ONE_FRAME, 0); 308 memcpy((void *) PA2KA(frame), 309 (void *) (base + i * FRAME_SIZE), FRAME_SIZE); 310 if (entry->p_flags & PF_X) { 311 smc_coherence_block((void *) PA2KA(frame), 312 FRAME_SIZE); 313 } 314 dirty = true; 315 } else { 316 frame = KA2PA(base + i * FRAME_SIZE); 317 } 318 } else if (page >= start_anon) { 319 /* 320 * This is the uninitialized portion of the segment. 321 * It is not physically present in the ELF image. 322 * To resolve the situation, a frame must be allocated 323 * and cleared. 324 */ 325 frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0); 326 memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); 327 dirty = true; 328 } else { 329 size_t pad_lo, pad_hi; 330 /* 331 * The mixed case. 332 * 333 * The middle part is backed by the ELF image and 334 * the lower and upper parts are anonymous memory. 335 * (The segment can be and often is shorter than 1 page). 
336 */ 337 if (page < entry->p_vaddr) 338 pad_lo = entry->p_vaddr - page; 339 else 340 pad_lo = 0; 341 342 if (start_anon < page + PAGE_SIZE) 343 pad_hi = page + PAGE_SIZE - start_anon; 344 else 345 pad_hi = 0; 346 347 frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0); 348 memcpy((void *) (PA2KA(frame) + pad_lo), 349 (void *) (base + i * FRAME_SIZE + pad_lo), 350 FRAME_SIZE - pad_lo - pad_hi); 351 if (entry->p_flags & PF_X) { 352 smc_coherence_block((void *) (PA2KA(frame) + pad_lo), 353 FRAME_SIZE - pad_lo - pad_hi); 354 } 355 memsetb((void *) PA2KA(frame), pad_lo, 0); 356 memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi, 357 0); 358 dirty = true; 359 } 360 361 if (dirty && area->sh_info) { 362 frame_reference_add(ADDR2PFN(frame)); 363 btree_insert(&area->sh_info->pagemap, page - area->base, 364 (void *) frame, leaf); 365 } 366 367 if (area->sh_info) 368 mutex_unlock(&area->sh_info->lock); 369 370 page_mapping_insert(AS, addr, frame, as_area_get_flags(area)); 371 if (!used_space_insert(area, page, 1)) 372 panic("Cannot insert used space."); 373 374 return AS_PF_OK; 375 } 376 377 /** Free a frame that is backed by the ELF backend. 378 * 379 * The address space area and page tables must be already locked. 380 * 381 * @param area Pointer to the address space area. 382 * @param page Page that is mapped to frame. Must be aligned to 383 * PAGE_SIZE. 384 * @param frame Frame to be released. 385 * 386 */ 387 void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame) 388 { 389 elf_segment_header_t *entry = area->backend_data.segment; 390 uintptr_t start_anon; 391 392 ASSERT(page_table_locked(area->as)); 393 ASSERT(mutex_locked(&area->lock)); 394 395 ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)); 396 ASSERT(page < entry->p_vaddr + entry->p_memsz); 397 398 start_anon = entry->p_vaddr + entry->p_filesz; 399 400 if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) { 401 if (entry->p_flags & PF_W) { 402 /* 403 * Free the frame with the copy of writable segment 404 * data. 405 */ 406 frame_free_noreserve(frame); 407 } 408 } else { 409 /* 410 * The frame is either anonymous memory or the mixed case (i.e. 411 * lower part is backed by the ELF image and the upper is 412 * anonymous). In any case, a frame needs to be freed. 413 */ 414 frame_free_noreserve(frame); 415 } 416 } 417 358 418 /** @} 359 419 */ -
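The new helper elf_nonanon_pages_get() measures the file-backed, page-aligned middle of the segment, which needs no reservation because those pages are served straight from the ELF image; writable segments count as fully anonymous, since their pages are always private copies. A small worked example with made-up segment values (PAGE_SIZE assumed to be 4096):

/* Hypothetical read-only (non-PF_W) segment:
 *   p_vaddr  = 0x10080, p_filesz = 0x5080
 *   first = ALIGN_UP(0x10080, 4096)            = 0x11000
 *   last  = ALIGN_DOWN(0x10080 + 0x5080, 4096) = 0x15000
 * The span [0x11000, 0x15000), i.e. four whole pages, stays backed by the
 * image itself, so elf_create()/elf_resize() charge only the rest of the
 * area to the reserve. A PF_W segment, or one where last < first (shorter
 * than a page), yields 0 and is treated as fully anonymous. */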
kernel/generic/src/mm/backend_phys.c
r72cd53d r3375bd4 48 48 #include <align.h> 49 49 50 static int phys_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access); 51 static void phys_share(as_area_t *area); 50 static bool phys_create(as_area_t *); 51 static void phys_share(as_area_t *); 52 static void phys_destroy(as_area_t *); 53 54 static int phys_page_fault(as_area_t *, uintptr_t, pf_access_t); 52 55 53 56 mem_backend_t phys_backend = { 57 .create = phys_create, 58 .resize = NULL, 59 .share = phys_share, 60 .destroy = phys_destroy, 61 54 62 .page_fault = phys_page_fault, 55 63 .frame_free = NULL, 56 .share = phys_share57 64 }; 65 66 bool phys_create(as_area_t *area) 67 { 68 return true; 69 } 70 71 /** Share address space area backed by physical memory. 72 * 73 * Do actually nothing as sharing of address space areas 74 * that are backed up by physical memory is very easy. 75 * Note that the function must be defined so that 76 * as_area_share() will succeed. 77 */ 78 void phys_share(as_area_t *area) 79 { 80 ASSERT(mutex_locked(&area->as->lock)); 81 ASSERT(mutex_locked(&area->lock)); 82 } 83 84 85 void phys_destroy(as_area_t *area) 86 { 87 /* Nothing to do. */ 88 } 58 89 59 90 /** Service a page fault in the address space area backed by physical memory. … … 88 119 } 89 120 90 /** Share address space area backed by physical memory.91 *92 * Do actually nothing as sharing of address space areas93 * that are backed up by physical memory is very easy.94 * Note that the function must be defined so that95 * as_area_share() will succeed.96 */97 void phys_share(as_area_t *area)98 {99 ASSERT(mutex_locked(&area->as->lock));100 ASSERT(mutex_locked(&area->lock));101 }102 103 121 /** @} 104 122 */ -
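The physical backend maps memory that already exists, so there is nothing to reserve: create succeeds unconditionally, destroy is a no-op, and resize is left NULL, which the generic code in as.c above treats as "skip the reservation step". A minimal, hypothetical declaration showing that optional-hook convention (example_page_fault is a placeholder, not a real symbol):

/* Hypothetical backend relying on the optional-hook convention: as.c only
 * invokes hooks that are non-NULL, so leaving create/resize/destroy unset
 * means no reservation bookkeeping is performed for areas of this backend. */
mem_backend_t example_backend = {
	.create = NULL,
	.resize = NULL,
	.share = NULL,
	.destroy = NULL,

	.page_fault = example_page_fault,
	.frame_free = NULL,
};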
kernel/generic/src/mm/frame.c
r72cd53d r3375bd4 45 45 #include <typedefs.h> 46 46 #include <mm/frame.h> 47 #include <mm/reserve.h> 47 48 #include <mm/as.h> 48 49 #include <panic.h> … … 59 60 #include <macros.h> 60 61 #include <config.h> 62 #include <str.h> 61 63 62 64 zones_t zones; … … 180 182 * 181 183 */ 182 #ifdef CONFIG_DEBUG 183 NO_TRACE static size_t total_frames_free(void) 184 NO_TRACE static size_t frame_total_free_get_internal(void) 184 185 { 185 186 size_t total = 0; 186 187 size_t i; 188 187 189 for (i = 0; i < zones.count; i++) 188 190 total += zones.info[i].free_count; … … 190 192 return total; 191 193 } 192 #endif /* CONFIG_DEBUG */ 194 195 NO_TRACE size_t frame_total_free_get(void) 196 { 197 size_t total; 198 199 irq_spinlock_lock(&zones.lock, true); 200 total = frame_total_free_get_internal(); 201 irq_spinlock_unlock(&zones.lock, true); 202 203 return total; 204 } 205 193 206 194 207 /** Find a zone with a given frames. … … 472 485 * @param frame_idx Frame index relative to zone. 473 486 * 474 */ 475 NO_TRACE static void zone_frame_free(zone_t *zone, size_t frame_idx) 487 * @return Number of freed frames. 488 * 489 */ 490 NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t frame_idx) 476 491 { 477 492 ASSERT(zone_flags_available(zone->flags)); 478 493 479 494 frame_t *frame = &zone->frames[frame_idx]; 480 481 /* Remember frame order */ 482 uint8_t order = frame->buddy_order; 495 size_t size = 0; 483 496 484 497 ASSERT(frame->refcount); 485 498 486 499 if (!--frame->refcount) { 487 buddy_system_free(zone->buddy_system, &frame->buddy_link);488 500 size = 1 << frame->buddy_order; 501 buddy_system_free(zone->buddy_system, &frame->buddy_link); 489 502 /* Update zone information. */ 490 zone->free_count += (1 << order); 491 zone->busy_count -= (1 << order); 492 } 503 zone->free_count += size; 504 zone->busy_count -= size; 505 } 506 507 return size; 493 508 } 494 509 … … 516 531 ASSERT(link); 517 532 zone->free_count--; 533 reserve_force_alloc(1); 518 534 } 519 535 … … 645 661 for (i = 0; i < cframes; i++) { 646 662 zones.info[znum].busy_count++; 647 zone_frame_free(&zones.info[znum],663 (void) zone_frame_free(&zones.info[znum], 648 664 pfn - zones.info[znum].base + i); 649 665 } … … 683 699 /* Free unneeded frames */ 684 700 for (i = count; i < (size_t) (1 << order); i++) 685 zone_frame_free(&zones.info[znum], i + frame_idx);701 (void) zone_frame_free(&zones.info[znum], i + frame_idx); 686 702 } 687 703 … … 695 711 * not to be 2^order size. Once the allocator is running it is no longer 696 712 * possible, merged configuration data occupies more space :-/ 697 *698 * The function uses699 713 * 700 714 */ … … 999 1013 size_t hint = pzone ? (*pzone) : 0; 1000 1014 1015 /* 1016 * If not told otherwise, we must first reserve the memory. 
1017 */ 1018 if (!(flags & FRAME_NO_RESERVE)) 1019 reserve_force_alloc(size); 1020 1001 1021 loop: 1002 1022 irq_spinlock_lock(&zones.lock, true); … … 1033 1053 if (flags & FRAME_ATOMIC) { 1034 1054 irq_spinlock_unlock(&zones.lock, true); 1055 if (!(flags & FRAME_NO_RESERVE)) 1056 reserve_free(size); 1035 1057 return NULL; 1036 1058 } 1037 1059 1038 1060 #ifdef CONFIG_DEBUG 1039 size_t avail = total_frames_free();1061 size_t avail = frame_total_free_get_internal(); 1040 1062 #endif 1041 1063 … … 1088 1110 } 1089 1111 1112 void *frame_alloc(uint8_t order, frame_flags_t flags) 1113 { 1114 return frame_alloc_generic(order, flags, NULL); 1115 } 1116 1117 void *frame_alloc_noreserve(uint8_t order, frame_flags_t flags) 1118 { 1119 return frame_alloc_generic(order, flags | FRAME_NO_RESERVE, NULL); 1120 } 1121 1090 1122 /** Free a frame. 1091 1123 * … … 1095 1127 * 1096 1128 * @param frame Physical Address of of the frame to be freed. 1097 * 1098 */ 1099 void frame_free(uintptr_t frame) 1100 { 1129 * @param flags Flags to control memory reservation. 1130 * 1131 */ 1132 void frame_free_generic(uintptr_t frame, frame_flags_t flags) 1133 { 1134 size_t size; 1135 1101 1136 irq_spinlock_lock(&zones.lock, true); 1102 1137 … … 1106 1141 pfn_t pfn = ADDR2PFN(frame); 1107 1142 size_t znum = find_zone(pfn, 1, 0); 1143 1108 1144 1109 1145 ASSERT(znum != (size_t) -1); 1110 1146 1111 zone_frame_free(&zones.info[znum], pfn - zones.info[znum].base);1147 size = zone_frame_free(&zones.info[znum], pfn - zones.info[znum].base); 1112 1148 1113 1149 irq_spinlock_unlock(&zones.lock, true); … … 1118 1154 mutex_lock(&mem_avail_mtx); 1119 1155 if (mem_avail_req > 0) 1120 mem_avail_req --;1156 mem_avail_req -= min(mem_avail_req, size); 1121 1157 1122 1158 if (mem_avail_req == 0) { … … 1125 1161 } 1126 1162 mutex_unlock(&mem_avail_mtx); 1163 1164 if (!(flags & FRAME_NO_RESERVE)) 1165 reserve_free(size); 1166 } 1167 1168 void frame_free(uintptr_t frame) 1169 { 1170 frame_free_generic(frame, 0); 1171 } 1172 1173 void frame_free_noreserve(uintptr_t frame) 1174 { 1175 frame_free_generic(frame, FRAME_NO_RESERVE); 1127 1176 } 1128 1177 … … 1355 1404 bool available = zone_flags_available(flags); 1356 1405 1406 uint64_t size; 1407 const char *size_suffix; 1408 bin_order_suffix(FRAMES2SIZE(count), &size, &size_suffix, false); 1409 1357 1410 printf("Zone number: %zu\n", znum); 1358 1411 printf("Zone base address: %p\n", (void *) base); 1359 printf("Zone size: %zu frames (% zu KiB)\n", count,1360 SIZE2KB(FRAMES2SIZE(count)));1412 printf("Zone size: %zu frames (%" PRIu64 " %s)\n", count, 1413 size, size_suffix); 1361 1414 printf("Zone flags: %c%c%c\n", 1362 1415 available ? 'A' : ' ', … … 1365 1418 1366 1419 if (available) { 1367 printf("Allocated space: %zu frames (%zu KiB)\n", 1368 busy_count, SIZE2KB(FRAMES2SIZE(busy_count))); 1369 printf("Available space: %zu frames (%zu KiB)\n", 1370 free_count, SIZE2KB(FRAMES2SIZE(free_count))); 1420 bin_order_suffix(FRAMES2SIZE(busy_count), &size, &size_suffix, 1421 false); 1422 printf("Allocated space: %zu frames (%" PRIu64 " %s)\n", 1423 busy_count, size, size_suffix); 1424 bin_order_suffix(FRAMES2SIZE(free_count), &size, &size_suffix, 1425 false); 1426 printf("Available space: %zu frames (%" PRIu64 " %s)\n", 1427 free_count, size, size_suffix); 1371 1428 } 1372 1429 }
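frame_alloc() and frame_free() are now thin wrappers around the *_generic() functions, and the new *_noreserve() variants pass FRAME_NO_RESERVE so that the reserve is left untouched. As the backend changes above show, ordinary callers keep charging the reserve per allocation (reserve_force_alloc() in frame_alloc_generic()), while backends that already reserved their pages via create()/resize() use the no-reserve variants in their fault handlers. A brief usage sketch mirroring those call sites (error handling omitted):

/* Ordinary allocation: the reserve is charged before the frame is handed out. */
uintptr_t frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
/* ... use the frame ... */
frame_free(frame);

/* Backend fault path: the pages were already accounted for when the area
 * was created or resized, so the per-frame reserve step is skipped. */
uintptr_t anon_frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
/* ... map it into the faulting area ... */
frame_free_noreserve(anon_frame);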