Changes in kernel/generic/src/mm/backend_anon.c [b4ffe5bc:1d432f9] in mainline
File: 1 edited
Legend:
  ' '  Unmodified
  '+'  Added
  '-'  Removed
kernel/generic/src/mm/backend_anon.c
--- kernel/generic/src/mm/backend_anon.c    (rb4ffe5bc)
+++ kernel/generic/src/mm/backend_anon.c    (r1d432f9)
@@ -39,5 +39,4 @@
 #include <mm/as.h>
 #include <mm/page.h>
-#include <mm/reserve.h>
 #include <genarch/mm/page_pt.h>
 #include <genarch/mm/page_ht.h>
@@ -50,98 +49,19 @@
 #include <typedefs.h>
 #include <align.h>
-#include <memstr.h>
 #include <arch.h>
 
-static bool anon_create(as_area_t *);
-static bool anon_resize(as_area_t *, size_t);
-static void anon_share(as_area_t *);
-static void anon_destroy(as_area_t *);
-
-static int anon_page_fault(as_area_t *, uintptr_t, pf_access_t);
-static void anon_frame_free(as_area_t *, uintptr_t, uintptr_t);
+#ifdef CONFIG_VIRT_IDX_DCACHE
+#include <arch/mm/cache.h>
+#endif
+
+static int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
+static void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
+static void anon_share(as_area_t *area);
 
 mem_backend_t anon_backend = {
-    .create = anon_create,
-    .resize = anon_resize,
-    .share = anon_share,
-    .destroy = anon_destroy,
-
     .page_fault = anon_page_fault,
     .frame_free = anon_frame_free,
+    .share = anon_share
 };
-
-bool anon_create(as_area_t *area)
-{
-    return reserve_try_alloc(area->pages);
-}
-
-bool anon_resize(as_area_t *area, size_t new_pages)
-{
-    if (new_pages > area->pages)
-        return reserve_try_alloc(new_pages - area->pages);
-    else if (new_pages < area->pages)
-        reserve_free(area->pages - new_pages);
-
-    return true;
-}
-
-/** Share the anonymous address space area.
- *
- * Sharing of anonymous area is done by duplicating its entire mapping
- * to the pagemap. Page faults will primarily search for frames there.
- *
- * The address space and address space area must be already locked.
- *
- * @param area Address space area to be shared.
- */
-void anon_share(as_area_t *area)
-{
-    link_t *cur;
-
-    ASSERT(mutex_locked(&area->as->lock));
-    ASSERT(mutex_locked(&area->lock));
-
-    /*
-     * Copy used portions of the area to sh_info's page map.
-     */
-    mutex_lock(&area->sh_info->lock);
-    for (cur = area->used_space.leaf_head.next;
-        cur != &area->used_space.leaf_head; cur = cur->next) {
-        btree_node_t *node;
-        unsigned int i;
-
-        node = list_get_instance(cur, btree_node_t, leaf_link);
-        for (i = 0; i < node->keys; i++) {
-            uintptr_t base = node->key[i];
-            size_t count = (size_t) node->value[i];
-            unsigned int j;
-
-            for (j = 0; j < count; j++) {
-                pte_t *pte;
-
-                page_table_lock(area->as, false);
-                pte = page_mapping_find(area->as,
-                    base + P2SZ(j), false);
-                ASSERT(pte && PTE_VALID(pte) &&
-                    PTE_PRESENT(pte));
-                btree_insert(&area->sh_info->pagemap,
-                    (base + P2SZ(j)) - area->base,
-                    (void *) PTE_GET_FRAME(pte), NULL);
-                page_table_unlock(area->as, false);
-
-                pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
-                frame_reference_add(pfn);
-            }
-
-        }
-    }
-    mutex_unlock(&area->sh_info->lock);
-}
-
-void anon_destroy(as_area_t *area)
-{
-    reserve_free(area->pages);
-}
-
 
 /** Service a page fault in the anonymous memory address space area.
@@ -195,6 +115,5 @@
     }
     if (allocate) {
-        frame = (uintptr_t) frame_alloc_noreserve(
-            ONE_FRAME, 0);
+        frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
         memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
 
@@ -226,5 +145,5 @@
     * the different causes
     */
-    frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
+    frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
     memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
 }
@@ -255,7 +174,60 @@
     ASSERT(mutex_locked(&area->lock));
 
-    frame_free_noreserve(frame);
+    frame_free(frame);
 }
 
+/** Share the anonymous address space area.
+ *
+ * Sharing of anonymous area is done by duplicating its entire mapping
+ * to the pagemap. Page faults will primarily search for frames there.
+ *
+ * The address space and address space area must be already locked.
+ *
+ * @param area Address space area to be shared.
+ */
+void anon_share(as_area_t *area)
+{
+    link_t *cur;
+
+    ASSERT(mutex_locked(&area->as->lock));
+    ASSERT(mutex_locked(&area->lock));
+
+    /*
+     * Copy used portions of the area to sh_info's page map.
+     */
+    mutex_lock(&area->sh_info->lock);
+    for (cur = area->used_space.leaf_head.next;
+        cur != &area->used_space.leaf_head; cur = cur->next) {
+        btree_node_t *node;
+        unsigned int i;
+
+        node = list_get_instance(cur, btree_node_t, leaf_link);
+        for (i = 0; i < node->keys; i++) {
+            uintptr_t base = node->key[i];
+            size_t count = (size_t) node->value[i];
+            unsigned int j;
+
+            for (j = 0; j < count; j++) {
+                pte_t *pte;
+
+                page_table_lock(area->as, false);
+                pte = page_mapping_find(area->as,
+                    base + j * PAGE_SIZE);
+                ASSERT(pte && PTE_VALID(pte) &&
+                    PTE_PRESENT(pte));
+                btree_insert(&area->sh_info->pagemap,
+                    (base + j * PAGE_SIZE) - area->base,
+                    (void *) PTE_GET_FRAME(pte), NULL);
+                page_table_unlock(area->as, false);
+
+                pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
+                frame_reference_add(pfn);
+            }
+
+        }
+    }
+    mutex_unlock(&area->sh_info->lock);
+}
+
 /** @}
  */
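
The substantive difference between the two revisions is the memory reservation machinery. On the rb4ffe5bc side, anon_create() and anon_resize() claim physical frames from a global reserve up front (reserve_try_alloc()/reserve_free() from mm/reserve.h), so creating or growing an anonymous area fails early when memory is overcommitted, and the page-fault path can then draw on that reservation with frame_alloc_noreserve()/frame_free_noreserve(). The r1d432f9 side has no reserve subsystem; plain frame_alloc() is used and can fail at fault time instead. A minimal sketch of the reservation idea follows; the counter and function bodies are simplified stand-ins for illustration only (the kernel's real accounting is concurrency-safe and more involved):

    #include <stdbool.h>
    #include <stddef.h>

    /* Frames still available for new reservations (illustrative only). */
    static size_t reservable_frames = 1024;

    /* Claim new_frames up front, at area creation/resize time, so that
     * failure is reported early rather than at page-fault time. */
    static bool reserve_try_alloc(size_t new_frames)
    {
        if (reservable_frames < new_frames)
            return false;
        reservable_frames -= new_frames;
        return true;
    }

    /* Return frames to the reserve, e.g. when an area shrinks or is
     * destroyed. */
    static void reserve_free(size_t frames)
    {
        reservable_frames += frames;
    }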
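
Two smaller interface differences show up in anon_share(): the rb4ffe5bc side computes page offsets with P2SZ(j) where the r1d432f9 side writes j * PAGE_SIZE, and its page_mapping_find() takes a third boolean argument (passed as false above) that the older two-argument signature lacks. Assuming the conventional definition of the macro, a page count shifted by the page width, the two offset forms are equivalent:

    /* Sketch of the page-count-to-size conversion; the exact definition
     * lives in the kernel's mm/page.h. */
    #define P2SZ(pages) \
        ((pages) << PAGE_WIDTH)    /* equivalent to (pages) * PAGE_SIZE */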