Changes in kernel/generic/src/mm/backend_anon.c [6b9e85b:1d432f9] in mainline
File: kernel/generic/src/mm/backend_anon.c (1 edited)

Legend: in the unified diff below, unmodified context lines carry no prefix, added lines are prefixed with '+', and removed lines with '-'.
kernel/generic/src/mm/backend_anon.c
--- kernel/generic/src/mm/backend_anon.c (r6b9e85b)
+++ kernel/generic/src/mm/backend_anon.c (r1d432f9)
@@ -39,5 +39,4 @@
 #include <mm/as.h>
 #include <mm/page.h>
-#include <mm/reserve.h>
 #include <genarch/mm/page_pt.h>
 #include <genarch/mm/page_ht.h>
@@ -52,101 +51,17 @@
 #include <arch.h>
 
-static bool anon_create(as_area_t *);
-static bool anon_resize(as_area_t *, size_t);
-static void anon_share(as_area_t *area);
-static void anon_destroy(as_area_t *);
+#ifdef CONFIG_VIRT_IDX_DCACHE
+#include <arch/mm/cache.h>
+#endif
 
 static int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
 static void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
+static void anon_share(as_area_t *area);
 
 mem_backend_t anon_backend = {
-    .create = anon_create,
-    .resize = anon_resize,
-    .share = anon_share,
-    .destroy = anon_destroy,
-
     .page_fault = anon_page_fault,
     .frame_free = anon_frame_free,
+    .share = anon_share
 };
-
-bool anon_create(as_area_t *area)
-{
-    return reserve_try_alloc(area->pages);
-}
-
-bool anon_resize(as_area_t *area, size_t new_pages)
-{
-    /**
-     * @todo
-     * Reserve also space needed for the supporting strutures allocated
-     * during page fault.
-     */
-
-    if (new_pages > area->pages)
-        return reserve_try_alloc(new_pages - area->pages);
-    else if (new_pages < area->pages)
-        reserve_free(area->pages - new_pages);
-
-    return true;
-}
-
-/** Share the anonymous address space area.
- *
- * Sharing of anonymous area is done by duplicating its entire mapping
- * to the pagemap. Page faults will primarily search for frames there.
- *
- * The address space and address space area must be already locked.
- *
- * @param area Address space area to be shared.
- */
-void anon_share(as_area_t *area)
-{
-    link_t *cur;
-
-    ASSERT(mutex_locked(&area->as->lock));
-    ASSERT(mutex_locked(&area->lock));
-
-    /*
-     * Copy used portions of the area to sh_info's page map.
-     */
-    mutex_lock(&area->sh_info->lock);
-    for (cur = area->used_space.leaf_head.next;
-        cur != &area->used_space.leaf_head; cur = cur->next) {
-        btree_node_t *node;
-        unsigned int i;
-
-        node = list_get_instance(cur, btree_node_t, leaf_link);
-        for (i = 0; i < node->keys; i++) {
-            uintptr_t base = node->key[i];
-            size_t count = (size_t) node->value[i];
-            unsigned int j;
-
-            for (j = 0; j < count; j++) {
-                pte_t *pte;
-
-                page_table_lock(area->as, false);
-                pte = page_mapping_find(area->as,
-                    base + j * PAGE_SIZE);
-                ASSERT(pte && PTE_VALID(pte) &&
-                    PTE_PRESENT(pte));
-                btree_insert(&area->sh_info->pagemap,
-                    (base + j * PAGE_SIZE) - area->base,
-                    (void *) PTE_GET_FRAME(pte), NULL);
-                page_table_unlock(area->as, false);
-
-                pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
-                frame_reference_add(pfn);
-            }
-
-        }
-    }
-    mutex_unlock(&area->sh_info->lock);
-}
-
-void anon_destroy(as_area_t *area)
-{
-    reserve_free(area->pages);
-}
-
 
 /** Service a page fault in the anonymous memory address space area.
@@ -200,6 +115,5 @@
     }
     if (allocate) {
-        frame = (uintptr_t) frame_alloc_noreserve(
-            ONE_FRAME, 0);
+        frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
         memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
 
@@ -231,5 +145,5 @@
      * the different causes
      */
-    frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
+    frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
     memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
 }
@@ -260,7 +174,60 @@
     ASSERT(mutex_locked(&area->lock));
 
-    frame_free_noreserve(frame);
+    frame_free(frame);
 }
 
+/** Share the anonymous address space area.
+ *
+ * Sharing of anonymous area is done by duplicating its entire mapping
+ * to the pagemap. Page faults will primarily search for frames there.
+ *
+ * The address space and address space area must be already locked.
+ *
+ * @param area Address space area to be shared.
+ */
+void anon_share(as_area_t *area)
+{
+    link_t *cur;
+
+    ASSERT(mutex_locked(&area->as->lock));
+    ASSERT(mutex_locked(&area->lock));
+
+    /*
+     * Copy used portions of the area to sh_info's page map.
+     */
+    mutex_lock(&area->sh_info->lock);
+    for (cur = area->used_space.leaf_head.next;
+        cur != &area->used_space.leaf_head; cur = cur->next) {
+        btree_node_t *node;
+        unsigned int i;
+
+        node = list_get_instance(cur, btree_node_t, leaf_link);
+        for (i = 0; i < node->keys; i++) {
+            uintptr_t base = node->key[i];
+            size_t count = (size_t) node->value[i];
+            unsigned int j;
+
+            for (j = 0; j < count; j++) {
+                pte_t *pte;
+
+                page_table_lock(area->as, false);
+                pte = page_mapping_find(area->as,
+                    base + j * PAGE_SIZE);
+                ASSERT(pte && PTE_VALID(pte) &&
+                    PTE_PRESENT(pte));
+                btree_insert(&area->sh_info->pagemap,
+                    (base + j * PAGE_SIZE) - area->base,
+                    (void *) PTE_GET_FRAME(pte), NULL);
+                page_table_unlock(area->as, false);
+
+                pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
+                frame_reference_add(pfn);
+            }
+
+        }
+    }
+    mutex_unlock(&area->sh_info->lock);
+}
+
 /** @}
  */
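The doc comment on anon_share() notes that page faults "will primarily search for frames there", but the body of anon_page_fault() falls outside the hunks shown above. As a rough sketch of that consumer side: a fault on a shared anonymous area would first consult the pagemap before allocating a fresh frame. The fragment below is pieced together from the calls visible in anon_share() (btree_search() being the natural counterpart of the btree_insert() used there); the helper name pagemap_lookup() is hypothetical and the exact logic is an assumption, not code from this changeset.

/*
 * Illustrative sketch only -- not part of this changeset. Shows how a
 * page fault on a shared anonymous area could look up a frame that
 * anon_share() previously copied into sh_info->pagemap. The helper
 * name and the precise btree_search() usage are assumptions.
 */
static uintptr_t pagemap_lookup(as_area_t *area, uintptr_t addr)
{
    btree_node_t *leaf;
    uintptr_t frame;

    mutex_lock(&area->sh_info->lock);
    /* The pagemap is keyed by the page's offset from the area base,
       mirroring the keys inserted by anon_share(). */
    frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
        ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
    if (frame)
        frame_reference_add(ADDR2PFN(frame));
    mutex_unlock(&area->sh_info->lock);

    /* Zero means the page is not in the pagemap; the caller would then
       allocate and zero a new frame, e.g. via frame_alloc(). */
    return frame;
}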
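More broadly, mem_backend_t is the per-area hook table through which the generic address space layer drives backends such as anon_backend; with the initializer trimmed to .page_fault, .frame_free and .share, those are the only operations anon_backend services in r1d432f9. The fragment below is a simplified, assumed rendering of such a dispatch: the function name dispatch_page_fault() is hypothetical, and the AS_PF_* return constants are recalled from the HelenOS generic mm code rather than shown in this diff.

/*
 * Simplified sketch of backend dispatch -- an assumption for
 * illustration, not code from this changeset. The generic fault path
 * forwards to the faulting area's backend, so for anonymous areas
 * this ends up in anon_page_fault().
 */
int dispatch_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
    if (!area->backend || !area->backend->page_fault)
        return AS_PF_FAULT;    /* no backend can service this fault */

    return area->backend->page_fault(area, addr, access);
}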