Changes in kernel/generic/src/mm/backend_anon.c [1d432f9:cda1378] in mainline
kernel/generic/src/mm/backend_anon.c

--- r1d432f9
+++ rcda1378
@@ -39,4 +39,5 @@
 #include <mm/as.h>
 #include <mm/page.h>
+#include <mm/reserve.h>
 #include <genarch/mm/page_pt.h>
 #include <genarch/mm/page_ht.h>
@@ -51,17 +52,95 @@
 #include <arch.h>
 
-#ifdef CONFIG_VIRT_IDX_DCACHE
-#include <arch/mm/cache.h>
-#endif
-
-static int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
-static void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
-static void anon_share(as_area_t *area);
+static bool anon_create(as_area_t *);
+static bool anon_resize(as_area_t *, size_t);
+static void anon_share(as_area_t *);
+static void anon_destroy(as_area_t *);
+
+static int anon_page_fault(as_area_t *, uintptr_t, pf_access_t);
+static void anon_frame_free(as_area_t *, uintptr_t, uintptr_t);
 
 mem_backend_t anon_backend = {
+	.create = anon_create,
+	.resize = anon_resize,
+	.share = anon_share,
+	.destroy = anon_destroy,
+
 	.page_fault = anon_page_fault,
 	.frame_free = anon_frame_free,
-	.share = anon_share
 };
+
+bool anon_create(as_area_t *area)
+{
+	return reserve_try_alloc(area->pages);
+}
+
+bool anon_resize(as_area_t *area, size_t new_pages)
+{
+	if (new_pages > area->pages)
+		return reserve_try_alloc(new_pages - area->pages);
+	else if (new_pages < area->pages)
+		reserve_free(area->pages - new_pages);
+
+	return true;
+}
+
+/** Share the anonymous address space area.
+ *
+ * Sharing of anonymous area is done by duplicating its entire mapping
+ * to the pagemap. Page faults will primarily search for frames there.
+ *
+ * The address space and address space area must be already locked.
+ *
+ * @param area Address space area to be shared.
+ */
+void anon_share(as_area_t *area)
+{
+	link_t *cur;
+
+	ASSERT(mutex_locked(&area->as->lock));
+	ASSERT(mutex_locked(&area->lock));
+
+	/*
+	 * Copy used portions of the area to sh_info's page map.
+	 */
+	mutex_lock(&area->sh_info->lock);
+	for (cur = area->used_space.leaf_head.next;
+	    cur != &area->used_space.leaf_head; cur = cur->next) {
+		btree_node_t *node;
+		unsigned int i;
+
+		node = list_get_instance(cur, btree_node_t, leaf_link);
+		for (i = 0; i < node->keys; i++) {
+			uintptr_t base = node->key[i];
+			size_t count = (size_t) node->value[i];
+			unsigned int j;
+
+			for (j = 0; j < count; j++) {
+				pte_t *pte;
+
+				page_table_lock(area->as, false);
+				pte = page_mapping_find(area->as,
+				    base + j * PAGE_SIZE);
+				ASSERT(pte && PTE_VALID(pte) &&
+				    PTE_PRESENT(pte));
+				btree_insert(&area->sh_info->pagemap,
+				    (base + j * PAGE_SIZE) - area->base,
+				    (void *) PTE_GET_FRAME(pte), NULL);
+				page_table_unlock(area->as, false);
+
+				pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
+				frame_reference_add(pfn);
+			}
+
+		}
+	}
+	mutex_unlock(&area->sh_info->lock);
+}
+
+void anon_destroy(as_area_t *area)
+{
+	reserve_free(area->pages);
+}
+
 
 /** Service a page fault in the anonymous memory address space area.
@@ -115,5 +194,6 @@
 			}
 			if (allocate) {
-				frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
+				frame = (uintptr_t) frame_alloc_noreserve(
+				    ONE_FRAME, 0);
 				memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
 
@@ -145,5 +225,5 @@
 		 * the different causes
 		 */
-		frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
+		frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
 		memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
 	}
@@ -174,57 +254,4 @@
 	ASSERT(mutex_locked(&area->lock));
 
-	frame_free(frame);
-}
-
-/** Share the anonymous address space area.
- *
- * Sharing of anonymous area is done by duplicating its entire mapping
- * to the pagemap. Page faults will primarily search for frames there.
- *
- * The address space and address space area must be already locked.
- *
- * @param area Address space area to be shared.
- */
-void anon_share(as_area_t *area)
-{
-	link_t *cur;
-
-	ASSERT(mutex_locked(&area->as->lock));
-	ASSERT(mutex_locked(&area->lock));
-
-	/*
-	 * Copy used portions of the area to sh_info's page map.
-	 */
-	mutex_lock(&area->sh_info->lock);
-	for (cur = area->used_space.leaf_head.next;
-	    cur != &area->used_space.leaf_head; cur = cur->next) {
-		btree_node_t *node;
-		unsigned int i;
-
-		node = list_get_instance(cur, btree_node_t, leaf_link);
-		for (i = 0; i < node->keys; i++) {
-			uintptr_t base = node->key[i];
-			size_t count = (size_t) node->value[i];
-			unsigned int j;
-
-			for (j = 0; j < count; j++) {
-				pte_t *pte;
-
-				page_table_lock(area->as, false);
-				pte = page_mapping_find(area->as,
-				    base + j * PAGE_SIZE);
-				ASSERT(pte && PTE_VALID(pte) &&
-				    PTE_PRESENT(pte));
-				btree_insert(&area->sh_info->pagemap,
-				    (base + j * PAGE_SIZE) - area->base,
-				    (void *) PTE_GET_FRAME(pte), NULL);
-				page_table_unlock(area->as, false);
-
-				pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
-				frame_reference_add(pfn);
-			}
-
-		}
-	}
-	mutex_unlock(&area->sh_info->lock);
+	frame_free_noreserve(frame);
 }
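The heart of the changeset is the new reservation accounting: anon_create() charges the whole area against a pool of reservable frames up front, anon_resize() charges or refunds only the difference, and anon_destroy() returns the reservation. The code behind reserve_try_alloc()/reserve_free() is declared in the newly included <mm/reserve.h> and is not shown in this changeset, so the following user-space sketch is illustrative only; the pool size, the missing locking, and the main() driver are assumptions, not HelenOS code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical pool of reservable frames; the real kernel tracks this
 * next to the frame allocator, under a lock. */
static size_t reserve = 1024;

/* Try to set aside 'size' frames; fail at create/resize time rather
 * than at page-fault time, where failure is much harder to handle. */
static bool reserve_try_alloc(size_t size)
{
	if (size <= reserve) {
		reserve -= size;
		return true;
	}
	return false;
}

/* Return 'size' frames to the pool. */
static void reserve_free(size_t size)
{
	reserve += size;
}

/* Mirrors anon_resize() from the diff: growing an area reserves the
 * difference, shrinking refunds it, an unchanged size is a no-op. */
static bool resize_reservation(size_t old_pages, size_t new_pages)
{
	if (new_pages > old_pages)
		return reserve_try_alloc(new_pages - old_pages);
	else if (new_pages < old_pages)
		reserve_free(old_pages - new_pages);

	return true;
}

int main(void)
{
	if (!reserve_try_alloc(8))	/* like anon_create() of an 8-page area */
		return 1;
	resize_reservation(8, 16);	/* grow: charge 8 more frames */
	resize_reservation(16, 4);	/* shrink: refund 12 frames */
	reserve_free(4);		/* like anon_destroy() */
	printf("pool back to %zu frames\n", reserve);
	return 0;
}

The point of failing in reserve_try_alloc() is that anon_create() and anon_resize() have a clean error path back to the caller, whereas a failed allocation inside the page-fault handler does not.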
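The other half of the change follows from that accounting: the fault and frame-free paths switch from frame_alloc()/frame_free() to the _noreserve variants. Frames consumed by anon_page_fault() were already charged by anon_create() or anon_resize(), so reserving them again at fault time would double-count; symmetrically, anon_frame_free() must not credit the pool, because anon_destroy() refunds the whole area in a single reserve_free() call. Below is a minimal sketch of that split, assuming the same hypothetical pool as above; every *_sketch name is invented here, and the real HelenOS allocator signatures differ.

#include <stddef.h>
#include <stdlib.h>

static size_t reserve = 1024;	/* hypothetical reservable-frame pool */

/* Reserving allocation: charge the pool, then hand out a frame. */
static void *frame_alloc_sketch(void)
{
	if (reserve == 0)
		return NULL;	/* reservation exhausted */
	reserve--;
	return malloc(4096);	/* stand-in for a physical frame */
}

/* Non-reserving allocation: the caller reserved earlier, so the
 * pool is left untouched. */
static void *frame_alloc_noreserve_sketch(void)
{
	return malloc(4096);
}

/* Matching frees: the plain variant refunds the pool, while the
 * _noreserve variant leaves refunding to a later bulk reserve_free(). */
static void frame_free_sketch(void *frame)
{
	free(frame);
	reserve++;
}

static void frame_free_noreserve_sketch(void *frame)
{
	free(frame);
}

int main(void)
{
	void *f = frame_alloc_noreserve_sketch();	/* fault path */
	frame_free_noreserve_sketch(f);			/* anon_frame_free() path */

	void *g = frame_alloc_sketch();			/* reserving path */
	frame_free_sketch(g);				/* refunding free */
	return 0;
}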