Changes in kernel/generic/src/mm/backend_anon.c [1d432f9:55b77d9] in mainline
Files: 1 edited
Legend: lines prefixed with '+' were added, lines prefixed with '-' were removed, all other lines are unmodified context.
kernel/generic/src/mm/backend_anon.c
--- r1d432f9
+++ r55b77d9
@@ -39,4 +39,5 @@
 #include <mm/as.h>
 #include <mm/page.h>
+#include <mm/reserve.h>
 #include <genarch/mm/page_pt.h>
 #include <genarch/mm/page_ht.h>
@@ -49,19 +50,95 @@
 #include <typedefs.h>
 #include <align.h>
+#include <memstr.h>
 #include <arch.h>
 
-#ifdef CONFIG_VIRT_IDX_DCACHE
-#include <arch/mm/cache.h>
-#endif
-
-static int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
-static void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
-static void anon_share(as_area_t *area);
+static bool anon_create(as_area_t *);
+static bool anon_resize(as_area_t *, size_t);
+static void anon_share(as_area_t *);
+static void anon_destroy(as_area_t *);
+
+static int anon_page_fault(as_area_t *, uintptr_t, pf_access_t);
+static void anon_frame_free(as_area_t *, uintptr_t, uintptr_t);
 
 mem_backend_t anon_backend = {
+    .create = anon_create,
+    .resize = anon_resize,
+    .share = anon_share,
+    .destroy = anon_destroy,
+
     .page_fault = anon_page_fault,
     .frame_free = anon_frame_free,
-    .share = anon_share
 };
+
+bool anon_create(as_area_t *area)
+{
+    return reserve_try_alloc(area->pages);
+}
+
+bool anon_resize(as_area_t *area, size_t new_pages)
+{
+    if (new_pages > area->pages)
+        return reserve_try_alloc(new_pages - area->pages);
+    else if (new_pages < area->pages)
+        reserve_free(area->pages - new_pages);
+
+    return true;
+}
+
+/** Share the anonymous address space area.
+ *
+ * Sharing of anonymous area is done by duplicating its entire mapping
+ * to the pagemap. Page faults will primarily search for frames there.
+ *
+ * The address space and address space area must be already locked.
+ *
+ * @param area Address space area to be shared.
+ */
+void anon_share(as_area_t *area)
+{
+    ASSERT(mutex_locked(&area->as->lock));
+    ASSERT(mutex_locked(&area->lock));
+
+    /*
+     * Copy used portions of the area to sh_info's page map.
+     */
+    mutex_lock(&area->sh_info->lock);
+    list_foreach(area->used_space.leaf_list, cur) {
+        btree_node_t *node;
+        unsigned int i;
+
+        node = list_get_instance(cur, btree_node_t, leaf_link);
+        for (i = 0; i < node->keys; i++) {
+            uintptr_t base = node->key[i];
+            size_t count = (size_t) node->value[i];
+            unsigned int j;
+
+            for (j = 0; j < count; j++) {
+                pte_t *pte;
+
+                page_table_lock(area->as, false);
+                pte = page_mapping_find(area->as,
+                    base + P2SZ(j), false);
+                ASSERT(pte && PTE_VALID(pte) &&
+                    PTE_PRESENT(pte));
+                btree_insert(&area->sh_info->pagemap,
+                    (base + P2SZ(j)) - area->base,
+                    (void *) PTE_GET_FRAME(pte), NULL);
+                page_table_unlock(area->as, false);
+
+                pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
+                frame_reference_add(pfn);
+            }
+
+        }
+    }
+    mutex_unlock(&area->sh_info->lock);
+}
+
+void anon_destroy(as_area_t *area)
+{
+    reserve_free(area->pages);
+}
+
 
 /** Service a page fault in the anonymous memory address space area.
@@ -115,5 +192,6 @@
     }
     if (allocate) {
-        frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
+        frame = (uintptr_t) frame_alloc_noreserve(
+            ONE_FRAME, 0);
         memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
 
@@ -145,5 +223,5 @@
      * the different causes
      */
-    frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
+    frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
     memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
     }
@@ -174,58 +252,5 @@
     ASSERT(mutex_locked(&area->lock));
 
-    frame_free(frame);
-}
-
-/** Share the anonymous address space area.
- *
- * Sharing of anonymous area is done by duplicating its entire mapping
- * to the pagemap. Page faults will primarily search for frames there.
- *
- * The address space and address space area must be already locked.
- *
- * @param area Address space area to be shared.
- */
-void anon_share(as_area_t *area)
-{
-    link_t *cur;
-
-    ASSERT(mutex_locked(&area->as->lock));
-    ASSERT(mutex_locked(&area->lock));
-
-    /*
-     * Copy used portions of the area to sh_info's page map.
-     */
-    mutex_lock(&area->sh_info->lock);
-    for (cur = area->used_space.leaf_head.next;
-        cur != &area->used_space.leaf_head; cur = cur->next) {
-        btree_node_t *node;
-        unsigned int i;
-
-        node = list_get_instance(cur, btree_node_t, leaf_link);
-        for (i = 0; i < node->keys; i++) {
-            uintptr_t base = node->key[i];
-            size_t count = (size_t) node->value[i];
-            unsigned int j;
-
-            for (j = 0; j < count; j++) {
-                pte_t *pte;
-
-                page_table_lock(area->as, false);
-                pte = page_mapping_find(area->as,
-                    base + j * PAGE_SIZE);
-                ASSERT(pte && PTE_VALID(pte) &&
-                    PTE_PRESENT(pte));
-                btree_insert(&area->sh_info->pagemap,
-                    (base + j * PAGE_SIZE) - area->base,
-                    (void *) PTE_GET_FRAME(pte), NULL);
-                page_table_unlock(area->as, false);
-
-                pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
-                frame_reference_add(pfn);
-            }
-
-        }
-    }
-    mutex_unlock(&area->sh_info->lock);
+    frame_free_noreserve(frame);
 }
 
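The common thread in this changeset is that anonymous areas now account for their backing memory up front: anon_create() and anon_resize() reserve or release whole-area amounts through reserve_try_alloc()/reserve_free(), while the page-fault path switches to frame_alloc_noreserve()/frame_free_noreserve(), which do not touch the reservation again. The standalone sketch below models only that accounting idea with a plain counter; the reserve_* bodies, the page limit, and the simplified as_area_t are illustrative stand-ins, not the HelenOS implementations.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for a global reservation counter (not the kernel's). */
static size_t reserved_pages = 0;
static const size_t total_pages = 1024;   /* hypothetical amount of physical memory */

/* Reserve 'pages' frames for later allocation; fail if that would overcommit. */
static bool reserve_try_alloc(size_t pages)
{
    if (reserved_pages + pages > total_pages)
        return false;
    reserved_pages += pages;
    return true;
}

/* Return 'pages' frames to the pool of unreserved memory. */
static void reserve_free(size_t pages)
{
    reserved_pages -= pages;
}

/* Simplified model of an address space area, just enough for the sketch. */
typedef struct {
    size_t pages;
} as_area_t;

static bool anon_create(as_area_t *area)
{
    /* Reserve everything up front, so later page faults cannot fail
       for lack of memory. */
    return reserve_try_alloc(area->pages);
}

static bool anon_resize(as_area_t *area, size_t new_pages)
{
    /* Growing reserves only the difference; shrinking gives it back. */
    if (new_pages > area->pages)
        return reserve_try_alloc(new_pages - area->pages);
    else if (new_pages < area->pages)
        reserve_free(area->pages - new_pages);
    return true;
}

static void anon_destroy(as_area_t *area)
{
    reserve_free(area->pages);
}

int main(void)
{
    as_area_t area = { .pages = 16 };

    if (!anon_create(&area))
        return 1;
    printf("reserved after create: %zu\n", reserved_pages);

    if (anon_resize(&area, 24))
        area.pages = 24;   /* the caller records the new size on success */
    printf("reserved after resize: %zu\n", reserved_pages);

    anon_destroy(&area);
    printf("reserved after destroy: %zu\n", reserved_pages);
    return 0;
}

In the kernel itself, the page-fault handler then obtains individual frames with frame_alloc_noreserve(), relying on the reservation made at create or resize time; the sketch reproduces only the accounting, not the frame allocator or the page tables.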