Changes in kernel/generic/src/mm/backend_anon.c [9d58539:83b6ba9f] in mainline
File: kernel/generic/src/mm/backend_anon.c (modified) (13 diffs)
kernel/generic/src/mm/backend_anon.c
--- kernel/generic/src/mm/backend_anon.c	(r9d58539)
+++ kernel/generic/src/mm/backend_anon.c	(r83b6ba9f)
@@ -59,4 +59,7 @@
 static void anon_destroy(as_area_t *);
 
+static bool anon_is_resizable(as_area_t *);
+static bool anon_is_shareable(as_area_t *);
+
 static int anon_page_fault(as_area_t *, uintptr_t, pf_access_t);
 static void anon_frame_free(as_area_t *, uintptr_t, uintptr_t);
@@ -68,10 +71,19 @@
 	.destroy = anon_destroy,
 
+	.is_resizable = anon_is_resizable,
+	.is_shareable = anon_is_shareable,
+
 	.page_fault = anon_page_fault,
 	.frame_free = anon_frame_free,
+
+	.create_shared_data = NULL,
+	.destroy_shared_data = NULL
 };
 
 bool anon_create(as_area_t *area)
 {
+	if (area->flags & AS_AREA_LATE_RESERVE)
+		return true;
+
 	return reserve_try_alloc(area->pages);
 }
@@ -79,4 +91,7 @@
 bool anon_resize(as_area_t *area, size_t new_pages)
 {
+	if (area->flags & AS_AREA_LATE_RESERVE)
+		return true;
+
 	if (new_pages > area->pages)
 		return reserve_try_alloc(new_pages - area->pages);
@@ -100,4 +115,5 @@
 	ASSERT(mutex_locked(&area->as->lock));
 	ASSERT(mutex_locked(&area->lock));
+	ASSERT(!(area->flags & AS_AREA_LATE_RESERVE));
 
 	/*
@@ -105,9 +121,8 @@
 	 */
 	mutex_lock(&area->sh_info->lock);
-	list_foreach(area->used_space.leaf_list, cur) {
-		btree_node_t *node;
+	list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
+	    node) {
 		unsigned int i;
 
-		node = list_get_instance(cur, btree_node_t, leaf_link);
 		for (i = 0; i < node->keys; i++) {
 			uintptr_t base = node->key[i];
@@ -139,7 +154,19 @@
 void anon_destroy(as_area_t *area)
 {
+	if (area->flags & AS_AREA_LATE_RESERVE)
+		return;
+
 	reserve_free(area->pages);
 }
 
+bool anon_is_resizable(as_area_t *area)
+{
+	return true;
+}
+
+bool anon_is_shareable(as_area_t *area)
+{
+	return !(area->flags & AS_AREA_LATE_RESERVE);
+}
 
 /** Service a page fault in the anonymous memory address space area.
@@ -148,5 +175,5 @@
  *
  * @param area		Pointer to the address space area.
- * @param addr		Faulting virtual address.
+ * @param upage		Faulting virtual page.
  * @param access	Access mode that caused the fault (i.e. read/write/exec).
 *
@@ -154,7 +181,6 @@
 * serviced).
 */
-int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
-{
-	uintptr_t upage = ALIGN_DOWN(addr, PAGE_SIZE);
+int anon_page_fault(as_area_t *area, uintptr_t upage, pf_access_t access)
+{
 	uintptr_t kpage;
 	uintptr_t frame;
@@ -162,9 +188,11 @@
 	ASSERT(page_table_locked(AS));
 	ASSERT(mutex_locked(&area->lock));
+	ASSERT(IS_ALIGNED(upage, PAGE_SIZE));
 
 	if (!as_area_check_access(area, access))
 		return AS_PF_FAULT;
 
-	if (area->sh_info) {
+	mutex_lock(&area->sh_info->lock);
+	if (area->sh_info->shared) {
 		btree_node_t *leaf;
 
@@ -176,5 +204,4 @@
 		 * mapping, a new frame is allocated and the mapping is created.
 		 */
-		mutex_lock(&area->sh_info->lock);
 		frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
 		    upage - area->base, &leaf);
@@ -208,5 +235,4 @@
 		}
 		frame_reference_add(ADDR2PFN(frame));
-		mutex_unlock(&area->sh_info->lock);
 	} else {
 
@@ -225,8 +251,20 @@
 		 * the different causes
 		 */
+
+		if (area->flags & AS_AREA_LATE_RESERVE) {
+			/*
+			 * Reserve the memory for this page now.
+			 */
+			if (!reserve_try_alloc(1)) {
+				mutex_unlock(&area->sh_info->lock);
+				return AS_PF_SILENT;
+			}
+		}
+
 		kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);
 		memsetb((void *) kpage, PAGE_SIZE, 0);
 		km_temporary_page_put(kpage);
 	}
+	mutex_unlock(&area->sh_info->lock);
 
 	/*
@@ -255,5 +293,19 @@
 	ASSERT(mutex_locked(&area->lock));
 
-	frame_free_noreserve(frame);
+	if (area->flags & AS_AREA_LATE_RESERVE) {
+		/*
+		 * In case of the late reserve areas, physical memory will not
+		 * be unreserved when the area is destroyed so we need to use
+		 * the normal unreserving frame_free().
+		 */
+		frame_free(frame, 1);
+	} else {
+		/*
+		 * The reserve will be given back when the area is destroyed or
+		 * resized, so use the frame_free_noreserve() which does not
+		 * manipulate the reserve or it would be given back twice.
+		 */
+		frame_free_noreserve(frame, 1);
+	}
 }
 
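The substance of this changeset is the new AS_AREA_LATE_RESERVE handling in the anonymous backend: such areas skip the up-front reservation in anon_create() and anon_resize(), instead reserving backing memory one page at a time in anon_page_fault() and returning AS_PF_SILENT when the reservation cannot be satisfied. The counterpart is in anon_frame_free(): a late-reserve frame must be released with the unreserving frame_free(), while eagerly reserved frames keep using frame_free_noreserve(), because their reservation is returned in one lump by anon_destroy(). The sketch below is a minimal user-space simulation of that accounting, not HelenOS kernel code; reserve_try_alloc() and reserve_free() mirror the kernel's names, but area_t, fault_in(), free_frame() and area_destroy() are invented stand-ins. It demonstrates the invariant the two code paths protect: every reserved page is returned exactly once.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's global memory reserve. */
static size_t reserve = 4;              /* reservable pages left */

static bool reserve_try_alloc(size_t n) /* mirrors the kernel's name */
{
	if (reserve < n)
		return false;
	reserve -= n;
	return true;
}

static void reserve_free(size_t n) { reserve += n; }

typedef struct {
	bool late_reserve;  /* models AS_AREA_LATE_RESERVE */
	size_t pages;
	size_t mapped;      /* pages actually faulted in */
} area_t;

/* anon_create() analogue: eager areas reserve everything up front. */
static bool area_create(area_t *a)
{
	if (a->late_reserve)
		return true;            /* defer to fault time */
	return reserve_try_alloc(a->pages);
}

/* anon_page_fault() analogue: late-reserve areas reserve one page per
 * fault and report a silent fault when the reserve is exhausted. */
static bool fault_in(area_t *a)
{
	if (a->late_reserve && !reserve_try_alloc(1))
		return false;           /* AS_PF_SILENT analogue */
	a->mapped++;
	return true;
}

/* anon_frame_free() analogue: the asymmetry this changeset introduces. */
static void free_frame(area_t *a)
{
	a->mapped--;
	if (a->late_reserve)
		reserve_free(1);        /* frame_free(): unreserves too */
	/* else: frame_free_noreserve(); the reserve is returned in one
	 * lump by area_destroy() below, so returning it here as well
	 * would give it back twice. */
}

/* anon_destroy() analogue. */
static void area_destroy(area_t *a)
{
	while (a->mapped > 0)
		free_frame(a);
	if (!a->late_reserve)
		reserve_free(a->pages);
}

int main(void)
{
	area_t eager = { .late_reserve = false, .pages = 2 };
	area_t lazy = { .late_reserve = true, .pages = 2 };

	assert(area_create(&eager));    /* reserve: 4 -> 2 */
	assert(area_create(&lazy));     /* reserve unchanged */

	assert(fault_in(&eager));       /* no extra reserve taken */
	assert(fault_in(&lazy));        /* reserve: 2 -> 1 */
	assert(fault_in(&lazy));        /* reserve: 1 -> 0 */
	assert(!fault_in(&lazy));       /* overcommit detected */

	area_destroy(&eager);
	area_destroy(&lazy);
	assert(reserve == 4);           /* every unit returned exactly once */
	puts("reserve accounting balanced");
	return 0;
}

A related design point visible in the diff: anon_is_shareable() returns false for late-reserve areas, and the function iterating used_space into the shared pagemap (evidently anon_share()) now asserts it never sees one, which keeps the per-page reservation accounting out of the sharing path entirely.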