Changes in mainline [b2fb47f:8b655705]

Location: kernel
Files: 2 added, 1 deleted, 10 edited
kernel/Makefile
@@ -228,4 +228,5 @@
     generic/src/syscall/syscall.c \
     generic/src/syscall/copy.c \
+    generic/src/mm/reserve.c \
     generic/src/mm/buddy.c \
     generic/src/mm/frame.c \
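The added generic/src/mm/reserve.c is new in this changeset and its contents are not shown by the viewer; only its hook into the build is visible above. The backends below call reserve_try_alloc() and reserve_free() from it. A minimal sketch of the idea, assuming a single counter of reservable frames guarded by a spinlock (the counter name and locking scheme are illustrative, not the actual implementation):

    /* mm/reserve.c (sketch) */
    #include <typedefs.h>
    #include <synch/spinlock.h>

    static size_t reservable_frames;          /* illustrative counter name */
    static SPINLOCK_INITIALIZE(reserve_lock);

    /* Try to set aside 'size' frames for future allocation; fail
     * immediately instead of blocking when the pool is exhausted. */
    bool reserve_try_alloc(size_t size)
    {
        bool reserved = false;

        spinlock_lock(&reserve_lock);
        if (reservable_frames >= size) {
            reservable_frames -= size;
            reserved = true;
        }
        spinlock_unlock(&reserve_lock);

        return reserved;
    }

    /* Return 'size' previously reserved frames to the pool. */
    void reserve_free(size_t size)
    {
        spinlock_lock(&reserve_lock);
        reservable_frames += size;
        spinlock_unlock(&reserve_lock);
    }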
kernel/arch/sparc64/include/cpu.h
@@ -59,9 +59,4 @@
 #include <arch/asm.h>
 
-#ifdef CONFIG_SMP
-#include <arch/mm/cache.h>
-#endif
-
-
 #if defined (SUN4U)
 #include <arch/sun4u/cpu.h>
kernel/arch/sparc64/include/sun4u/cpu.h
@@ -60,8 +60,4 @@
 #include <trace.h>
 
-#ifdef CONFIG_SMP
-#include <arch/mm/cache.h>
-#endif
-
 typedef struct {
     uint32_t mid;    /**< Processor ID as read from
kernel/generic/include/mm/as.h
@@ -238,7 +238,11 @@
 /** Address space area backend structure. */
 typedef struct mem_backend {
+    bool (* create)(as_area_t *);
+    bool (* resize)(as_area_t *, size_t);
+    void (* share)(as_area_t *);
+    void (* destroy)(as_area_t *);
+
     int (* page_fault)(as_area_t *, uintptr_t, pf_access_t);
     void (* frame_free)(as_area_t *, uintptr_t, uintptr_t);
-    void (* share)(as_area_t *);
 } mem_backend_t;
 
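The four new hooks give every backend a veto at area creation and resize time and a cleanup point at destruction. This changeset does not show the as.c call sites, but the bool return types imply a pattern along these lines (a hedged sketch; the helper name is hypothetical):

    #include <mm/as.h>

    /* Hypothetical call site: let the backend reserve memory up front
     * and veto the operation when the reservation cannot be satisfied. */
    static bool area_backend_create(as_area_t *area)
    {
        if (area->backend && area->backend->create)
            return area->backend->create(area);

        /* Backends without a create hook need no up-front reservation. */
        return true;
    }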
kernel/generic/include/mm/frame.h
@@ -62,16 +62,18 @@
 
 /** Convert the frame address to kernel VA. */
-#define FRAME_KA          0x01
+#define FRAME_KA          0x1
 /** Do not panic and do not sleep on failure. */
-#define FRAME_ATOMIC      0x02
+#define FRAME_ATOMIC      0x2
 /** Do not start reclaiming when no free memory. */
-#define FRAME_NO_RECLAIM  0x04
+#define FRAME_NO_RECLAIM  0x4
+/** Do not reserve / unreserve memory. */
+#define FRAME_NO_RESERVE  0x8
 
 typedef uint8_t zone_flags_t;
 
 /** Available zone (free for allocation) */
-#define ZONE_AVAILABLE  0x00
+#define ZONE_AVAILABLE  0x0
 /** Zone is reserved (not available for allocation) */
-#define ZONE_RESERVED   0x08
+#define ZONE_RESERVED   0x8
 /** Zone is used by firmware (not available for allocation) */
 #define ZONE_FIRMWARE   0x10

@@ -85,20 +87,20 @@ (whitespace-only realignment of wrapped comments)
     uint8_t buddy_order;   /**< Buddy system block order */
     link_t buddy_link;     /**< Link to the next free block inside
-                                one order */
+                               one order */
     void *parent;          /**< If allocated by slab, this points there */
 } frame_t;
 
 typedef struct {
     pfn_t base;            /**< Frame_no of the first frame
-                                in the frames array */
+                               in the frames array */
     size_t count;          /**< Size of zone */
     size_t free_count;     /**< Number of free frame_t
-                                structures */
+                               structures */
     size_t busy_count;     /**< Number of busy frame_t
-                                structures */
+                               structures */
     zone_flags_t flags;    /**< Type of the zone */
 
     frame_t *frames;       /**< Array of frame_t structures
-                                in this zone */
+                               in this zone */
     buddy_system_t *buddy_system;  /**< Buddy system for the zone */
 } zone_t;

@@ -146,21 +148,22 @@
     ((~(((sysarg_t) -1) << (order)) & (index)) == 0)
 #define IS_BUDDY_LEFT_BLOCK(zone, frame) \
-    (((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x01) == 0)
+    (((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 0)
 #define IS_BUDDY_RIGHT_BLOCK(zone, frame) \
-    (((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x01) == 1)
+    (((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 1)
 #define IS_BUDDY_LEFT_BLOCK_ABS(zone, frame) \
-    (((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x01) == 0)
+    (((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 0)
 #define IS_BUDDY_RIGHT_BLOCK_ABS(zone, frame) \
-    (((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x01) == 1)
-
-#define frame_alloc(order, flags) \
-    frame_alloc_generic(order, flags, NULL)
+    (((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 1)
 
 extern void frame_init(void);
 extern void *frame_alloc_generic(uint8_t, frame_flags_t, size_t *);
+extern void *frame_alloc(uint8_t, frame_flags_t);
+extern void *frame_alloc_noreserve(uint8_t, frame_flags_t);
+extern void frame_free_generic(uintptr_t, frame_flags_t);
 extern void frame_free(uintptr_t);
+extern void frame_free_noreserve(uintptr_t);
 extern void frame_reference_add(pfn_t);
 
-extern size_t find_zone(pfn_t frame, size_t count, size_t hint);
+extern size_t find_zone(pfn_t, size_t, size_t);
 extern size_t zone_create(pfn_t, size_t, pfn_t, zone_flags_t);
 extern void *frame_get_parent(pfn_t, size_t);
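frame_alloc() stops being a macro over frame_alloc_generic() and becomes a function (defined in frame.c below), joined by a _noreserve twin; frees gain the same split. The point of the pairing: callers that reserved memory themselves, such as the address space area backends in this changeset, must use the _noreserve variants so the reservation pool is not charged or credited twice. A short usage sketch (the function wrapping the calls is illustrative):

    #include <mm/frame.h>

    static void frame_api_example(void)
    {
        /* The caller already charged the reservation pool with
         * reserve_try_alloc(), so skip the implicit reservation. */
        uintptr_t frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);

        /* Ordinary allocation: reserves and allocates in one step. */
        uintptr_t other = (uintptr_t) frame_alloc(ONE_FRAME, 0);

        /* Frees must match the allocation variant, otherwise the
         * reservation accounting is credited twice or not at all. */
        frame_free_noreserve(frame);
        frame_free(other);
    }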
kernel/generic/src/mm/as.c
@@ -79,8 +79,4 @@
 #include <syscall/copy.h>
 #include <arch/interrupt.h>
 
-#ifdef CONFIG_VIRT_IDX_DCACHE
-#include <arch/mm/cache.h>
-#endif /* CONFIG_VIRT_IDX_DCACHE */
-
 
 /**
kernel/generic/src/mm/backend_anon.c
@@ -39,4 +39,5 @@
 #include <mm/as.h>
 #include <mm/page.h>
+#include <mm/reserve.h>
 #include <genarch/mm/page_pt.h>
 #include <genarch/mm/page_ht.h>

@@ -51,17 +52,101 @@
 #include <arch.h>
 
-#ifdef CONFIG_VIRT_IDX_DCACHE
-#include <arch/mm/cache.h>
-#endif
+static bool anon_create(as_area_t *);
+static bool anon_resize(as_area_t *, size_t);
+static void anon_share(as_area_t *area);
+static void anon_destroy(as_area_t *);
 
 static int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
 static void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
-static void anon_share(as_area_t *area);
 
 mem_backend_t anon_backend = {
+    .create = anon_create,
+    .resize = anon_resize,
+    .share = anon_share,
+    .destroy = anon_destroy,
+
     .page_fault = anon_page_fault,
     .frame_free = anon_frame_free,
-    .share = anon_share
 };
+
+bool anon_create(as_area_t *area)
+{
+    return reserve_try_alloc(area->pages);
+}
+
+bool anon_resize(as_area_t *area, size_t new_pages)
+{
+    /**
+     * @todo
+     * Reserve also space needed for the supporting strutures allocated
+     * during page fault.
+     */
+
+    if (new_pages > area->pages)
+        return reserve_try_alloc(new_pages - area->pages);
+    else if (new_pages < area->pages)
+        reserve_free(area->pages - new_pages);
+
+    return true;
+}
+
+/** Share the anonymous address space area.
+ *
+ * Sharing of anonymous area is done by duplicating its entire mapping
+ * to the pagemap. Page faults will primarily search for frames there.
+ *
+ * The address space and address space area must be already locked.
+ *
+ * @param area Address space area to be shared.
+ */
+void anon_share(as_area_t *area)
+{
+    link_t *cur;
+
+    ASSERT(mutex_locked(&area->as->lock));
+    ASSERT(mutex_locked(&area->lock));
+
+    /*
+     * Copy used portions of the area to sh_info's page map.
+     */
+    mutex_lock(&area->sh_info->lock);
+    for (cur = area->used_space.leaf_head.next;
+        cur != &area->used_space.leaf_head; cur = cur->next) {
+        btree_node_t *node;
+        unsigned int i;
+
+        node = list_get_instance(cur, btree_node_t, leaf_link);
+        for (i = 0; i < node->keys; i++) {
+            uintptr_t base = node->key[i];
+            size_t count = (size_t) node->value[i];
+            unsigned int j;
+
+            for (j = 0; j < count; j++) {
+                pte_t *pte;
+
+                page_table_lock(area->as, false);
+                pte = page_mapping_find(area->as,
+                    base + j * PAGE_SIZE);
+                ASSERT(pte && PTE_VALID(pte) &&
+                    PTE_PRESENT(pte));
+                btree_insert(&area->sh_info->pagemap,
+                    (base + j * PAGE_SIZE) - area->base,
+                    (void *) PTE_GET_FRAME(pte), NULL);
+                page_table_unlock(area->as, false);
+
+                pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
+                frame_reference_add(pfn);
+            }
+
+        }
+    }
+    mutex_unlock(&area->sh_info->lock);
+}
+
+void anon_destroy(as_area_t *area)
+{
+    reserve_free(area->pages);
+}
+
 
 /** Service a page fault in the anonymous memory address space area.

@@ -115,5 +200,6 @@
         }
         if (allocate) {
-            frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
+            frame = (uintptr_t) frame_alloc_noreserve(
+                ONE_FRAME, 0);
             memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
 

@@ -145,5 +231,5 @@
          * the different causes
          */
-        frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
+        frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
         memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
     }

@@ -174,58 +260,5 @@
     ASSERT(mutex_locked(&area->lock));
 
-    frame_free(frame);
-}
-
-[anon_share() and its doxygen comment deleted from this old location;
- the function moved, unchanged, before anon_destroy() above]
+    frame_free_noreserve(frame);
 }
 
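anon_resize() compares the requested size against area->pages, so the generic resize path has to invoke the hook before it updates the area size, and must abort when the hook returns false. A hedged sketch of that ordering (helper name and surrounding logic are hypothetical):

    #include <mm/as.h>

    static bool area_backend_resize(as_area_t *area, size_t new_pages)
    {
        /* Ask the backend to adjust its reservation first; area->pages
         * still holds the old size at this point. */
        if (area->backend && area->backend->resize &&
            !area->backend->resize(area, new_pages))
            return false;    /* reservation failed; keep the old size */

        area->pages = new_pages;    /* commit only after the reservation */
        return true;
    }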
kernel/generic/src/mm/backend_elf.c
@@ -43,4 +43,5 @@
 #include <mm/slab.h>
 #include <mm/page.h>
+#include <mm/reserve.h>
 #include <genarch/mm/page_pt.h>
 #include <genarch/mm/page_ht.h>

@@ -51,219 +52,40 @@
 #include <arch/barrier.h>
 
-#ifdef CONFIG_VIRT_IDX_DCACHE
-#include <arch/mm/cache.h>
-#endif
+static bool elf_create(as_area_t *);
+static bool elf_resize(as_area_t *, size_t);
+static void elf_share(as_area_t *);
+static void elf_destroy(as_area_t *);
 
 static int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
 static void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
-static void elf_share(as_area_t *area);
 
 mem_backend_t elf_backend = {
+    .create = elf_create,
+    .resize = elf_resize,
+    .share = elf_share,
+    .destroy = elf_destroy,
+
     .page_fault = elf_page_fault,
     .frame_free = elf_frame_free,
-    .share = elf_share
 };
 
-[elf_page_fault() and elf_frame_free(), with their doxygen comments,
- deleted from this old location; both functions are re-added after
- elf_destroy() below, unchanged except that the three
- frame_alloc(ONE_FRAME, 0) calls become frame_alloc_noreserve(ONE_FRAME, 0)
- and the two frame_free(frame) calls become frame_free_noreserve(frame)]
+bool elf_create(as_area_t *area)
+{
+    /**
+     * @todo:
+     * Reserve only how much is necessary for anonymous pages plus the
+     * supporting structures allocated during the page fault.
+     */
+    return reserve_try_alloc(area->pages);
+}
+
+bool elf_resize(as_area_t *area, size_t new_pages)
+{
+    if (new_pages > area->pages)
+        return reserve_try_alloc(new_pages - area->pages);
+    else if (new_pages < area->pages)
+        reserve_free(area->pages - new_pages);
 
+    return true;
 }
 

@@ -356,4 +178,216 @@
 }
 
+void elf_destroy(as_area_t *area)
+{
+    /**
+     * @todo:
+     * Unreserve only how much was really reserved.
+     */
+    reserve_free(area->pages);
+}
+
+/** Service a page fault in the ELF backend address space area.
+ *
+ * The address space area and page tables must be already locked.
+ *
+ * @param area Pointer to the address space area.
+ * @param addr Faulting virtual address.
+ * @param access Access mode that caused the fault (i.e.
+ *     read/write/exec).
+ *
+ * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
+ *     on success (i.e. serviced).
+ */
+int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
+{
+    elf_header_t *elf = area->backend_data.elf;
+    elf_segment_header_t *entry = area->backend_data.segment;
+    btree_node_t *leaf;
+    uintptr_t base, frame, page, start_anon;
+    size_t i;
+    bool dirty = false;
+
+    ASSERT(page_table_locked(AS));
+    ASSERT(mutex_locked(&area->lock));
+
+    if (!as_area_check_access(area, access))
+        return AS_PF_FAULT;
+
+    if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
+        return AS_PF_FAULT;
+
+    if (addr >= entry->p_vaddr + entry->p_memsz)
+        return AS_PF_FAULT;
+
+    i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
+    base = (uintptr_t)
+        (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));
+
+    /* Virtual address of faulting page*/
+    page = ALIGN_DOWN(addr, PAGE_SIZE);
+
+    /* Virtual address of the end of initialized part of segment */
+    start_anon = entry->p_vaddr + entry->p_filesz;
+
+    if (area->sh_info) {
+        bool found = false;
+
+        /*
+         * The address space area is shared.
+         */
+
+        mutex_lock(&area->sh_info->lock);
+        frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
+            page - area->base, &leaf);
+        if (!frame) {
+            unsigned int i;
+
+            /*
+             * Workaround for valid NULL address.
+             */
+
+            for (i = 0; i < leaf->keys; i++) {
+                if (leaf->key[i] == page - area->base) {
+                    found = true;
+                    break;
+                }
+            }
+        }
+        if (frame || found) {
+            frame_reference_add(ADDR2PFN(frame));
+            page_mapping_insert(AS, addr, frame,
+                as_area_get_flags(area));
+            if (!used_space_insert(area, page, 1))
+                panic("Cannot insert used space.");
+            mutex_unlock(&area->sh_info->lock);
+            return AS_PF_OK;
+        }
+    }
+
+    /*
+     * The area is either not shared or the pagemap does not contain the
+     * mapping.
+     */
+    if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
+        /*
+         * Initialized portion of the segment. The memory is backed
+         * directly by the content of the ELF image. Pages are
+         * only copied if the segment is writable so that there
+         * can be more instantions of the same memory ELF image
+         * used at a time. Note that this could be later done
+         * as COW.
+         */
+        if (entry->p_flags & PF_W) {
+            frame = (uintptr_t)frame_alloc_noreserve(ONE_FRAME, 0);
+            memcpy((void *) PA2KA(frame),
+                (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
+            if (entry->p_flags & PF_X) {
+                smc_coherence_block((void *) PA2KA(frame),
+                    FRAME_SIZE);
+            }
+            dirty = true;
+        } else {
+            frame = KA2PA(base + i * FRAME_SIZE);
+        }
+    } else if (page >= start_anon) {
+        /*
+         * This is the uninitialized portion of the segment.
+         * It is not physically present in the ELF image.
+         * To resolve the situation, a frame must be allocated
+         * and cleared.
+         */
+        frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
+        memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
+        dirty = true;
+    } else {
+        size_t pad_lo, pad_hi;
+        /*
+         * The mixed case.
+         *
+         * The middle part is backed by the ELF image and
+         * the lower and upper parts are anonymous memory.
+         * (The segment can be and often is shorter than 1 page).
+         */
+        if (page < entry->p_vaddr)
+            pad_lo = entry->p_vaddr - page;
+        else
+            pad_lo = 0;
+
+        if (start_anon < page + PAGE_SIZE)
+            pad_hi = page + PAGE_SIZE - start_anon;
+        else
+            pad_hi = 0;
+
+        frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
+        memcpy((void *) (PA2KA(frame) + pad_lo),
+            (void *) (base + i * FRAME_SIZE + pad_lo),
+            FRAME_SIZE - pad_lo - pad_hi);
+        if (entry->p_flags & PF_X) {
+            smc_coherence_block((void *) (PA2KA(frame) + pad_lo),
+                FRAME_SIZE - pad_lo - pad_hi);
+        }
+        memsetb((void *) PA2KA(frame), pad_lo, 0);
+        memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
+            0);
+        dirty = true;
+    }
+
+    if (dirty && area->sh_info) {
+        frame_reference_add(ADDR2PFN(frame));
+        btree_insert(&area->sh_info->pagemap, page - area->base,
+            (void *) frame, leaf);
+    }
+
+    if (area->sh_info)
+        mutex_unlock(&area->sh_info->lock);
+
+    page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
+    if (!used_space_insert(area, page, 1))
+        panic("Cannot insert used space.");
+
+    return AS_PF_OK;
+}
+
+/** Free a frame that is backed by the ELF backend.
+ *
+ * The address space area and page tables must be already locked.
+ *
+ * @param area Pointer to the address space area.
+ * @param page Page that is mapped to frame. Must be aligned to
+ *     PAGE_SIZE.
+ * @param frame Frame to be released.
+ *
+ */
+void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
+{
+    elf_segment_header_t *entry = area->backend_data.segment;
+    uintptr_t start_anon;
+
+    ASSERT(page_table_locked(area->as));
+    ASSERT(mutex_locked(&area->lock));
+
+    ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
+    ASSERT(page < entry->p_vaddr + entry->p_memsz);
+
+    start_anon = entry->p_vaddr + entry->p_filesz;
+
+    if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
+        if (entry->p_flags & PF_W) {
+            /*
+             * Free the frame with the copy of writable segment
+             * data.
+             */
+            frame_free_noreserve(frame);
+        }
+    } else {
+        /*
+         * The frame is either anonymous memory or the mixed case (i.e.
+         * lower part is backed by the ELF image and the upper is
+         * anonymous). In any case, a frame needs to be freed.
+         */
+        frame_free_noreserve(frame);
+    }
+}
+
 /** @}
  */
kernel/generic/src/mm/backend_phys.c
@@ -48,12 +48,43 @@
 #include <align.h>
 
+static bool phys_create(as_area_t *);
+static void phys_share(as_area_t *area);
+static void phys_destroy(as_area_t *);
+
 static int phys_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
-static void phys_share(as_area_t *area);
 
 mem_backend_t phys_backend = {
+    .create = phys_create,
+    .resize = NULL,
+    .share = phys_share,
+    .destroy = phys_destroy,
+
     .page_fault = phys_page_fault,
     .frame_free = NULL,
-    .share = phys_share
 };
+
+bool phys_create(as_area_t *area)
+{
+    return true;
+}
+
+/** Share address space area backed by physical memory.
+ *
+ * Do actually nothing as sharing of address space areas
+ * that are backed up by physical memory is very easy.
+ * Note that the function must be defined so that
+ * as_area_share() will succeed.
+ */
+void phys_share(as_area_t *area)
+{
+    ASSERT(mutex_locked(&area->as->lock));
+    ASSERT(mutex_locked(&area->lock));
+}
+
+
+void phys_destroy(as_area_t *area)
+{
+    /* Nothing to do. */
+}
 
 /** Service a page fault in the address space area backed by physical memory.

@@ -88,17 +119,4 @@
 }
 
-/** Share address space area backed by physical memory.
- *
- * Do actually nothing as sharing of address space areas
- * that are backed up by physical memory is very easy.
- * Note that the function must be defined so that
- * as_area_share() will succeed.
- */
-void phys_share(as_area_t *area)
-{
-    ASSERT(mutex_locked(&area->as->lock));
-    ASSERT(mutex_locked(&area->lock));
-}
-
 /** @}
  */
kernel/generic/src/mm/frame.c
@@ -695,6 +695,4 @@
  * not to be 2^order size. Once the allocator is running it is no longer
  * possible, merged configuration data occupies more space :-/
- *
- * The function uses
  *
  */

@@ -1088,4 +1086,14 @@
 }
 
+void *frame_alloc(uint8_t order, frame_flags_t flags)
+{
+    return frame_alloc_generic(order, flags, NULL);
+}
+
+void *frame_alloc_noreserve(uint8_t order, frame_flags_t flags)
+{
+    return frame_alloc_generic(order, flags | FRAME_NO_RESERVE, NULL);
+}
+
 /** Free a frame.
  *

@@ -1095,7 +1103,8 @@
  *
  * @param frame Physical Address of of the frame to be freed.
- *
- */
-void frame_free(uintptr_t frame)
+ * @param flags Flags to control memory reservation.
+ *
+ */
+void frame_free_generic(uintptr_t frame, frame_flags_t flags)
 {
     irq_spinlock_lock(&zones.lock, true);

@@ -1125,4 +1134,14 @@
     }
     mutex_unlock(&mem_avail_mtx);
+}
+
+void frame_free(uintptr_t frame)
+{
+    frame_free_generic(frame, 0);
+}
+
+void frame_free_noreserve(uintptr_t frame)
+{
+    frame_free_generic(frame, FRAME_NO_RESERVE);
 }
 
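The body of frame_free_generic() between lines 1101 and 1134 is elided by the viewer, so the reservation accounting itself is not visible here. Given the wrappers above, the expected shape is that the generic path credits the reservation pool exactly when FRAME_NO_RESERVE is absent, roughly as follows (a sketch, not the actual elided code):

    #include <mm/frame.h>
    #include <mm/reserve.h>

    /* Sketch of the accounting step presumably inside frame_free_generic():
     * one freed frame goes back to the reservation pool unless the caller
     * manages its reservation itself. */
    static void frame_free_account(frame_flags_t flags)
    {
        if (!(flags & FRAME_NO_RESERVE))
            reserve_free(1);
    }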