Changes in / [e5a015b:b2fb47f] in mainline
Files: 1 added, 4 deleted, 21 edited
kernel/Makefile
@@ -228,5 +228,4 @@
 	generic/src/syscall/syscall.c \
 	generic/src/syscall/copy.c \
-	generic/src/mm/reserve.c \
 	generic/src/mm/buddy.c \
 	generic/src/mm/frame.c \
kernel/arch/abs32le/include/types.h
@@ -40,5 +40,4 @@
 
 typedef uint32_t size_t;
-typedef int32_t ssize_t;
 
 typedef uint32_t uintptr_t;
kernel/arch/amd64/include/types.h
@@ -37,5 +37,4 @@
 
 typedef uint64_t size_t;
-typedef int64_t ssize_t;
 
 typedef uint64_t uintptr_t;
kernel/arch/arm32/include/types.h
@@ -44,5 +44,4 @@
 
 typedef uint32_t size_t;
-typedef int32_t ssize_t;
 
 typedef uint32_t uintptr_t;
kernel/arch/ia32/include/types.h
@@ -37,5 +37,4 @@
 
 typedef uint32_t size_t;
-typedef int32_t ssize_t;
 
 typedef uint32_t uintptr_t;
kernel/arch/ia64/include/types.h
@@ -37,5 +37,4 @@
 
 typedef uint64_t size_t;
-typedef int64_t ssize_t;
 
 typedef uint64_t uintptr_t;
kernel/arch/mips32/include/types.h
@@ -37,5 +37,4 @@
 
 typedef uint32_t size_t;
-typedef int32_t ssize_t;
 
 typedef uint32_t uintptr_t;
kernel/arch/ppc32/include/types.h
@@ -37,5 +37,4 @@
 
 typedef uint32_t size_t;
-typedef int32_t ssize_t;
 
 typedef uint32_t uintptr_t;
kernel/arch/sparc64/include/cpu.h
@@ -59,4 +59,9 @@
 #include <arch/asm.h>
 
+#ifdef CONFIG_SMP
+#include <arch/mm/cache.h>
+#endif
+
+
 #if defined (SUN4U)
 #include <arch/sun4u/cpu.h>
kernel/arch/sparc64/include/sun4u/cpu.h
@@ -60,4 +60,8 @@
 #include <trace.h>
 
+#ifdef CONFIG_SMP
+#include <arch/mm/cache.h>
+#endif
+
 typedef struct {
 	uint32_t mid;	/**< Processor ID as read from
kernel/arch/sparc64/include/types.h
@@ -37,5 +37,4 @@
 
 typedef uint64_t size_t;
-typedef int64_t ssize_t;
 
 typedef uint64_t uintptr_t;
kernel/generic/include/mm/as.h
@@ -238,11 +238,7 @@
 /** Address space area backend structure. */
 typedef struct mem_backend {
-	bool (* create)(as_area_t *);
-	bool (* resize)(as_area_t *, size_t);
-	void (* share)(as_area_t *);
-	void (* destroy)(as_area_t *);
-
 	int (* page_fault)(as_area_t *, uintptr_t, pf_access_t);
 	void (* frame_free)(as_area_t *, uintptr_t, uintptr_t);
+	void (* share)(as_area_t *);
 } mem_backend_t;
 
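With the create/resize/destroy hooks reverted, a backend supplies at most the three callbacks above. A minimal sketch of a backend against the reduced interface (illustrative only; the noop_* names are hypothetical — compare the real anon/elf/phys backends further down in this changeset):

/* Illustrative no-op backend; refuses every fault and is not shareable. */
static int noop_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
	return AS_PF_FAULT;	/* never service a fault */
}

mem_backend_t noop_backend = {
	.page_fault = noop_page_fault,
	.frame_free = NULL,	/* nothing backend-specific to free */
	.share = NULL		/* area cannot be shared */
};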
kernel/generic/include/mm/frame.h
@@ -62,18 +62,16 @@
 
 /** Convert the frame address to kernel VA. */
-#define FRAME_KA	0x1
+#define FRAME_KA	0x01
 /** Do not panic and do not sleep on failure. */
-#define FRAME_ATOMIC	0x2
+#define FRAME_ATOMIC	0x02
 /** Do not start reclaiming when no free memory. */
-#define FRAME_NO_RECLAIM	0x4
-/** Do not reserve / unreserve memory. */
-#define FRAME_NO_RESERVE	0x8
+#define FRAME_NO_RECLAIM	0x04
 
 typedef uint8_t zone_flags_t;
 
 /** Available zone (free for allocation) */
-#define ZONE_AVAILABLE	0x0
+#define ZONE_AVAILABLE	0x00
 /** Zone is reserved (not available for allocation) */
-#define ZONE_RESERVED	0x8
+#define ZONE_RESERVED	0x08
 /** Zone is used by firmware (not available for allocation) */
 #define ZONE_FIRMWARE	0x10

@@ -87,5 +85,5 @@ (indentation/alignment changes only)
 	uint8_t buddy_order;	/**< Buddy system block order */
 	link_t buddy_link;	/**< Link to the next free block inside
 				     one order */
 	void *parent;		/**< If allocated by slab, this points there */
 } frame_t;

@@ -93,14 +91,14 @@ (indentation/alignment changes only)
 typedef struct {
 	pfn_t base;			/**< Frame_no of the first frame
 					     in the frames array */
 	size_t count;			/**< Size of zone */
 	size_t free_count;		/**< Number of free frame_t
 					     structures */
 	size_t busy_count;		/**< Number of busy frame_t
 					     structures */
 	zone_flags_t flags;		/**< Type of the zone */
 
 	frame_t *frames;		/**< Array of frame_t structures
 					     in this zone */
 	buddy_system_t *buddy_system;	/**< Buddy system for the zone */
 } zone_t;

@@ -148,22 +146,21 @@
 	((~(((sysarg_t) -1) << (order)) & (index)) == 0)
 #define IS_BUDDY_LEFT_BLOCK(zone, frame) \
-	(((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 0)
+	(((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x01) == 0)
 #define IS_BUDDY_RIGHT_BLOCK(zone, frame) \
-	(((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 1)
+	(((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x01) == 1)
 #define IS_BUDDY_LEFT_BLOCK_ABS(zone, frame) \
-	(((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 0)
+	(((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x01) == 0)
 #define IS_BUDDY_RIGHT_BLOCK_ABS(zone, frame) \
-	(((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 1)
+	(((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x01) == 1)
+
+#define frame_alloc(order, flags) \
+	frame_alloc_generic(order, flags, NULL)
 
 extern void frame_init(void);
 extern void *frame_alloc_generic(uint8_t, frame_flags_t, size_t *);
-extern void *frame_alloc(uint8_t, frame_flags_t);
-extern void *frame_alloc_noreserve(uint8_t, frame_flags_t);
-extern void frame_free_generic(uintptr_t, frame_flags_t);
 extern void frame_free(uintptr_t);
-extern void frame_free_noreserve(uintptr_t);
 extern void frame_reference_add(pfn_t);
 
-extern size_t find_zone(pfn_t, size_t, size_t);
+extern size_t find_zone(pfn_t frame, size_t count, size_t hint);
 extern size_t zone_create(pfn_t, size_t, pfn_t, zone_flags_t);
 extern void *frame_get_parent(pfn_t, size_t);
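After this change frame_alloc() is a macro that simply forwards to frame_alloc_generic() with a NULL preferred-zone hint, so the two calls in this sketch are equivalent (illustrative snippet, not part of the changeset):

/* Both allocate a single frame (order 0, ONE_FRAME) and return its
 * physical address; NULL means no preferred-zone hint. */
uintptr_t frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
uintptr_t same = (uintptr_t) frame_alloc_generic(ONE_FRAME, 0, NULL);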
kernel/generic/src/mm/as.c
@@ -80,4 +80,8 @@
 #include <arch/interrupt.h>
 
+#ifdef CONFIG_VIRT_IDX_DCACHE
+#include <arch/mm/cache.h>
+#endif /* CONFIG_VIRT_IDX_DCACHE */
+
 /**
  * Each architecture decides what functions will be used to carry out

@@ -443,12 +447,4 @@
 	else
 		memsetb(&area->backend_data, sizeof(area->backend_data), 0);
-
-	if (area->backend && area->backend->create) {
-		if (!area->backend->create(area)) {
-			free(area);
-			mutex_unlock(&as->lock);
-			return NULL;
-		}
-	}
 
 	btree_create(&area->used_space);

@@ -694,11 +690,3 @@
 	}
 
-	if (area->backend && area->backend->resize) {
-		if (!area->backend->resize(area, pages)) {
-			mutex_unlock(&area->lock);
-			mutex_unlock(&as->lock);
-			return ENOMEM;
-		}
-	}
-
 	area->pages = pages;

@@ -768,7 +756,4 @@
 		return ENOENT;
 	}
-
-	if (area->backend && area->backend->destroy)
-		area->backend->destroy(area);
 
 	uintptr_t base = area->base;
kernel/generic/src/mm/backend_anon.c
@@ -39,5 +39,4 @@
 #include <mm/as.h>
 #include <mm/page.h>
-#include <mm/reserve.h>
 #include <genarch/mm/page_pt.h>
 #include <genarch/mm/page_ht.h>

@@ -52,95 +51,17 @@
 #include <arch.h>
 
-static bool anon_create(as_area_t *);
-static bool anon_resize(as_area_t *, size_t);
-static void anon_share(as_area_t *);
-static void anon_destroy(as_area_t *);
+#ifdef CONFIG_VIRT_IDX_DCACHE
+#include <arch/mm/cache.h>
+#endif
 
 static int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
 static void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
+static void anon_share(as_area_t *area);
 
 mem_backend_t anon_backend = {
-	.create = anon_create,
-	.resize = anon_resize,
-	.share = anon_share,
-	.destroy = anon_destroy,
-
 	.page_fault = anon_page_fault,
 	.frame_free = anon_frame_free,
+	.share = anon_share
 };
-
-bool anon_create(as_area_t *area)
-{
-	return reserve_try_alloc(area->pages);
-}
-
-bool anon_resize(as_area_t *area, size_t new_pages)
-{
-	if (new_pages > area->pages)
-		return reserve_try_alloc(new_pages - area->pages);
-	else if (new_pages < area->pages)
-		reserve_free(area->pages - new_pages);
-
-	return true;
-}
-
-/** Share the anonymous address space area.
- *
- * Sharing of anonymous area is done by duplicating its entire mapping
- * to the pagemap. Page faults will primarily search for frames there.
- *
- * The address space and address space area must be already locked.
- *
- * @param area Address space area to be shared.
- */
-void anon_share(as_area_t *area)
-{
-	link_t *cur;
-
-	ASSERT(mutex_locked(&area->as->lock));
-	ASSERT(mutex_locked(&area->lock));
-
-	/*
-	 * Copy used portions of the area to sh_info's page map.
-	 */
-	mutex_lock(&area->sh_info->lock);
-	for (cur = area->used_space.leaf_head.next;
-	    cur != &area->used_space.leaf_head; cur = cur->next) {
-		btree_node_t *node;
-		unsigned int i;
-
-		node = list_get_instance(cur, btree_node_t, leaf_link);
-		for (i = 0; i < node->keys; i++) {
-			uintptr_t base = node->key[i];
-			size_t count = (size_t) node->value[i];
-			unsigned int j;
-
-			for (j = 0; j < count; j++) {
-				pte_t *pte;
-
-				page_table_lock(area->as, false);
-				pte = page_mapping_find(area->as,
-				    base + j * PAGE_SIZE);
-				ASSERT(pte && PTE_VALID(pte) &&
-				    PTE_PRESENT(pte));
-				btree_insert(&area->sh_info->pagemap,
-				    (base + j * PAGE_SIZE) - area->base,
-				    (void *) PTE_GET_FRAME(pte), NULL);
-				page_table_unlock(area->as, false);
-
-				pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
-				frame_reference_add(pfn);
-			}
-
-		}
-	}
-	mutex_unlock(&area->sh_info->lock);
-}
-
-void anon_destroy(as_area_t *area)
-{
-	reserve_free(area->pages);
-}
-
 
 /** Service a page fault in the anonymous memory address space area.

@@ -194,5 +115,4 @@
 		}
 		if (allocate) {
-			frame = (uintptr_t) frame_alloc_noreserve(
-			    ONE_FRAME, 0);
+			frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
 			memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);

@@ -225,5 +145,5 @@
 		 * the different causes
 		 */
-		frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
+		frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
 		memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
 	}

@@ -254,7 +174,60 @@
 	ASSERT(mutex_locked(&area->lock));
 
-	frame_free_noreserve(frame);
+	frame_free(frame);
 }
 
+/** Share the anonymous address space area.
+ *
+ * Sharing of anonymous area is done by duplicating its entire mapping
+ * to the pagemap. Page faults will primarily search for frames there.
+ *
+ * The address space and address space area must be already locked.
+ *
+ * @param area Address space area to be shared.
+ */
+void anon_share(as_area_t *area)
+{
+	link_t *cur;
+
+	ASSERT(mutex_locked(&area->as->lock));
+	ASSERT(mutex_locked(&area->lock));
+
+	/*
+	 * Copy used portions of the area to sh_info's page map.
+	 */
+	mutex_lock(&area->sh_info->lock);
+	for (cur = area->used_space.leaf_head.next;
+	    cur != &area->used_space.leaf_head; cur = cur->next) {
+		btree_node_t *node;
+		unsigned int i;
+
+		node = list_get_instance(cur, btree_node_t, leaf_link);
+		for (i = 0; i < node->keys; i++) {
+			uintptr_t base = node->key[i];
+			size_t count = (size_t) node->value[i];
+			unsigned int j;
+
+			for (j = 0; j < count; j++) {
+				pte_t *pte;
+
+				page_table_lock(area->as, false);
+				pte = page_mapping_find(area->as,
+				    base + j * PAGE_SIZE);
+				ASSERT(pte && PTE_VALID(pte) &&
+				    PTE_PRESENT(pte));
+				btree_insert(&area->sh_info->pagemap,
+				    (base + j * PAGE_SIZE) - area->base,
+				    (void *) PTE_GET_FRAME(pte), NULL);
+				page_table_unlock(area->as, false);
+
+				pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
+				frame_reference_add(pfn);
+			}
+
+		}
+	}
+	mutex_unlock(&area->sh_info->lock);
+}
+
 /** @}
  */
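For orientation, a rough sketch (not part of this changeset) of how the share callback re-registered above is reached: as_area_share() in as.c, with the address spaces and the area locked, invokes approximately the following; the exact surrounding checks and error handling in as.c are omitted and may differ.

/* Sketch only. */
if (src_area->backend && src_area->backend->share)
	src_area->backend->share(src_area);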
kernel/generic/src/mm/backend_elf.c
@@ -43,5 +43,4 @@
 #include <mm/slab.h>
 #include <mm/page.h>
-#include <mm/reserve.h>
 #include <genarch/mm/page_pt.h>
 #include <genarch/mm/page_ht.h>

@@ -52,52 +51,218 @@
 #include <arch/barrier.h>
 
-static bool elf_create(as_area_t *);
-static bool elf_resize(as_area_t *, size_t);
-static void elf_share(as_area_t *);
-static void elf_destroy(as_area_t *);
+#ifdef CONFIG_VIRT_IDX_DCACHE
+#include <arch/mm/cache.h>
+#endif
 
 static int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
 static void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
+static void elf_share(as_area_t *area);
 
 mem_backend_t elf_backend = {
-	.create = elf_create,
-	.resize = elf_resize,
-	.share = elf_share,
-	.destroy = elf_destroy,
-
 	.page_fault = elf_page_fault,
 	.frame_free = elf_frame_free,
+	.share = elf_share
 };
 
-bool elf_create(as_area_t *area)
+/** Service a page fault in the ELF backend address space area.
+ *
+ * The address space area and page tables must be already locked.
+ *
+ * @param area	Pointer to the address space area.
+ * @param addr	Faulting virtual address.
+ * @param access	Access mode that caused the fault (i.e.
+ *			read/write/exec).
+ *
+ * @return	AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
+ *		on success (i.e. serviced).
+ */
+int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
+{
+	elf_header_t *elf = area->backend_data.elf;
+	elf_segment_header_t *entry = area->backend_data.segment;
+	btree_node_t *leaf;
+	uintptr_t base, frame, page, start_anon;
+	size_t i;
+	bool dirty = false;
+
+	ASSERT(page_table_locked(AS));
+	ASSERT(mutex_locked(&area->lock));
+
+	if (!as_area_check_access(area, access))
+		return AS_PF_FAULT;
+
+	if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
+		return AS_PF_FAULT;
+
+	if (addr >= entry->p_vaddr + entry->p_memsz)
+		return AS_PF_FAULT;
+
+	i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
+	base = (uintptr_t)
+	    (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));
+
+	/* Virtual address of faulting page*/
+	page = ALIGN_DOWN(addr, PAGE_SIZE);
+
+	/* Virtual address of the end of initialized part of segment */
+	start_anon = entry->p_vaddr + entry->p_filesz;
+
+	if (area->sh_info) {
+		bool found = false;
+
+		/*
+		 * The address space area is shared.
+		 */
+
+		mutex_lock(&area->sh_info->lock);
+		frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
+		    page - area->base, &leaf);
+		if (!frame) {
+			unsigned int i;
+
+			/*
+			 * Workaround for valid NULL address.
+			 */
+
+			for (i = 0; i < leaf->keys; i++) {
+				if (leaf->key[i] == page - area->base) {
+					found = true;
+					break;
+				}
+			}
+		}
+		if (frame || found) {
+			frame_reference_add(ADDR2PFN(frame));
+			page_mapping_insert(AS, addr, frame,
+			    as_area_get_flags(area));
+			if (!used_space_insert(area, page, 1))
+				panic("Cannot insert used space.");
+			mutex_unlock(&area->sh_info->lock);
+			return AS_PF_OK;
+		}
+	}
+
+	/*
+	 * The area is either not shared or the pagemap does not contain the
+	 * mapping.
+	 */
+	if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
+		/*
+		 * Initialized portion of the segment. The memory is backed
+		 * directly by the content of the ELF image. Pages are
+		 * only copied if the segment is writable so that there
+		 * can be more instantions of the same memory ELF image
+		 * used at a time. Note that this could be later done
+		 * as COW.
+		 */
+		if (entry->p_flags & PF_W) {
+			frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
+			memcpy((void *) PA2KA(frame),
+			    (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
+			if (entry->p_flags & PF_X) {
+				smc_coherence_block((void *) PA2KA(frame),
+				    FRAME_SIZE);
+			}
+			dirty = true;
+		} else {
+			frame = KA2PA(base + i * FRAME_SIZE);
+		}
+	} else if (page >= start_anon) {
+		/*
+		 * This is the uninitialized portion of the segment.
+		 * It is not physically present in the ELF image.
+		 * To resolve the situation, a frame must be allocated
+		 * and cleared.
+		 */
+		frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
+		memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
+		dirty = true;
+	} else {
+		size_t pad_lo, pad_hi;
+		/*
+		 * The mixed case.
+		 *
+		 * The middle part is backed by the ELF image and
+		 * the lower and upper parts are anonymous memory.
+		 * (The segment can be and often is shorter than 1 page).
+		 */
+		if (page < entry->p_vaddr)
+			pad_lo = entry->p_vaddr - page;
+		else
+			pad_lo = 0;
+
+		if (start_anon < page + PAGE_SIZE)
+			pad_hi = page + PAGE_SIZE - start_anon;
+		else
+			pad_hi = 0;
+
+		frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
+		memcpy((void *) (PA2KA(frame) + pad_lo),
+		    (void *) (base + i * FRAME_SIZE + pad_lo),
+		    FRAME_SIZE - pad_lo - pad_hi);
+		if (entry->p_flags & PF_X) {
+			smc_coherence_block((void *) (PA2KA(frame) + pad_lo),
+			    FRAME_SIZE - pad_lo - pad_hi);
+		}
+		memsetb((void *) PA2KA(frame), pad_lo, 0);
+		memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
+		    0);
+		dirty = true;
+	}
+
+	if (dirty && area->sh_info) {
+		frame_reference_add(ADDR2PFN(frame));
+		btree_insert(&area->sh_info->pagemap, page - area->base,
+		    (void *) frame, leaf);
+	}
+
+	if (area->sh_info)
+		mutex_unlock(&area->sh_info->lock);
+
+	page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
+	if (!used_space_insert(area, page, 1))
+		panic("Cannot insert used space.");
+
+	return AS_PF_OK;
+}
+
+/** Free a frame that is backed by the ELF backend.
+ *
+ * The address space area and page tables must be already locked.
+ *
+ * @param area	Pointer to the address space area.
+ * @param page	Page that is mapped to frame. Must be aligned to
+ *		PAGE_SIZE.
+ * @param frame	Frame to be released.
+ *
+ */
+void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
 {
 	elf_segment_header_t *entry = area->backend_data.segment;
-	size_t nonanon_pages = ALIGN_DOWN(entry->p_filesz, PAGE_SIZE);
-
-	if (area->pages <= nonanon_pages)
-		return true;
-
-	return reserve_try_alloc(area->pages - nonanon_pages);
-}
-
-bool elf_resize(as_area_t *area, size_t new_pages)
-{
-	elf_segment_header_t *entry = area->backend_data.segment;
-	size_t nonanon_pages = ALIGN_DOWN(entry->p_filesz, PAGE_SIZE);
-
-	if (new_pages > area->pages) {
-		/* The area is growing. */
-		if (area->pages >= nonanon_pages)
-			return reserve_try_alloc(new_pages - area->pages);
-		else if (new_pages > nonanon_pages)
-			return reserve_try_alloc(new_pages - nonanon_pages);
-	} else if (new_pages < area->pages) {
-		/* The area is shrinking. */
-		if (new_pages >= nonanon_pages)
-			reserve_free(area->pages - new_pages);
-		else if (area->pages > nonanon_pages)
-			reserve_free(nonanon_pages - new_pages);
-	}
-
-	return true;
+	uintptr_t start_anon;
+
+	ASSERT(page_table_locked(area->as));
+	ASSERT(mutex_locked(&area->lock));
+
+	ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
+	ASSERT(page < entry->p_vaddr + entry->p_memsz);
+
+	start_anon = entry->p_vaddr + entry->p_filesz;
+
+	if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
+		if (entry->p_flags & PF_W) {
+			/*
+			 * Free the frame with the copy of writable segment
+			 * data.
+			 */
+			frame_free(frame);
+		}
+	} else {
+		/*
+		 * The frame is either anonymous memory or the mixed case (i.e.
+		 * lower part is backed by the ELF image and the upper is
+		 * anonymous). In any case, a frame needs to be freed.
+		 */
+		frame_free(frame);
+	}
 }

@@ -191,216 +356,4 @@
 }
 
-void elf_destroy(as_area_t *area)
-{
-	elf_segment_header_t *entry = area->backend_data.segment;
-	size_t nonanon_pages = ALIGN_DOWN(entry->p_filesz, PAGE_SIZE);
-
-	if (area->pages > nonanon_pages)
-		reserve_free(area->pages - nonanon_pages);
-}
-
-/** Service a page fault in the ELF backend address space area.
- *
- * The address space area and page tables must be already locked.
- *
- * @param area	Pointer to the address space area.
- * @param addr	Faulting virtual address.
- * @param access	Access mode that caused the fault (i.e.
- *			read/write/exec).
- *
- * @return	AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
- *		on success (i.e. serviced).
- */
-int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
-{
-	elf_header_t *elf = area->backend_data.elf;
-	elf_segment_header_t *entry = area->backend_data.segment;
-	btree_node_t *leaf;
-	uintptr_t base, frame, page, start_anon;
-	size_t i;
-	bool dirty = false;
-
-	ASSERT(page_table_locked(AS));
-	ASSERT(mutex_locked(&area->lock));
-
-	if (!as_area_check_access(area, access))
-		return AS_PF_FAULT;
-
-	if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
-		return AS_PF_FAULT;
-
-	if (addr >= entry->p_vaddr + entry->p_memsz)
-		return AS_PF_FAULT;
-
-	i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
-	base = (uintptr_t)
-	    (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));
-
-	/* Virtual address of faulting page*/
-	page = ALIGN_DOWN(addr, PAGE_SIZE);
-
-	/* Virtual address of the end of initialized part of segment */
-	start_anon = entry->p_vaddr + entry->p_filesz;
-
-	if (area->sh_info) {
-		bool found = false;
-
-		/*
-		 * The address space area is shared.
-		 */
-
-		mutex_lock(&area->sh_info->lock);
-		frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
-		    page - area->base, &leaf);
-		if (!frame) {
-			unsigned int i;
-
-			/*
-			 * Workaround for valid NULL address.
-			 */
-
-			for (i = 0; i < leaf->keys; i++) {
-				if (leaf->key[i] == page - area->base) {
-					found = true;
-					break;
-				}
-			}
-		}
-		if (frame || found) {
-			frame_reference_add(ADDR2PFN(frame));
-			page_mapping_insert(AS, addr, frame,
-			    as_area_get_flags(area));
-			if (!used_space_insert(area, page, 1))
-				panic("Cannot insert used space.");
-			mutex_unlock(&area->sh_info->lock);
-			return AS_PF_OK;
-		}
-	}
-
-	/*
-	 * The area is either not shared or the pagemap does not contain the
-	 * mapping.
-	 */
-	if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
-		/*
-		 * Initialized portion of the segment. The memory is backed
-		 * directly by the content of the ELF image. Pages are
-		 * only copied if the segment is writable so that there
-		 * can be more instantions of the same memory ELF image
-		 * used at a time. Note that this could be later done
-		 * as COW.
-		 */
-		if (entry->p_flags & PF_W) {
-			frame = (uintptr_t)frame_alloc_noreserve(ONE_FRAME, 0);
-			memcpy((void *) PA2KA(frame),
-			    (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
-			if (entry->p_flags & PF_X) {
-				smc_coherence_block((void *) PA2KA(frame),
-				    FRAME_SIZE);
-			}
-			dirty = true;
-		} else {
-			frame = KA2PA(base + i * FRAME_SIZE);
-		}
-	} else if (page >= start_anon) {
-		/*
-		 * This is the uninitialized portion of the segment.
-		 * It is not physically present in the ELF image.
-		 * To resolve the situation, a frame must be allocated
-		 * and cleared.
-		 */
-		frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
-		memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
-		dirty = true;
-	} else {
-		size_t pad_lo, pad_hi;
-		/*
-		 * The mixed case.
-		 *
-		 * The middle part is backed by the ELF image and
-		 * the lower and upper parts are anonymous memory.
-		 * (The segment can be and often is shorter than 1 page).
-		 */
-		if (page < entry->p_vaddr)
-			pad_lo = entry->p_vaddr - page;
-		else
-			pad_lo = 0;
-
-		if (start_anon < page + PAGE_SIZE)
-			pad_hi = page + PAGE_SIZE - start_anon;
-		else
-			pad_hi = 0;
-
-		frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
-		memcpy((void *) (PA2KA(frame) + pad_lo),
-		    (void *) (base + i * FRAME_SIZE + pad_lo),
-		    FRAME_SIZE - pad_lo - pad_hi);
-		if (entry->p_flags & PF_X) {
-			smc_coherence_block((void *) (PA2KA(frame) + pad_lo),
-			    FRAME_SIZE - pad_lo - pad_hi);
-		}
-		memsetb((void *) PA2KA(frame), pad_lo, 0);
-		memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
-		    0);
-		dirty = true;
-	}
-
-	if (dirty && area->sh_info) {
-		frame_reference_add(ADDR2PFN(frame));
-		btree_insert(&area->sh_info->pagemap, page - area->base,
-		    (void *) frame, leaf);
-	}
-
-	if (area->sh_info)
-		mutex_unlock(&area->sh_info->lock);
-
-	page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
-	if (!used_space_insert(area, page, 1))
-		panic("Cannot insert used space.");
-
-	return AS_PF_OK;
-}
-
-/** Free a frame that is backed by the ELF backend.
- *
- * The address space area and page tables must be already locked.
- *
- * @param area	Pointer to the address space area.
- * @param page	Page that is mapped to frame. Must be aligned to
- *		PAGE_SIZE.
- * @param frame	Frame to be released.
- *
- */
-void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
-{
-	elf_segment_header_t *entry = area->backend_data.segment;
-	uintptr_t start_anon;
-
-	ASSERT(page_table_locked(area->as));
-	ASSERT(mutex_locked(&area->lock));
-
-	ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
-	ASSERT(page < entry->p_vaddr + entry->p_memsz);
-
-	start_anon = entry->p_vaddr + entry->p_filesz;
-
-	if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
-		if (entry->p_flags & PF_W) {
-			/*
-			 * Free the frame with the copy of writable segment
-			 * data.
-			 */
-			frame_free_noreserve(frame);
-		}
-	} else {
-		/*
-		 * The frame is either anonymous memory or the mixed case (i.e.
-		 * lower part is backed by the ELF image and the upper is
-		 * anonymous). In any case, a frame needs to be freed.
-		 */
-		frame_free_noreserve(frame);
-	}
-}
-
 /** @}
  */
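A worked example of the mixed-case padding arithmetic in elf_page_fault() above, with hypothetical numbers (PAGE_SIZE 0x1000, segment starting at 0x10080, initialized data ending at 0x10f00, fault in the page at 0x10000):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 0x1000

int main(void)
{
	uintptr_t page = 0x10000;       /* base of the faulting page */
	uintptr_t p_vaddr = 0x10080;    /* segment start (hypothetical) */
	uintptr_t start_anon = 0x10f00; /* p_vaddr + p_filesz (hypothetical) */

	/* Same conditions as in elf_page_fault(). */
	uintptr_t pad_lo = (page < p_vaddr) ? p_vaddr - page : 0;
	uintptr_t pad_hi = (start_anon < page + PAGE_SIZE) ?
	    page + PAGE_SIZE - start_anon : 0;

	/* Prints pad_lo=0x80 pad_hi=0x100 copy=0xe80: the 0xe80 bytes in
	 * the middle are copied from the ELF image, the prefix and suffix
	 * are zeroed as anonymous memory. */
	printf("pad_lo=%#lx pad_hi=%#lx copy=%#lx\n",
	    (unsigned long) pad_lo, (unsigned long) pad_hi,
	    (unsigned long) (PAGE_SIZE - pad_lo - pad_hi));
	return 0;
}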
kernel/generic/src/mm/backend_phys.c
@@ -48,42 +48,11 @@
 #include <align.h>
 
-static bool phys_create(as_area_t *);
-static void phys_share(as_area_t *);
-static void phys_destroy(as_area_t *);
-
 static int phys_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
+static void phys_share(as_area_t *area);
 
 mem_backend_t phys_backend = {
-	.create = phys_create,
-	.resize = NULL,
-	.share = phys_share,
-	.destroy = phys_destroy,
-
 	.page_fault = phys_page_fault,
 	.frame_free = NULL,
+	.share = phys_share
 };
-
-bool phys_create(as_area_t *area)
-{
-	return true;
-}
-
-/** Share address space area backed by physical memory.
- *
- * Do actually nothing as sharing of address space areas
- * that are backed up by physical memory is very easy.
- * Note that the function must be defined so that
- * as_area_share() will succeed.
- */
-void phys_share(as_area_t *area)
-{
-	ASSERT(mutex_locked(&area->as->lock));
-	ASSERT(mutex_locked(&area->lock));
-}
-
-
-void phys_destroy(as_area_t *area)
-{
-	/* Nothing to do. */
-}
 

@@ -119,4 +88,17 @@
 }
 
+/** Share address space area backed by physical memory.
+ *
+ * Do actually nothing as sharing of address space areas
+ * that are backed up by physical memory is very easy.
+ * Note that the function must be defined so that
+ * as_area_share() will succeed.
+ */
+void phys_share(as_area_t *area)
+{
+	ASSERT(mutex_locked(&area->as->lock));
+	ASSERT(mutex_locked(&area->lock));
+}
+
 /** @}
  */
kernel/generic/src/mm/frame.c
@@ -45,5 +45,4 @@
 #include <typedefs.h>
 #include <mm/frame.h>
-#include <mm/reserve.h>
 #include <mm/as.h>
 #include <panic.h>

@@ -473,13 +472,13 @@
  * @param frame_idx Frame index relative to zone.
  *
- * @return Number of freed frames.
- *
  */
-NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t frame_idx)
+NO_TRACE static void zone_frame_free(zone_t *zone, size_t frame_idx)
 {
 	ASSERT(zone_flags_available(zone->flags));
 
 	frame_t *frame = &zone->frames[frame_idx];
-	size_t size = 1 << frame->buddy_order;
+
+	/* Remember frame order */
+	uint8_t order = frame->buddy_order;
 
 	ASSERT(frame->refcount);

@@ -489,8 +488,6 @@
 
 		/* Update zone information. */
-		zone->free_count += size;
-		zone->busy_count -= size;
-	}
-
-	return size;
+		zone->free_count += (1 << order);
+		zone->busy_count -= (1 << order);
+	}
 }

@@ -519,4 +516,3 @@
 	ASSERT(link);
 	zone->free_count--;
-	reserve_force_alloc(1);
 }

@@ -649,5 +645,5 @@
 	for (i = 0; i < cframes; i++) {
 		zones.info[znum].busy_count++;
-		(void) zone_frame_free(&zones.info[znum],
+		zone_frame_free(&zones.info[znum],
 		    pfn - zones.info[znum].base + i);
 	}

@@ -687,4 +683,4 @@
 	/* Free unneeded frames */
 	for (i = count; i < (size_t) (1 << order); i++)
-		(void) zone_frame_free(&zones.info[znum], i + frame_idx);
+		zone_frame_free(&zones.info[znum], i + frame_idx);
 }

@@ -699,4 +695,6 @@
  * not to be 2^order size. Once the allocator is running it is no longer
  * possible, merged configuration data occupies more space :-/
+ *
+ * The function uses
  *
  */

@@ -839,7 +837,4 @@
 			buddy_system_free(zone->buddy_system, &zone->frames[i].buddy_link);
 		}
-
-		/* "Unreserve" new frames. */
-		reserve_free(count);
 	} else
 		zone->frames = NULL;

@@ -1004,16 +999,4 @@
 	size_t hint = pzone ? (*pzone) : 0;
 
-	/*
-	 * If not told otherwise, we must first reserve the memory.
-	 */
-	if (!(flags & FRAME_NO_RESERVE)) {
-		if (flags & FRAME_ATOMIC) {
-			if (!reserve_try_alloc(size))
-				return NULL;
-		} else {
-			reserve_force_alloc(size);
-		}
-	}
-
 loop:
 	irq_spinlock_lock(&zones.lock, true);

@@ -1050,6 +1033,4 @@
 	if (flags & FRAME_ATOMIC) {
 		irq_spinlock_unlock(&zones.lock, true);
-		if (!(flags & FRAME_NO_RESERVE))
-			reserve_free(size);
 		return NULL;
 	}

@@ -1107,14 +1088,4 @@
 }
 
-void *frame_alloc(uint8_t order, frame_flags_t flags)
-{
-	return frame_alloc_generic(order, flags, NULL);
-}
-
-void *frame_alloc_noreserve(uint8_t order, frame_flags_t flags)
-{
-	return frame_alloc_generic(order, flags | FRAME_NO_RESERVE, NULL);
-}
-
 /** Free a frame.
  *

@@ -1124,11 +1095,8 @@
  *
  * @param frame Physical Address of of the frame to be freed.
- * @param flags Flags to control memory reservation.
- *
- */
-void frame_free_generic(uintptr_t frame, frame_flags_t flags)
-{
-	size_t size;
-
+ *
+ */
+void frame_free(uintptr_t frame)
+{
 	irq_spinlock_lock(&zones.lock, true);
 

@@ -1138,9 +1106,8 @@
 	pfn_t pfn = ADDR2PFN(frame);
 	size_t znum = find_zone(pfn, 1, 0);
-
 
 	ASSERT(znum != (size_t) -1);
 
-	size = zone_frame_free(&zones.info[znum], pfn - zones.info[znum].base);
+	zone_frame_free(&zones.info[znum], pfn - zones.info[znum].base);
 
 	irq_spinlock_unlock(&zones.lock, true);

@@ -1151,5 +1118,5 @@
 	mutex_lock(&mem_avail_mtx);
 	if (mem_avail_req > 0)
-		mem_avail_req -= min(mem_avail_req, size);
+		mem_avail_req--;
 
 	if (mem_avail_req == 0) {

@@ -1158,16 +1125,3 @@
 	}
 	mutex_unlock(&mem_avail_mtx);
-
-	if (!(flags & FRAME_NO_RESERVE))
-		reserve_free(size);
-}
-
-void frame_free(uintptr_t frame)
-{
-	frame_free_generic(frame, 0);
-}
-
-void frame_free_noreserve(uintptr_t frame)
-{
-	frame_free_generic(frame, FRAME_NO_RESERVE);
 }
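An illustrative round trip with the restored allocator API (sketch, not part of the changeset): FRAME_ATOMIC makes the allocation return NULL under memory pressure instead of sleeping, and frame_free() now wakes waiters by decrementing the outstanding-request counter by one per freed frame.

/* Allocate a single frame atomically and release it again. */
uintptr_t frame = (uintptr_t) frame_alloc(ONE_FRAME, FRAME_ATOMIC);
if (frame != 0) {
	/* ... access the frame through PA2KA(frame) ... */
	frame_free(frame);
}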
uspace/app/tester/Makefile
@@ -49,5 +49,4 @@
 	loop/loop1.c \
 	mm/malloc1.c \
-	mm/malloc2.c \
 	devs/devman1.c \
 	hw/misc/virtchar1.c \
uspace/app/tester/tester.c
@@ -62,5 +62,4 @@
 #include "loop/loop1.def"
 #include "mm/malloc1.def"
-#include "mm/malloc2.def"
 #include "hw/serial/serial1.def"
 #include "hw/misc/virtchar1.def"
uspace/app/tester/tester.h
@@ -78,5 +78,4 @@
 extern const char *test_loop1(void);
 extern const char *test_malloc1(void);
-extern const char *test_malloc2(void);
 extern const char *test_serial1(void);
 extern const char *test_virtchar1(void);