Changeset 544a2e4 in mainline for kernel/generic/src
- Timestamp:
- 2011-05-30T21:37:43Z
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 7b712b60
- Parents:
- 18ba2e4f (diff), 0743493a (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location:
- kernel/generic/src
- Files:
- 1 added
- 28 edited
- adt/list.c (modified) (1 diff)
- console/cmd.c (modified) (1 diff)
- console/console.c (modified) (8 diffs)
- ddi/ddi.c (modified) (1 diff)
- debug/panic.c (modified) (1 diff)
- interrupt/interrupt.c (modified) (1 diff)
- ipc/event.c (modified) (3 diffs)
- lib/elf.c (modified) (1 diff)
- lib/memfnc.c (modified) (2 diffs)
- lib/str.c (modified) (2 diffs)
- main/main.c (modified) (7 diffs)
- main/uinit.c (modified) (6 diffs)
- mm/as.c (modified) (57 diffs)
- mm/backend_anon.c (modified) (5 diffs)
- mm/backend_elf.c (modified) (5 diffs)
- mm/backend_phys.c (modified) (2 diffs)
- mm/frame.c (modified) (18 diffs)
- mm/page.c (modified) (4 diffs)
- mm/reserve.c (added)
- printf/vprintf.c (modified) (2 diffs)
- proc/program.c (modified) (2 diffs)
- proc/scheduler.c (modified) (2 diffs)
- proc/task.c (modified) (7 diffs)
- proc/the.c (modified) (2 diffs)
- proc/thread.c (modified) (7 diffs)
- security/cap.c (modified) (2 diffs)
- synch/futex.c (modified) (2 diffs)
- synch/spinlock.c (modified) (1 diff)
- syscall/syscall.c (modified) (1 diff)
kernel/generic/src/adt/list.c
r18ba2e4f → r544a2e4
   *
   */
- bool list_member(const link_t *link, const link_t *head)
+ int list_member(const link_t *link, const link_t *head)
  {
      bool found = false;
kernel/generic/src/console/cmd.c
r18ba2e4f → r544a2e4
      release_console();

-     event_notify_0(EVENT_KCONSOLE);
+     event_notify_0(EVENT_KCONSOLE, false);
      indev_pop_character(stdin);
kernel/generic/src/console/console.c
r18ba2e4f r544a2e4 53 53 #include <str.h> 54 54 55 #define KLOG_PAGES 455 #define KLOG_PAGES 8 56 56 #define KLOG_LENGTH (KLOG_PAGES * PAGE_SIZE / sizeof(wchar_t)) 57 #define KLOG_LATENCY 858 57 59 58 /** Kernel log cyclic buffer */ … … 61 60 62 61 /** Kernel log initialized */ 63 static bool klog_inited = false;62 static atomic_t klog_inited = {false}; 64 63 65 64 /** First kernel log characters */ … … 76 75 77 76 /** Kernel log spinlock */ 78 SPINLOCK_STATIC_INITIALIZE_NAME(klog_lock, " *klog_lock");77 SPINLOCK_STATIC_INITIALIZE_NAME(klog_lock, "klog_lock"); 79 78 80 79 /** Physical memory area used for klog buffer */ … … 166 165 sysinfo_set_item_val("klog.pages", NULL, KLOG_PAGES); 167 166 168 spinlock_lock(&klog_lock); 169 klog_inited = true; 170 spinlock_unlock(&klog_lock); 167 event_set_unmask_callback(EVENT_KLOG, klog_update); 168 atomic_set(&klog_inited, true); 171 169 } 172 170 … … 263 261 void klog_update(void) 264 262 { 263 if (!atomic_get(&klog_inited)) 264 return; 265 265 266 spinlock_lock(&klog_lock); 266 267 267 if ((klog_inited) && (event_is_subscribed(EVENT_KLOG)) && (klog_uspace > 0)) { 268 event_notify_3(EVENT_KLOG, klog_start, klog_len, klog_uspace); 269 klog_uspace = 0; 268 if (klog_uspace > 0) { 269 if (event_notify_3(EVENT_KLOG, true, klog_start, klog_len, 270 klog_uspace) == EOK) 271 klog_uspace = 0; 270 272 } 271 273 … … 275 277 void putchar(const wchar_t ch) 276 278 { 279 bool ordy = ((stdout) && (stdout->op->write)); 280 277 281 spinlock_lock(&klog_lock); 278 282 279 if ((klog_stored > 0) && (stdout) && (stdout->op->write)) { 280 /* Print charaters stored in kernel log */ 281 size_t i; 282 for (i = klog_len - klog_stored; i < klog_len; i++) 283 stdout->op->write(stdout, klog[(klog_start + i) % KLOG_LENGTH], silent); 284 klog_stored = 0; 283 /* Print charaters stored in kernel log */ 284 if (ordy) { 285 while (klog_stored > 0) { 286 wchar_t tmp = klog[(klog_start + klog_len - klog_stored) % KLOG_LENGTH]; 287 klog_stored--; 288 289 /* 290 * We need to give up the spinlock for 291 * the physical operation of writting out 292 * the character. 293 */ 294 spinlock_unlock(&klog_lock); 295 stdout->op->write(stdout, tmp, silent); 296 spinlock_lock(&klog_lock); 297 } 285 298 } 286 299 … … 292 305 klog_start = (klog_start + 1) % KLOG_LENGTH; 293 306 294 if ((stdout) && (stdout->op->write)) 307 if (!ordy) { 308 if (klog_stored < klog_len) 309 klog_stored++; 310 } 311 312 /* The character is stored for uspace */ 313 if (klog_uspace < klog_len) 314 klog_uspace++; 315 316 spinlock_unlock(&klog_lock); 317 318 if (ordy) { 319 /* 320 * Output the character. In this case 321 * it should be no longer buffered. 322 */ 295 323 stdout->op->write(stdout, ch, silent); 296 else {324 } else { 297 325 /* 298 326 * No standard output routine defined yet. … … 304 332 * Note that the early_putc() function might be 305 333 * a no-op on certain hardware configurations. 306 *307 334 */ 308 335 early_putchar(ch); 309 310 if (klog_stored < klog_len) 311 klog_stored++; 312 } 313 314 /* The character is stored for uspace */ 315 if (klog_uspace < klog_len) 316 klog_uspace++; 317 318 /* Check notify uspace to update */ 319 bool update; 320 if ((klog_uspace > KLOG_LATENCY) || (ch == '\n')) 321 update = true; 322 else 323 update = false; 324 325 spinlock_unlock(&klog_lock); 326 327 if (update) 336 } 337 338 /* Force notification on newline */ 339 if (ch == '\n') 328 340 klog_update(); 329 341 } -
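The console.c hunks above rework putchar(): characters buffered while no output device was ready are now drained one at a time, and klog_lock is released around each stdout->op->write() call because the physical write can be slow. The following is a minimal, self-contained sketch of that "drop the lock around the slow operation" pattern; it is not HelenOS code, a pthread mutex stands in for the kernel spinlock and all names are illustrative.

```c
/*
 * Minimal model of the drain loop added to putchar(): the buffer lock is held
 * only while the ring buffer is manipulated, never across the slow write.
 */
#include <pthread.h>
#include <stdio.h>

#define BUF_LEN 64

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char buf[BUF_LEN];
static size_t start;    /* index of the oldest character (cf. klog_start) */
static size_t len;      /* number of valid characters (cf. klog_len) */
static size_t stored;   /* characters not yet written out (cf. klog_stored) */

static void slow_write(char c)   /* stand-in for stdout->op->write() */
{
	putchar(c);
}

static void drain(void)
{
	pthread_mutex_lock(&lock);
	while (stored > 0) {
		char tmp = buf[(start + len - stored) % BUF_LEN];
		stored--;

		/* Give up the lock for the slow physical write. */
		pthread_mutex_unlock(&lock);
		slow_write(tmp);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	const char *msg = "hi\n";
	for (len = 0; msg[len] != '\0'; len++)
		buf[len] = msg[len];
	stored = len;
	drain();
	return 0;
}
```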
kernel/generic/src/ddi/ddi.c
r18ba2e4f → r544a2e4
      task_t *task = task_find_by_id(id);

-     if ((!task) || (!context_check(CONTEXT, task->context))) {
+     if ((!task) || (!container_check(CONTAINER, task->container))) {
          /*
           * There is no task with the specified ID
kernel/generic/src/debug/panic.c
r18ba2e4f → r544a2e4
      printf("\n");

+     printf("THE=%p: ", THE);
+     if (THE != NULL) {
+         printf("pe=%" PRIun " thr=%p task=%p cpu=%p as=%p"
+             " magic=%#" PRIx32 "\n", THE->preemption_disabled,
+             THE->thread, THE->task, THE->cpu, THE->as, THE->magic);
+     } else
+         printf("invalid\n");
+
      if (istate) {
          istate_decode(istate);
kernel/generic/src/interrupt/interrupt.c
r18ba2e4f → r544a2e4
       * stack.
       */
-     return (istate_t *) ((uint8_t *) thread->kstack + THREAD_STACK_SIZE -
-         sizeof(istate_t));
+     return (istate_t *) ((uint8_t *)
+         thread->kstack + STACK_SIZE - sizeof(istate_t));
  }
kernel/generic/src/ipc/event.c
r18ba2e4f r544a2e4 48 48 static event_t events[EVENT_END]; 49 49 50 /** Initialize kernel events. */ 50 /** Initialize kernel events. 51 * 52 */ 51 53 void event_init(void) 52 54 { 53 unsigned int i; 54 55 for (i = 0; i < EVENT_END; i++) { 55 for (unsigned int i = 0; i < EVENT_END; i++) { 56 56 spinlock_initialize(&events[i].lock, "event.lock"); 57 57 events[i].answerbox = NULL; 58 58 events[i].counter = 0; 59 59 events[i].imethod = 0; 60 events[i].masked = false; 61 events[i].unmask_callback = NULL; 60 62 } 61 63 } 62 64 65 /** Unsubscribe kernel events associated with an answerbox 66 * 67 * @param answerbox Answerbox to be unsubscribed. 68 * 69 */ 70 void event_cleanup_answerbox(answerbox_t *answerbox) 71 { 72 for (unsigned int i = 0; i < EVENT_END; i++) { 73 spinlock_lock(&events[i].lock); 74 75 if (events[i].answerbox == answerbox) { 76 events[i].answerbox = NULL; 77 events[i].counter = 0; 78 events[i].imethod = 0; 79 events[i].masked = false; 80 } 81 82 spinlock_unlock(&events[i].lock); 83 } 84 } 85 86 /** Define a callback function for the event unmask event. 87 * 88 * @param evno Event type. 89 * @param callback Callback function to be called when 90 * the event is unmasked. 91 * 92 */ 93 void event_set_unmask_callback(event_type_t evno, event_callback_t callback) 94 { 95 ASSERT(evno < EVENT_END); 96 97 spinlock_lock(&events[evno].lock); 98 events[evno].unmask_callback = callback; 99 spinlock_unlock(&events[evno].lock); 100 } 101 102 /** Send kernel notification event 103 * 104 * @param evno Event type. 105 * @param mask Mask further notifications after a successful 106 * sending. 107 * @param a1 First argument. 108 * @param a2 Second argument. 109 * @param a3 Third argument. 110 * @param a4 Fourth argument. 111 * @param a5 Fifth argument. 112 * 113 * @return EOK if notification was successfully sent. 114 * @return ENOMEM if the notification IPC message failed to allocate. 115 * @return EBUSY if the notifications of the given type are 116 * currently masked. 117 * @return ENOENT if the notifications of the given type are 118 * currently not subscribed. 119 * 120 */ 121 int event_notify(event_type_t evno, bool mask, sysarg_t a1, sysarg_t a2, 122 sysarg_t a3, sysarg_t a4, sysarg_t a5) 123 { 124 ASSERT(evno < EVENT_END); 125 126 spinlock_lock(&events[evno].lock); 127 128 int ret; 129 130 if (events[evno].answerbox != NULL) { 131 if (!events[evno].masked) { 132 call_t *call = ipc_call_alloc(FRAME_ATOMIC); 133 134 if (call) { 135 call->flags |= IPC_CALL_NOTIF; 136 call->priv = ++events[evno].counter; 137 138 IPC_SET_IMETHOD(call->data, events[evno].imethod); 139 IPC_SET_ARG1(call->data, a1); 140 IPC_SET_ARG2(call->data, a2); 141 IPC_SET_ARG3(call->data, a3); 142 IPC_SET_ARG4(call->data, a4); 143 IPC_SET_ARG5(call->data, a5); 144 145 irq_spinlock_lock(&events[evno].answerbox->irq_lock, true); 146 list_append(&call->link, &events[evno].answerbox->irq_notifs); 147 irq_spinlock_unlock(&events[evno].answerbox->irq_lock, true); 148 149 waitq_wakeup(&events[evno].answerbox->wq, WAKEUP_FIRST); 150 151 if (mask) 152 events[evno].masked = true; 153 154 ret = EOK; 155 } else 156 ret = ENOMEM; 157 } else 158 ret = EBUSY; 159 } else 160 ret = ENOENT; 161 162 spinlock_unlock(&events[evno].lock); 163 164 return ret; 165 } 166 167 /** Subscribe event notifications 168 * 169 * @param evno Event type. 170 * @param imethod IPC interface and method to be used for 171 * the notifications. 172 * @param answerbox Answerbox to send the notifications to. 
173 * 174 * @return EOK if the subscription was successful. 175 * @return EEXISTS if the notifications of the given type are 176 * already subscribed. 177 * 178 */ 63 179 static int event_subscribe(event_type_t evno, sysarg_t imethod, 64 180 answerbox_t *answerbox) 65 181 { 66 if (evno >= EVENT_END) 67 return ELIMIT; 182 ASSERT(evno < EVENT_END); 68 183 69 184 spinlock_lock(&events[evno].lock); … … 75 190 events[evno].imethod = imethod; 76 191 events[evno].counter = 0; 192 events[evno].masked = false; 77 193 res = EOK; 78 194 } else … … 84 200 } 85 201 202 /** Unmask event notifications 203 * 204 * @param evno Event type to unmask. 205 * 206 */ 207 static void event_unmask(event_type_t evno) 208 { 209 ASSERT(evno < EVENT_END); 210 211 spinlock_lock(&events[evno].lock); 212 events[evno].masked = false; 213 event_callback_t callback = events[evno].unmask_callback; 214 spinlock_unlock(&events[evno].lock); 215 216 /* 217 * Check if there is an unmask callback 218 * function defined for this event. 219 */ 220 if (callback != NULL) 221 callback(); 222 } 223 224 /** Event notification syscall wrapper 225 * 226 * @param evno Event type to subscribe. 227 * @param imethod IPC interface and method to be used for 228 * the notifications. 229 * 230 * @return EOK on success. 231 * @return ELIMIT on unknown event type. 232 * @return EEXISTS if the notifications of the given type are 233 * already subscribed. 234 * 235 */ 86 236 sysarg_t sys_event_subscribe(sysarg_t evno, sysarg_t imethod) 87 237 { 238 if (evno >= EVENT_END) 239 return ELIMIT; 240 88 241 return (sysarg_t) event_subscribe((event_type_t) evno, (sysarg_t) 89 242 imethod, &TASK->answerbox); 90 243 } 91 244 92 bool event_is_subscribed(event_type_t evno) 93 { 94 bool res; 95 96 ASSERT(evno < EVENT_END); 97 98 spinlock_lock(&events[evno].lock); 99 res = events[evno].answerbox != NULL; 100 spinlock_unlock(&events[evno].lock); 101 102 return res; 103 } 104 105 106 void event_cleanup_answerbox(answerbox_t *answerbox) 107 { 108 unsigned int i; 109 110 for (i = 0; i < EVENT_END; i++) { 111 spinlock_lock(&events[i].lock); 112 if (events[i].answerbox == answerbox) { 113 events[i].answerbox = NULL; 114 events[i].counter = 0; 115 events[i].imethod = 0; 116 } 117 spinlock_unlock(&events[i].lock); 118 } 119 } 120 121 void event_notify(event_type_t evno, sysarg_t a1, sysarg_t a2, sysarg_t a3, 122 sysarg_t a4, sysarg_t a5) 123 { 124 ASSERT(evno < EVENT_END); 125 126 spinlock_lock(&events[evno].lock); 127 if (events[evno].answerbox != NULL) { 128 call_t *call = ipc_call_alloc(FRAME_ATOMIC); 129 if (call) { 130 call->flags |= IPC_CALL_NOTIF; 131 call->priv = ++events[evno].counter; 132 IPC_SET_IMETHOD(call->data, events[evno].imethod); 133 IPC_SET_ARG1(call->data, a1); 134 IPC_SET_ARG2(call->data, a2); 135 IPC_SET_ARG3(call->data, a3); 136 IPC_SET_ARG4(call->data, a4); 137 IPC_SET_ARG5(call->data, a5); 138 139 irq_spinlock_lock(&events[evno].answerbox->irq_lock, true); 140 list_append(&call->link, &events[evno].answerbox->irq_notifs); 141 irq_spinlock_unlock(&events[evno].answerbox->irq_lock, true); 142 143 waitq_wakeup(&events[evno].answerbox->wq, WAKEUP_FIRST); 144 } 145 } 146 spinlock_unlock(&events[evno].lock); 245 /** Event notification unmask syscall wrapper 246 * 247 * Note that currently no tests are performed whether the calling 248 * task is entitled to unmask the notifications. However, thanks 249 * to the fact that notification masking is only a performance 250 * optimization, this has probably no security implications. 
251 * 252 * @param evno Event type to unmask. 253 * 254 * @return EOK on success. 255 * @return ELIMIT on unknown event type. 256 * 257 */ 258 sysarg_t sys_event_unmask(sysarg_t evno) 259 { 260 if (evno >= EVENT_END) 261 return ELIMIT; 262 263 event_unmask((event_type_t) evno); 264 return EOK; 147 265 } 148 266 -
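The rewritten ipc/event.c introduces notification masking: event_notify() takes a mask flag and returns EOK, ENOMEM, EBUSY or ENOENT, sys_event_unmask() clears the mask, and a per-event callback registered with event_set_unmask_callback() is run on unmask (console.c registers klog_update() for EVENT_KLOG). The snippet below is only a toy, standalone model of that handshake with illustrative names; it is not the kernel code.

```c
/* Toy model of the mask/unmask handshake added in ipc/event.c. */
#include <stdbool.h>
#include <stdio.h>

#define EOK    0
#define EBUSY  (-1)

typedef void (*event_callback_t)(void);

typedef struct {
	bool masked;
	event_callback_t unmask_callback;
} event_t;

static event_t ev;

/* Producer side: send a notification, optionally masking further ones. */
static int notify(event_t *e, bool mask)
{
	if (e->masked)
		return EBUSY;   /* consumer has not caught up yet */

	printf("notification delivered\n");
	if (mask)
		e->masked = true;

	return EOK;
}

/* Consumer side: after handling the notification, unmask the event. */
static void unmask(event_t *e)
{
	e->masked = false;
	if (e->unmask_callback)
		e->unmask_callback();   /* e.g. klog_update() notifies again */
}

static void resend(void)
{
	(void) notify(&ev, true);
}

int main(void)
{
	ev.unmask_callback = resend;

	notify(&ev, true);   /* delivered; event becomes masked */
	notify(&ev, true);   /* EBUSY while masked */
	unmask(&ev);         /* callback immediately delivers the pending update */

	return 0;
}
```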
kernel/generic/src/lib/elf.c
r18ba2e4f → r544a2e4
      }

-     /* Inspect all section headers and proccess them. */
+     /* Inspect all section headers and process them. */
      for (i = 0; i < header->e_shnum; i++) {
          elf_section_header_t *sechdr =
kernel/generic/src/lib/memfnc.c
r18ba2e4f → r544a2e4
  void *memset(void *dst, int val, size_t cnt)
  {
-     size_t i;
-     uint8_t *ptr = (uint8_t *) dst;
+     uint8_t *dp = (uint8_t *) dst;

-     for (i = 0; i < cnt; i++)
-         ptr[i] = val;
+     while (cnt-- != 0)
+         *dp++ = val;

      return dst;
…
      while (cnt-- != 0)
-         *dp++ = *sp++;
+         *dp++ = *sp++;

      return dst;
(the second hunk, in memcpy(), is a whitespace-only change)
kernel/generic/src/lib/str.c
r18ba2e4f r544a2e4 922 922 void order_suffix(const uint64_t val, uint64_t *rv, char *suffix) 923 923 { 924 if (val > 10000000000000000000ULL) {925 *rv = val / 1000000000000000000ULL;924 if (val > UINT64_C(10000000000000000000)) { 925 *rv = val / UINT64_C(1000000000000000000); 926 926 *suffix = 'Z'; 927 } else if (val > 1000000000000000000ULL) {928 *rv = val / 1000000000000000ULL;927 } else if (val > UINT64_C(1000000000000000000)) { 928 *rv = val / UINT64_C(1000000000000000); 929 929 *suffix = 'E'; 930 } else if (val > 1000000000000000ULL) {931 *rv = val / 1000000000000ULL;930 } else if (val > UINT64_C(1000000000000000)) { 931 *rv = val / UINT64_C(1000000000000); 932 932 *suffix = 'T'; 933 } else if (val > 1000000000000ULL) {934 *rv = val / 1000000000ULL;933 } else if (val > UINT64_C(1000000000000)) { 934 *rv = val / UINT64_C(1000000000); 935 935 *suffix = 'G'; 936 } else if (val > 1000000000ULL) {937 *rv = val / 1000000ULL;936 } else if (val > UINT64_C(1000000000)) { 937 *rv = val / UINT64_C(1000000); 938 938 *suffix = 'M'; 939 } else if (val > 1000000ULL) {940 *rv = val / 1000ULL;939 } else if (val > UINT64_C(1000000)) { 940 *rv = val / UINT64_C(1000); 941 941 *suffix = 'k'; 942 942 } else { … … 946 946 } 947 947 948 void bin_order_suffix(const uint64_t val, uint64_t *rv, const char **suffix, 949 bool fixed) 950 { 951 if (val > UINT64_C(1152921504606846976)) { 952 *rv = val / UINT64_C(1125899906842624); 953 *suffix = "EiB"; 954 } else if (val > UINT64_C(1125899906842624)) { 955 *rv = val / UINT64_C(1099511627776); 956 *suffix = "TiB"; 957 } else if (val > UINT64_C(1099511627776)) { 958 *rv = val / UINT64_C(1073741824); 959 *suffix = "GiB"; 960 } else if (val > UINT64_C(1073741824)) { 961 *rv = val / UINT64_C(1048576); 962 *suffix = "MiB"; 963 } else if (val > UINT64_C(1048576)) { 964 *rv = val / UINT64_C(1024); 965 *suffix = "KiB"; 966 } else { 967 *rv = val; 968 if (fixed) 969 *suffix = "B "; 970 else 971 *suffix = "B"; 972 } 973 } 974 948 975 /** @} 949 976 */ -
kernel/generic/src/main/main.c
r18ba2e4f r544a2e4 71 71 #include <mm/as.h> 72 72 #include <mm/slab.h> 73 #include <mm/reserve.h> 73 74 #include <synch/waitq.h> 74 75 #include <synch/futex.h> … … 117 118 #endif 118 119 119 #define CONFIG_STACK_SIZE ((1 << STACK_FRAMES) * STACK_SIZE)120 121 120 /** Main kernel routine for bootstrap CPU. 122 121 * … … 138 137 config.kernel_size = ALIGN_UP(hardcoded_ktext_size + 139 138 hardcoded_kdata_size, PAGE_SIZE); 140 config.stack_size = CONFIG_STACK_SIZE;139 config.stack_size = STACK_SIZE; 141 140 142 141 /* Initialy the stack is placed just after the kernel */ … … 164 163 165 164 context_save(&ctx); 166 context_set(&ctx, FADDR(main_bsp_separated_stack), config.stack_base,167 THREAD_STACK_SIZE);165 context_set(&ctx, FADDR(main_bsp_separated_stack), 166 config.stack_base, STACK_SIZE); 168 167 context_restore(&ctx); 169 168 /* not reached */ … … 217 216 ddi_init(); 218 217 arch_post_mm_init(); 218 reserve_init(); 219 219 arch_pre_smp_init(); 220 220 smp_init(); … … 223 223 slab_enable_cpucache(); 224 224 225 printf("Detected %u CPU(s), %" PRIu64 " MiB free memory\n", 226 config.cpu_count, SIZE2MB(zones_total_size())); 225 uint64_t size; 226 const char *size_suffix; 227 bin_order_suffix(zones_total_size(), &size, &size_suffix, false); 228 printf("Detected %u CPU(s), %" PRIu64 " %s free memory\n", 229 config.cpu_count, size, size_suffix); 227 230 228 231 cpu_init(); … … 318 321 context_save(&CPU->saved_context); 319 322 context_set(&CPU->saved_context, FADDR(main_ap_separated_stack), 320 (uintptr_t) CPU->stack, CPU_STACK_SIZE);323 (uintptr_t) CPU->stack, STACK_SIZE); 321 324 context_restore(&CPU->saved_context); 322 325 /* not reached */ -
kernel/generic/src/main/uinit.c
r18ba2e4f r544a2e4 33 33 /** 34 34 * @file 35 * @brief Userspace bootstrap thread.35 * @brief Userspace bootstrap thread. 36 36 * 37 37 * This file contains uinit kernel thread wich is used to start every … … 40 40 * @see SYS_THREAD_CREATE 41 41 */ 42 42 43 43 #include <main/uinit.h> 44 44 #include <typedefs.h> … … 48 48 #include <arch.h> 49 49 #include <udebug/udebug.h> 50 51 50 52 51 /** Thread used to bring up userspace thread. … … 58 57 { 59 58 uspace_arg_t uarg; 60 59 61 60 /* 62 61 * So far, we don't have a use for joining userspace threads so we … … 68 67 */ 69 68 thread_detach(THREAD); 70 69 71 70 #ifdef CONFIG_UDEBUG 72 71 udebug_stoppable_end(); … … 78 77 uarg.uspace_thread_function = NULL; 79 78 uarg.uspace_thread_arg = NULL; 80 79 81 80 free((uspace_arg_t *) arg); 82 81 -
kernel/generic/src/mm/as.c
r18ba2e4f r544a2e4 80 80 #include <arch/interrupt.h> 81 81 82 #ifdef CONFIG_VIRT_IDX_DCACHE83 #include <arch/mm/cache.h>84 #endif /* CONFIG_VIRT_IDX_DCACHE */85 86 82 /** 87 83 * Each architecture decides what functions will be used to carry out … … 306 302 * We don't want any area to have conflicts with NULL page. 307 303 */ 308 if (overlaps(addr, count << PAGE_WIDTH, (uintptr_t) NULL, PAGE_SIZE))304 if (overlaps(addr, P2SZ(count), (uintptr_t) NULL, PAGE_SIZE)) 309 305 return false; 310 306 … … 333 329 mutex_lock(&area->lock); 334 330 335 if (overlaps(addr, count << PAGE_WIDTH,336 area->base, area->pages << PAGE_WIDTH)) {331 if (overlaps(addr, P2SZ(count), area->base, 332 P2SZ(area->pages))) { 337 333 mutex_unlock(&area->lock); 338 334 return false; … … 350 346 mutex_lock(&area->lock); 351 347 352 if (overlaps(addr, count << PAGE_WIDTH,353 area->base, area->pages << PAGE_WIDTH)) {348 if (overlaps(addr, P2SZ(count), area->base, 349 P2SZ(area->pages))) { 354 350 mutex_unlock(&area->lock); 355 351 return false; … … 370 366 mutex_lock(&area->lock); 371 367 372 if (overlaps(addr, count << PAGE_WIDTH,373 area->base, area->pages << PAGE_WIDTH)) {368 if (overlaps(addr, P2SZ(count), area->base, 369 P2SZ(area->pages))) { 374 370 mutex_unlock(&area->lock); 375 371 return false; … … 384 380 */ 385 381 if (!KERNEL_ADDRESS_SPACE_SHADOWED) { 386 return !overlaps(addr, count << PAGE_WIDTH, 387 KERNEL_ADDRESS_SPACE_START, 382 return !overlaps(addr, P2SZ(count), KERNEL_ADDRESS_SPACE_START, 388 383 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START); 389 384 } … … 448 443 memsetb(&area->backend_data, sizeof(area->backend_data), 0); 449 444 445 if (area->backend && area->backend->create) { 446 if (!area->backend->create(area)) { 447 free(area); 448 mutex_unlock(&as->lock); 449 return NULL; 450 } 451 } 452 450 453 btree_create(&area->used_space); 451 454 btree_insert(&as->as_area_btree, base, (void *) area, NULL); … … 470 473 471 474 btree_node_t *leaf; 472 as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); 475 as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va, 476 &leaf); 473 477 if (area) { 474 478 /* va is the base address of an address space area */ … … 478 482 479 483 /* 480 * Search the leaf node and the righ most record of its left neighbour484 * Search the leaf node and the rightmost record of its left neighbour 481 485 * to find out whether this is a miss or va belongs to an address 482 486 * space area found there. … … 490 494 491 495 mutex_lock(&area->lock); 492 496 493 497 if ((area->base <= va) && 494 (va < area->base + (area->pages << PAGE_WIDTH)))498 (va <= area->base + (P2SZ(area->pages) - 1))) 495 499 return area; 496 500 … … 502 506 * Because of its position in the B+tree, it must have base < va. 
503 507 */ 504 btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 508 btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, 509 leaf); 505 510 if (lnode) { 506 511 area = (as_area_t *) lnode->value[lnode->keys - 1]; … … 508 513 mutex_lock(&area->lock); 509 514 510 if (va < area->base + (area->pages << PAGE_WIDTH))515 if (va <= area->base + (P2SZ(area->pages) - 1)) 511 516 return area; 512 517 … … 573 578 574 579 if (pages < area->pages) { 575 uintptr_t start_free = area->base + (pages << PAGE_WIDTH);580 uintptr_t start_free = area->base + P2SZ(pages); 576 581 577 582 /* … … 586 591 */ 587 592 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, 588 area->base + (pages << PAGE_WIDTH), area->pages - pages);593 area->base + P2SZ(pages), area->pages - pages); 589 594 590 595 /* … … 609 614 size_t i = 0; 610 615 611 if (overlaps(ptr, size << PAGE_WIDTH, area->base,612 pages << PAGE_WIDTH)) {616 if (overlaps(ptr, P2SZ(size), area->base, 617 P2SZ(pages))) { 613 618 614 if (ptr + (size << PAGE_WIDTH) <= start_free) {619 if (ptr + P2SZ(size) <= start_free) { 615 620 /* 616 621 * The whole interval fits … … 643 648 644 649 for (; i < size; i++) { 645 pte_t *pte = page_mapping_find(as, ptr +646 (i << PAGE_WIDTH));650 pte_t *pte = page_mapping_find(as, 651 ptr + P2SZ(i), false); 647 652 648 653 ASSERT(pte); … … 653 658 (area->backend->frame_free)) { 654 659 area->backend->frame_free(area, 655 ptr + (i << PAGE_WIDTH),660 ptr + P2SZ(i), 656 661 PTE_GET_FRAME(pte)); 657 662 } 658 663 659 page_mapping_remove(as, ptr + 660 (i << PAGE_WIDTH)); 664 page_mapping_remove(as, ptr + P2SZ(i)); 661 665 } 662 666 } … … 667 671 */ 668 672 669 tlb_invalidate_pages(as->asid, area->base + (pages << PAGE_WIDTH),673 tlb_invalidate_pages(as->asid, area->base + P2SZ(pages), 670 674 area->pages - pages); 671 675 672 676 /* 673 * Invalidate software translation caches (e.g. TSB on sparc64). 674 */ 675 as_invalidate_translation_cache(as, area->base + 676 (pages << PAGE_WIDTH), area->pages - pages); 677 * Invalidate software translation caches 678 * (e.g. TSB on sparc64, PHT on ppc32). 679 */ 680 as_invalidate_translation_cache(as, area->base + P2SZ(pages), 681 area->pages - pages); 677 682 tlb_shootdown_finalize(ipl); 678 683 … … 687 692 mutex_unlock(&as->lock); 688 693 return EADDRNOTAVAIL; 694 } 695 } 696 697 if (area->backend && area->backend->resize) { 698 if (!area->backend->resize(area, pages)) { 699 mutex_unlock(&area->lock); 700 mutex_unlock(&as->lock); 701 return ENOMEM; 689 702 } 690 703 } … … 756 769 return ENOENT; 757 770 } 771 772 if (area->backend && area->backend->destroy) 773 area->backend->destroy(area); 758 774 759 775 uintptr_t base = area->base; … … 782 798 783 799 for (size = 0; size < (size_t) node->value[i]; size++) { 784 pte_t *pte = 785 page_mapping_find(as, ptr + (size << PAGE_WIDTH));800 pte_t *pte = page_mapping_find(as, 801 ptr + P2SZ(size), false); 786 802 787 803 ASSERT(pte); … … 792 808 (area->backend->frame_free)) { 793 809 area->backend->frame_free(area, 794 ptr + (size << PAGE_WIDTH), PTE_GET_FRAME(pte)); 810 ptr + P2SZ(size), 811 PTE_GET_FRAME(pte)); 795 812 } 796 813 797 page_mapping_remove(as, ptr + (size << PAGE_WIDTH));814 page_mapping_remove(as, ptr + P2SZ(size)); 798 815 } 799 816 } … … 807 824 808 825 /* 809 * Invalidate potential software translation caches (e.g. TSB on810 * sparc64).826 * Invalidate potential software translation caches 827 * (e.g. TSB on sparc64, PHT on ppc32). 
811 828 */ 812 829 as_invalidate_translation_cache(as, area->base, area->pages); … … 882 899 } 883 900 884 size_t src_size = src_area->pages << PAGE_WIDTH;901 size_t src_size = P2SZ(src_area->pages); 885 902 unsigned int src_flags = src_area->flags; 886 903 mem_backend_t *src_backend = src_area->backend; … … 1079 1096 for (cur = area->used_space.leaf_head.next; 1080 1097 cur != &area->used_space.leaf_head; cur = cur->next) { 1081 btree_node_t *node 1082 = list_get_instance(cur, btree_node_t,leaf_link);1098 btree_node_t *node = list_get_instance(cur, btree_node_t, 1099 leaf_link); 1083 1100 btree_key_t i; 1084 1101 … … 1088 1105 1089 1106 for (size = 0; size < (size_t) node->value[i]; size++) { 1090 pte_t *pte = 1091 p age_mapping_find(as, ptr + (size << PAGE_WIDTH));1107 pte_t *pte = page_mapping_find(as, 1108 ptr + P2SZ(size), false); 1092 1109 1093 1110 ASSERT(pte); … … 1098 1115 1099 1116 /* Remove old mapping */ 1100 page_mapping_remove(as, ptr + (size << PAGE_WIDTH));1117 page_mapping_remove(as, ptr + P2SZ(size)); 1101 1118 } 1102 1119 } … … 1110 1127 1111 1128 /* 1112 * Invalidate potential software translation caches (e.g. TSB on1113 * sparc64).1129 * Invalidate potential software translation caches 1130 * (e.g. TSB on sparc64, PHT on ppc32). 1114 1131 */ 1115 1132 as_invalidate_translation_cache(as, area->base, area->pages); … … 1144 1161 1145 1162 /* Insert the new mapping */ 1146 page_mapping_insert(as, ptr + (size << PAGE_WIDTH),1163 page_mapping_insert(as, ptr + P2SZ(size), 1147 1164 old_frame[frame_idx++], page_flags); 1148 1165 … … 1225 1242 */ 1226 1243 pte_t *pte; 1227 if ((pte = page_mapping_find(AS, page ))) {1244 if ((pte = page_mapping_find(AS, page, false))) { 1228 1245 if (PTE_PRESENT(pte)) { 1229 1246 if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) || … … 1466 1483 1467 1484 if (src_area) { 1468 size = src_area->pages << PAGE_WIDTH;1485 size = P2SZ(src_area->pages); 1469 1486 mutex_unlock(&src_area->lock); 1470 1487 } else … … 1521 1538 if (page >= right_pg) { 1522 1539 /* Do nothing. */ 1523 } else if (overlaps(page, count << PAGE_WIDTH, left_pg,1524 left_cnt << PAGE_WIDTH)) {1540 } else if (overlaps(page, P2SZ(count), left_pg, 1541 P2SZ(left_cnt))) { 1525 1542 /* The interval intersects with the left interval. */ 1526 1543 return false; 1527 } else if (overlaps(page, count << PAGE_WIDTH, right_pg,1528 right_cnt << PAGE_WIDTH)) {1544 } else if (overlaps(page, P2SZ(count), right_pg, 1545 P2SZ(right_cnt))) { 1529 1546 /* The interval intersects with the right interval. 
*/ 1530 1547 return false; 1531 } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&1532 (page + (count << PAGE_WIDTH) == right_pg)) {1548 } else if ((page == left_pg + P2SZ(left_cnt)) && 1549 (page + P2SZ(count) == right_pg)) { 1533 1550 /* 1534 1551 * The interval can be added by merging the two already … … 1538 1555 btree_remove(&area->used_space, right_pg, leaf); 1539 1556 goto success; 1540 } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {1557 } else if (page == left_pg + P2SZ(left_cnt)) { 1541 1558 /* 1542 1559 * The interval can be added by simply growing the left … … 1545 1562 node->value[node->keys - 1] += count; 1546 1563 goto success; 1547 } else if (page + (count << PAGE_WIDTH) == right_pg) {1564 } else if (page + P2SZ(count) == right_pg) { 1548 1565 /* 1549 1566 * The interval can be addded by simply moving base of … … 1572 1589 */ 1573 1590 1574 if (overlaps(page, count << PAGE_WIDTH, right_pg, 1575 right_cnt << PAGE_WIDTH)) { 1591 if (overlaps(page, P2SZ(count), right_pg, P2SZ(right_cnt))) { 1576 1592 /* The interval intersects with the right interval. */ 1577 1593 return false; 1578 } else if (page + (count << PAGE_WIDTH) == right_pg) {1594 } else if (page + P2SZ(count) == right_pg) { 1579 1595 /* 1580 1596 * The interval can be added by moving the base of the … … 1611 1627 if (page < left_pg) { 1612 1628 /* Do nothing. */ 1613 } else if (overlaps(page, count << PAGE_WIDTH, left_pg,1614 left_cnt << PAGE_WIDTH)) {1629 } else if (overlaps(page, P2SZ(count), left_pg, 1630 P2SZ(left_cnt))) { 1615 1631 /* The interval intersects with the left interval. */ 1616 1632 return false; 1617 } else if (overlaps(page, count << PAGE_WIDTH, right_pg,1618 right_cnt << PAGE_WIDTH)) {1633 } else if (overlaps(page, P2SZ(count), right_pg, 1634 P2SZ(right_cnt))) { 1619 1635 /* The interval intersects with the right interval. */ 1620 1636 return false; 1621 } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&1622 (page + (count << PAGE_WIDTH) == right_pg)) {1637 } else if ((page == left_pg + P2SZ(left_cnt)) && 1638 (page + P2SZ(count) == right_pg)) { 1623 1639 /* 1624 1640 * The interval can be added by merging the two already … … 1628 1644 btree_remove(&area->used_space, right_pg, node); 1629 1645 goto success; 1630 } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {1646 } else if (page == left_pg + P2SZ(left_cnt)) { 1631 1647 /* 1632 1648 * The interval can be added by simply growing the left … … 1635 1651 leaf->value[leaf->keys - 1] += count; 1636 1652 goto success; 1637 } else if (page + (count << PAGE_WIDTH) == right_pg) {1653 } else if (page + P2SZ(count) == right_pg) { 1638 1654 /* 1639 1655 * The interval can be addded by simply moving base of … … 1662 1678 */ 1663 1679 1664 if (overlaps(page, count << PAGE_WIDTH, left_pg, 1665 left_cnt << PAGE_WIDTH)) { 1680 if (overlaps(page, P2SZ(count), left_pg, P2SZ(left_cnt))) { 1666 1681 /* The interval intersects with the left interval. 
*/ 1667 1682 return false; 1668 } else if (left_pg + (left_cnt << PAGE_WIDTH) == page) {1683 } else if (left_pg + P2SZ(left_cnt) == page) { 1669 1684 /* 1670 1685 * The interval can be added by growing the left … … 1701 1716 */ 1702 1717 1703 if (overlaps(page, count << PAGE_WIDTH, left_pg,1704 left_cnt << PAGE_WIDTH)) {1718 if (overlaps(page, P2SZ(count), left_pg, 1719 P2SZ(left_cnt))) { 1705 1720 /* 1706 1721 * The interval intersects with the left … … 1708 1723 */ 1709 1724 return false; 1710 } else if (overlaps(page, count << PAGE_WIDTH, right_pg,1711 right_cnt << PAGE_WIDTH)) {1725 } else if (overlaps(page, P2SZ(count), right_pg, 1726 P2SZ(right_cnt))) { 1712 1727 /* 1713 1728 * The interval intersects with the right … … 1715 1730 */ 1716 1731 return false; 1717 } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&1718 (page + (count << PAGE_WIDTH) == right_pg)) {1732 } else if ((page == left_pg + P2SZ(left_cnt)) && 1733 (page + P2SZ(count) == right_pg)) { 1719 1734 /* 1720 1735 * The interval can be added by merging the two … … 1724 1739 btree_remove(&area->used_space, right_pg, leaf); 1725 1740 goto success; 1726 } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {1741 } else if (page == left_pg + P2SZ(left_cnt)) { 1727 1742 /* 1728 1743 * The interval can be added by simply growing … … 1731 1746 leaf->value[i - 1] += count; 1732 1747 goto success; 1733 } else if (page + (count << PAGE_WIDTH) == right_pg) {1748 } else if (page + P2SZ(count) == right_pg) { 1734 1749 /* 1735 1750 * The interval can be addded by simply moving … … 1797 1812 for (i = 0; i < leaf->keys; i++) { 1798 1813 if (leaf->key[i] == page) { 1799 leaf->key[i] += count << PAGE_WIDTH;1814 leaf->key[i] += P2SZ(count); 1800 1815 leaf->value[i] -= count; 1801 1816 goto success; … … 1807 1822 } 1808 1823 1809 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf); 1824 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, 1825 leaf); 1810 1826 if ((node) && (page < leaf->key[0])) { 1811 1827 uintptr_t left_pg = node->key[node->keys - 1]; 1812 1828 size_t left_cnt = (size_t) node->value[node->keys - 1]; 1813 1829 1814 if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page, 1815 count << PAGE_WIDTH)) { 1816 if (page + (count << PAGE_WIDTH) == 1817 left_pg + (left_cnt << PAGE_WIDTH)) { 1830 if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) { 1831 if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) { 1818 1832 /* 1819 1833 * The interval is contained in the rightmost … … 1824 1838 node->value[node->keys - 1] -= count; 1825 1839 goto success; 1826 } else if (page + (count << PAGE_WIDTH) < 1827 left_pg + (left_cnt << PAGE_WIDTH)) { 1840 } else if (page + P2SZ(count) < 1841 left_pg + P2SZ(left_cnt)) { 1842 size_t new_cnt; 1843 1828 1844 /* 1829 1845 * The interval is contained in the rightmost … … 1833 1849 * new interval. 
1834 1850 */ 1835 size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -1836 (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;1851 new_cnt = ((left_pg + P2SZ(left_cnt)) - 1852 (page + P2SZ(count))) >> PAGE_WIDTH; 1837 1853 node->value[node->keys - 1] -= count + new_cnt; 1838 1854 btree_insert(&area->used_space, page + 1839 (count << PAGE_WIDTH), (void *) new_cnt, leaf);1855 P2SZ(count), (void *) new_cnt, leaf); 1840 1856 goto success; 1841 1857 } … … 1850 1866 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 1851 1867 1852 if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page, 1853 count << PAGE_WIDTH)) { 1854 if (page + (count << PAGE_WIDTH) == 1855 left_pg + (left_cnt << PAGE_WIDTH)) { 1868 if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) { 1869 if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) { 1856 1870 /* 1857 1871 * The interval is contained in the rightmost … … 1861 1875 leaf->value[leaf->keys - 1] -= count; 1862 1876 goto success; 1863 } else if (page + (count << PAGE_WIDTH) < left_pg + 1864 (left_cnt << PAGE_WIDTH)) { 1877 } else if (page + P2SZ(count) < left_pg + 1878 P2SZ(left_cnt)) { 1879 size_t new_cnt; 1880 1865 1881 /* 1866 1882 * The interval is contained in the rightmost … … 1870 1886 * interval. 1871 1887 */ 1872 size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -1873 (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;1888 new_cnt = ((left_pg + P2SZ(left_cnt)) - 1889 (page + P2SZ(count))) >> PAGE_WIDTH; 1874 1890 leaf->value[leaf->keys - 1] -= count + new_cnt; 1875 1891 btree_insert(&area->used_space, page + 1876 (count << PAGE_WIDTH), (void *) new_cnt, leaf);1892 P2SZ(count), (void *) new_cnt, leaf); 1877 1893 goto success; 1878 1894 } … … 1896 1912 * to (i - 1) and i. 1897 1913 */ 1898 if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,1899 count << PAGE_WIDTH)) {1900 if (page + (count << PAGE_WIDTH) ==1901 left_pg + (left_cnt << PAGE_WIDTH)) {1914 if (overlaps(left_pg, P2SZ(left_cnt), page, 1915 P2SZ(count))) { 1916 if (page + P2SZ(count) == 1917 left_pg + P2SZ(left_cnt)) { 1902 1918 /* 1903 1919 * The interval is contained in the … … 1908 1924 leaf->value[i - 1] -= count; 1909 1925 goto success; 1910 } else if (page + (count << PAGE_WIDTH) < 1911 left_pg + (left_cnt << PAGE_WIDTH)) { 1926 } else if (page + P2SZ(count) < 1927 left_pg + P2SZ(left_cnt)) { 1928 size_t new_cnt; 1929 1912 1930 /* 1913 1931 * The interval is contained in the … … 1917 1935 * also inserting a new interval. 
1918 1936 */ 1919 size_t new_cnt = ((left_pg + 1920 (left_cnt << PAGE_WIDTH)) - 1921 (page + (count << PAGE_WIDTH))) >> 1937 new_cnt = ((left_pg + P2SZ(left_cnt)) - 1938 (page + P2SZ(count))) >> 1922 1939 PAGE_WIDTH; 1923 1940 leaf->value[i - 1] -= count + new_cnt; 1924 1941 btree_insert(&area->used_space, page + 1925 (count << PAGE_WIDTH), (void *) new_cnt,1942 P2SZ(count), (void *) new_cnt, 1926 1943 leaf); 1927 1944 goto success; … … 2019 2036 btree_key_t i; 2020 2037 for (i = 0; (ret == 0) && (i < node->keys); i++) { 2038 uintptr_t addr; 2039 2021 2040 as_area_t *area = (as_area_t *) node->value[i]; 2022 2041 2023 2042 mutex_lock(&area->lock); 2024 2043 2025 uintptr_t addr = 2026 ALIGN_UP(area->base + (area->pages << PAGE_WIDTH), 2044 addr = ALIGN_UP(area->base + P2SZ(area->pages), 2027 2045 PAGE_SIZE); 2028 2046 … … 2083 2101 2084 2102 info[area_idx].start_addr = area->base; 2085 info[area_idx].size = FRAMES2SIZE(area->pages);2103 info[area_idx].size = P2SZ(area->pages); 2086 2104 info[area_idx].flags = area->flags; 2087 2105 ++area_idx; … … 2121 2139 " (%p - %p)\n", area, (void *) area->base, 2122 2140 area->pages, (void *) area->base, 2123 (void *) (area->base + FRAMES2SIZE(area->pages)));2141 (void *) (area->base + P2SZ(area->pages))); 2124 2142 mutex_unlock(&area->lock); 2125 2143 } -
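Most of the mm/as.c hunks are a mechanical substitution: open-coded `pages << PAGE_WIDTH` conversions become P2SZ(). The macro's definition is not part of the hunks shown here; judging from the substitutions, it presumably just turns a page count into a byte size, roughly:

```c
/* Assumed definition, inferred from the substitutions above; not shown in this changeset. */
#define P2SZ(pages) \
	((pages) << PAGE_WIDTH)
```

The end-of-area checks also switch from `va < base + size` to `va <= base + (P2SZ(pages) - 1)`, presumably to avoid wrap-around for areas ending at the very top of the address space.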
kernel/generic/src/mm/backend_anon.c
r18ba2e4f r544a2e4 39 39 #include <mm/as.h> 40 40 #include <mm/page.h> 41 #include <mm/reserve.h> 41 42 #include <genarch/mm/page_pt.h> 42 43 #include <genarch/mm/page_ht.h> … … 49 50 #include <typedefs.h> 50 51 #include <align.h> 52 #include <memstr.h> 51 53 #include <arch.h> 52 54 53 #ifdef CONFIG_VIRT_IDX_DCACHE 54 #include <arch/mm/cache.h> 55 #endif 56 57 static int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access); 58 static void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);59 static void anon_ share(as_area_t *area);55 static bool anon_create(as_area_t *); 56 static bool anon_resize(as_area_t *, size_t); 57 static void anon_share(as_area_t *); 58 static void anon_destroy(as_area_t *); 59 60 static int anon_page_fault(as_area_t *, uintptr_t, pf_access_t); 61 static void anon_frame_free(as_area_t *, uintptr_t, uintptr_t); 60 62 61 63 mem_backend_t anon_backend = { 64 .create = anon_create, 65 .resize = anon_resize, 66 .share = anon_share, 67 .destroy = anon_destroy, 68 62 69 .page_fault = anon_page_fault, 63 70 .frame_free = anon_frame_free, 64 .share = anon_share65 71 }; 72 73 bool anon_create(as_area_t *area) 74 { 75 return reserve_try_alloc(area->pages); 76 } 77 78 bool anon_resize(as_area_t *area, size_t new_pages) 79 { 80 if (new_pages > area->pages) 81 return reserve_try_alloc(new_pages - area->pages); 82 else if (new_pages < area->pages) 83 reserve_free(area->pages - new_pages); 84 85 return true; 86 } 87 88 /** Share the anonymous address space area. 89 * 90 * Sharing of anonymous area is done by duplicating its entire mapping 91 * to the pagemap. Page faults will primarily search for frames there. 92 * 93 * The address space and address space area must be already locked. 94 * 95 * @param area Address space area to be shared. 96 */ 97 void anon_share(as_area_t *area) 98 { 99 link_t *cur; 100 101 ASSERT(mutex_locked(&area->as->lock)); 102 ASSERT(mutex_locked(&area->lock)); 103 104 /* 105 * Copy used portions of the area to sh_info's page map. 106 */ 107 mutex_lock(&area->sh_info->lock); 108 for (cur = area->used_space.leaf_head.next; 109 cur != &area->used_space.leaf_head; cur = cur->next) { 110 btree_node_t *node; 111 unsigned int i; 112 113 node = list_get_instance(cur, btree_node_t, leaf_link); 114 for (i = 0; i < node->keys; i++) { 115 uintptr_t base = node->key[i]; 116 size_t count = (size_t) node->value[i]; 117 unsigned int j; 118 119 for (j = 0; j < count; j++) { 120 pte_t *pte; 121 122 page_table_lock(area->as, false); 123 pte = page_mapping_find(area->as, 124 base + P2SZ(j), false); 125 ASSERT(pte && PTE_VALID(pte) && 126 PTE_PRESENT(pte)); 127 btree_insert(&area->sh_info->pagemap, 128 (base + P2SZ(j)) - area->base, 129 (void *) PTE_GET_FRAME(pte), NULL); 130 page_table_unlock(area->as, false); 131 132 pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte)); 133 frame_reference_add(pfn); 134 } 135 136 } 137 } 138 mutex_unlock(&area->sh_info->lock); 139 } 140 141 void anon_destroy(as_area_t *area) 142 { 143 reserve_free(area->pages); 144 } 145 66 146 67 147 /** Service a page fault in the anonymous memory address space area. 
… … 115 195 } 116 196 if (allocate) { 117 frame = (uintptr_t) frame_alloc(ONE_FRAME, 0); 197 frame = (uintptr_t) frame_alloc_noreserve( 198 ONE_FRAME, 0); 118 199 memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); 119 200 … … 145 226 * the different causes 146 227 */ 147 frame = (uintptr_t) frame_alloc (ONE_FRAME, 0);228 frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0); 148 229 memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); 149 230 } … … 174 255 ASSERT(mutex_locked(&area->lock)); 175 256 176 frame_free(frame); 177 } 178 179 /** Share the anonymous address space area. 180 * 181 * Sharing of anonymous area is done by duplicating its entire mapping 182 * to the pagemap. Page faults will primarily search for frames there. 183 * 184 * The address space and address space area must be already locked. 185 * 186 * @param area Address space area to be shared. 187 */ 188 void anon_share(as_area_t *area) 189 { 190 link_t *cur; 191 192 ASSERT(mutex_locked(&area->as->lock)); 193 ASSERT(mutex_locked(&area->lock)); 194 195 /* 196 * Copy used portions of the area to sh_info's page map. 197 */ 198 mutex_lock(&area->sh_info->lock); 199 for (cur = area->used_space.leaf_head.next; 200 cur != &area->used_space.leaf_head; cur = cur->next) { 201 btree_node_t *node; 202 unsigned int i; 203 204 node = list_get_instance(cur, btree_node_t, leaf_link); 205 for (i = 0; i < node->keys; i++) { 206 uintptr_t base = node->key[i]; 207 size_t count = (size_t) node->value[i]; 208 unsigned int j; 209 210 for (j = 0; j < count; j++) { 211 pte_t *pte; 212 213 page_table_lock(area->as, false); 214 pte = page_mapping_find(area->as, 215 base + j * PAGE_SIZE); 216 ASSERT(pte && PTE_VALID(pte) && 217 PTE_PRESENT(pte)); 218 btree_insert(&area->sh_info->pagemap, 219 (base + j * PAGE_SIZE) - area->base, 220 (void *) PTE_GET_FRAME(pte), NULL); 221 page_table_unlock(area->as, false); 222 223 pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte)); 224 frame_reference_add(pfn); 225 } 226 227 } 228 } 229 mutex_unlock(&area->sh_info->lock); 257 frame_free_noreserve(frame); 230 258 } 231 259 -
kernel/generic/src/mm/backend_elf.c
r18ba2e4f r544a2e4 43 43 #include <mm/slab.h> 44 44 #include <mm/page.h> 45 #include <mm/reserve.h> 45 46 #include <genarch/mm/page_pt.h> 46 47 #include <genarch/mm/page_ht.h> … … 51 52 #include <arch/barrier.h> 52 53 53 #ifdef CONFIG_VIRT_IDX_DCACHE 54 #include <arch/mm/cache.h> 55 #endif 56 57 static int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access); 58 static void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);59 static void elf_ share(as_area_t *area);54 static bool elf_create(as_area_t *); 55 static bool elf_resize(as_area_t *, size_t); 56 static void elf_share(as_area_t *); 57 static void elf_destroy(as_area_t *); 58 59 static int elf_page_fault(as_area_t *, uintptr_t, pf_access_t); 60 static void elf_frame_free(as_area_t *, uintptr_t, uintptr_t); 60 61 61 62 mem_backend_t elf_backend = { 63 .create = elf_create, 64 .resize = elf_resize, 65 .share = elf_share, 66 .destroy = elf_destroy, 67 62 68 .page_fault = elf_page_fault, 63 69 .frame_free = elf_frame_free, 64 .share = elf_share65 70 }; 66 71 67 /** Service a page fault in the ELF backend address space area. 68 * 69 * The address space area and page tables must be already locked. 70 * 71 * @param area Pointer to the address space area. 72 * @param addr Faulting virtual address. 73 * @param access Access mode that caused the fault (i.e. 74 * read/write/exec). 75 * 76 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK 77 * on success (i.e. serviced). 78 */ 79 int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access) 80 { 81 elf_header_t *elf = area->backend_data.elf; 72 static size_t elf_nonanon_pages_get(as_area_t *area) 73 { 82 74 elf_segment_header_t *entry = area->backend_data.segment; 83 btree_node_t *leaf; 84 uintptr_t base, frame, page, start_anon; 85 size_t i; 86 bool dirty = false; 87 88 ASSERT(page_table_locked(AS)); 89 ASSERT(mutex_locked(&area->lock)); 90 91 if (!as_area_check_access(area, access)) 92 return AS_PF_FAULT; 75 uintptr_t first = ALIGN_UP(entry->p_vaddr, PAGE_SIZE); 76 uintptr_t last = ALIGN_DOWN(entry->p_vaddr + entry->p_filesz, 77 PAGE_SIZE); 78 79 if (entry->p_flags & PF_W) 80 return 0; 81 82 if (last < first) 83 return 0; 84 85 return last - first; 86 } 87 88 bool elf_create(as_area_t *area) 89 { 90 size_t nonanon_pages = elf_nonanon_pages_get(area); 91 92 if (area->pages <= nonanon_pages) 93 return true; 93 94 94 if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) 95 return AS_PF_FAULT; 95 return reserve_try_alloc(area->pages - nonanon_pages); 96 } 97 98 bool elf_resize(as_area_t *area, size_t new_pages) 99 { 100 size_t nonanon_pages = elf_nonanon_pages_get(area); 101 102 if (new_pages > area->pages) { 103 /* The area is growing. */ 104 if (area->pages >= nonanon_pages) 105 return reserve_try_alloc(new_pages - area->pages); 106 else if (new_pages > nonanon_pages) 107 return reserve_try_alloc(new_pages - nonanon_pages); 108 } else if (new_pages < area->pages) { 109 /* The area is shrinking. 
*/ 110 if (new_pages >= nonanon_pages) 111 reserve_free(area->pages - new_pages); 112 else if (area->pages > nonanon_pages) 113 reserve_free(nonanon_pages - new_pages); 114 } 96 115 97 if (addr >= entry->p_vaddr + entry->p_memsz) 98 return AS_PF_FAULT; 99 100 i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH; 101 base = (uintptr_t) 102 (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE)); 103 104 /* Virtual address of faulting page*/ 105 page = ALIGN_DOWN(addr, PAGE_SIZE); 106 107 /* Virtual address of the end of initialized part of segment */ 108 start_anon = entry->p_vaddr + entry->p_filesz; 109 110 if (area->sh_info) { 111 bool found = false; 112 113 /* 114 * The address space area is shared. 115 */ 116 117 mutex_lock(&area->sh_info->lock); 118 frame = (uintptr_t) btree_search(&area->sh_info->pagemap, 119 page - area->base, &leaf); 120 if (!frame) { 121 unsigned int i; 122 123 /* 124 * Workaround for valid NULL address. 125 */ 126 127 for (i = 0; i < leaf->keys; i++) { 128 if (leaf->key[i] == page - area->base) { 129 found = true; 130 break; 131 } 132 } 133 } 134 if (frame || found) { 135 frame_reference_add(ADDR2PFN(frame)); 136 page_mapping_insert(AS, addr, frame, 137 as_area_get_flags(area)); 138 if (!used_space_insert(area, page, 1)) 139 panic("Cannot insert used space."); 140 mutex_unlock(&area->sh_info->lock); 141 return AS_PF_OK; 142 } 143 } 144 145 /* 146 * The area is either not shared or the pagemap does not contain the 147 * mapping. 148 */ 149 if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) { 150 /* 151 * Initialized portion of the segment. The memory is backed 152 * directly by the content of the ELF image. Pages are 153 * only copied if the segment is writable so that there 154 * can be more instantions of the same memory ELF image 155 * used at a time. Note that this could be later done 156 * as COW. 157 */ 158 if (entry->p_flags & PF_W) { 159 frame = (uintptr_t)frame_alloc(ONE_FRAME, 0); 160 memcpy((void *) PA2KA(frame), 161 (void *) (base + i * FRAME_SIZE), FRAME_SIZE); 162 if (entry->p_flags & PF_X) { 163 smc_coherence_block((void *) PA2KA(frame), 164 FRAME_SIZE); 165 } 166 dirty = true; 167 } else { 168 frame = KA2PA(base + i * FRAME_SIZE); 169 } 170 } else if (page >= start_anon) { 171 /* 172 * This is the uninitialized portion of the segment. 173 * It is not physically present in the ELF image. 174 * To resolve the situation, a frame must be allocated 175 * and cleared. 176 */ 177 frame = (uintptr_t)frame_alloc(ONE_FRAME, 0); 178 memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); 179 dirty = true; 180 } else { 181 size_t pad_lo, pad_hi; 182 /* 183 * The mixed case. 184 * 185 * The middle part is backed by the ELF image and 186 * the lower and upper parts are anonymous memory. 187 * (The segment can be and often is shorter than 1 page). 
188 */ 189 if (page < entry->p_vaddr) 190 pad_lo = entry->p_vaddr - page; 191 else 192 pad_lo = 0; 193 194 if (start_anon < page + PAGE_SIZE) 195 pad_hi = page + PAGE_SIZE - start_anon; 196 else 197 pad_hi = 0; 198 199 frame = (uintptr_t)frame_alloc(ONE_FRAME, 0); 200 memcpy((void *) (PA2KA(frame) + pad_lo), 201 (void *) (base + i * FRAME_SIZE + pad_lo), 202 FRAME_SIZE - pad_lo - pad_hi); 203 if (entry->p_flags & PF_X) { 204 smc_coherence_block((void *) (PA2KA(frame) + pad_lo), 205 FRAME_SIZE - pad_lo - pad_hi); 206 } 207 memsetb((void *) PA2KA(frame), pad_lo, 0); 208 memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi, 209 0); 210 dirty = true; 211 } 212 213 if (dirty && area->sh_info) { 214 frame_reference_add(ADDR2PFN(frame)); 215 btree_insert(&area->sh_info->pagemap, page - area->base, 216 (void *) frame, leaf); 217 } 218 219 if (area->sh_info) 220 mutex_unlock(&area->sh_info->lock); 221 222 page_mapping_insert(AS, addr, frame, as_area_get_flags(area)); 223 if (!used_space_insert(area, page, 1)) 224 panic("Cannot insert used space."); 225 226 return AS_PF_OK; 227 } 228 229 /** Free a frame that is backed by the ELF backend. 230 * 231 * The address space area and page tables must be already locked. 232 * 233 * @param area Pointer to the address space area. 234 * @param page Page that is mapped to frame. Must be aligned to 235 * PAGE_SIZE. 236 * @param frame Frame to be released. 237 * 238 */ 239 void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame) 240 { 241 elf_segment_header_t *entry = area->backend_data.segment; 242 uintptr_t start_anon; 243 244 ASSERT(page_table_locked(area->as)); 245 ASSERT(mutex_locked(&area->lock)); 246 247 ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)); 248 ASSERT(page < entry->p_vaddr + entry->p_memsz); 249 250 start_anon = entry->p_vaddr + entry->p_filesz; 251 252 if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) { 253 if (entry->p_flags & PF_W) { 254 /* 255 * Free the frame with the copy of writable segment 256 * data. 257 */ 258 frame_free(frame); 259 } 260 } else { 261 /* 262 * The frame is either anonymous memory or the mixed case (i.e. 263 * lower part is backed by the ELF image and the upper is 264 * anonymous). In any case, a frame needs to be freed. 265 */ 266 frame_free(frame); 267 } 116 return true; 268 117 } 269 118 … … 321 170 if (!(area->flags & AS_AREA_WRITE)) 322 171 if (base >= entry->p_vaddr && 323 base + count * PAGE_SIZE<= start_anon)172 base + P2SZ(count) <= start_anon) 324 173 continue; 325 174 … … 333 182 if (!(area->flags & AS_AREA_WRITE)) 334 183 if (base >= entry->p_vaddr && 335 base + (j + 1) * PAGE_SIZE <= 336 start_anon) 184 base + P2SZ(j + 1) <= start_anon) 337 185 continue; 338 186 339 187 page_table_lock(area->as, false); 340 188 pte = page_mapping_find(area->as, 341 base + j * PAGE_SIZE);189 base + P2SZ(j), false); 342 190 ASSERT(pte && PTE_VALID(pte) && 343 191 PTE_PRESENT(pte)); 344 192 btree_insert(&area->sh_info->pagemap, 345 (base + j * PAGE_SIZE) - area->base,193 (base + P2SZ(j)) - area->base, 346 194 (void *) PTE_GET_FRAME(pte), NULL); 347 195 page_table_unlock(area->as, false); … … 356 204 } 357 205 206 void elf_destroy(as_area_t *area) 207 { 208 size_t nonanon_pages = elf_nonanon_pages_get(area); 209 210 if (area->pages > nonanon_pages) 211 reserve_free(area->pages - nonanon_pages); 212 } 213 214 /** Service a page fault in the ELF backend address space area. 215 * 216 * The address space area and page tables must be already locked. 
217 * 218 * @param area Pointer to the address space area. 219 * @param addr Faulting virtual address. 220 * @param access Access mode that caused the fault (i.e. 221 * read/write/exec). 222 * 223 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK 224 * on success (i.e. serviced). 225 */ 226 int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access) 227 { 228 elf_header_t *elf = area->backend_data.elf; 229 elf_segment_header_t *entry = area->backend_data.segment; 230 btree_node_t *leaf; 231 uintptr_t base, frame, page, start_anon; 232 size_t i; 233 bool dirty = false; 234 235 ASSERT(page_table_locked(AS)); 236 ASSERT(mutex_locked(&area->lock)); 237 238 if (!as_area_check_access(area, access)) 239 return AS_PF_FAULT; 240 241 if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) 242 return AS_PF_FAULT; 243 244 if (addr >= entry->p_vaddr + entry->p_memsz) 245 return AS_PF_FAULT; 246 247 i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH; 248 base = (uintptr_t) 249 (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE)); 250 251 /* Virtual address of faulting page*/ 252 page = ALIGN_DOWN(addr, PAGE_SIZE); 253 254 /* Virtual address of the end of initialized part of segment */ 255 start_anon = entry->p_vaddr + entry->p_filesz; 256 257 if (area->sh_info) { 258 bool found = false; 259 260 /* 261 * The address space area is shared. 262 */ 263 264 mutex_lock(&area->sh_info->lock); 265 frame = (uintptr_t) btree_search(&area->sh_info->pagemap, 266 page - area->base, &leaf); 267 if (!frame) { 268 unsigned int i; 269 270 /* 271 * Workaround for valid NULL address. 272 */ 273 274 for (i = 0; i < leaf->keys; i++) { 275 if (leaf->key[i] == page - area->base) { 276 found = true; 277 break; 278 } 279 } 280 } 281 if (frame || found) { 282 frame_reference_add(ADDR2PFN(frame)); 283 page_mapping_insert(AS, addr, frame, 284 as_area_get_flags(area)); 285 if (!used_space_insert(area, page, 1)) 286 panic("Cannot insert used space."); 287 mutex_unlock(&area->sh_info->lock); 288 return AS_PF_OK; 289 } 290 } 291 292 /* 293 * The area is either not shared or the pagemap does not contain the 294 * mapping. 295 */ 296 if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) { 297 /* 298 * Initialized portion of the segment. The memory is backed 299 * directly by the content of the ELF image. Pages are 300 * only copied if the segment is writable so that there 301 * can be more instantions of the same memory ELF image 302 * used at a time. Note that this could be later done 303 * as COW. 304 */ 305 if (entry->p_flags & PF_W) { 306 frame = (uintptr_t)frame_alloc_noreserve(ONE_FRAME, 0); 307 memcpy((void *) PA2KA(frame), 308 (void *) (base + i * FRAME_SIZE), FRAME_SIZE); 309 if (entry->p_flags & PF_X) { 310 smc_coherence_block((void *) PA2KA(frame), 311 FRAME_SIZE); 312 } 313 dirty = true; 314 } else { 315 frame = KA2PA(base + i * FRAME_SIZE); 316 } 317 } else if (page >= start_anon) { 318 /* 319 * This is the uninitialized portion of the segment. 320 * It is not physically present in the ELF image. 321 * To resolve the situation, a frame must be allocated 322 * and cleared. 323 */ 324 frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0); 325 memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); 326 dirty = true; 327 } else { 328 size_t pad_lo, pad_hi; 329 /* 330 * The mixed case. 331 * 332 * The middle part is backed by the ELF image and 333 * the lower and upper parts are anonymous memory. 334 * (The segment can be and often is shorter than 1 page). 
335 */ 336 if (page < entry->p_vaddr) 337 pad_lo = entry->p_vaddr - page; 338 else 339 pad_lo = 0; 340 341 if (start_anon < page + PAGE_SIZE) 342 pad_hi = page + PAGE_SIZE - start_anon; 343 else 344 pad_hi = 0; 345 346 frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0); 347 memcpy((void *) (PA2KA(frame) + pad_lo), 348 (void *) (base + i * FRAME_SIZE + pad_lo), 349 FRAME_SIZE - pad_lo - pad_hi); 350 if (entry->p_flags & PF_X) { 351 smc_coherence_block((void *) (PA2KA(frame) + pad_lo), 352 FRAME_SIZE - pad_lo - pad_hi); 353 } 354 memsetb((void *) PA2KA(frame), pad_lo, 0); 355 memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi, 356 0); 357 dirty = true; 358 } 359 360 if (dirty && area->sh_info) { 361 frame_reference_add(ADDR2PFN(frame)); 362 btree_insert(&area->sh_info->pagemap, page - area->base, 363 (void *) frame, leaf); 364 } 365 366 if (area->sh_info) 367 mutex_unlock(&area->sh_info->lock); 368 369 page_mapping_insert(AS, addr, frame, as_area_get_flags(area)); 370 if (!used_space_insert(area, page, 1)) 371 panic("Cannot insert used space."); 372 373 return AS_PF_OK; 374 } 375 376 /** Free a frame that is backed by the ELF backend. 377 * 378 * The address space area and page tables must be already locked. 379 * 380 * @param area Pointer to the address space area. 381 * @param page Page that is mapped to frame. Must be aligned to 382 * PAGE_SIZE. 383 * @param frame Frame to be released. 384 * 385 */ 386 void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame) 387 { 388 elf_segment_header_t *entry = area->backend_data.segment; 389 uintptr_t start_anon; 390 391 ASSERT(page_table_locked(area->as)); 392 ASSERT(mutex_locked(&area->lock)); 393 394 ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)); 395 ASSERT(page < entry->p_vaddr + entry->p_memsz); 396 397 start_anon = entry->p_vaddr + entry->p_filesz; 398 399 if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) { 400 if (entry->p_flags & PF_W) { 401 /* 402 * Free the frame with the copy of writable segment 403 * data. 404 */ 405 frame_free_noreserve(frame); 406 } 407 } else { 408 /* 409 * The frame is either anonymous memory or the mixed case (i.e. 410 * lower part is backed by the ELF image and the upper is 411 * anonymous). In any case, a frame needs to be freed. 412 */ 413 frame_free_noreserve(frame); 414 } 415 } 416 358 417 /** @} 359 418 */ -
kernel/generic/src/mm/backend_phys.c
r18ba2e4f r544a2e4
48 48 #include <align.h>
49 49 
50 static int phys_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
51 static void phys_share(as_area_t *area);
50 static bool phys_create(as_area_t *);
51 static void phys_share(as_area_t *);
52 static void phys_destroy(as_area_t *);
53 
54 static int phys_page_fault(as_area_t *, uintptr_t, pf_access_t);
52 55 
53 56 mem_backend_t phys_backend = {
57 .create = phys_create,
58 .resize = NULL,
59 .share = phys_share,
60 .destroy = phys_destroy,
61 
54 62 .page_fault = phys_page_fault,
55 63 .frame_free = NULL,
56 .share = phys_share
57 64 };
65 
66 bool phys_create(as_area_t *area)
67 {
68 return true;
69 }
70 
71 /** Share address space area backed by physical memory.
72 *
73 * Do actually nothing as sharing of address space areas
74 * that are backed up by physical memory is very easy.
75 * Note that the function must be defined so that
76 * as_area_share() will succeed.
77 */
78 void phys_share(as_area_t *area)
79 {
80 ASSERT(mutex_locked(&area->as->lock));
81 ASSERT(mutex_locked(&area->lock));
82 }
83 
84 
85 void phys_destroy(as_area_t *area)
86 {
87 /* Nothing to do. */
88 }
58 89 
59 90 /** Service a page fault in the address space area backed by physical memory.
…
88 119 }
89 120 
90 /** Share address space area backed by physical memory.
91 *
92 * Do actually nothing as sharing of address space areas
93 * that are backed up by physical memory is very easy.
94 * Note that the function must be defined so that
95 * as_area_share() will succeed.
96 */
97 void phys_share(as_area_t *area)
98 {
99 ASSERT(mutex_locked(&area->as->lock));
100 ASSERT(mutex_locked(&area->lock));
101 }
102 
103 121 /** @}
104 122 */
-
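The hunk above shows the shape the backend callback table takes after the merge: every backend now fills in create/resize/share/destroy alongside page_fault/frame_free, with unused hooks left as NULL or as trivial bodies. A simplified stand-alone analogue (stand-in types, not the kernel's structures) of that pattern:

/* Illustrative only: a stripped-down analogue of a memory-backend callback
 * table. The types are simplified stand-ins for the kernel's. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct area area_t;            /* stand-in for as_area_t */

typedef struct {
	bool (*create)(area_t *);
	bool (*resize)(area_t *, size_t);
	void (*share)(area_t *);
	void (*destroy)(area_t *);
} backend_t;

static bool demo_create(area_t *a) { (void) a; return true; }  /* nothing to set up */
static void demo_share(area_t *a) { (void) a; }                /* sharing is trivial */
static void demo_destroy(area_t *a) { (void) a; }              /* nothing to tear down */

static backend_t demo_backend = {
	.create = demo_create,
	.resize = NULL,        /* hook not supported by this backend */
	.share = demo_share,
	.destroy = demo_destroy,
};

int main(void)
{
	/* Callers check optional hooks for NULL before invoking them. */
	if (demo_backend.create)
		printf("create -> %d\n", demo_backend.create(NULL));
	return 0;
}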
kernel/generic/src/mm/frame.c
r18ba2e4f r544a2e4
45 45 #include <typedefs.h>
46 46 #include <mm/frame.h>
47 #include <mm/reserve.h>
47 48 #include <mm/as.h>
48 49 #include <panic.h>
…
59 60 #include <macros.h>
60 61 #include <config.h>
62 #include <str.h>
61 63 
62 64 zones_t zones;
…
180 182 *
181 183 */
182 #ifdef CONFIG_DEBUG
183 NO_TRACE static size_t total_frames_free(void)
184 NO_TRACE static size_t frame_total_free_get_internal(void)
184 185 {
185 186 size_t total = 0;
186 187 size_t i;
188 
187 189 for (i = 0; i < zones.count; i++)
188 190 total += zones.info[i].free_count;
…
190 192 return total;
191 193 }
192 #endif /* CONFIG_DEBUG */
194 
195 NO_TRACE size_t frame_total_free_get(void)
196 {
197 size_t total;
198 
199 irq_spinlock_lock(&zones.lock, true);
200 total = frame_total_free_get_internal();
201 irq_spinlock_unlock(&zones.lock, true);
202 
203 return total;
204 }
205 
193 206 
194 207 /** Find a zone with a given frames.
…
472 485 * @param frame_idx Frame index relative to zone.
473 486 *
474 */
475 NO_TRACE static void zone_frame_free(zone_t *zone, size_t frame_idx)
487 * @return Number of freed frames.
488 *
489 */
490 NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t frame_idx)
476 491 {
477 492 ASSERT(zone_flags_available(zone->flags));
478 493 
479 494 frame_t *frame = &zone->frames[frame_idx];
480 
481 /* Remember frame order */
482 uint8_t order = frame->buddy_order;
495 size_t size = 0;
483 496 
484 497 ASSERT(frame->refcount);
485 498 
486 499 if (!--frame->refcount) {
487 buddy_system_free(zone->buddy_system, &frame->buddy_link);
488 
500 size = 1 << frame->buddy_order;
501 buddy_system_free(zone->buddy_system, &frame->buddy_link);
489 502 /* Update zone information. */
490 zone->free_count += (1 << order);
491 zone->busy_count -= (1 << order);
492 }
503 zone->free_count += size;
504 zone->busy_count -= size;
505 }
506 
507 return size;
493 508 }
494 509 
…
516 531 ASSERT(link);
517 532 zone->free_count--;
533 reserve_force_alloc(1);
518 534 }
519 535 
…
645 661 for (i = 0; i < cframes; i++) {
646 662 zones.info[znum].busy_count++;
647 zone_frame_free(&zones.info[znum],
663 (void) zone_frame_free(&zones.info[znum],
648 664 pfn - zones.info[znum].base + i);
649 665 }
…
683 699 /* Free unneeded frames */
684 700 for (i = count; i < (size_t) (1 << order); i++)
685 zone_frame_free(&zones.info[znum], i + frame_idx);
701 (void) zone_frame_free(&zones.info[znum], i + frame_idx);
686 702 }
687 703 
…
695 711 * not to be 2^order size. Once the allocator is running it is no longer
696 712 * possible, merged configuration data occupies more space :-/
697 *
698 * The function uses
699 713 *
700 714 */
…
999 1013 size_t hint = pzone ? (*pzone) : 0;
1000 1014 
1015 /*
1016 * If not told otherwise, we must first reserve the memory.
1017 */
1018 if (!(flags & FRAME_NO_RESERVE))
1019 reserve_force_alloc(size);
1020 
1001 1021 loop:
1002 1022 irq_spinlock_lock(&zones.lock, true);
…
1033 1053 if (flags & FRAME_ATOMIC) {
1034 1054 irq_spinlock_unlock(&zones.lock, true);
1055 if (!(flags & FRAME_NO_RESERVE))
1056 reserve_free(size);
1035 1057 return NULL;
1036 1058 }
1037 1059 
1038 1060 #ifdef CONFIG_DEBUG
1039 size_t avail = total_frames_free();
1061 size_t avail = frame_total_free_get_internal();
1040 1062 #endif
…
1088 1110 }
1089 1111 
1112 void *frame_alloc(uint8_t order, frame_flags_t flags)
1113 {
1114 return frame_alloc_generic(order, flags, NULL);
1115 }
1116 
1117 void *frame_alloc_noreserve(uint8_t order, frame_flags_t flags)
1118 {
1119 return frame_alloc_generic(order, flags | FRAME_NO_RESERVE, NULL);
1120 }
1121 
1090 1122 /** Free a frame.
1091 1123 *
…
1095 1127 *
1096 1128 * @param frame Physical Address of of the frame to be freed.
1097 *
1098 */
1099 void frame_free(uintptr_t frame)
1100 {
1129 * @param flags Flags to control memory reservation.
1130 *
1131 */
1132 void frame_free_generic(uintptr_t frame, frame_flags_t flags)
1133 {
1134 size_t size;
1135 
1101 1136 irq_spinlock_lock(&zones.lock, true);
…
1106 1141 pfn_t pfn = ADDR2PFN(frame);
1107 1142 size_t znum = find_zone(pfn, 1, 0);
1143 
1108 1144 
1109 1145 ASSERT(znum != (size_t) -1);
1110 1146 
1111 zone_frame_free(&zones.info[znum], pfn - zones.info[znum].base);
1147 size = zone_frame_free(&zones.info[znum], pfn - zones.info[znum].base);
1112 1148 
1113 1149 irq_spinlock_unlock(&zones.lock, true);
…
1118 1154 mutex_lock(&mem_avail_mtx);
1119 1155 if (mem_avail_req > 0)
1120 mem_avail_req--;
1156 mem_avail_req -= min(mem_avail_req, size);
1121 1157 
1122 1158 if (mem_avail_req == 0) {
…
1125 1161 }
1126 1162 mutex_unlock(&mem_avail_mtx);
1163 
1164 if (!(flags & FRAME_NO_RESERVE))
1165 reserve_free(size);
1166 }
1167 
1168 void frame_free(uintptr_t frame)
1169 {
1170 frame_free_generic(frame, 0);
1171 }
1172 
1173 void frame_free_noreserve(uintptr_t frame)
1174 {
1175 frame_free_generic(frame, FRAME_NO_RESERVE);
1127 1176 }
1128 1177 
…
1355 1404 bool available = zone_flags_available(flags);
1356 1405 
1406 uint64_t size;
1407 const char *size_suffix;
1408 bin_order_suffix(FRAMES2SIZE(count), &size, &size_suffix, false);
1409 
1357 1410 printf("Zone number: %zu\n", znum);
1358 1411 printf("Zone base address: %p\n", (void *) base);
1359 printf("Zone size: %zu frames (%zu KiB)\n", count,
1360 SIZE2KB(FRAMES2SIZE(count)));
1412 printf("Zone size: %zu frames (%" PRIu64 " %s)\n", count,
1413 size, size_suffix);
1361 1414 printf("Zone flags: %c%c%c\n",
1362 1415 available ? 'A' : ' ',
…
1365 1418 
1366 1419 if (available) {
1367 printf("Allocated space: %zu frames (%zu KiB)\n",
1368 busy_count, SIZE2KB(FRAMES2SIZE(busy_count)));
1369 printf("Available space: %zu frames (%zu KiB)\n",
1370 free_count, SIZE2KB(FRAMES2SIZE(free_count)));
1420 bin_order_suffix(FRAMES2SIZE(busy_count), &size, &size_suffix,
1421 false);
1422 printf("Allocated space: %zu frames (%" PRIu64 " %s)\n",
1423 busy_count, size, size_suffix);
1424 bin_order_suffix(FRAMES2SIZE(free_count), &size, &size_suffix,
1425 false);
1426 printf("Available space: %zu frames (%" PRIu64 " %s)\n",
1427 free_count, size, size_suffix);
1371 1428 }
1372 1429 }
-
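The frame.c hunks establish a simple discipline around the new reserve module: reserve memory before attempting an allocation, release the reservation if the allocation fails, and release it again when the frames are eventually freed, while the *_noreserve variants skip both steps. A toy model of that pairing (not the kernel's reserve.c, just the bookkeeping pattern):

/* Toy model of the reservation discipline introduced by this changeset:
 * reserve before allocating, undo on failure, release on free. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static size_t reserved;            /* frames promised to callers            */
static size_t free_frames = 8;     /* frames actually available (toy value) */

static void reserve_force_alloc(size_t size) { reserved += size; }
static void reserve_free(size_t size) { reserved -= size; }

/* Mirrors the frame_alloc_generic() flow: reserve first, undo on failure. */
static bool toy_frame_alloc(size_t size, bool no_reserve)
{
	if (!no_reserve)
		reserve_force_alloc(size);

	if (size > free_frames) {      /* allocation failed */
		if (!no_reserve)
			reserve_free(size);
		return false;
	}
	free_frames -= size;
	return true;
}

/* Mirrors frame_free_generic(): return the frames, then drop the reservation. */
static void toy_frame_free(size_t size, bool no_reserve)
{
	free_frames += size;
	if (!no_reserve)
		reserve_free(size);
}

int main(void)
{
	toy_frame_alloc(4, false);
	toy_frame_free(4, false);
	printf("reserved=%zu free=%zu\n", reserved, free_frames);
	return 0;
}

The point of the pattern is that the reservation counter never leaks: every successful reserve is matched by exactly one release, regardless of whether the allocation itself succeeds.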
kernel/generic/src/mm/page.c
r18ba2e4f r544a2e4
108 108 * using flags. Allocate and setup any missing page tables.
109 109 *
110 * @param as Address space to wich page belongs.
110 * @param as Address space to which page belongs.
111 111 * @param page Virtual address of the page to be mapped.
112 112 * @param frame Physical address of memory frame to which the mapping is
…
135 135 * this call visible.
136 136 *
137 * @param as Address space to wich page belongs.
137 * @param as Address space to which page belongs.
138 138 * @param page Virtual address of the page to be demapped.
139 139 *
…
152 152 }
153 153 
154 /** Find mapping for virtual page
154 /** Find mapping for virtual page.
155 155 *
156 * Find mapping for virtual page.
157 *
158 * @param as Address space to wich page belongs.
159 * @param page Virtual page.
156 * @param as Address space to which page belongs.
157 * @param page Virtual page.
158 * @param nolock True if the page tables need not be locked.
160 159 *
161 160 * @return NULL if there is no such mapping; requested mapping
…
163 162 *
164 163 */
165 NO_TRACE pte_t *page_mapping_find(as_t *as, uintptr_t page)
164 NO_TRACE pte_t *page_mapping_find(as_t *as, uintptr_t page, bool nolock)
166 165 {
167 ASSERT(page_table_locked(as));
166 ASSERT(nolock || page_table_locked(as));
168 167 
169 168 ASSERT(page_mapping_operations);
170 169 ASSERT(page_mapping_operations->mapping_find);
171 170 
172 return page_mapping_operations->mapping_find(as, page);
171 return page_mapping_operations->mapping_find(as, page, nolock);
173 172 
174 173 }
-
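The new nolock parameter only relaxes the locking assertion: ordinary callers keep taking the page table lock and pass false (as the futex.c hunk further down does), while callers that deliberately bypass the lock pass true. A minimal user-space sketch of that contract (stand-in types, not the kernel routine):

/* Minimal sketch of the relaxed locking contract of a mapping lookup. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool page_table_is_locked = false;   /* stand-in for page_table_locked(as) */

/* Same shape as the kernel routine: the extra flag relaxes the assertion. */
static const char *mapping_find(unsigned long page, bool nolock)
{
	assert(nolock || page_table_is_locked);
	(void) page;
	return NULL;                            /* no mapping in this toy model */
}

int main(void)
{
	page_table_is_locked = true;
	printf("%p\n", (void *) mapping_find(0x1000, false));  /* normal caller */
	page_table_is_locked = false;
	printf("%p\n", (void *) mapping_find(0x2000, true));   /* lock-free path */
	return 0;
}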
kernel/generic/src/printf/vprintf.c
r18ba2e4f r544a2e4
41 41 #include <typedefs.h>
42 42 #include <str.h>
43 
44 IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(printf_lock, "*printf_lock");
45 
43 
46 44 static int vprintf_str_write(const char *str, size_t size, void *data)
…
93 91 };
94 92 
95 irq_spinlock_lock(&printf_lock, true);
96 int ret = printf_core(fmt, &ps, ap);
97 irq_spinlock_unlock(&printf_lock, true);
98 
99 return ret;
93 return printf_core(fmt, &ps, ap);
100 94 
101 95 
-
kernel/generic/src/proc/program.c
r18ba2e4f r544a2e4
54 54 #include <proc/program.h>
55 55 
56 #ifndef LOADED_PROG_STACK_PAGES_NO
57 #define LOADED_PROG_STACK_PAGES_NO 1
58 #endif
59 
60 56 /**
61 57 * Points to the binary image used as the program loader. All non-initial
…
90 86 
91 87 /*
92 * Create the data address space area.
88 * Create the stack address space area.
93 89 */
94 90 as_area_t *area = as_area_create(as,
95 91 AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
96 LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
97 AS_AREA_ATTR_NONE, &anon_backend, NULL);
92 STACK_SIZE, USTACK_ADDRESS, AS_AREA_ATTR_NONE,
93 &anon_backend, NULL);
98 94 if (!area)
99 95 return ENOMEM;
-
kernel/generic/src/proc/scheduler.c
r18ba2e4f r544a2e4
354 354 
355 355 /*
356 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
356 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, AS
357 357 * and preemption counter. At this point THE could be coming either
358 358 * from THREAD's or CPU's stack.
…
376 376 context_save(&CPU->saved_context);
377 377 context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
378 (uintptr_t) CPU->stack, CPU_STACK_SIZE);
378 (uintptr_t) CPU->stack, STACK_SIZE);
379 379 context_restore(&CPU->saved_context);
380 380 
-
kernel/generic/src/proc/task.c
r18ba2e4f r544a2e4
190 190 str_cpy(task->name, TASK_NAME_BUFLEN, name);
191 191 
192 task->context = CONTEXT;
192 task->container = CONTAINER;
193 193 task->capabilities = 0;
194 194 task->ucycles = 0;
…
211 211 
212 212 if ((ipc_phone_0) &&
213 (context_check(ipc_phone_0->task->context, task->context)))
213 (container_check(ipc_phone_0->task->container, task->container)))
214 214 ipc_phone_connect(&task->phones[0], ipc_phone_0);
215 215 
…
534 534 */
535 535 if (notify) {
536 if (event_is_subscribed(EVENT_FAULT)) {
537 /* Notify the subscriber that a fault occurred. */
538 event_notify_3(EVENT_FAULT, LOWER32(TASK->taskid),
539 UPPER32(TASK->taskid), (sysarg_t) THREAD);
540 
536 /* Notify the subscriber that a fault occurred. */
537 if (event_notify_3(EVENT_FAULT, false, LOWER32(TASK->taskid),
538 UPPER32(TASK->taskid), (sysarg_t) THREAD) == EOK) {
541 539 #ifdef CONFIG_UDEBUG
542 540 /* Wait for a debugging session. */
…
586 584 printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %10p %10p"
587 585 " %9" PRIu64 "%c %9" PRIu64 "%c\n", task->taskid,
588 task->name, task->context, task, task->as,
586 task->name, task->container, task, task->as,
589 587 ucycles, usuffix, kcycles, ksuffix);
590 588 #endif
…
597 595 else
598 596 printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %18p %18p\n",
599 task->taskid, task->name, task->context, task, task->as);
597 task->taskid, task->name, task->container, task, task->as);
600 598 #endif
…
627 625 printf("[id ] [threads] [calls] [callee\n");
628 626 else
629 printf("[id ] [name ] [ctx] [address ] [as ]"
627 printf("[id ] [name ] [ctn] [address ] [as ]"
630 628 " [ucycles ] [kcycles ]\n");
631 629 #endif
…
636 634 " [callee\n");
637 635 else
638 printf("[id ] [name ] [ctx] [address ]"
636 printf("[id ] [name ] [ctn] [address ]"
639 637 " [as ]\n");
640 638 
-
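Besides the context-to-container rename, the task.c hunk shows the new notification style: instead of checking event_is_subscribed() up front, the caller fires event_notify_3() (which now also takes a masking flag) and branches on the returned error code. A toy illustration of that control flow (toy error codes and stub routine, not the kernel API):

/* Toy illustration of "notify and branch on the result" instead of
 * "check subscription, then notify". */
#include <stdio.h>

#define EOK       0      /* toy success code  */
#define ENOENT  (-1)     /* toy failure code  */

static int subscribed = 0;           /* pretend nobody is listening */

static int toy_event_notify(int mask)
{
	(void) mask;                      /* whether to mask further notifications */
	return subscribed ? EOK : ENOENT;
}

int main(void)
{
	if (toy_event_notify(0) == EOK)
		printf("subscriber notified, waiting for a debugging session\n");
	else
		printf("no subscriber, fall back to default fault handling\n");
	return 0;
}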
kernel/generic/src/proc/the.c
r18ba2e4f r544a2e4
58 58 the->task = NULL;
59 59 the->as = NULL;
60 the->magic = MAGIC;
60 61 }
61 62 
…
70 71 NO_TRACE void the_copy(the_t *src, the_t *dst)
71 72 {
73 ASSERT(src->magic == MAGIC);
72 74 *dst = *src;
73 75 }
-
kernel/generic/src/proc/thread.c
r18ba2e4f r544a2e4
68 68 #include <errno.h>
69 69 
70 
71 #ifndef LOADED_PROG_STACK_PAGES_NO
72 #define LOADED_PROG_STACK_PAGES_NO 1
73 #endif
74 
75 
76 70 /** Thread states */
77 71 const char *thread_states[] = {
…
300 294 
301 295 /* Not needed, but good for debugging */
302 memsetb(thread->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
296 memsetb(thread->kstack, STACK_SIZE, 0);
303 297 
304 298 irq_spinlock_lock(&tidlock, true);
…
308 302 context_save(&thread->saved_context);
309 303 context_set(&thread->saved_context, FADDR(cushion),
310 (uintptr_t) thread->kstack, THREAD_STACK_SIZE);
304 (uintptr_t) thread->kstack, STACK_SIZE);
311 305 
312 306 the_initialize((the_t *) thread->kstack);
…
605 599 printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 "\n",
606 600 thread->tid, name, thread, thread_states[thread->state],
607 thread->task, thread->task->context);
601 thread->task, thread->task->container);
608 602 #endif
…
617 611 printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n",
618 612 thread->tid, name, thread, thread_states[thread->state],
619 thread->task, thread->task->context);
613 thread->task, thread->task->container);
620 614 #endif
…
658 652 else
659 653 printf("[id ] [name ] [address ] [state ] [task ]"
660 " [ctx]\n");
654 " [ctn]\n");
661 655 #endif
…
667 661 } else
668 662 printf("[id ] [name ] [address ] [state ]"
669 " [task ] [ctx]\n");
663 " [task ] [ctn]\n");
670 664 #endif
-
kernel/generic/src/security/cap.c
r18ba2e4f r544a2e4
92 92 task_t *task = task_find_by_id(taskid);
93 93 
94 if ((!task) || (!context_check(CONTEXT, task->context))) {
94 if ((!task) || (!container_check(CONTAINER, task->container))) {
95 95 irq_spinlock_unlock(&tasks_lock, true);
96 96 return (sysarg_t) ENOENT;
…
121 121 
122 122 task_t *task = task_find_by_id(taskid);
123 if ((!task) || (!context_check(CONTEXT, task->context))) {
123 if ((!task) || (!container_check(CONTAINER, task->container))) {
124 124 irq_spinlock_unlock(&tasks_lock, true);
125 125 return (sysarg_t) ENOENT;
-
kernel/generic/src/synch/futex.c
r18ba2e4f r544a2e4
119 119 */
120 120 page_table_lock(AS, true);
121 t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE));
121 t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), false);
122 122 if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) {
123 123 page_table_unlock(AS, true);
…
155 155 */
156 156 page_table_lock(AS, true);
157 t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE));
157 t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), false);
158 158 if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) {
159 159 page_table_unlock(AS, true);
-
kernel/generic/src/synch/spinlock.c
r18ba2e4f r544a2e4
96 96 * run in a simulator) that caused problems with both
97 97 * printf_lock and the framebuffer lock.
98 *
99 98 */
100 99 if (lock->name[0] == '*')
-
kernel/generic/src/syscall/syscall.c
r18ba2e4f r544a2e4
161 161 /* Event notification syscalls. */
162 162 (syshandler_t) sys_event_subscribe,
163 (syshandler_t) sys_event_unmask,
163 164 
164 165 /* Capabilities related syscalls. */