Changeset b1c57a8 in mainline for kernel/generic/src
- Timestamp: 2014-10-09T15:03:55Z (11 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: e367939c
- Parents: 21799398 (diff), 207e8880 (diff)
- Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel/generic/src
- Files: 6 added, 30 edited
kernel/generic/src/adt/list.c
r21799398 rb1c57a8 68 68 } 69 69 70 /** Concatenate two lists 71 * 72 * Concatenate lists @a list1 and @a list2, producing a single 73 * list @a list1 containing items from both (in @a list1, @a list2 74 * order) and empty list @a list2. 75 * 76 * @param list1 First list and concatenated output 77 * @param list2 Second list and empty output. 78 * 70 /** Moves items of one list into another after the specified item. 71 * 72 * Inserts all items of @a list after item at @a pos in another list. 73 * Both lists may be empty. 74 * 75 * @param list Source list to move after pos. Empty afterwards. 76 * @param pos Source items will be placed after this item. 79 77 */ 80 void list_ concat(list_t *list1, list_t *list2)78 void list_splice(list_t *list, link_t *pos) 81 79 { 82 if (list_empty(list 2))80 if (list_empty(list)) 83 81 return; 84 85 list2->head.next->prev = list1->head.prev; 86 list2->head.prev->next = &list1->head; 87 list1->head.prev->next = list2->head.next; 88 list1->head.prev = list2->head.prev; 89 list_initialize(list2); 82 83 /* Attach list to destination. */ 84 list->head.next->prev = pos; 85 list->head.prev->next = pos->next; 86 87 /* Link destination list to the added list. */ 88 pos->next->prev = list->head.prev; 89 pos->next = list->head.next; 90 91 list_initialize(list); 90 92 } 91 93 -
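The new list_splice() replaces list_concat(): instead of only appending one whole list to another, it moves all items of a source list behind an arbitrary item of a destination list. A minimal sketch of how the old concatenation idiom maps onto the new call, using only the <adt/list.h> structures visible above (the example_concat() wrapper itself is hypothetical):

```c
#include <adt/list.h>

/* Hypothetical helper reproducing the old list_concat(list1, list2) behaviour. */
static void example_concat(list_t *list1, list_t *list2)
{
	/*
	 * Move every item of list2 behind the last item of list1
	 * (list1->head.prev is the tail link, or the head itself when
	 * list1 is empty). list2 is reinitialized to an empty list.
	 */
	list_splice(list2, list1->head.prev);
}
```

Callers that previously relied on list_concat() producing the list1-then-list2 order get the same result, since the source items are inserted immediately after the destination's last item.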
kernel/generic/src/console/chardev.c
r21799398 rb1c57a8 39 39 #include <print.h> 40 40 #include <func.h> 41 #include < arch.h>41 #include <cpu.h> 42 42 43 43 /** Initialize input character device. -
kernel/generic/src/console/cmd.c
r21799398 rb1c57a8 70 70 #include <sysinfo/sysinfo.h> 71 71 #include <symtab.h> 72 #include <synch/workqueue.h> 73 #include <synch/rcu.h> 72 74 #include <errno.h> 73 75 … … 526 528 }; 527 529 530 /* Data and methods for the 'workq' command */ 531 static int cmd_workq(cmd_arg_t *argv); 532 static cmd_info_t workq_info = { 533 .name = "workq", 534 .description = "Show global workq information.", 535 .func = cmd_workq, 536 .argc = 0 537 }; 538 539 /* Data and methods for the 'workq' command */ 540 static int cmd_rcu(cmd_arg_t *argv); 541 static cmd_info_t rcu_info = { 542 .name = "rcu", 543 .description = "Show RCU run-time statistics.", 544 .func = cmd_rcu, 545 .argc = 0 546 }; 547 528 548 /* Data and methods for 'ipc' command */ 529 549 static int cmd_ipc(cmd_arg_t *argv); … … 589 609 &physmem_info, 590 610 &reboot_info, 611 &rcu_info, 591 612 &sched_info, 592 613 &set4_info, … … 599 620 &uptime_info, 600 621 &version_info, 622 &workq_info, 601 623 &zones_info, 602 624 &zone_info, … … 1270 1292 { 1271 1293 sched_print_list(); 1294 return 1; 1295 } 1296 1297 /** Prints information about the global work queue. 1298 * 1299 * @param argv Ignores 1300 * 1301 * @return Always 1 1302 */ 1303 int cmd_workq(cmd_arg_t *argv) 1304 { 1305 workq_global_print_info(); 1306 return 1; 1307 } 1308 1309 /** Prints RCU statistics. 1310 * 1311 * @param argv Ignores 1312 * 1313 * @return Always 1 1314 */ 1315 int cmd_rcu(cmd_arg_t *argv) 1316 { 1317 rcu_print_stat(); 1272 1318 return 1; 1273 1319 } -
kernel/generic/src/console/console.c
r21799398 rb1c57a8 53 53 #include <str.h> 54 54 #include <abi/kio.h> 55 #include <mm/frame.h> /* SIZE2FRAMES */ 56 #include <mm/slab.h> /* malloc */ 55 57 56 58 #define KIO_PAGES 8 -
kernel/generic/src/console/kconsole.c
r21799398 rb1c57a8 59 59 #include <putchar.h> 60 60 #include <str.h> 61 #include <mm/slab.h> 61 62 62 63 /** Simple kernel console. -
kernel/generic/src/cpu/cpu.c
r21799398 rb1c57a8 50 50 #include <sysinfo/sysinfo.h> 51 51 #include <arch/cycle.h> 52 #include <synch/rcu.h> 52 53 53 54 cpu_t *cpus; … … 105 106 cpu_identify(); 106 107 cpu_arch_init(); 108 rcu_cpu_init(); 107 109 } 108 110 -
kernel/generic/src/debug/panic.c
r21799398 rb1c57a8 96 96 printf("THE=%p: ", THE); 97 97 if (THE != NULL) { 98 printf("p d=%" PRIun " thread=%p task=%p cpu=%p as=%p"99 " magic=%#" PRIx32 "\n", THE->preemption _disabled,98 printf("pe=%" PRIun " thread=%p task=%p cpu=%p as=%p" 99 " magic=%#" PRIx32 "\n", THE->preemption, 100 100 THE->thread, THE->task, THE->cpu, THE->as, THE->magic); 101 101 -
kernel/generic/src/interrupt/interrupt.c
r21799398 rb1c57a8 112 112 } 113 113 114 /* Account CPU usage if it has wakedup from sleep */115 if (CPU ) {114 /* Account CPU usage if it woke up from sleep */ 115 if (CPU && CPU->idle) { 116 116 irq_spinlock_lock(&CPU->lock, false); 117 if (CPU->idle) { 118 uint64_t now = get_cycle(); 119 CPU->idle_cycles += now - CPU->last_cycle; 120 CPU->last_cycle = now; 121 CPU->idle = false; 122 } 117 uint64_t now = get_cycle(); 118 CPU->idle_cycles += now - CPU->last_cycle; 119 CPU->last_cycle = now; 120 CPU->idle = false; 123 121 irq_spinlock_unlock(&CPU->lock, false); 124 122 } -
kernel/generic/src/ipc/kbox.c
r21799398 rb1c57a8 44 44 #include <ipc/kbox.h> 45 45 #include <print.h> 46 #include <proc/thread.h> 46 47 47 48 void ipc_kbox_cleanup(void) -
kernel/generic/src/lib/str.c
r21799398 rb1c57a8 111 111 #include <debug.h> 112 112 #include <macros.h> 113 #include <mm/slab.h> 113 114 114 115 /** Check the condition if wchar_t is signed */ … … 567 568 /* There must be space for a null terminator in the buffer. */ 568 569 ASSERT(size > 0); 570 ASSERT(src != NULL); 569 571 570 572 size_t src_off = 0; -
kernel/generic/src/main/kinit.c
r21799398 rb1c57a8 79 79 #include <synch/waitq.h> 80 80 #include <synch/spinlock.h> 81 #include <synch/workqueue.h> 82 #include <synch/rcu.h> 81 83 82 84 #define ALIVE_CHARS 4 … … 105 107 */ 106 108 thread_detach(THREAD); 107 109 108 110 interrupts_disable(); 111 112 /* Start processing RCU callbacks. RCU is fully functional afterwards. */ 113 rcu_kinit_init(); 114 115 /* 116 * Start processing work queue items. Some may have been queued during boot. 117 */ 118 workq_global_worker_init(); 109 119 110 120 #ifdef CONFIG_SMP -
kernel/generic/src/main/main.c
r21799398 rb1c57a8 76 76 #include <synch/waitq.h> 77 77 #include <synch/futex.h> 78 #include <synch/workqueue.h> 79 #include <smp/smp_call.h> 78 80 #include <arch/arch.h> 79 81 #include <arch.h> … … 263 265 264 266 cpu_init(); 265 266 267 calibrate_delay_loop(); 268 arch_post_cpu_init(); 269 270 smp_call_init(); 271 workq_global_init(); 267 272 clock_counter_init(); 268 273 timeout_init(); … … 367 372 void main_ap_separated_stack(void) 368 373 { 374 smp_call_init(); 375 369 376 /* 370 377 * Configure timeouts for this cpu. -
kernel/generic/src/main/shutdown.c
r21799398 rb1c57a8 37 37 38 38 #include <arch.h> 39 #include <proc/task.h> 39 40 #include <func.h> 40 41 #include <print.h> -
kernel/generic/src/mm/frame.c
r21799398 rb1c57a8 61 61 #include <config.h> 62 62 #include <str.h> 63 #include <proc/thread.h> /* THREAD */ 63 64 64 65 zones_t zones; -
kernel/generic/src/mm/km.c
r21799398 rb1c57a8 49 49 #include <macros.h> 50 50 #include <bitops.h> 51 #include <proc/thread.h> 51 52 52 53 static ra_arena_t *km_ni_arena; -
kernel/generic/src/mm/slab.c
r21799398 rb1c57a8 114 114 #include <bitops.h> 115 115 #include <macros.h> 116 #include <cpu.h> 116 117 117 118 IRQ_SPINLOCK_STATIC_INITIALIZE(slab_cache_lock); -
kernel/generic/src/preempt/preemption.c
r21799398 rb1c57a8 37 37 38 38 #include <preemption.h> 39 #include <arch.h>40 #include <arch/asm.h>41 #include <arch/barrier.h>42 #include <debug.h>43 39 44 /** Increment preemption disabled counter. */45 void preemption_disable(void)46 {47 THE->preemption_disabled++;48 memory_barrier();49 }50 51 /** Decrement preemption disabled counter. */52 void preemption_enable(void)53 {54 ASSERT(PREEMPTION_DISABLED);55 memory_barrier();56 THE->preemption_disabled--;57 }58 40 59 41 /** @} -
kernel/generic/src/proc/scheduler.c
r21799398 rb1c57a8 52 52 #include <atomic.h> 53 53 #include <synch/spinlock.h> 54 #include <synch/workqueue.h> 55 #include <synch/rcu.h> 54 56 #include <config.h> 55 57 #include <context.h> … … 64 66 #include <debug.h> 65 67 #include <stacktrace.h> 68 #include <cpu.h> 66 69 67 70 static void scheduler_separated_stack(void); … … 87 90 { 88 91 before_thread_runs_arch(); 92 rcu_before_thread_runs(); 89 93 90 94 #ifdef CONFIG_FPU_LAZY … … 127 131 static void after_thread_ran(void) 128 132 { 133 workq_after_thread_ran(); 134 rcu_after_thread_ran(); 129 135 after_thread_ran_arch(); 130 136 } … … 219 225 goto loop; 220 226 } 227 228 ASSERT(!CPU->idle); 221 229 222 230 unsigned int i; … … 398 406 ASSERT((!THREAD) || (irq_spinlock_locked(&THREAD->lock))); 399 407 ASSERT(CPU != NULL); 408 ASSERT(interrupts_disabled()); 400 409 401 410 /* … … 421 430 422 431 case Exiting: 432 rcu_thread_exiting(); 423 433 repeat: 424 434 if (THREAD->detached) { -
kernel/generic/src/proc/task.c
r21799398 rb1c57a8 41 41 #include <mm/slab.h> 42 42 #include <atomic.h> 43 #include <synch/futex.h> 43 44 #include <synch/spinlock.h> 44 45 #include <synch/waitq.h> … … 163 164 164 165 irq_spinlock_initialize(&task->lock, "task_t_lock"); 165 mutex_initialize(&task->futexes_lock, MUTEX_PASSIVE);166 166 167 167 list_initialize(&task->threads); … … 175 175 spinlock_initialize(&task->active_calls_lock, "active_calls_lock"); 176 176 list_initialize(&task->active_calls); 177 177 178 178 #ifdef CONFIG_UDEBUG 179 179 /* Init kbox stuff */ … … 231 231 (void) ipc_phone_connect(&task->phones[0], ipc_phone_0); 232 232 233 btree_create(&task->futexes);233 futex_task_init(task); 234 234 235 235 /* … … 272 272 * Free up dynamically allocated state. 273 273 */ 274 btree_destroy(&task->futexes);274 futex_task_deinit(task); 275 275 276 276 /* -
kernel/generic/src/proc/the.c
r21799398 rb1c57a8 43 43 44 44 #include <arch.h> 45 #include <debug.h> 45 46 46 47 /** Initialize THE structure … … 53 54 void the_initialize(the_t *the) 54 55 { 55 the->preemption _disabled= 0;56 the->preemption = 0; 56 57 the->cpu = NULL; 57 58 the->thread = NULL; … … 59 60 the->as = NULL; 60 61 the->magic = MAGIC; 62 #ifdef RCU_PREEMPT_A 63 the->rcu_nesting = 0; 64 #endif 61 65 } 62 66 -
kernel/generic/src/proc/thread.c
r21799398 rb1c57a8 46 46 #include <synch/spinlock.h> 47 47 #include <synch/waitq.h> 48 #include <synch/workqueue.h> 49 #include <synch/rcu.h> 48 50 #include <cpu.h> 49 51 #include <str.h> … … 263 265 } 264 266 267 /** Invoked right before thread_ready() readies the thread. thread is locked. */ 268 static void before_thread_is_ready(thread_t *thread) 269 { 270 ASSERT(irq_spinlock_locked(&thread->lock)); 271 workq_before_thread_is_ready(thread); 272 } 273 265 274 /** Make thread ready 266 275 * … … 275 284 276 285 ASSERT(thread->state != Ready); 286 287 before_thread_is_ready(thread); 277 288 278 289 int i = (thread->priority < RQ_COUNT - 1) ? 279 290 ++thread->priority : thread->priority; 280 281 cpu_t *cpu; 282 if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) { 283 ASSERT(thread->cpu != NULL); 284 cpu = thread->cpu; 285 } else 291 292 /* Check that thread->cpu is set whenever it needs to be. */ 293 ASSERT(thread->cpu != NULL || 294 (!thread->wired && !thread->nomigrate && !thread->fpu_context_engaged)); 295 296 /* 297 * Prefer to run on the same cpu as the last time. Used by wired 298 * threads as well as threads with disabled migration. 299 */ 300 cpu_t *cpu = thread->cpu; 301 if (cpu == NULL) 286 302 cpu = CPU; 287 303 … … 377 393 thread->task = task; 378 394 395 thread->workq = NULL; 396 379 397 thread->fpu_context_exists = false; 380 398 thread->fpu_context_engaged = false; … … 391 409 /* Might depend on previous initialization */ 392 410 thread_create_arch(thread); 411 412 rcu_thread_init(thread); 393 413 394 414 if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH) … … 501 521 */ 502 522 ipc_cleanup(); 503 futex_ cleanup();523 futex_task_cleanup(); 504 524 LOG("Cleanup of task %" PRIu64" completed.", TASK->taskid); 505 525 } … … 521 541 /* Not reached */ 522 542 while (true); 543 } 544 545 /** Interrupts an existing thread so that it may exit as soon as possible. 546 * 547 * Threads that are blocked waiting for a synchronization primitive 548 * are woken up with a return code of ESYNCH_INTERRUPTED if the 549 * blocking call was interruptable. See waitq_sleep_timeout(). 550 * 551 * The caller must guarantee the thread object is valid during the entire 552 * function, eg by holding the threads_lock lock. 553 * 554 * Interrupted threads automatically exit when returning back to user space. 555 * 556 * @param thread A valid thread object. The caller must guarantee it 557 * will remain valid until thread_interrupt() exits. 558 */ 559 void thread_interrupt(thread_t *thread) 560 { 561 ASSERT(thread != NULL); 562 563 irq_spinlock_lock(&thread->lock, true); 564 565 thread->interrupted = true; 566 bool sleeping = (thread->state == Sleeping); 567 568 irq_spinlock_unlock(&thread->lock, true); 569 570 if (sleeping) 571 waitq_interrupt_sleep(thread); 572 } 573 574 /** Returns true if the thread was interrupted. 575 * 576 * @param thread A valid thread object. User must guarantee it will 577 * be alive during the entire call. 578 * @return true if the thread was already interrupted via thread_interrupt(). 579 */ 580 bool thread_interrupted(thread_t *thread) 581 { 582 ASSERT(thread != NULL); 583 584 bool interrupted; 585 586 irq_spinlock_lock(&thread->lock, true); 587 interrupted = thread->interrupted; 588 irq_spinlock_unlock(&thread->lock, true); 589 590 return interrupted; 523 591 } 524 592 -
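The new thread_interrupt()/thread_interrupted() pair supports cooperative termination of kernel threads. Below is a sketch of the intended pattern, assuming a hypothetical worker that blocks interruptably on a wait queue; waitq_sleep_timeout() and SYNCH_FLAGS_INTERRUPTIBLE are used as elsewhere in this changeset, and the caller of thread_interrupt() must keep the thread_t valid (e.g. by holding threads_lock), as noted in the comment above:

```c
#include <arch.h>              /* THREAD */
#include <proc/thread.h>
#include <synch/waitq.h>

/* Hypothetical worker thread body: exits once it has been interrupted. */
static void worker(void *arg)
{
	waitq_t *wq = (waitq_t *) arg;

	while (!thread_interrupted(THREAD)) {
		/* Interruptable sleep; thread_interrupt() wakes the thread up early. */
		waitq_sleep_timeout(wq, 0, SYNCH_FLAGS_INTERRUPTIBLE);

		/* ... process pending work ... */
	}
}

/* Hypothetical shutdown path; thread must remain valid for the whole call. */
static void stop_worker(thread_t *thread)
{
	thread_interrupt(thread);
}
```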
kernel/generic/src/synch/condvar.c
r21799398 rb1c57a8 38 38 #include <synch/condvar.h> 39 39 #include <synch/mutex.h> 40 #include <synch/spinlock.h> 40 41 #include <synch/waitq.h> 41 42 #include <arch.h> … … 90 91 91 92 ipl = waitq_sleep_prepare(&cv->wq); 93 /* Unlock only after the waitq is locked so we don't miss a wakeup. */ 92 94 mutex_unlock(mtx); 93 95 … … 95 97 rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags); 96 98 99 waitq_sleep_finish(&cv->wq, rc, ipl); 100 /* Lock only after releasing the waitq to avoid a possible deadlock. */ 97 101 mutex_lock(mtx); 98 waitq_sleep_finish(&cv->wq, rc, ipl);99 102 100 103 return rc; 101 104 } 102 105 106 /** Wait for the condition to become true with a locked spinlock. 107 * 108 * The function is not aware of irq_spinlock. Therefore do not even 109 * try passing irq_spinlock_t to it. Use _condvar_wait_timeout_irq_spinlock() 110 * instead. 111 * 112 * @param cv Condition variable. 113 * @param lock Locked spinlock. 114 * @param usec Timeout value in microseconds. 115 * @param flags Select mode of operation. 116 * 117 * For exact description of meaning of possible combinations of usec and flags, 118 * see comment for waitq_sleep_timeout(). Note that when 119 * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always 120 * returned. 121 * 122 * @return See comment for waitq_sleep_timeout(). 123 */ 124 int _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock, 125 uint32_t usec, int flags) 126 { 127 int rc; 128 ipl_t ipl; 129 130 ipl = waitq_sleep_prepare(&cv->wq); 131 132 /* Unlock only after the waitq is locked so we don't miss a wakeup. */ 133 spinlock_unlock(lock); 134 135 cv->wq.missed_wakeups = 0; /* Enforce blocking. */ 136 rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags); 137 138 waitq_sleep_finish(&cv->wq, rc, ipl); 139 /* Lock only after releasing the waitq to avoid a possible deadlock. */ 140 spinlock_lock(lock); 141 142 return rc; 143 } 144 145 /** Wait for the condition to become true with a locked irq spinlock. 146 * 147 * @param cv Condition variable. 148 * @param lock Locked irq spinlock. 149 * @param usec Timeout value in microseconds. 150 * @param flags Select mode of operation. 151 * 152 * For exact description of meaning of possible combinations of usec and flags, 153 * see comment for waitq_sleep_timeout(). Note that when 154 * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always 155 * returned. 156 * 157 * @return See comment for waitq_sleep_timeout(). 158 */ 159 int _condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock, 160 uint32_t usec, int flags) 161 { 162 int rc; 163 /* Save spinlock's state so we can restore it correctly later on. */ 164 ipl_t ipl = irq_lock->ipl; 165 bool guard = irq_lock->guard; 166 167 irq_lock->guard = false; 168 169 /* 170 * waitq_prepare() restores interrupts to the current state, 171 * ie disabled. Therefore, interrupts will remain disabled while 172 * it spins waiting for a pending timeout handler to complete. 173 * Although it spins with interrupts disabled there can only 174 * be a pending timeout if we failed to cancel an imminent 175 * timeout (on another cpu) during a wakeup. As a result the 176 * timeout handler is guaranteed to run (it is most likely already 177 * running) and there is no danger of a deadlock. 178 */ 179 rc = _condvar_wait_timeout_spinlock(cv, &irq_lock->lock, usec, flags); 180 181 irq_lock->guard = guard; 182 irq_lock->ipl = ipl; 183 184 return rc; 185 } 186 187 103 188 /** @} 104 189 */ -
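The new _condvar_wait_timeout_irq_spinlock() makes it possible to wait on a condition variable while holding an irq_spinlock_t, which the RCU and work queue code added in this changeset relies on. A minimal usage sketch follows; the ready flag is hypothetical, condvar_initialize(), condvar_signal() and SYNCH_FLAGS_NONE are assumed to be the usual HelenOS synch helpers (they do not appear in this diff), and a usec value of 0 requests an unbounded wait as described for waitq_sleep_timeout():

```c
#include <synch/condvar.h>
#include <synch/spinlock.h>

IRQ_SPINLOCK_STATIC_INITIALIZE(ready_lock);
static condvar_t ready_cv;
static bool ready = false;

static void ready_init(void)
{
	condvar_initialize(&ready_cv);
}

static void wait_until_ready(void)
{
	irq_spinlock_lock(&ready_lock, true);

	while (!ready) {
		/* Drops ready_lock while sleeping; reacquires it before returning. */
		_condvar_wait_timeout_irq_spinlock(&ready_cv, &ready_lock, 0,
		    SYNCH_FLAGS_NONE);
	}

	irq_spinlock_unlock(&ready_lock, true);
}

static void set_ready(void)
{
	irq_spinlock_lock(&ready_lock, true);
	ready = true;
	condvar_signal(&ready_cv);
	irq_spinlock_unlock(&ready_lock, true);
}
```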
kernel/generic/src/synch/futex.c
r21799398 rb1c57a8 1 1 /* 2 2 * Copyright (c) 2006 Jakub Jermar 3 * Copyright (c) 2012 Adam Hraska 3 4 * All rights reserved. 4 5 * … … 34 35 * @file 35 36 * @brief Kernel backend for futexes. 37 * 38 * Kernel futex objects are stored in a global hash table futex_ht 39 * where the physical address of the futex variable (futex_t.paddr) 40 * is used as the lookup key. As a result multiple address spaces 41 * may share the same futex variable. 42 * 43 * A kernel futex object is created the first time a task accesses 44 * the futex (having a futex variable at a physical address not 45 * encountered before). Futex object's lifetime is governed by 46 * a reference count that represents the number of all the different 47 * user space virtual addresses from all tasks that map to the 48 * physical address of the futex variable. A futex object is freed 49 * when the last task having accessed the futex exits. 50 * 51 * Each task keeps track of the futex objects it accessed in a list 52 * of pointers (futex_ptr_t, task->futex_list) to the different futex 53 * objects. 54 * 55 * To speed up translation of futex variables' virtual addresses 56 * to their physical addresses, futex pointers accessed by the 57 * task are furthermore stored in a concurrent hash table (CHT, 58 * task->futexes->ht). A single lookup without locks or accesses 59 * to the page table translates a futex variable's virtual address 60 * into its futex kernel object. 36 61 */ 37 62 … … 39 64 #include <synch/mutex.h> 40 65 #include <synch/spinlock.h> 66 #include <synch/rcu.h> 41 67 #include <mm/frame.h> 42 68 #include <mm/page.h> … … 46 72 #include <genarch/mm/page_pt.h> 47 73 #include <genarch/mm/page_ht.h> 74 #include <adt/cht.h> 48 75 #include <adt/hash_table.h> 49 76 #include <adt/list.h> … … 52 79 #include <panic.h> 53 80 #include <errno.h> 54 #include <print.h>55 81 56 82 #define FUTEX_HT_SIZE 1024 /* keep it a power of 2 */ 57 83 58 static void futex_initialize(futex_t *futex); 59 60 static futex_t *futex_find(uintptr_t paddr); 84 /** Task specific pointer to a global kernel futex object. */ 85 typedef struct futex_ptr { 86 /** CHT link. */ 87 cht_link_t cht_link; 88 /** List of all futex pointers used by the task. */ 89 link_t all_link; 90 /** Kernel futex object. */ 91 futex_t *futex; 92 /** User space virtual address of the futex variable in the task. */ 93 uintptr_t uaddr; 94 } futex_ptr_t; 95 96 97 static void destroy_task_cache(work_t *work); 98 99 static void futex_initialize(futex_t *futex, uintptr_t paddr); 100 static void futex_add_ref(futex_t *futex); 101 static void futex_release_ref(futex_t *futex); 102 static void futex_release_ref_locked(futex_t *futex); 103 104 static futex_t *get_futex(uintptr_t uaddr); 105 static futex_t *find_cached_futex(uintptr_t uaddr); 106 static futex_t *get_and_cache_futex(uintptr_t phys_addr, uintptr_t uaddr); 107 static bool find_futex_paddr(uintptr_t uaddr, uintptr_t *phys_addr); 108 61 109 static size_t futex_ht_hash(sysarg_t *key); 62 110 static bool futex_ht_compare(sysarg_t *key, size_t keys, link_t *item); 63 111 static void futex_ht_remove_callback(link_t *item); 64 112 65 /** 66 * Mutex protecting global futex hash table. 67 * It is also used to serialize access to all futex_t structures. 68 * Must be acquired before the task futex B+tree lock. 69 */ 70 static mutex_t futex_ht_lock; 71 72 /** Futex hash table. 
*/ 113 static size_t task_fut_ht_hash(const cht_link_t *link); 114 static size_t task_fut_ht_key_hash(void *key); 115 static bool task_fut_ht_equal(const cht_link_t *item1, const cht_link_t *item2); 116 static bool task_fut_ht_key_equal(void *key, const cht_link_t *item); 117 118 119 /** Mutex protecting the global futex hash table. 120 * 121 * Acquire task specific TASK->futex_list_lock before this mutex. 122 */ 123 SPINLOCK_STATIC_INITIALIZE_NAME(futex_ht_lock, "futex-ht-lock"); 124 125 /** Global kernel futex hash table. Lock futex_ht_lock before accessing. 126 * 127 * Physical address of the futex variable is the lookup key. 128 */ 73 129 static hash_table_t futex_ht; 74 130 75 /** Futex hash table operations. */131 /** Global kernel futex hash table operations. */ 76 132 static hash_table_operations_t futex_ht_ops = { 77 133 .hash = futex_ht_hash, … … 80 136 }; 81 137 138 /** Task futex cache CHT operations. */ 139 static cht_ops_t task_futex_ht_ops = { 140 .hash = task_fut_ht_hash, 141 .key_hash = task_fut_ht_key_hash, 142 .equal = task_fut_ht_equal, 143 .key_equal = task_fut_ht_key_equal, 144 .remove_callback = NULL 145 }; 146 82 147 /** Initialize futex subsystem. */ 83 148 void futex_init(void) 84 149 { 85 mutex_initialize(&futex_ht_lock, MUTEX_PASSIVE);86 150 hash_table_create(&futex_ht, FUTEX_HT_SIZE, 1, &futex_ht_ops); 87 151 } 88 152 89 /** Initialize kernel futex structure. 90 * 91 * @param futex Kernel futex structure. 92 */ 93 void futex_initialize(futex_t *futex) 153 /** Initializes the futex structures for the new task. */ 154 void futex_task_init(struct task *task) 155 { 156 task->futexes = malloc(sizeof(struct futex_cache), 0); 157 158 cht_create(&task->futexes->ht, 0, 0, 0, true, &task_futex_ht_ops); 159 160 list_initialize(&task->futexes->list); 161 spinlock_initialize(&task->futexes->list_lock, "futex-list-lock"); 162 } 163 164 /** Destroys the futex structures for the dying task. */ 165 void futex_task_deinit(task_t *task) 166 { 167 /* Interrupts are disabled so we must not block (cannot run cht_destroy). */ 168 if (interrupts_disabled()) { 169 /* Invoke the blocking cht_destroy in the background. */ 170 workq_global_enqueue_noblock(&task->futexes->destroy_work, 171 destroy_task_cache); 172 } else { 173 /* We can block. Invoke cht_destroy in this thread. */ 174 destroy_task_cache(&task->futexes->destroy_work); 175 } 176 } 177 178 /** Deallocates a task's CHT futex cache (must already be empty). */ 179 static void destroy_task_cache(work_t *work) 180 { 181 struct futex_cache *cache = 182 member_to_inst(work, struct futex_cache, destroy_work); 183 184 /* 185 * Destroy the cache before manually freeing items of the cache in case 186 * table resize is in progress. 187 */ 188 cht_destroy_unsafe(&cache->ht); 189 190 /* Manually free futex_ptr cache items. */ 191 list_foreach_safe(cache->list, cur_link, next_link) { 192 futex_ptr_t *fut_ptr = member_to_inst(cur_link, futex_ptr_t, all_link); 193 194 list_remove(cur_link); 195 free(fut_ptr); 196 } 197 198 free(cache); 199 } 200 201 /** Remove references from futexes known to the current task. */ 202 void futex_task_cleanup(void) 203 { 204 struct futex_cache *futexes = TASK->futexes; 205 206 /* All threads of this task have terminated. This is the last thread. */ 207 spinlock_lock(&futexes->list_lock); 208 209 list_foreach_safe(futexes->list, cur_link, next_link) { 210 futex_ptr_t *fut_ptr = member_to_inst(cur_link, futex_ptr_t, all_link); 211 212 /* 213 * The function is free to free the futex. 
All other threads of this 214 * task have already terminated, so they have also definitely 215 * exited their CHT futex cache protecting rcu reader sections. 216 * Moreover release_ref() only frees the futex if this is the 217 * last task referencing the futex. Therefore, only threads 218 * of this task may have referenced the futex if it is to be freed. 219 */ 220 futex_release_ref_locked(fut_ptr->futex); 221 } 222 223 spinlock_unlock(&futexes->list_lock); 224 } 225 226 227 /** Initialize the kernel futex structure. 228 * 229 * @param futex Kernel futex structure. 230 * @param paddr Physical address of the futex variable. 231 */ 232 static void futex_initialize(futex_t *futex, uintptr_t paddr) 94 233 { 95 234 waitq_initialize(&futex->wq); 96 235 link_initialize(&futex->ht_link); 97 futex->paddr = 0;236 futex->paddr = paddr; 98 237 futex->refcount = 1; 238 } 239 240 /** Increments the counter of tasks referencing the futex. */ 241 static void futex_add_ref(futex_t *futex) 242 { 243 ASSERT(spinlock_locked(&futex_ht_lock)); 244 ASSERT(0 < futex->refcount); 245 ++futex->refcount; 246 } 247 248 /** Decrements the counter of tasks referencing the futex. May free the futex.*/ 249 static void futex_release_ref(futex_t *futex) 250 { 251 ASSERT(spinlock_locked(&futex_ht_lock)); 252 ASSERT(0 < futex->refcount); 253 254 --futex->refcount; 255 256 if (0 == futex->refcount) { 257 hash_table_remove(&futex_ht, &futex->paddr, 1); 258 } 259 } 260 261 /** Decrements the counter of tasks referencing the futex. May free the futex.*/ 262 static void futex_release_ref_locked(futex_t *futex) 263 { 264 spinlock_lock(&futex_ht_lock); 265 futex_release_ref(futex); 266 spinlock_unlock(&futex_ht_lock); 267 } 268 269 /** Returns a futex for the virtual address @a uaddr (or creates one). */ 270 static futex_t *get_futex(uintptr_t uaddr) 271 { 272 futex_t *futex = find_cached_futex(uaddr); 273 274 if (futex) 275 return futex; 276 277 uintptr_t paddr; 278 279 if (!find_futex_paddr(uaddr, &paddr)) { 280 return 0; 281 } 282 283 return get_and_cache_futex(paddr, uaddr); 284 } 285 286 287 /** Finds the physical address of the futex variable. */ 288 static bool find_futex_paddr(uintptr_t uaddr, uintptr_t *paddr) 289 { 290 spinlock_lock(&futex_ht_lock); 291 page_table_lock(AS, false); 292 293 bool found = false; 294 pte_t *t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), true); 295 296 if (t && PTE_VALID(t) && PTE_PRESENT(t)) { 297 found = true; 298 *paddr = PTE_GET_FRAME(t) + (uaddr - ALIGN_DOWN(uaddr, PAGE_SIZE)); 299 } 300 301 page_table_unlock(AS, false); 302 spinlock_unlock(&futex_ht_lock); 303 304 return found; 305 } 306 307 /** Returns the futex cached in this task with the virtual address uaddr. */ 308 static futex_t *find_cached_futex(uintptr_t uaddr) 309 { 310 cht_read_lock(); 311 312 futex_t *futex; 313 cht_link_t *futex_ptr_link = cht_find_lazy(&TASK->futexes->ht, &uaddr); 314 315 if (futex_ptr_link) { 316 futex_ptr_t *futex_ptr 317 = member_to_inst(futex_ptr_link, futex_ptr_t, cht_link); 318 319 futex = futex_ptr->futex; 320 } else { 321 futex = NULL; 322 } 323 324 cht_read_unlock(); 325 326 return futex; 327 } 328 329 330 /** 331 * Returns a kernel futex for the physical address @a phys_addr and caches 332 * it in this task under the virtual address @a uaddr (if not already cached). 
333 */ 334 static futex_t *get_and_cache_futex(uintptr_t phys_addr, uintptr_t uaddr) 335 { 336 futex_t *futex = malloc(sizeof(futex_t), 0); 337 338 /* 339 * Find the futex object in the global futex table (or insert it 340 * if it is not present). 341 */ 342 spinlock_lock(&futex_ht_lock); 343 344 link_t *fut_link = hash_table_find(&futex_ht, &phys_addr); 345 346 if (fut_link) { 347 free(futex); 348 futex = member_to_inst(fut_link, futex_t, ht_link); 349 futex_add_ref(futex); 350 } else { 351 futex_initialize(futex, phys_addr); 352 hash_table_insert(&futex_ht, &phys_addr, &futex->ht_link); 353 } 354 355 spinlock_unlock(&futex_ht_lock); 356 357 /* 358 * Cache the link to the futex object for this task. 359 */ 360 futex_ptr_t *fut_ptr = malloc(sizeof(futex_ptr_t), 0); 361 cht_link_t *dup_link; 362 363 fut_ptr->futex = futex; 364 fut_ptr->uaddr = uaddr; 365 366 cht_read_lock(); 367 368 /* Cache the mapping from the virtual address to the futex for this task. */ 369 if (cht_insert_unique(&TASK->futexes->ht, &fut_ptr->cht_link, &dup_link)) { 370 spinlock_lock(&TASK->futexes->list_lock); 371 list_append(&fut_ptr->all_link, &TASK->futexes->list); 372 spinlock_unlock(&TASK->futexes->list_lock); 373 } else { 374 /* Another thread of this task beat us to it. Use that mapping instead.*/ 375 free(fut_ptr); 376 futex_release_ref_locked(futex); 377 378 futex_ptr_t *dup = member_to_inst(dup_link, futex_ptr_t, cht_link); 379 futex = dup->futex; 380 } 381 382 cht_read_unlock(); 383 384 return futex; 99 385 } 100 386 … … 109 395 sysarg_t sys_futex_sleep(uintptr_t uaddr) 110 396 { 111 futex_t *futex; 112 uintptr_t paddr; 113 pte_t *t; 114 int rc; 115 116 /* 117 * Find physical address of futex counter. 118 */ 119 page_table_lock(AS, true); 120 t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), false); 121 if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) { 122 page_table_unlock(AS, true); 397 futex_t *futex = get_futex(uaddr); 398 399 if (!futex) 123 400 return (sysarg_t) ENOENT; 124 } 125 paddr = PTE_GET_FRAME(t) + (uaddr - ALIGN_DOWN(uaddr, PAGE_SIZE)); 126 page_table_unlock(AS, true); 127 128 futex = futex_find(paddr); 129 130 #ifdef CONFIG_UDEBUG 131 udebug_stoppable_begin(); 132 #endif 133 rc = waitq_sleep_timeout(&futex->wq, 0, SYNCH_FLAGS_INTERRUPTIBLE); 134 #ifdef CONFIG_UDEBUG 135 udebug_stoppable_end(); 136 #endif 401 402 int rc = waitq_sleep_timeout(&futex->wq, 0, SYNCH_FLAGS_INTERRUPTIBLE); 403 137 404 return (sysarg_t) rc; 138 405 } … … 146 413 sysarg_t sys_futex_wakeup(uintptr_t uaddr) 147 414 { 148 futex_t *futex; 149 uintptr_t paddr; 150 pte_t *t; 151 152 /* 153 * Find physical address of futex counter. 154 */ 155 page_table_lock(AS, true); 156 t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), false); 157 if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) { 158 page_table_unlock(AS, true); 415 futex_t *futex = get_futex(uaddr); 416 417 if (futex) { 418 waitq_wakeup(&futex->wq, WAKEUP_FIRST); 419 return 0; 420 } else { 159 421 return (sysarg_t) ENOENT; 160 422 } 161 paddr = PTE_GET_FRAME(t) + (uaddr - ALIGN_DOWN(uaddr, PAGE_SIZE)); 162 page_table_unlock(AS, true); 163 164 futex = futex_find(paddr); 165 166 waitq_wakeup(&futex->wq, WAKEUP_FIRST); 167 168 return 0; 169 } 170 171 /** Find kernel address of the futex structure corresponding to paddr. 172 * 173 * If the structure does not exist already, a new one is created. 174 * 175 * @param paddr Physical address of the userspace futex counter. 176 * 177 * @return Address of the kernel futex structure. 
178 */ 179 futex_t *futex_find(uintptr_t paddr) 180 { 181 link_t *item; 182 futex_t *futex; 183 btree_node_t *leaf; 184 185 /* 186 * Find the respective futex structure 187 * or allocate new one if it does not exist already. 188 */ 189 mutex_lock(&futex_ht_lock); 190 item = hash_table_find(&futex_ht, &paddr); 191 if (item) { 192 futex = hash_table_get_instance(item, futex_t, ht_link); 193 194 /* 195 * See if the current task knows this futex. 196 */ 197 mutex_lock(&TASK->futexes_lock); 198 if (!btree_search(&TASK->futexes, paddr, &leaf)) { 199 /* 200 * The futex is new to the current task. 201 * Upgrade its reference count and put it to the 202 * current task's B+tree of known futexes. 203 */ 204 futex->refcount++; 205 btree_insert(&TASK->futexes, paddr, futex, leaf); 206 } 207 mutex_unlock(&TASK->futexes_lock); 208 } else { 209 futex = (futex_t *) malloc(sizeof(futex_t), 0); 210 futex_initialize(futex); 211 futex->paddr = paddr; 212 hash_table_insert(&futex_ht, &paddr, &futex->ht_link); 213 214 /* 215 * This is the first task referencing the futex. 216 * It can be directly inserted into its 217 * B+tree of known futexes. 218 */ 219 mutex_lock(&TASK->futexes_lock); 220 btree_insert(&TASK->futexes, paddr, futex, NULL); 221 mutex_unlock(&TASK->futexes_lock); 222 223 } 224 mutex_unlock(&futex_ht_lock); 225 226 return futex; 227 } 423 } 424 228 425 229 426 /** Compute hash index into futex hash table. … … 268 465 } 269 466 270 /** Remove references from futexes known to the current task. */ 271 void futex_cleanup(void) 272 { 273 mutex_lock(&futex_ht_lock); 274 mutex_lock(&TASK->futexes_lock); 275 276 list_foreach(TASK->futexes.leaf_list, leaf_link, btree_node_t, node) { 277 unsigned int i; 278 279 for (i = 0; i < node->keys; i++) { 280 futex_t *ftx; 281 uintptr_t paddr = node->key[i]; 282 283 ftx = (futex_t *) node->value[i]; 284 if (--ftx->refcount == 0) 285 hash_table_remove(&futex_ht, &paddr, 1); 286 } 287 } 288 289 mutex_unlock(&TASK->futexes_lock); 290 mutex_unlock(&futex_ht_lock); 467 /* 468 * Operations of a task's CHT that caches mappings of futex user space 469 * virtual addresses to kernel futex objects. 470 */ 471 472 static size_t task_fut_ht_hash(const cht_link_t *link) 473 { 474 const futex_ptr_t *fut_ptr = member_to_inst(link, futex_ptr_t, cht_link); 475 return fut_ptr->uaddr; 476 } 477 478 static size_t task_fut_ht_key_hash(void *key) 479 { 480 return *(uintptr_t*)key; 481 } 482 483 static bool task_fut_ht_equal(const cht_link_t *item1, const cht_link_t *item2) 484 { 485 const futex_ptr_t *fut_ptr1 = member_to_inst(item1, futex_ptr_t, cht_link); 486 const futex_ptr_t *fut_ptr2 = member_to_inst(item2, futex_ptr_t, cht_link); 487 488 return fut_ptr1->uaddr == fut_ptr2->uaddr; 489 } 490 491 static bool task_fut_ht_key_equal(void *key, const cht_link_t *item) 492 { 493 const futex_ptr_t *fut_ptr = member_to_inst(item, futex_ptr_t, cht_link); 494 uintptr_t uaddr = *(uintptr_t*)key; 495 496 return fut_ptr->uaddr == uaddr; 291 497 } 292 498 -
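The comments above describe the new two-level lookup: a lock-free, task-local CHT cache in front of the global futex_ht hash table. The following flow sketch summarizes the internal behaviour; get_futex() and the other helpers are file-local static functions in futex.c, so this is purely illustrative, not a callable API:

```c
/*
 * First access from this task to the futex variable at uaddr:
 *   get_futex(uaddr)
 *     -> find_cached_futex(uaddr)         returns NULL (nothing cached yet)
 *     -> find_futex_paddr(uaddr, &paddr)  translates uaddr via the page table
 *     -> get_and_cache_futex(paddr, uaddr)
 *          - finds or inserts the futex_t in the global futex_ht
 *            (futex_ht_lock held), bumping its reference count
 *          - caches a futex_ptr_t in TASK->futexes->ht and links it
 *            into TASK->futexes->list
 *
 * Every later access from any thread of the same task:
 *   get_futex(uaddr)
 *     -> find_cached_futex(uaddr)         CHT hit: no locks, no page table
 *                                         walk; returns the same futex_t
 */
```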
kernel/generic/src/synch/mutex.c
r21799398 rb1c57a8 41 41 #include <arch.h> 42 42 #include <stacktrace.h> 43 #include <cpu.h> 44 #include <proc/thread.h> 43 45 44 46 /** Initialize mutex. -
kernel/generic/src/synch/smc.c
r21799398 rb1c57a8 41 41 #include <arch/barrier.h> 42 42 #include <synch/smc.h> 43 #include <mm/as.h> 43 44 44 45 sysarg_t sys_smc_coherence(uintptr_t va, size_t size) -
kernel/generic/src/synch/spinlock.c
r21799398 rb1c57a8 45 45 #include <symtab.h> 46 46 #include <stacktrace.h> 47 #include <cpu.h> 47 48 48 49 #ifdef CONFIG_SMP … … 198 199 * 199 200 * @param lock IRQ spinlock to be locked. 200 * @param irq_dis If true, interrupts are actually disabled 201 * prior locking the spinlock. If false, interrupts 202 * are expected to be already disabled. 201 * @param irq_dis If true, disables interrupts before locking the spinlock. 202 * If false, interrupts are expected to be already disabled. 203 203 * 204 204 */ -
kernel/generic/src/synch/waitq.c
r21799398 rb1c57a8 57 57 58 58 static void waitq_sleep_timed_out(void *); 59 static void waitq_complete_wakeup(waitq_t *); 60 59 61 60 62 /** Initialize wait queue … … 330 332 break; 331 333 default: 334 /* 335 * Wait for a waitq_wakeup() or waitq_unsleep() to complete 336 * before returning from waitq_sleep() to the caller. Otherwise 337 * the caller might expect that the wait queue is no longer used 338 * and deallocate it (although the wakeup on a another cpu has 339 * not yet completed and is using the wait queue). 340 * 341 * Note that we have to do this for ESYNCH_OK_BLOCKED and 342 * ESYNCH_INTERRUPTED, but not necessarily for ESYNCH_TIMEOUT 343 * where the timeout handler stops using the waitq before waking 344 * us up. To be on the safe side, ensure the waitq is not in use 345 * anymore in this case as well. 346 */ 347 waitq_complete_wakeup(wq); 332 348 break; 333 349 } … … 357 373 } else { 358 374 if (PARAM_NON_BLOCKING(flags, usec)) { 359 /* Return immediatel ly instead of going to sleep */375 /* Return immediately instead of going to sleep */ 360 376 return ESYNCH_WOULD_BLOCK; 361 377 } … … 442 458 irq_spinlock_unlock(&wq->lock, true); 443 459 } 460 461 /** If there is a wakeup in progress actively waits for it to complete. 462 * 463 * The function returns once the concurrently running waitq_wakeup() 464 * exits. It returns immediately if there are no concurrent wakeups 465 * at the time. 466 * 467 * Interrupts must be disabled. 468 * 469 * Example usage: 470 * @code 471 * void callback(waitq *wq) 472 * { 473 * // Do something and notify wait_for_completion() that we're done. 474 * waitq_wakeup(wq); 475 * } 476 * void wait_for_completion(void) 477 * { 478 * waitq wg; 479 * waitq_initialize(&wq); 480 * // Run callback() in the background, pass it wq. 481 * do_asynchronously(callback, &wq); 482 * // Wait for callback() to complete its work. 483 * waitq_sleep(&wq); 484 * // callback() completed its work, but it may still be accessing 485 * // wq in waitq_wakeup(). Therefore it is not yet safe to return 486 * // from waitq_sleep() or it would clobber up our stack (where wq 487 * // is stored). waitq_sleep() ensures the wait queue is no longer 488 * // in use by invoking waitq_complete_wakeup() internally. 489 * 490 * // waitq_sleep() returned, it is safe to free wq. 491 * } 492 * @endcode 493 * 494 * @param wq Pointer to a wait queue. 495 */ 496 static void waitq_complete_wakeup(waitq_t *wq) 497 { 498 ASSERT(interrupts_disabled()); 499 500 irq_spinlock_lock(&wq->lock, false); 501 irq_spinlock_unlock(&wq->lock, false); 502 } 503 444 504 445 505 /** Internal SMP- and IRQ-unsafe version of waitq_wakeup() -
kernel/generic/src/syscall/syscall.c
r21799398 rb1c57a8 50 50 #include <synch/futex.h> 51 51 #include <synch/smc.h> 52 #include <synch/smp_memory_barrier.h> 52 53 #include <ddi/ddi.h> 53 54 #include <ipc/event.h> … … 142 143 (syshandler_t) sys_futex_wakeup, 143 144 (syshandler_t) sys_smc_coherence, 145 (syshandler_t) sys_smp_memory_barrier, 146 144 147 145 148 /* Address space related syscalls. */ -
kernel/generic/src/time/clock.c
r21799398 rb1c57a8 212 212 irq_spinlock_unlock(&THREAD->lock, false); 213 213 214 if ( (!ticks) && (!PREEMPTION_DISABLED)) {214 if (ticks == 0 && PREEMPTION_ENABLED) { 215 215 scheduler(); 216 216 #ifdef CONFIG_UDEBUG -
kernel/generic/src/udebug/udebug.c
r21799398 rb1c57a8 44 44 #include <print.h> 45 45 #include <arch.h> 46 #include <proc/task.h> 47 #include <proc/thread.h> 46 48 47 49 /** Initialize udebug part of task structure.