Changeset 235d31d in mainline for kernel/generic
- Timestamp:
- 2014-12-22T17:47:40Z
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 8c7d5ad
- Parents:
- eae91e0 (diff), 759ea0d (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Location:
- kernel/generic
- Files:
-
- 15 added
- 45 edited
-
include/adt/cht.h (added)
-
include/adt/hash.h (added)
-
include/adt/list.h (modified) (4 diffs)
-
include/arch.h (modified) (5 diffs)
-
include/atomic.h (modified) (1 diff)
-
include/compiler/barrier.h (added)
-
include/cpu.h (modified) (2 diffs)
-
include/cpu/cpu_mask.h (added)
-
include/lib/memfnc.h (modified) (1 diff)
-
include/macros.h (modified) (1 diff)
-
include/mm/as.h (modified) (1 diff)
-
include/preemption.h (modified) (1 diff)
-
include/proc/task.h (modified) (3 diffs)
-
include/proc/thread.h (modified) (4 diffs)
-
include/smp/smp_call.h (added)
-
include/synch/condvar.h (modified) (3 diffs)
-
include/synch/futex.h (modified) (1 diff)
-
include/synch/rcu.h (added)
-
include/synch/rcu_types.h (added)
-
include/synch/semaphore.h (modified) (1 diff)
-
include/synch/smp_memory_barrier.h (added)
-
include/synch/spinlock.h (modified) (3 diffs)
-
include/synch/workqueue.h (added)
-
src/adt/cht.c (added)
-
src/adt/list.c (modified) (1 diff)
-
src/console/chardev.c (modified) (1 diff)
-
src/console/cmd.c (modified) (5 diffs)
-
src/console/console.c (modified) (1 diff)
-
src/console/kconsole.c (modified) (1 diff)
-
src/cpu/cpu.c (modified) (2 diffs)
-
src/cpu/cpu_mask.c (added)
-
src/debug/panic.c (modified) (1 diff)
-
src/interrupt/interrupt.c (modified) (1 diff)
-
src/ipc/kbox.c (modified) (1 diff)
-
src/lib/str.c (modified) (2 diffs)
-
src/log/log.c (modified) (1 diff)
-
src/main/kinit.c (modified) (2 diffs)
-
src/main/main.c (modified) (3 diffs)
-
src/main/shutdown.c (modified) (1 diff)
-
src/mm/frame.c (modified) (1 diff)
-
src/mm/km.c (modified) (1 diff)
-
src/mm/slab.c (modified) (1 diff)
-
src/preempt/preemption.c (modified) (1 diff)
-
src/proc/scheduler.c (modified) (7 diffs)
-
src/proc/task.c (modified) (5 diffs)
-
src/proc/the.c (modified) (3 diffs)
-
src/proc/thread.c (modified) (7 diffs)
-
src/smp/smp_call.c (added)
-
src/synch/condvar.c (modified) (3 diffs)
-
src/synch/futex.c (modified) (9 diffs)
-
src/synch/mutex.c (modified) (1 diff)
-
src/synch/rcu.c (added)
-
src/synch/smc.c (modified) (1 diff)
-
src/synch/smp_memory_barrier.c (added)
-
src/synch/spinlock.c (modified) (2 diffs)
-
src/synch/waitq.c (modified) (4 diffs)
-
src/synch/workqueue.c (added)
-
src/syscall/syscall.c (modified) (2 diffs)
-
src/time/clock.c (modified) (1 diff)
-
src/udebug/udebug.c (modified) (1 diff)
-
kernel/generic/include/adt/list.h
reae91e0 r235d31d 52 52 } list_t; 53 53 54 55 extern int list_member(const link_t *, const list_t *); 56 extern void list_splice(list_t *, link_t *); 57 extern unsigned int list_count(const list_t *); 58 59 54 60 /** Declare and initialize statically allocated list. 55 61 * … … 80 86 _link != &(list).head; _link = _link->prev) 81 87 88 /** Unlike list_foreach(), allows removing items while traversing a list. 89 * 90 * @code 91 * list_t mylist; 92 * typedef struct item { 93 * int value; 94 * link_t item_link; 95 * } item_t; 96 * 97 * //.. 98 * 99 * // Print each list element's value and remove the element from the list. 100 * list_foreach_safe(mylist, cur_link, next_link) { 101 * item_t *cur_item = list_get_instance(cur_link, item_t, item_link); 102 * printf("%d\n", cur_item->value); 103 * list_remove(cur_link); 104 * } 105 * @endcode 106 * 107 * @param list List to traverse. 108 * @param iterator Iterator to the current element of the list. 109 * The item this iterator points may be safely removed 110 * from the list. 111 * @param next_iter Iterator to the next element of the list. 112 */ 113 #define list_foreach_safe(list, iterator, next_iter) \ 114 for (link_t *iterator = (list).head.next, \ 115 *next_iter = iterator->next; \ 116 iterator != &(list).head; \ 117 iterator = next_iter, next_iter = iterator->next) 118 119 82 120 #define assert_link_not_used(link) \ 83 121 ASSERT(!link_used(link)) … … 289 327 { 290 328 headless_list_split_or_concat(part1, part2); 329 } 330 331 /** Concatenate two lists 332 * 333 * Concatenate lists @a list1 and @a list2, producing a single 334 * list @a list1 containing items from both (in @a list1, @a list2 335 * order) and empty list @a list2. 336 * 337 * @param list1 First list and concatenated output 338 * @param list2 Second list and empty output. 339 * 340 */ 341 NO_TRACE static inline void list_concat(list_t *list1, list_t *list2) 342 { 343 list_splice(list2, list1->head.prev); 291 344 } 292 345 … … 340 393 } 341 394 342 extern int list_member(const link_t *, const list_t *);343 extern void list_concat(list_t *, list_t *);344 extern unsigned int list_count(const list_t *);345 346 395 #endif 347 396 -
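The new list_splice() moves all items of one list after a given position in another list, and list_concat() is now a thin inline wrapper over it. A minimal usage sketch; the merge helper and its argument names are illustrative, not part of the changeset:

#include <adt/list.h>

/* Both lists are assumed to have been set up with list_initialize(). */
static void merge_ready_queues(list_t *ready_high, list_t *ready_low)
{
	/* Append all items of ready_low to the tail of ready_high;
	 * ready_low is left empty afterwards (list_concat() is a thin
	 * wrapper over list_splice()). */
	list_concat(ready_high, ready_low);
}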
kernel/generic/include/arch.h
reae91e0 r235d31d 36 36 #define KERN_ARCH_H_ 37 37 38 #include <arch/arch.h> 39 #include < proc/thread.h>40 #include < proc/task.h>41 #include <mm/as.h> 38 #include <arch/arch.h> /* arch_pre_main() */ 39 #include <arch/asm.h> /* get_stack_base() */ 40 #include <config.h> 41 42 42 43 43 /* … … 49 49 #define THE ((the_t * )(get_stack_base())) 50 50 51 #define CPU THE->cpu52 #define THREAD THE->thread53 #define TASK THE->task54 #define AS THE->as55 #define PREEMPTION_DISABLED THE->preemption_disabled56 51 #define MAGIC UINT32_C(0xfacefeed) 57 52 … … 62 57 ((THE->task) ? (THE->task->container) : (DEFAULT_CONTAINER)) 63 58 59 /* Fwd decl. to avoid include hell. */ 60 struct thread; 61 struct task; 62 struct cpu; 63 struct as; 64 64 65 /** 65 66 * For each possible kernel stack, structure … … 68 69 */ 69 70 typedef struct { 70 size_t preemption_disabled; /**< Preemption disabled counter. */ 71 thread_t *thread; /**< Current thread. */ 72 task_t *task; /**< Current task. */ 73 cpu_t *cpu; /**< Executing cpu. */ 74 as_t *as; /**< Current address space. */ 75 uint32_t magic; /**< Magic value */ 71 size_t preemption; /**< Preemption disabled counter and flag. */ 72 #ifdef RCU_PREEMPT_A 73 size_t rcu_nesting; /**< RCU nesting count and flag. */ 74 #endif 75 struct thread *thread; /**< Current thread. */ 76 struct task *task; /**< Current task. */ 77 struct cpu *cpu; /**< Executing cpu. */ 78 struct as *as; /**< Current address space. */ 79 uint32_t magic; /**< Magic value */ 76 80 } the_t; 77 81 … … 91 95 extern void *arch_construct_function(fncptr_t *, void *, void *); 92 96 97 93 98 #endif 94 99 -
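The CPU/THREAD/TASK/AS shortcuts now live in their respective headers and the_t only forward-declares the pointed-to structures, which breaks the old include cycle. A hedged sketch of what that means for a user of these macros; the function itself is purely illustrative:

#include <arch.h>        /* THE, MAGIC */
#include <cpu.h>         /* CPU (ie THE->cpu) now comes from here */
#include <proc/thread.h> /* THREAD now comes from here */

static bool running_in_valid_context(void)
{
	/* THE points at the per-stack bookkeeping structure. */
	return THE->magic == MAGIC && THREAD != NULL;
}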
kernel/generic/include/atomic.h
reae91e0 r235d31d 53 53 } 54 54 55 56 /* 57 * If the architecture does not provide operations that are atomic 58 * only with respect to the local cpu (eg exception handlers) and 59 * not other cpus, implement these cpu local atomic operations with 60 * full blown smp-safe atomics. 61 */ 62 #ifndef local_atomic_exchange 63 #define local_atomic_exchange(var_addr, new_val) \ 64 __atomic_exchange_n((var_addr), (new_val), __ATOMIC_RELAXED) 65 #endif 66 67 68 55 69 #endif 56 70 -
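The new local_atomic_exchange() is meant for values that only need atomicity against the local cpu's exception handlers; architectures without a cheaper primitive fall back to a relaxed smp-safe exchange. A small illustrative sketch, with a made-up flag variable:

#include <atomic.h>

/* A flag that is only ever touched by code running on the owning cpu
 * and by that cpu's exception handlers. */
static size_t pending_flag;

static size_t take_pending_flag(void)
{
	/* Atomic with respect to local exception handlers; falls back to a
	 * full smp-safe exchange when the architecture defines nothing better. */
	return local_atomic_exchange(&pending_flag, 0);
}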
kernel/generic/include/cpu.h
reae91e0 r235d31d 38 38 #include <mm/tlb.h> 39 39 #include <synch/spinlock.h> 40 #include <synch/rcu_types.h> 40 41 #include <proc/scheduler.h> 41 42 #include <arch/cpu.h> 42 43 #include <arch/context.h> 44 #include <adt/list.h> 45 #include <arch.h> 46 47 #define CPU THE->cpu 48 43 49 44 50 /** CPU structure. … … 94 100 95 101 /** 102 * SMP calls to invoke on this CPU. 103 */ 104 SPINLOCK_DECLARE(smp_calls_lock); 105 list_t smp_pending_calls; 106 107 /** RCU per-cpu data. Uses own locking. */ 108 rcu_cpu_data_t rcu; 109 110 /** 96 111 * Stack used by scheduler when there is no running thread. 97 112 */ -
kernel/generic/include/lib/memfnc.h
reae91e0 r235d31d 50 50 ATTRIBUTE_OPTIMIZE("-fno-tree-loop-distribute-patterns") DO_NOT_DISCARD; 51 51 52 #define alloca(size) __builtin_alloca((size)) 53 52 54 #endif 53 55 -
kernel/generic/include/macros.h
reae91e0 r235d31d 157 157 }) 158 158 159 160 #ifndef member_to_inst 161 #define member_to_inst(ptr_member, type, member_identif) \ 162 ((type*) (((void*)(ptr_member)) - ((void*)&(((type*)0)->member_identif)))) 163 #endif 164 165 159 166 #endif 160 167 -
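member_to_inst() converts a pointer to an embedded member back into a pointer to the enclosing structure; the futex code below uses it heavily for cht_link_t members. An illustrative sketch with a made-up item type:

#include <macros.h>
#include <adt/list.h>

typedef struct {
	int value;
	link_t link;
} item_t;

static item_t *item_of(link_t *lnk)
{
	/* Recover the item_t that contains this link_t. */
	return member_to_inst(lnk, item_t, link);
}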
kernel/generic/include/mm/as.h
reae91e0 r235d31d 48 48 #include <adt/btree.h> 49 49 #include <lib/elf.h> 50 #include <arch.h> 51 52 #define AS THE->as 53 50 54 51 55 /** -
kernel/generic/include/preemption.h
reae91e0 r235d31d 36 36 #define KERN_PREEMPTION_H_ 37 37 38 extern void preemption_disable(void); 39 extern void preemption_enable(void); 38 #include <arch.h> 39 #include <compiler/barrier.h> 40 #include <debug.h> 41 42 #define PREEMPTION_INC (1 << 0) 43 #define PREEMPTION_DISABLED (PREEMPTION_INC <= THE->preemption) 44 #define PREEMPTION_ENABLED (!PREEMPTION_DISABLED) 45 46 /** Increment preemption disabled counter. */ 47 #define preemption_disable() \ 48 do { \ 49 THE->preemption += PREEMPTION_INC; \ 50 compiler_barrier(); \ 51 } while (0) 52 53 /** Restores preemption but never reschedules. */ 54 #define preemption_enable() \ 55 do { \ 56 ASSERT(PREEMPTION_DISABLED); \ 57 compiler_barrier(); \ 58 THE->preemption -= PREEMPTION_INC; \ 59 } while (0) 60 40 61 41 62 #endif -
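preemption_disable()/preemption_enable() are now header-only macros that adjust THE->preemption with compiler barriers. A minimal sketch of the usual pairing around cpu-local work; the wrapper function is illustrative:

#include <preemption.h>

static void with_cpu_pinned(void (*fn)(void))
{
	preemption_disable();   /* Bumps THE->preemption; no reschedule can occur. */
	fn();                   /* Runs without migrating to another cpu. */
	preemption_enable();    /* Asserts PREEMPTION_DISABLED, then decrements. */
}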
kernel/generic/include/proc/task.h
reae91e0 r235d31d 43 43 #include <synch/mutex.h> 44 44 #include <synch/futex.h> 45 #include <synch/workqueue.h> 45 46 #include <adt/avl.h> 46 47 #include <adt/btree.h> 48 #include <adt/cht.h> 47 49 #include <adt/list.h> 48 50 #include <security/cap.h> … … 57 59 #include <mm/as.h> 58 60 #include <abi/sysinfo.h> 61 #include <arch.h> 62 63 #define TASK THE->task 64 59 65 60 66 struct thread; … … 123 129 task_arch_t arch; 124 130 125 /** 126 * Serializes access to the B+tree of task's futexes. This mutex is 127 * independent on the task spinlock. 128 */ 129 mutex_t futexes_lock; 130 /** B+tree of futexes referenced by this task. */ 131 btree_t futexes; 131 struct futex_cache { 132 /** CHT mapping virtual addresses of futex variables to futex objects.*/ 133 cht_t ht; 134 /** Serializes access to futex_list.*/ 135 SPINLOCK_DECLARE(list_lock); 136 /** List of all futexes accesses by this task. */ 137 list_t list; 138 work_t destroy_work; 139 } *futexes; 132 140 133 141 /** Accumulated accounting. */ -
kernel/generic/include/proc/thread.h
reae91e0 r235d31d 41 41 #include <cpu.h> 42 42 #include <synch/spinlock.h> 43 #include <synch/rcu_types.h> 43 44 #include <adt/avl.h> 44 45 #include <mm/slab.h> … … 48 49 #include <udebug/udebug.h> 49 50 #include <abi/sysinfo.h> 51 #include <arch.h> 52 53 54 #define THREAD THE->thread 50 55 51 56 #define THREAD_NAME_BUFLEN 20 … … 180 185 /** Thread ID. */ 181 186 thread_id_t tid; 187 188 /** Work queue this thread belongs to or NULL. Immutable. */ 189 struct work_queue *workq; 190 /** Links work queue threads. Protected by workq->lock. */ 191 link_t workq_link; 192 /** True if the worker was blocked and is not running. Use thread->lock. */ 193 bool workq_blocked; 194 /** True if the worker will block in order to become idle. Use workq->lock. */ 195 bool workq_idling; 196 197 /** RCU thread related data. Protected by its own locks. */ 198 rcu_thread_data_t rcu; 182 199 183 200 /** Architecture-specific data. */ … … 217 234 extern void thread_ready(thread_t *); 218 235 extern void thread_exit(void) __attribute__((noreturn)); 236 extern void thread_interrupt(thread_t *); 237 extern bool thread_interrupted(thread_t *); 219 238 220 239 #ifndef thread_create_arch -
kernel/generic/include/synch/condvar.h
reae91e0 r235d31d 39 39 #include <synch/waitq.h> 40 40 #include <synch/mutex.h> 41 #include <synch/spinlock.h> 41 42 #include <abi/synch.h> 42 43 … … 50 51 _condvar_wait_timeout((cv), (mtx), (usec), SYNCH_FLAGS_NONE) 51 52 53 #ifdef CONFIG_SMP 54 #define _condvar_wait_timeout_spinlock(cv, lock, usec, flags) \ 55 _condvar_wait_timeout_spinlock_impl((cv), (lock), (usec), (flags)) 56 #else 57 #define _condvar_wait_timeout_spinlock(cv, lock, usec, flags) \ 58 _condvar_wait_timeout_spinlock_impl((cv), NULL, (usec), (flags)) 59 #endif 60 52 61 extern void condvar_initialize(condvar_t *cv); 53 62 extern void condvar_signal(condvar_t *cv); … … 55 64 extern int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec, 56 65 int flags); 66 extern int _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock, 67 uint32_t usec, int flags); 68 extern int _condvar_wait_timeout_irq_spinlock(condvar_t *cv, 69 irq_spinlock_t *irq_lock, uint32_t usec, int flags); 70 57 71 58 72 #endif -
kernel/generic/include/synch/futex.h
reae91e0 r235d31d 55 55 extern sysarg_t sys_futex_wakeup(uintptr_t); 56 56 57 extern void futex_cleanup(void); 57 extern void futex_task_cleanup(void); 58 extern void futex_task_init(struct task *); 59 extern void futex_task_deinit(struct task *); 58 60 59 61 #endif -
kernel/generic/include/synch/semaphore.h
reae91e0 r235d31d 53 53 _semaphore_down_timeout((s), (usec), SYNCH_FLAGS_NONE) 54 54 55 #define semaphore_down_interruptable(s) \ 56 (ESYNCH_INTERRUPTED != _semaphore_down_timeout((s), SYNCH_NO_TIMEOUT, \ 57 SYNCH_FLAGS_INTERRUPTIBLE)) 58 55 59 extern void semaphore_initialize(semaphore_t *, int); 56 60 extern int _semaphore_down_timeout(semaphore_t *, uint32_t, unsigned int); -
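semaphore_down_interruptable() blocks without a timeout but returns false if the sleep was interrupted (eg by thread_interrupt()). A minimal sketch, assuming the semaphore was initialized elsewhere:

#include <synch/semaphore.h>

static bool wait_for_work(semaphore_t *work_sem)
{
	/* true  - the semaphore was acquired,
	 * false - the sleep was interrupted and no work should be done. */
	return semaphore_down_interruptable(work_sem);
}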
kernel/generic/include/synch/spinlock.h
reae91e0 r235d31d 45 45 #ifdef CONFIG_SMP 46 46 47 typedef struct {47 typedef struct spinlock { 48 48 atomic_t val; 49 49 … … 163 163 /* On UP systems, spinlocks are effectively left out. */ 164 164 165 /* Allow the use of spinlock_t as an incomplete type. */ 166 typedef struct spinlock spinlock_t; 167 165 168 #define SPINLOCK_DECLARE(name) 166 169 #define SPINLOCK_EXTERN(name) … … 177 180 178 181 #define spinlock_lock(lock) preemption_disable() 179 #define spinlock_trylock(lock) ( preemption_disable(), 1)182 #define spinlock_trylock(lock) ({ preemption_disable(); 1; }) 180 183 #define spinlock_unlock(lock) preemption_enable() 181 184 #define spinlock_locked(lock) 1 -
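Making spinlock_t usable as an incomplete type on UP builds lets interfaces take a spinlock_t * regardless of CONFIG_SMP, which the new condvar spinlock variants rely on. A small sketch with a hypothetical structure:

#include <synch/spinlock.h>

/* Compiles on both SMP and UP kernels: only a pointer to the
 * (possibly incomplete) spinlock type is needed here. */
typedef struct event_source {
	spinlock_t *guard;
} event_source_t;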
kernel/generic/src/adt/list.c
reae91e0 r235d31d 68 68 } 69 69 70 /** Concatenate two lists 71 * 72 * Concatenate lists @a list1 and @a list2, producing a single 73 * list @a list1 containing items from both (in @a list1, @a list2 74 * order) and empty list @a list2. 75 * 76 * @param list1 First list and concatenated output 77 * @param list2 Second list and empty output. 78 * 70 /** Moves items of one list into another after the specified item. 71 * 72 * Inserts all items of @a list after item at @a pos in another list. 73 * Both lists may be empty. 74 * 75 * @param list Source list to move after pos. Empty afterwards. 76 * @param pos Source items will be placed after this item. 79 77 */ 80 void list_ concat(list_t *list1, list_t *list2)78 void list_splice(list_t *list, link_t *pos) 81 79 { 82 if (list_empty(list 2))80 if (list_empty(list)) 83 81 return; 84 85 list2->head.next->prev = list1->head.prev; 86 list2->head.prev->next = &list1->head; 87 list1->head.prev->next = list2->head.next; 88 list1->head.prev = list2->head.prev; 89 list_initialize(list2); 82 83 /* Attach list to destination. */ 84 list->head.next->prev = pos; 85 list->head.prev->next = pos->next; 86 87 /* Link destination list to the added list. */ 88 pos->next->prev = list->head.prev; 89 pos->next = list->head.next; 90 91 list_initialize(list); 90 92 } 91 93 -
kernel/generic/src/console/chardev.c
reae91e0 r235d31d 39 39 #include <print.h> 40 40 #include <func.h> 41 #include < arch.h>41 #include <cpu.h> 42 42 43 43 /** Initialize input character device. -
kernel/generic/src/console/cmd.c
reae91e0 r235d31d 70 70 #include <sysinfo/sysinfo.h> 71 71 #include <symtab.h> 72 #include <synch/workqueue.h> 73 #include <synch/rcu.h> 72 74 #include <errno.h> 73 75 … … 526 528 }; 527 529 530 /* Data and methods for the 'workq' command */ 531 static int cmd_workq(cmd_arg_t *argv); 532 static cmd_info_t workq_info = { 533 .name = "workq", 534 .description = "Show global workq information.", 535 .func = cmd_workq, 536 .argc = 0 537 }; 538 539 /* Data and methods for the 'workq' command */ 540 static int cmd_rcu(cmd_arg_t *argv); 541 static cmd_info_t rcu_info = { 542 .name = "rcu", 543 .description = "Show RCU run-time statistics.", 544 .func = cmd_rcu, 545 .argc = 0 546 }; 547 528 548 /* Data and methods for 'ipc' command */ 529 549 static int cmd_ipc(cmd_arg_t *argv); … … 589 609 &physmem_info, 590 610 &reboot_info, 611 &rcu_info, 591 612 &sched_info, 592 613 &set4_info, … … 599 620 &uptime_info, 600 621 &version_info, 622 &workq_info, 601 623 &zones_info, 602 624 &zone_info, … … 1270 1292 { 1271 1293 sched_print_list(); 1294 return 1; 1295 } 1296 1297 /** Prints information about the global work queue. 1298 * 1299 * @param argv Ignores 1300 * 1301 * @return Always 1 1302 */ 1303 int cmd_workq(cmd_arg_t *argv) 1304 { 1305 workq_global_print_info(); 1306 return 1; 1307 } 1308 1309 /** Prints RCU statistics. 1310 * 1311 * @param argv Ignores 1312 * 1313 * @return Always 1 1314 */ 1315 int cmd_rcu(cmd_arg_t *argv) 1316 { 1317 rcu_print_stat(); 1272 1318 return 1; 1273 1319 } -
kernel/generic/src/console/console.c
reae91e0 r235d31d 53 53 #include <str.h> 54 54 #include <abi/kio.h> 55 #include <mm/frame.h> /* SIZE2FRAMES */ 56 #include <mm/slab.h> /* malloc */ 55 57 56 58 #define KIO_PAGES 8 -
kernel/generic/src/console/kconsole.c
reae91e0 r235d31d 59 59 #include <putchar.h> 60 60 #include <str.h> 61 #include <mm/slab.h> 61 62 62 63 /** Simple kernel console. -
kernel/generic/src/cpu/cpu.c
reae91e0 r235d31d 50 50 #include <sysinfo/sysinfo.h> 51 51 #include <arch/cycle.h> 52 #include <synch/rcu.h> 52 53 53 54 cpu_t *cpus; … … 105 106 cpu_identify(); 106 107 cpu_arch_init(); 108 rcu_cpu_init(); 107 109 } 108 110 -
kernel/generic/src/debug/panic.c
reae91e0 r235d31d 96 96 printf("THE=%p: ", THE); 97 97 if (THE != NULL) { 98 printf("p d=%" PRIun " thread=%p task=%p cpu=%p as=%p"99 " magic=%#" PRIx32 "\n", THE->preemption _disabled,98 printf("pe=%" PRIun " thread=%p task=%p cpu=%p as=%p" 99 " magic=%#" PRIx32 "\n", THE->preemption, 100 100 THE->thread, THE->task, THE->cpu, THE->as, THE->magic); 101 101 -
kernel/generic/src/interrupt/interrupt.c
reae91e0 r235d31d 112 112 } 113 113 114 /* Account CPU usage if it has wakedup from sleep */115 if (CPU ) {114 /* Account CPU usage if it woke up from sleep */ 115 if (CPU && CPU->idle) { 116 116 irq_spinlock_lock(&CPU->lock, false); 117 if (CPU->idle) { 118 uint64_t now = get_cycle(); 119 CPU->idle_cycles += now - CPU->last_cycle; 120 CPU->last_cycle = now; 121 CPU->idle = false; 122 } 117 uint64_t now = get_cycle(); 118 CPU->idle_cycles += now - CPU->last_cycle; 119 CPU->last_cycle = now; 120 CPU->idle = false; 123 121 irq_spinlock_unlock(&CPU->lock, false); 124 122 } -
kernel/generic/src/ipc/kbox.c
reae91e0 r235d31d 44 44 #include <ipc/kbox.h> 45 45 #include <print.h> 46 #include <proc/thread.h> 46 47 47 48 void ipc_kbox_cleanup(void) -
kernel/generic/src/lib/str.c
reae91e0 r235d31d 111 111 #include <debug.h> 112 112 #include <macros.h> 113 #include <mm/slab.h> 113 114 114 115 /** Check the condition if wchar_t is signed */ … … 567 568 /* There must be space for a null terminator in the buffer. */ 568 569 ASSERT(size > 0); 570 ASSERT(src != NULL); 569 571 570 572 size_t src_off = 0; -
kernel/generic/src/log/log.c
reae91e0 r235d31d 53 53 #include <console/console.h> 54 54 #include <abi/log.h> 55 #include <mm/slab.h> 55 56 56 57 #define LOG_PAGES 8 -
kernel/generic/src/main/kinit.c
reae91e0 r235d31d 79 79 #include <synch/waitq.h> 80 80 #include <synch/spinlock.h> 81 #include <synch/workqueue.h> 82 #include <synch/rcu.h> 81 83 82 84 #define ALIVE_CHARS 4 … … 105 107 */ 106 108 thread_detach(THREAD); 107 109 108 110 interrupts_disable(); 111 112 /* Start processing RCU callbacks. RCU is fully functional afterwards. */ 113 rcu_kinit_init(); 114 115 /* 116 * Start processing work queue items. Some may have been queued during boot. 117 */ 118 workq_global_worker_init(); 109 119 110 120 #ifdef CONFIG_SMP -
kernel/generic/src/main/main.c
reae91e0 r235d31d 76 76 #include <synch/waitq.h> 77 77 #include <synch/futex.h> 78 #include <synch/workqueue.h> 79 #include <smp/smp_call.h> 78 80 #include <arch/arch.h> 79 81 #include <arch.h> … … 263 265 264 266 cpu_init(); 265 266 267 calibrate_delay_loop(); 268 arch_post_cpu_init(); 269 270 smp_call_init(); 271 workq_global_init(); 267 272 clock_counter_init(); 268 273 timeout_init(); … … 367 372 void main_ap_separated_stack(void) 368 373 { 374 smp_call_init(); 375 369 376 /* 370 377 * Configure timeouts for this cpu. -
kernel/generic/src/main/shutdown.c
reae91e0 r235d31d 37 37 38 38 #include <arch.h> 39 #include <proc/task.h> 39 40 #include <func.h> 40 41 #include <print.h> -
kernel/generic/src/mm/frame.c
reae91e0 r235d31d 61 61 #include <config.h> 62 62 #include <str.h> 63 #include <proc/thread.h> /* THREAD */ 63 64 64 65 zones_t zones; -
kernel/generic/src/mm/km.c
reae91e0 r235d31d 49 49 #include <macros.h> 50 50 #include <bitops.h> 51 #include <proc/thread.h> 51 52 52 53 static ra_arena_t *km_ni_arena; -
kernel/generic/src/mm/slab.c
reae91e0 r235d31d 114 114 #include <bitops.h> 115 115 #include <macros.h> 116 #include <cpu.h> 116 117 117 118 IRQ_SPINLOCK_STATIC_INITIALIZE(slab_cache_lock); -
kernel/generic/src/preempt/preemption.c
reae91e0 r235d31d 37 37 38 38 #include <preemption.h> 39 #include <arch.h>40 #include <arch/asm.h>41 #include <arch/barrier.h>42 #include <debug.h>43 39 44 /** Increment preemption disabled counter. */45 void preemption_disable(void)46 {47 THE->preemption_disabled++;48 memory_barrier();49 }50 51 /** Decrement preemption disabled counter. */52 void preemption_enable(void)53 {54 ASSERT(PREEMPTION_DISABLED);55 memory_barrier();56 THE->preemption_disabled--;57 }58 40 59 41 /** @} -
kernel/generic/src/proc/scheduler.c
reae91e0 r235d31d 52 52 #include <atomic.h> 53 53 #include <synch/spinlock.h> 54 #include <synch/workqueue.h> 55 #include <synch/rcu.h> 54 56 #include <config.h> 55 57 #include <context.h> … … 64 66 #include <debug.h> 65 67 #include <stacktrace.h> 68 #include <cpu.h> 66 69 67 70 static void scheduler_separated_stack(void); … … 87 90 { 88 91 before_thread_runs_arch(); 92 rcu_before_thread_runs(); 89 93 90 94 #ifdef CONFIG_FPU_LAZY … … 127 131 static void after_thread_ran(void) 128 132 { 133 workq_after_thread_ran(); 134 rcu_after_thread_ran(); 129 135 after_thread_ran_arch(); 130 136 } … … 219 225 goto loop; 220 226 } 227 228 ASSERT(!CPU->idle); 221 229 222 230 unsigned int i; … … 398 406 ASSERT((!THREAD) || (irq_spinlock_locked(&THREAD->lock))); 399 407 ASSERT(CPU != NULL); 408 ASSERT(interrupts_disabled()); 400 409 401 410 /* … … 421 430 422 431 case Exiting: 432 rcu_thread_exiting(); 423 433 repeat: 424 434 if (THREAD->detached) { -
kernel/generic/src/proc/task.c
reae91e0 r235d31d 41 41 #include <mm/slab.h> 42 42 #include <atomic.h> 43 #include <synch/futex.h> 43 44 #include <synch/spinlock.h> 44 45 #include <synch/waitq.h> … … 163 164 164 165 irq_spinlock_initialize(&task->lock, "task_t_lock"); 165 mutex_initialize(&task->futexes_lock, MUTEX_PASSIVE);166 166 167 167 list_initialize(&task->threads); … … 175 175 spinlock_initialize(&task->active_calls_lock, "active_calls_lock"); 176 176 list_initialize(&task->active_calls); 177 177 178 178 #ifdef CONFIG_UDEBUG 179 179 /* Init kbox stuff */ … … 231 231 (void) ipc_phone_connect(&task->phones[0], ipc_phone_0); 232 232 233 btree_create(&task->futexes);233 futex_task_init(task); 234 234 235 235 /* … … 272 272 * Free up dynamically allocated state. 273 273 */ 274 btree_destroy(&task->futexes);274 futex_task_deinit(task); 275 275 276 276 /* -
kernel/generic/src/proc/the.c
reae91e0 r235d31d 43 43 44 44 #include <arch.h> 45 #include <debug.h> 45 46 46 47 /** Initialize THE structure … … 53 54 void the_initialize(the_t *the) 54 55 { 55 the->preemption _disabled= 0;56 the->preemption = 0; 56 57 the->cpu = NULL; 57 58 the->thread = NULL; … … 59 60 the->as = NULL; 60 61 the->magic = MAGIC; 62 #ifdef RCU_PREEMPT_A 63 the->rcu_nesting = 0; 64 #endif 61 65 } 62 66 -
kernel/generic/src/proc/thread.c
reae91e0 r235d31d 46 46 #include <synch/spinlock.h> 47 47 #include <synch/waitq.h> 48 #include <synch/workqueue.h> 49 #include <synch/rcu.h> 48 50 #include <cpu.h> 49 51 #include <str.h> … … 263 265 } 264 266 267 /** Invoked right before thread_ready() readies the thread. thread is locked. */ 268 static void before_thread_is_ready(thread_t *thread) 269 { 270 ASSERT(irq_spinlock_locked(&thread->lock)); 271 workq_before_thread_is_ready(thread); 272 } 273 265 274 /** Make thread ready 266 275 * … … 275 284 276 285 ASSERT(thread->state != Ready); 286 287 before_thread_is_ready(thread); 277 288 278 289 int i = (thread->priority < RQ_COUNT - 1) ? 279 290 ++thread->priority : thread->priority; 280 281 cpu_t *cpu; 282 if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) { 283 ASSERT(thread->cpu != NULL); 284 cpu = thread->cpu; 285 } else 291 292 /* Check that thread->cpu is set whenever it needs to be. */ 293 ASSERT(thread->cpu != NULL || 294 (!thread->wired && !thread->nomigrate && !thread->fpu_context_engaged)); 295 296 /* 297 * Prefer to run on the same cpu as the last time. Used by wired 298 * threads as well as threads with disabled migration. 299 */ 300 cpu_t *cpu = thread->cpu; 301 if (cpu == NULL) 286 302 cpu = CPU; 287 303 … … 377 393 thread->task = task; 378 394 395 thread->workq = NULL; 396 379 397 thread->fpu_context_exists = false; 380 398 thread->fpu_context_engaged = false; … … 391 409 /* Might depend on previous initialization */ 392 410 thread_create_arch(thread); 411 412 rcu_thread_init(thread); 393 413 394 414 if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH) … … 501 521 */ 502 522 ipc_cleanup(); 503 futex_ cleanup();523 futex_task_cleanup(); 504 524 LOG("Cleanup of task %" PRIu64" completed.", TASK->taskid); 505 525 } … … 521 541 /* Not reached */ 522 542 while (true); 543 } 544 545 /** Interrupts an existing thread so that it may exit as soon as possible. 546 * 547 * Threads that are blocked waiting for a synchronization primitive 548 * are woken up with a return code of ESYNCH_INTERRUPTED if the 549 * blocking call was interruptable. See waitq_sleep_timeout(). 550 * 551 * The caller must guarantee the thread object is valid during the entire 552 * function, eg by holding the threads_lock lock. 553 * 554 * Interrupted threads automatically exit when returning back to user space. 555 * 556 * @param thread A valid thread object. The caller must guarantee it 557 * will remain valid until thread_interrupt() exits. 558 */ 559 void thread_interrupt(thread_t *thread) 560 { 561 ASSERT(thread != NULL); 562 563 irq_spinlock_lock(&thread->lock, true); 564 565 thread->interrupted = true; 566 bool sleeping = (thread->state == Sleeping); 567 568 irq_spinlock_unlock(&thread->lock, true); 569 570 if (sleeping) 571 waitq_interrupt_sleep(thread); 572 } 573 574 /** Returns true if the thread was interrupted. 575 * 576 * @param thread A valid thread object. User must guarantee it will 577 * be alive during the entire call. 578 * @return true if the thread was already interrupted via thread_interrupt(). 579 */ 580 bool thread_interrupted(thread_t *thread) 581 { 582 ASSERT(thread != NULL); 583 584 bool interrupted; 585 586 irq_spinlock_lock(&thread->lock, true); 587 interrupted = thread->interrupted; 588 irq_spinlock_unlock(&thread->lock, true); 589 590 return interrupted; 523 591 } 524 592 -
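A hedged sketch of the protocol behind the new thread_interrupt()/thread_interrupted() pair; the worker function is illustrative, and the caller of thread_interrupt() is assumed to keep the thread_t valid (eg by holding threads_lock) for the duration of the call:

#include <proc/thread.h>

/* Worker side: poll for interruption at a safe point and exit cleanly. */
static void worker(void *arg)
{
	while (!thread_interrupted(THREAD)) {
		/* ... do one unit of work; interruptible sleeps return
		 * ESYNCH_INTERRUPTED once thread_interrupt() was called ... */
	}
	thread_exit();
}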
kernel/generic/src/synch/condvar.c
reae91e0 r235d31d 38 38 #include <synch/condvar.h> 39 39 #include <synch/mutex.h> 40 #include <synch/spinlock.h> 40 41 #include <synch/waitq.h> 41 42 #include <arch.h> … … 90 91 91 92 ipl = waitq_sleep_prepare(&cv->wq); 93 /* Unlock only after the waitq is locked so we don't miss a wakeup. */ 92 94 mutex_unlock(mtx); 93 95 … … 95 97 rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags); 96 98 99 waitq_sleep_finish(&cv->wq, rc, ipl); 100 /* Lock only after releasing the waitq to avoid a possible deadlock. */ 97 101 mutex_lock(mtx); 98 waitq_sleep_finish(&cv->wq, rc, ipl);99 102 100 103 return rc; 101 104 } 102 105 106 /** Wait for the condition to become true with a locked spinlock. 107 * 108 * The function is not aware of irq_spinlock. Therefore do not even 109 * try passing irq_spinlock_t to it. Use _condvar_wait_timeout_irq_spinlock() 110 * instead. 111 * 112 * @param cv Condition variable. 113 * @param lock Locked spinlock. 114 * @param usec Timeout value in microseconds. 115 * @param flags Select mode of operation. 116 * 117 * For exact description of meaning of possible combinations of usec and flags, 118 * see comment for waitq_sleep_timeout(). Note that when 119 * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always 120 * returned. 121 * 122 * @return See comment for waitq_sleep_timeout(). 123 */ 124 int _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock, 125 uint32_t usec, int flags) 126 { 127 int rc; 128 ipl_t ipl; 129 130 ipl = waitq_sleep_prepare(&cv->wq); 131 132 /* Unlock only after the waitq is locked so we don't miss a wakeup. */ 133 spinlock_unlock(lock); 134 135 cv->wq.missed_wakeups = 0; /* Enforce blocking. */ 136 rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags); 137 138 waitq_sleep_finish(&cv->wq, rc, ipl); 139 /* Lock only after releasing the waitq to avoid a possible deadlock. */ 140 spinlock_lock(lock); 141 142 return rc; 143 } 144 145 /** Wait for the condition to become true with a locked irq spinlock. 146 * 147 * @param cv Condition variable. 148 * @param lock Locked irq spinlock. 149 * @param usec Timeout value in microseconds. 150 * @param flags Select mode of operation. 151 * 152 * For exact description of meaning of possible combinations of usec and flags, 153 * see comment for waitq_sleep_timeout(). Note that when 154 * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always 155 * returned. 156 * 157 * @return See comment for waitq_sleep_timeout(). 158 */ 159 int _condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock, 160 uint32_t usec, int flags) 161 { 162 int rc; 163 /* Save spinlock's state so we can restore it correctly later on. */ 164 ipl_t ipl = irq_lock->ipl; 165 bool guard = irq_lock->guard; 166 167 irq_lock->guard = false; 168 169 /* 170 * waitq_prepare() restores interrupts to the current state, 171 * ie disabled. Therefore, interrupts will remain disabled while 172 * it spins waiting for a pending timeout handler to complete. 173 * Although it spins with interrupts disabled there can only 174 * be a pending timeout if we failed to cancel an imminent 175 * timeout (on another cpu) during a wakeup. As a result the 176 * timeout handler is guaranteed to run (it is most likely already 177 * running) and there is no danger of a deadlock. 178 */ 179 rc = _condvar_wait_timeout_spinlock(cv, &irq_lock->lock, usec, flags); 180 181 irq_lock->guard = guard; 182 irq_lock->ipl = ipl; 183 184 return rc; 185 } 186 187 103 188 /** @} 104 189 */ -
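A minimal sketch of waiting on a condition guarded by an irq spinlock with the new helper; the lock, condvar and predicate names are illustrative and assumed to be initialized elsewhere:

#include <synch/condvar.h>
#include <synch/spinlock.h>

static irq_spinlock_t queue_lock;
static condvar_t queue_cv;
static bool queue_nonempty;

static void wait_for_item(void)
{
	irq_spinlock_lock(&queue_lock, true);
	while (!queue_nonempty) {
		/* Releases queue_lock while sleeping and reacquires it
		 * before returning, preserving the lock's irq state. */
		_condvar_wait_timeout_irq_spinlock(&queue_cv, &queue_lock,
		    SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
	}
	irq_spinlock_unlock(&queue_lock, true);
}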
kernel/generic/src/synch/futex.c
reae91e0 r235d31d 1 1 /* 2 2 * Copyright (c) 2006 Jakub Jermar 3 * Copyright (c) 2012 Adam Hraska 3 4 * All rights reserved. 4 5 * … … 34 35 * @file 35 36 * @brief Kernel backend for futexes. 37 * 38 * Kernel futex objects are stored in a global hash table futex_ht 39 * where the physical address of the futex variable (futex_t.paddr) 40 * is used as the lookup key. As a result multiple address spaces 41 * may share the same futex variable. 42 * 43 * A kernel futex object is created the first time a task accesses 44 * the futex (having a futex variable at a physical address not 45 * encountered before). Futex object's lifetime is governed by 46 * a reference count that represents the number of all the different 47 * user space virtual addresses from all tasks that map to the 48 * physical address of the futex variable. A futex object is freed 49 * when the last task having accessed the futex exits. 50 * 51 * Each task keeps track of the futex objects it accessed in a list 52 * of pointers (futex_ptr_t, task->futex_list) to the different futex 53 * objects. 54 * 55 * To speed up translation of futex variables' virtual addresses 56 * to their physical addresses, futex pointers accessed by the 57 * task are furthermore stored in a concurrent hash table (CHT, 58 * task->futexes->ht). A single lookup without locks or accesses 59 * to the page table translates a futex variable's virtual address 60 * into its futex kernel object. 36 61 */ 37 62 … … 39 64 #include <synch/mutex.h> 40 65 #include <synch/spinlock.h> 66 #include <synch/rcu.h> 41 67 #include <mm/frame.h> 42 68 #include <mm/page.h> … … 46 72 #include <genarch/mm/page_pt.h> 47 73 #include <genarch/mm/page_ht.h> 74 #include <adt/cht.h> 48 75 #include <adt/hash_table.h> 49 76 #include <adt/list.h> … … 52 79 #include <panic.h> 53 80 #include <errno.h> 54 #include <print.h>55 81 56 82 #define FUTEX_HT_SIZE 1024 /* keep it a power of 2 */ 57 83 58 static void futex_initialize(futex_t *futex); 59 60 static futex_t *futex_find(uintptr_t paddr); 84 /** Task specific pointer to a global kernel futex object. */ 85 typedef struct futex_ptr { 86 /** CHT link. */ 87 cht_link_t cht_link; 88 /** List of all futex pointers used by the task. */ 89 link_t all_link; 90 /** Kernel futex object. */ 91 futex_t *futex; 92 /** User space virtual address of the futex variable in the task. */ 93 uintptr_t uaddr; 94 } futex_ptr_t; 95 96 97 static void destroy_task_cache(work_t *work); 98 99 static void futex_initialize(futex_t *futex, uintptr_t paddr); 100 static void futex_add_ref(futex_t *futex); 101 static void futex_release_ref(futex_t *futex); 102 static void futex_release_ref_locked(futex_t *futex); 103 104 static futex_t *get_futex(uintptr_t uaddr); 105 static futex_t *find_cached_futex(uintptr_t uaddr); 106 static futex_t *get_and_cache_futex(uintptr_t phys_addr, uintptr_t uaddr); 107 static bool find_futex_paddr(uintptr_t uaddr, uintptr_t *phys_addr); 108 61 109 static size_t futex_ht_hash(sysarg_t *key); 62 110 static bool futex_ht_compare(sysarg_t *key, size_t keys, link_t *item); 63 111 static void futex_ht_remove_callback(link_t *item); 64 112 65 /** 66 * Mutex protecting global futex hash table. 67 * It is also used to serialize access to all futex_t structures. 68 * Must be acquired before the task futex B+tree lock. 69 */ 70 static mutex_t futex_ht_lock; 71 72 /** Futex hash table. 
*/ 113 static size_t task_fut_ht_hash(const cht_link_t *link); 114 static size_t task_fut_ht_key_hash(void *key); 115 static bool task_fut_ht_equal(const cht_link_t *item1, const cht_link_t *item2); 116 static bool task_fut_ht_key_equal(void *key, const cht_link_t *item); 117 118 119 /** Mutex protecting the global futex hash table. 120 * 121 * Acquire task specific TASK->futex_list_lock before this mutex. 122 */ 123 SPINLOCK_STATIC_INITIALIZE_NAME(futex_ht_lock, "futex-ht-lock"); 124 125 /** Global kernel futex hash table. Lock futex_ht_lock before accessing. 126 * 127 * Physical address of the futex variable is the lookup key. 128 */ 73 129 static hash_table_t futex_ht; 74 130 75 /** Futex hash table operations. */131 /** Global kernel futex hash table operations. */ 76 132 static hash_table_operations_t futex_ht_ops = { 77 133 .hash = futex_ht_hash, … … 80 136 }; 81 137 138 /** Task futex cache CHT operations. */ 139 static cht_ops_t task_futex_ht_ops = { 140 .hash = task_fut_ht_hash, 141 .key_hash = task_fut_ht_key_hash, 142 .equal = task_fut_ht_equal, 143 .key_equal = task_fut_ht_key_equal, 144 .remove_callback = NULL 145 }; 146 82 147 /** Initialize futex subsystem. */ 83 148 void futex_init(void) 84 149 { 85 mutex_initialize(&futex_ht_lock, MUTEX_PASSIVE);86 150 hash_table_create(&futex_ht, FUTEX_HT_SIZE, 1, &futex_ht_ops); 87 151 } 88 152 89 /** Initialize kernel futex structure. 90 * 91 * @param futex Kernel futex structure. 92 */ 93 void futex_initialize(futex_t *futex) 153 /** Initializes the futex structures for the new task. */ 154 void futex_task_init(struct task *task) 155 { 156 task->futexes = malloc(sizeof(struct futex_cache), 0); 157 158 cht_create(&task->futexes->ht, 0, 0, 0, true, &task_futex_ht_ops); 159 160 list_initialize(&task->futexes->list); 161 spinlock_initialize(&task->futexes->list_lock, "futex-list-lock"); 162 } 163 164 /** Destroys the futex structures for the dying task. */ 165 void futex_task_deinit(task_t *task) 166 { 167 /* Interrupts are disabled so we must not block (cannot run cht_destroy). */ 168 if (interrupts_disabled()) { 169 /* Invoke the blocking cht_destroy in the background. */ 170 workq_global_enqueue_noblock(&task->futexes->destroy_work, 171 destroy_task_cache); 172 } else { 173 /* We can block. Invoke cht_destroy in this thread. */ 174 destroy_task_cache(&task->futexes->destroy_work); 175 } 176 } 177 178 /** Deallocates a task's CHT futex cache (must already be empty). */ 179 static void destroy_task_cache(work_t *work) 180 { 181 struct futex_cache *cache = 182 member_to_inst(work, struct futex_cache, destroy_work); 183 184 /* 185 * Destroy the cache before manually freeing items of the cache in case 186 * table resize is in progress. 187 */ 188 cht_destroy_unsafe(&cache->ht); 189 190 /* Manually free futex_ptr cache items. */ 191 list_foreach_safe(cache->list, cur_link, next_link) { 192 futex_ptr_t *fut_ptr = member_to_inst(cur_link, futex_ptr_t, all_link); 193 194 list_remove(cur_link); 195 free(fut_ptr); 196 } 197 198 free(cache); 199 } 200 201 /** Remove references from futexes known to the current task. */ 202 void futex_task_cleanup(void) 203 { 204 struct futex_cache *futexes = TASK->futexes; 205 206 /* All threads of this task have terminated. This is the last thread. */ 207 spinlock_lock(&futexes->list_lock); 208 209 list_foreach_safe(futexes->list, cur_link, next_link) { 210 futex_ptr_t *fut_ptr = member_to_inst(cur_link, futex_ptr_t, all_link); 211 212 /* 213 * The function is free to free the futex. 
All other threads of this 214 * task have already terminated, so they have also definitely 215 * exited their CHT futex cache protecting rcu reader sections. 216 * Moreover release_ref() only frees the futex if this is the 217 * last task referencing the futex. Therefore, only threads 218 * of this task may have referenced the futex if it is to be freed. 219 */ 220 futex_release_ref_locked(fut_ptr->futex); 221 } 222 223 spinlock_unlock(&futexes->list_lock); 224 } 225 226 227 /** Initialize the kernel futex structure. 228 * 229 * @param futex Kernel futex structure. 230 * @param paddr Physical address of the futex variable. 231 */ 232 static void futex_initialize(futex_t *futex, uintptr_t paddr) 94 233 { 95 234 waitq_initialize(&futex->wq); 96 235 link_initialize(&futex->ht_link); 97 futex->paddr = 0;236 futex->paddr = paddr; 98 237 futex->refcount = 1; 238 } 239 240 /** Increments the counter of tasks referencing the futex. */ 241 static void futex_add_ref(futex_t *futex) 242 { 243 ASSERT(spinlock_locked(&futex_ht_lock)); 244 ASSERT(0 < futex->refcount); 245 ++futex->refcount; 246 } 247 248 /** Decrements the counter of tasks referencing the futex. May free the futex.*/ 249 static void futex_release_ref(futex_t *futex) 250 { 251 ASSERT(spinlock_locked(&futex_ht_lock)); 252 ASSERT(0 < futex->refcount); 253 254 --futex->refcount; 255 256 if (0 == futex->refcount) { 257 hash_table_remove(&futex_ht, &futex->paddr, 1); 258 } 259 } 260 261 /** Decrements the counter of tasks referencing the futex. May free the futex.*/ 262 static void futex_release_ref_locked(futex_t *futex) 263 { 264 spinlock_lock(&futex_ht_lock); 265 futex_release_ref(futex); 266 spinlock_unlock(&futex_ht_lock); 267 } 268 269 /** Returns a futex for the virtual address @a uaddr (or creates one). */ 270 static futex_t *get_futex(uintptr_t uaddr) 271 { 272 futex_t *futex = find_cached_futex(uaddr); 273 274 if (futex) 275 return futex; 276 277 uintptr_t paddr; 278 279 if (!find_futex_paddr(uaddr, &paddr)) { 280 return 0; 281 } 282 283 return get_and_cache_futex(paddr, uaddr); 284 } 285 286 287 /** Finds the physical address of the futex variable. */ 288 static bool find_futex_paddr(uintptr_t uaddr, uintptr_t *paddr) 289 { 290 page_table_lock(AS, false); 291 spinlock_lock(&futex_ht_lock); 292 293 bool found = false; 294 pte_t *t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), true); 295 296 if (t && PTE_VALID(t) && PTE_PRESENT(t)) { 297 found = true; 298 *paddr = PTE_GET_FRAME(t) + (uaddr - ALIGN_DOWN(uaddr, PAGE_SIZE)); 299 } 300 301 spinlock_unlock(&futex_ht_lock); 302 page_table_unlock(AS, false); 303 304 return found; 305 } 306 307 /** Returns the futex cached in this task with the virtual address uaddr. */ 308 static futex_t *find_cached_futex(uintptr_t uaddr) 309 { 310 cht_read_lock(); 311 312 futex_t *futex; 313 cht_link_t *futex_ptr_link = cht_find_lazy(&TASK->futexes->ht, &uaddr); 314 315 if (futex_ptr_link) { 316 futex_ptr_t *futex_ptr 317 = member_to_inst(futex_ptr_link, futex_ptr_t, cht_link); 318 319 futex = futex_ptr->futex; 320 } else { 321 futex = NULL; 322 } 323 324 cht_read_unlock(); 325 326 return futex; 327 } 328 329 330 /** 331 * Returns a kernel futex for the physical address @a phys_addr and caches 332 * it in this task under the virtual address @a uaddr (if not already cached). 
333 */ 334 static futex_t *get_and_cache_futex(uintptr_t phys_addr, uintptr_t uaddr) 335 { 336 futex_t *futex = malloc(sizeof(futex_t), 0); 337 338 /* 339 * Find the futex object in the global futex table (or insert it 340 * if it is not present). 341 */ 342 spinlock_lock(&futex_ht_lock); 343 344 link_t *fut_link = hash_table_find(&futex_ht, &phys_addr); 345 346 if (fut_link) { 347 free(futex); 348 futex = member_to_inst(fut_link, futex_t, ht_link); 349 futex_add_ref(futex); 350 } else { 351 futex_initialize(futex, phys_addr); 352 hash_table_insert(&futex_ht, &phys_addr, &futex->ht_link); 353 } 354 355 spinlock_unlock(&futex_ht_lock); 356 357 /* 358 * Cache the link to the futex object for this task. 359 */ 360 futex_ptr_t *fut_ptr = malloc(sizeof(futex_ptr_t), 0); 361 cht_link_t *dup_link; 362 363 fut_ptr->futex = futex; 364 fut_ptr->uaddr = uaddr; 365 366 cht_read_lock(); 367 368 /* Cache the mapping from the virtual address to the futex for this task. */ 369 if (cht_insert_unique(&TASK->futexes->ht, &fut_ptr->cht_link, &dup_link)) { 370 spinlock_lock(&TASK->futexes->list_lock); 371 list_append(&fut_ptr->all_link, &TASK->futexes->list); 372 spinlock_unlock(&TASK->futexes->list_lock); 373 } else { 374 /* Another thread of this task beat us to it. Use that mapping instead.*/ 375 free(fut_ptr); 376 futex_release_ref_locked(futex); 377 378 futex_ptr_t *dup = member_to_inst(dup_link, futex_ptr_t, cht_link); 379 futex = dup->futex; 380 } 381 382 cht_read_unlock(); 383 384 return futex; 99 385 } 100 386 … … 109 395 sysarg_t sys_futex_sleep(uintptr_t uaddr) 110 396 { 111 futex_t *futex; 112 uintptr_t paddr; 113 pte_t *t; 114 int rc; 115 116 /* 117 * Find physical address of futex counter. 118 */ 119 page_table_lock(AS, true); 120 t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), false); 121 if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) { 122 page_table_unlock(AS, true); 397 futex_t *futex = get_futex(uaddr); 398 399 if (!futex) 123 400 return (sysarg_t) ENOENT; 124 } 125 paddr = PTE_GET_FRAME(t) + (uaddr - ALIGN_DOWN(uaddr, PAGE_SIZE)); 126 page_table_unlock(AS, true); 127 128 futex = futex_find(paddr); 129 130 #ifdef CONFIG_UDEBUG 131 udebug_stoppable_begin(); 132 #endif 133 rc = waitq_sleep_timeout(&futex->wq, 0, SYNCH_FLAGS_INTERRUPTIBLE); 134 #ifdef CONFIG_UDEBUG 135 udebug_stoppable_end(); 136 #endif 401 402 int rc = waitq_sleep_timeout(&futex->wq, 0, SYNCH_FLAGS_INTERRUPTIBLE); 403 137 404 return (sysarg_t) rc; 138 405 } … … 146 413 sysarg_t sys_futex_wakeup(uintptr_t uaddr) 147 414 { 148 futex_t *futex; 149 uintptr_t paddr; 150 pte_t *t; 151 152 /* 153 * Find physical address of futex counter. 154 */ 155 page_table_lock(AS, true); 156 t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), false); 157 if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) { 158 page_table_unlock(AS, true); 415 futex_t *futex = get_futex(uaddr); 416 417 if (futex) { 418 waitq_wakeup(&futex->wq, WAKEUP_FIRST); 419 return 0; 420 } else { 159 421 return (sysarg_t) ENOENT; 160 422 } 161 paddr = PTE_GET_FRAME(t) + (uaddr - ALIGN_DOWN(uaddr, PAGE_SIZE)); 162 page_table_unlock(AS, true); 163 164 futex = futex_find(paddr); 165 166 waitq_wakeup(&futex->wq, WAKEUP_FIRST); 167 168 return 0; 169 } 170 171 /** Find kernel address of the futex structure corresponding to paddr. 172 * 173 * If the structure does not exist already, a new one is created. 174 * 175 * @param paddr Physical address of the userspace futex counter. 176 * 177 * @return Address of the kernel futex structure. 
178 */ 179 futex_t *futex_find(uintptr_t paddr) 180 { 181 link_t *item; 182 futex_t *futex; 183 btree_node_t *leaf; 184 185 /* 186 * Find the respective futex structure 187 * or allocate new one if it does not exist already. 188 */ 189 mutex_lock(&futex_ht_lock); 190 item = hash_table_find(&futex_ht, &paddr); 191 if (item) { 192 futex = hash_table_get_instance(item, futex_t, ht_link); 193 194 /* 195 * See if the current task knows this futex. 196 */ 197 mutex_lock(&TASK->futexes_lock); 198 if (!btree_search(&TASK->futexes, paddr, &leaf)) { 199 /* 200 * The futex is new to the current task. 201 * Upgrade its reference count and put it to the 202 * current task's B+tree of known futexes. 203 */ 204 futex->refcount++; 205 btree_insert(&TASK->futexes, paddr, futex, leaf); 206 } 207 mutex_unlock(&TASK->futexes_lock); 208 } else { 209 futex = (futex_t *) malloc(sizeof(futex_t), 0); 210 futex_initialize(futex); 211 futex->paddr = paddr; 212 hash_table_insert(&futex_ht, &paddr, &futex->ht_link); 213 214 /* 215 * This is the first task referencing the futex. 216 * It can be directly inserted into its 217 * B+tree of known futexes. 218 */ 219 mutex_lock(&TASK->futexes_lock); 220 btree_insert(&TASK->futexes, paddr, futex, NULL); 221 mutex_unlock(&TASK->futexes_lock); 222 223 } 224 mutex_unlock(&futex_ht_lock); 225 226 return futex; 227 } 423 } 424 228 425 229 426 /** Compute hash index into futex hash table. … … 268 465 } 269 466 270 /** Remove references from futexes known to the current task. */ 271 void futex_cleanup(void) 272 { 273 mutex_lock(&futex_ht_lock); 274 mutex_lock(&TASK->futexes_lock); 275 276 list_foreach(TASK->futexes.leaf_list, leaf_link, btree_node_t, node) { 277 unsigned int i; 278 279 for (i = 0; i < node->keys; i++) { 280 futex_t *ftx; 281 uintptr_t paddr = node->key[i]; 282 283 ftx = (futex_t *) node->value[i]; 284 if (--ftx->refcount == 0) 285 hash_table_remove(&futex_ht, &paddr, 1); 286 } 287 } 288 289 mutex_unlock(&TASK->futexes_lock); 290 mutex_unlock(&futex_ht_lock); 467 /* 468 * Operations of a task's CHT that caches mappings of futex user space 469 * virtual addresses to kernel futex objects. 470 */ 471 472 static size_t task_fut_ht_hash(const cht_link_t *link) 473 { 474 const futex_ptr_t *fut_ptr = member_to_inst(link, futex_ptr_t, cht_link); 475 return fut_ptr->uaddr; 476 } 477 478 static size_t task_fut_ht_key_hash(void *key) 479 { 480 return *(uintptr_t*)key; 481 } 482 483 static bool task_fut_ht_equal(const cht_link_t *item1, const cht_link_t *item2) 484 { 485 const futex_ptr_t *fut_ptr1 = member_to_inst(item1, futex_ptr_t, cht_link); 486 const futex_ptr_t *fut_ptr2 = member_to_inst(item2, futex_ptr_t, cht_link); 487 488 return fut_ptr1->uaddr == fut_ptr2->uaddr; 489 } 490 491 static bool task_fut_ht_key_equal(void *key, const cht_link_t *item) 492 { 493 const futex_ptr_t *fut_ptr = member_to_inst(item, futex_ptr_t, cht_link); 494 uintptr_t uaddr = *(uintptr_t*)key; 495 496 return fut_ptr->uaddr == uaddr; 291 497 } 292 498 -
kernel/generic/src/synch/mutex.c
reae91e0 r235d31d 41 41 #include <arch.h> 42 42 #include <stacktrace.h> 43 #include <cpu.h> 44 #include <proc/thread.h> 43 45 44 46 /** Initialize mutex. -
kernel/generic/src/synch/smc.c
reae91e0 r235d31d 41 41 #include <arch/barrier.h> 42 42 #include <synch/smc.h> 43 #include <mm/as.h> 43 44 44 45 sysarg_t sys_smc_coherence(uintptr_t va, size_t size) -
kernel/generic/src/synch/spinlock.c
reae91e0 r235d31d 45 45 #include <symtab.h> 46 46 #include <stacktrace.h> 47 #include <cpu.h> 47 48 48 49 #ifdef CONFIG_SMP … … 198 199 * 199 200 * @param lock IRQ spinlock to be locked. 200 * @param irq_dis If true, interrupts are actually disabled 201 * prior locking the spinlock. If false, interrupts 202 * are expected to be already disabled. 201 * @param irq_dis If true, disables interrupts before locking the spinlock. 202 * If false, interrupts are expected to be already disabled. 203 203 * 204 204 */ -
kernel/generic/src/synch/waitq.c
reae91e0 r235d31d 57 57 58 58 static void waitq_sleep_timed_out(void *); 59 static void waitq_complete_wakeup(waitq_t *); 60 59 61 60 62 /** Initialize wait queue … … 330 332 break; 331 333 default: 334 /* 335 * Wait for a waitq_wakeup() or waitq_unsleep() to complete 336 * before returning from waitq_sleep() to the caller. Otherwise 337 * the caller might expect that the wait queue is no longer used 338 * and deallocate it (although the wakeup on a another cpu has 339 * not yet completed and is using the wait queue). 340 * 341 * Note that we have to do this for ESYNCH_OK_BLOCKED and 342 * ESYNCH_INTERRUPTED, but not necessarily for ESYNCH_TIMEOUT 343 * where the timeout handler stops using the waitq before waking 344 * us up. To be on the safe side, ensure the waitq is not in use 345 * anymore in this case as well. 346 */ 347 waitq_complete_wakeup(wq); 332 348 break; 333 349 } … … 357 373 } else { 358 374 if (PARAM_NON_BLOCKING(flags, usec)) { 359 /* Return immediatel ly instead of going to sleep */375 /* Return immediately instead of going to sleep */ 360 376 return ESYNCH_WOULD_BLOCK; 361 377 } … … 442 458 irq_spinlock_unlock(&wq->lock, true); 443 459 } 460 461 /** If there is a wakeup in progress actively waits for it to complete. 462 * 463 * The function returns once the concurrently running waitq_wakeup() 464 * exits. It returns immediately if there are no concurrent wakeups 465 * at the time. 466 * 467 * Interrupts must be disabled. 468 * 469 * Example usage: 470 * @code 471 * void callback(waitq *wq) 472 * { 473 * // Do something and notify wait_for_completion() that we're done. 474 * waitq_wakeup(wq); 475 * } 476 * void wait_for_completion(void) 477 * { 478 * waitq wg; 479 * waitq_initialize(&wq); 480 * // Run callback() in the background, pass it wq. 481 * do_asynchronously(callback, &wq); 482 * // Wait for callback() to complete its work. 483 * waitq_sleep(&wq); 484 * // callback() completed its work, but it may still be accessing 485 * // wq in waitq_wakeup(). Therefore it is not yet safe to return 486 * // from waitq_sleep() or it would clobber up our stack (where wq 487 * // is stored). waitq_sleep() ensures the wait queue is no longer 488 * // in use by invoking waitq_complete_wakeup() internally. 489 * 490 * // waitq_sleep() returned, it is safe to free wq. 491 * } 492 * @endcode 493 * 494 * @param wq Pointer to a wait queue. 495 */ 496 static void waitq_complete_wakeup(waitq_t *wq) 497 { 498 ASSERT(interrupts_disabled()); 499 500 irq_spinlock_lock(&wq->lock, false); 501 irq_spinlock_unlock(&wq->lock, false); 502 } 503 444 504 445 505 /** Internal SMP- and IRQ-unsafe version of waitq_wakeup() -
kernel/generic/src/syscall/syscall.c
reae91e0 r235d31d 50 50 #include <synch/futex.h> 51 51 #include <synch/smc.h> 52 #include <synch/smp_memory_barrier.h> 52 53 #include <ddi/ddi.h> 53 54 #include <ipc/event.h> … … 142 143 (syshandler_t) sys_futex_wakeup, 143 144 (syshandler_t) sys_smc_coherence, 145 (syshandler_t) sys_smp_memory_barrier, 146 144 147 145 148 /* Address space related syscalls. */ -
kernel/generic/src/time/clock.c
reae91e0 r235d31d 212 212 irq_spinlock_unlock(&THREAD->lock, false); 213 213 214 if ( (!ticks) && (!PREEMPTION_DISABLED)) {214 if (ticks == 0 && PREEMPTION_ENABLED) { 215 215 scheduler(); 216 216 #ifdef CONFIG_UDEBUG -
kernel/generic/src/udebug/udebug.c
reae91e0 r235d31d 44 44 #include <print.h> 45 45 #include <arch.h> 46 #include <proc/task.h> 47 #include <proc/thread.h> 46 48 47 49 /** Initialize udebug part of task structure.