Changeset 1a5fe4f in mainline
- Timestamp: 2018-11-09T18:09:55Z (6 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 1892d2c
- Parents: 3875f106 (diff), ef4218f (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- git-author: Jakub Jermář <jakub@…> (2018-11-09 18:09:55)
- git-committer: GitHub <noreply@…> (2018-11-09 18:09:55)
- Files: 34 deleted, 44 edited
-
HelenOS.config
r3875f106 r1a5fe4f 430 430 ! [COMPILER=gcc_cross] CONFIG_LTO (n/y) 431 431 432 % Kernel RCU algorithm433 @ "PREEMPT_PODZIMEK" Preemptible Podzimek-RCU434 @ "PREEMPT_A" Preemptible A-RCU435 ! RCU (choice)436 437 432 % Compress init data 438 433 ! CONFIG_COMPRESSED_INIT (y/n) -
abi/include/abi/syscall.h
r3875f106 r1a5fe4f 54 54 SYS_FUTEX_WAKEUP, 55 55 SYS_SMC_COHERENCE, 56 SYS_SMP_MEMORY_BARRIER,57 56 58 57 SYS_AS_AREA_CREATE, -
defaults/amd64/Makefile.config
r3875f106 r1a5fe4f 44 44 CONFIG_TEST = y 45 45 46 # Kernel RCU implementation47 RCU = PREEMPT_A48 49 46 # Input device class 50 47 CONFIG_HID_IN = generic -
defaults/arm32/Makefile.config
r3875f106 r1a5fe4f 32 32 CONFIG_TEST = y 33 33 34 # Kernel RCU implementation35 RCU = PREEMPT_A36 37 34 # What is your input device? 38 35 CONFIG_HID_IN = generic -
defaults/ia32/Makefile.config
r3875f106 r1a5fe4f 47 47 CONFIG_TEST = y 48 48 49 # Kernel RCU implementation50 RCU = PREEMPT_A51 52 49 # Input device class 53 50 CONFIG_HID_IN = generic -
defaults/ia64/Makefile.config
r3875f106 r1a5fe4f 44 44 CONFIG_TEST = y 45 45 46 # Kernel RCU implementation47 RCU = PREEMPT_A48 49 46 # Input device class 50 47 CONFIG_HID_IN = generic -
defaults/mips32/Makefile.config
r3875f106 r1a5fe4f 38 38 CONFIG_TEST = y 39 39 40 # Kernel RCU implementation41 RCU = PREEMPT_A42 43 40 # Input device class 44 41 CONFIG_HID_IN = generic -
defaults/ppc32/Makefile.config
r3875f106 r1a5fe4f 35 35 CONFIG_TEST = y 36 36 37 # Kernel RCU implementation38 RCU = PREEMPT_A39 40 37 # Input device class 41 38 CONFIG_HID_IN = generic -
defaults/riscv64/Makefile.config
r3875f106 r1a5fe4f 32 32 CONFIG_TEST = y 33 33 34 # Kernel RCU implementation35 RCU = PREEMPT_A36 37 34 # What is your input device? 38 35 CONFIG_HID_IN = generic -
defaults/sparc64/Makefile.config
r3875f106 r1a5fe4f 47 47 CONFIG_TEST = y 48 48 49 # Kernel RCU implementation50 RCU = PREEMPT_A51 52 49 # Input device class 53 50 CONFIG_HID_IN = generic -
defaults/special/Makefile.config
r3875f106 r1a5fe4f 32 32 CONFIG_TEST = y 33 33 34 # Kernel RCU implementation35 RCU = PREEMPT_A36 37 34 # Optimization level 38 35 OPTIMIZATION = 3 -
kernel/Makefile
r3875f106 r1a5fe4f 163 163 generic/src/adt/bitmap.c \ 164 164 generic/src/adt/btree.c \ 165 generic/src/adt/cht.c \166 165 generic/src/adt/hash_table.c \ 167 166 generic/src/adt/list.c \ … … 227 226 generic/src/synch/semaphore.c \ 228 227 generic/src/synch/smc.c \ 229 generic/src/synch/smp_memory_barrier.c \230 228 generic/src/synch/waitq.c \ 231 229 generic/src/synch/futex.c \ 232 generic/src/synch/workqueue.c \233 generic/src/synch/rcu.c \234 230 generic/src/smp/ipi.c \ 235 231 generic/src/smp/smp.c \ 236 generic/src/smp/smp_call.c \237 232 generic/src/ipc/ipc.c \ 238 233 generic/src/ipc/sysipc.c \ … … 284 279 test/atomic/atomic1.c \ 285 280 test/btree/btree1.c \ 286 test/cht/cht1.c \287 281 test/fault/fault1.c \ 288 282 test/mm/falloc1.c \ … … 293 287 test/synch/semaphore1.c \ 294 288 test/synch/semaphore2.c \ 295 test/synch/workqueue2.c \296 test/synch/workqueue3.c \297 test/synch/rcu1.c \298 289 test/print/print1.c \ 299 290 test/print/print2.c \ … … 301 292 test/print/print4.c \ 302 293 test/print/print5.c \ 303 test/thread/thread1.c \ 304 test/smpcall/smpcall1.c 294 test/thread/thread1.c 305 295 306 296 ifeq ($(KARCH),mips32) -
kernel/arch/abs32le/Makefile.inc
r3875f106 r1a5fe4f 51 51 arch/$(KARCH)/src/cpu/cpu.c \ 52 52 arch/$(KARCH)/src/smp/smp.c \ 53 arch/$(KARCH)/src/smp/smp_call.c \54 53 arch/$(KARCH)/src/smp/ipi.c \ 55 54 arch/$(KARCH)/src/mm/km.c \ -
kernel/arch/amd64/Makefile.inc
r3875f106 r1a5fe4f 85 85 arch/$(KARCH)/src/smp/ipi.c \ 86 86 arch/$(KARCH)/src/smp/mps.c \ 87 arch/$(KARCH)/src/smp/smp_call.c \88 87 arch/$(KARCH)/src/smp/smp.c 89 88 endif -
kernel/arch/amd64/include/arch/interrupt.h
r3875f106 r1a5fe4f 81 81 #define VECTOR_TLB_SHOOTDOWN_IPI (IVT_FREEBASE + 1) 82 82 #define VECTOR_DEBUG_IPI (IVT_FREEBASE + 2) 83 #define VECTOR_SMP_CALL_IPI (IVT_FREEBASE + 3)84 83 85 84 extern void (*disable_irqs_function)(uint16_t); -
kernel/arch/amd64/src/interrupt.c
r3875f106 r1a5fe4f 53 53 #include <symtab.h> 54 54 #include <stacktrace.h> 55 #include <smp/smp_call.h>56 55 57 56 /* … … 161 160 tlb_shootdown_ipi_recv(); 162 161 } 163 164 static void arch_smp_call_ipi_recv(unsigned int n, istate_t *istate)165 {166 trap_virtual_eoi();167 smp_call_ipi_recv();168 }169 162 #endif 170 163 … … 229 222 exc_register(VECTOR_TLB_SHOOTDOWN_IPI, "tlb_shootdown", true, 230 223 (iroutine_t) tlb_shootdown_ipi); 231 exc_register(VECTOR_SMP_CALL_IPI, "smp_call", true,232 (iroutine_t) arch_smp_call_ipi_recv);233 224 #endif 234 225 } -
kernel/arch/ia32/Makefile.inc
r3875f106 r1a5fe4f 76 76 arch/$(KARCH)/src/smp/mps.c \ 77 77 arch/$(KARCH)/src/smp/smp.c \ 78 arch/$(KARCH)/src/smp/smp_call.c \79 78 arch/$(KARCH)/src/atomic.S \ 80 79 arch/$(KARCH)/src/smp/ipi.c \ -
kernel/arch/ia32/include/arch/interrupt.h
r3875f106 r1a5fe4f 85 85 #define VECTOR_TLB_SHOOTDOWN_IPI (IVT_FREEBASE + 1) 86 86 #define VECTOR_DEBUG_IPI (IVT_FREEBASE + 2) 87 #define VECTOR_SMP_CALL_IPI (IVT_FREEBASE + 3)88 87 89 88 extern void (*disable_irqs_function)(uint16_t); -
kernel/arch/ia32/src/interrupt.c
r3875f106 r1a5fe4f 55 55 #include <symtab.h> 56 56 #include <stacktrace.h> 57 #include <smp/smp_call.h>58 57 #include <proc/task.h> 59 58 … … 183 182 tlb_shootdown_ipi_recv(); 184 183 } 185 186 static void arch_smp_call_ipi_recv(unsigned int n, istate_t *istate)187 {188 trap_virtual_eoi();189 smp_call_ipi_recv();190 }191 184 #endif 192 185 … … 250 243 exc_register(VECTOR_TLB_SHOOTDOWN_IPI, "tlb_shootdown", true, 251 244 (iroutine_t) tlb_shootdown_ipi); 252 exc_register(VECTOR_SMP_CALL_IPI, "smp_call", true,253 (iroutine_t) arch_smp_call_ipi_recv);254 245 #endif 255 246 } -
kernel/arch/ia64/Makefile.inc
r3875f106 r1a5fe4f 58 58 arch/$(KARCH)/src/smc.c \ 59 59 arch/$(KARCH)/src/smp/smp.c \ 60 arch/$(KARCH)/src/smp/smp_call.c \61 60 arch/$(KARCH)/src/drivers/it.c 62 61 -
kernel/arch/mips32/Makefile.inc
r3875f106 r1a5fe4f 68 68 arch/$(KARCH)/src/smc.c \ 69 69 arch/$(KARCH)/src/smp/smp.c \ 70 arch/$(KARCH)/src/smp/smp_call.c \71 70 arch/$(KARCH)/src/machine_func.c 72 71 -
kernel/arch/sparc64/Makefile.inc
r3875f106 r1a5fe4f 99 99 ARCH_SOURCES += \ 100 100 arch/$(KARCH)/src/smp/$(USARCH)/smp.c \ 101 arch/$(KARCH)/src/smp/$(USARCH)/smp_call.c \102 101 arch/$(KARCH)/src/smp/$(USARCH)/ipi.c 103 102 endif -
kernel/arch/sparc64/include/arch/interrupt.h
r3875f106 r1a5fe4f 47 47 enum { 48 48 IPI_TLB_SHOOTDOWN = VECTOR_TLB_SHOOTDOWN_IPI, 49 IPI_SMP_CALL50 49 }; 51 50 -
kernel/arch/sparc64/src/smp/sun4u/ipi.c
r3875f106 r1a5fe4f 35 35 #include <smp/ipi.h> 36 36 #include <arch/barrier.h> 37 #include <arch/smp/sun4u/ipi.h>38 37 #include <assert.h> 39 38 #include <cpu.h> … … 43 42 #include <config.h> 44 43 #include <mm/tlb.h> 45 #include <smp/smp_call.h>46 44 #include <arch/interrupt.h> 47 45 #include <arch/trap/interrupt.h> … … 175 173 } 176 174 177 /*178 * Deliver an IPI to the specified processors (except the current one).179 *180 * Interrupts must be disabled.181 *182 * @param cpu_id Destination cpu id (index into cpus array). Must not183 * be the current cpu.184 * @param ipi IPI number.185 */186 void ipi_unicast_arch(unsigned int cpu_id, int ipi)187 {188 assert(&cpus[cpu_id] != CPU);189 190 if (ipi == IPI_SMP_CALL) {191 cross_call(cpus[cpu_id].arch.mid, smp_call_ipi_recv);192 } else {193 panic("Unknown IPI (%d).\n", ipi);194 return;195 }196 }197 198 175 /** @} 199 176 */ -
kernel/generic/include/arch.h
r3875f106 r1a5fe4f 66 66 typedef struct { 67 67 size_t preemption; /**< Preemption disabled counter and flag. */ 68 #ifdef RCU_PREEMPT_A69 size_t rcu_nesting; /**< RCU nesting count and flag. */70 #endif71 68 struct thread *thread; /**< Current thread. */ 72 69 struct task *task; /**< Current task. */ -
kernel/generic/include/cpu.h
r3875f106 r1a5fe4f 38 38 #include <mm/tlb.h> 39 39 #include <synch/spinlock.h> 40 #include <synch/rcu_types.h>41 40 #include <proc/scheduler.h> 42 41 #include <arch/cpu.h> … … 99 98 100 99 /** 101 * SMP calls to invoke on this CPU.102 */103 SPINLOCK_DECLARE(smp_calls_lock);104 list_t smp_pending_calls;105 106 /** RCU per-cpu data. Uses own locking. */107 rcu_cpu_data_t rcu;108 109 /**110 100 * Stack used by scheduler when there is no running thread. 111 101 */ -
kernel/generic/include/proc/task.h
r3875f106 r1a5fe4f 43 43 #include <synch/mutex.h> 44 44 #include <synch/futex.h> 45 #include <synch/workqueue.h>46 #include <adt/cht.h>47 45 #include <adt/list.h> 48 46 #include <adt/odict.h> … … 130 128 task_arch_t arch; 131 129 132 struct futex_cache { 133 /** CHT mapping virtual addresses of futex variables to futex objects.*/ 134 cht_t ht; 135 /** Serializes access to futex_list.*/ 136 SPINLOCK_DECLARE(list_lock); 137 /** List of all futexes accesses by this task. */ 138 list_t list; 139 work_t destroy_work; 140 } *futexes; 130 /** Serializes access to futex_list.*/ 131 SPINLOCK_DECLARE(futex_list_lock); 132 /** List of all futexes accesses by this task. */ 133 list_t futex_list; 141 134 142 135 /** Accumulated accounting. */ -
kernel/generic/include/proc/thread.h
r3875f106 r1a5fe4f 41 41 #include <cpu.h> 42 42 #include <synch/spinlock.h> 43 #include <synch/rcu_types.h>44 43 #include <adt/odict.h> 45 44 #include <mm/slab.h> … … 194 193 thread_id_t tid; 195 194 196 /** Work queue this thread belongs to or NULL. Immutable. */197 struct work_queue *workq;198 /** Links work queue threads. Protected by workq->lock. */199 link_t workq_link;200 /** True if the worker was blocked and is not running. Use thread->lock. */201 bool workq_blocked;202 /** True if the worker will block in order to become idle. Use workq->lock. */203 bool workq_idling;204 205 /** RCU thread related data. Protected by its own locks. */206 rcu_thread_data_t rcu;207 208 195 /** Architecture-specific data. */ 209 196 thread_arch_t arch; -
kernel/generic/include/synch/futex.h
r3875f106 r1a5fe4f 58 58 extern void futex_task_cleanup(void); 59 59 extern void futex_task_init(struct task *); 60 extern void futex_task_deinit(struct task *);61 60 62 61 #endif -
kernel/generic/src/console/cmd.c
r3875f106 r1a5fe4f 70 70 #include <sysinfo/sysinfo.h> 71 71 #include <symtab.h> 72 #include <synch/workqueue.h>73 #include <synch/rcu.h>74 72 #include <errno.h> 75 73 … … 536 534 }; 537 535 538 /* Data and methods for the 'workq' command */539 static int cmd_workq(cmd_arg_t *argv);540 static cmd_info_t workq_info = {541 .name = "workq",542 .description = "Show global workq information.",543 .func = cmd_workq,544 .argc = 0545 };546 547 /* Data and methods for the 'workq' command */548 static int cmd_rcu(cmd_arg_t *argv);549 static cmd_info_t rcu_info = {550 .name = "rcu",551 .description = "Show RCU run-time statistics.",552 .func = cmd_rcu,553 .argc = 0554 };555 556 536 /* Data and methods for 'ipc' command */ 557 537 static int cmd_ipc(cmd_arg_t *argv); … … 618 598 &physmem_info, 619 599 &reboot_info, 620 &rcu_info,621 600 &sched_info, 622 601 &set4_info, … … 628 607 &uptime_info, 629 608 &version_info, 630 &workq_info,631 609 &zones_info, 632 610 &zone_info, … … 1302 1280 } 1303 1281 1304 /** Prints information about the global work queue.1305 *1306 * @param argv Ignores1307 *1308 * @return Always 11309 */1310 int cmd_workq(cmd_arg_t *argv)1311 {1312 workq_global_print_info();1313 return 1;1314 }1315 1316 /** Prints RCU statistics.1317 *1318 * @param argv Ignores1319 *1320 * @return Always 11321 */1322 int cmd_rcu(cmd_arg_t *argv)1323 {1324 rcu_print_stat();1325 return 1;1326 }1327 1328 1282 /** Command for listing memory zones 1329 1283 * -
kernel/generic/src/cpu/cpu.c
r3875f106 r1a5fe4f 50 50 #include <sysinfo/sysinfo.h> 51 51 #include <arch/cycle.h> 52 #include <synch/rcu.h>53 52 54 53 cpu_t *cpus; … … 108 107 cpu_identify(); 109 108 cpu_arch_init(); 110 rcu_cpu_init();111 109 } 112 110 -
kernel/generic/src/main/kinit.c
r3875f106 r1a5fe4f 79 79 #include <synch/waitq.h> 80 80 #include <synch/spinlock.h> 81 #include <synch/workqueue.h>82 #include <synch/rcu.h>83 81 84 82 #define ALIVE_CHARS 4 … … 109 107 110 108 interrupts_disable(); 111 112 /* Start processing RCU callbacks. RCU is fully functional afterwards. */113 rcu_kinit_init();114 115 /*116 * Start processing work queue items. Some may have been queued during boot.117 */118 workq_global_worker_init();119 109 120 110 #ifdef CONFIG_SMP -
kernel/generic/src/main/main.c
r3875f106 r1a5fe4f 78 78 #include <synch/waitq.h> 79 79 #include <synch/futex.h> 80 #include <synch/workqueue.h>81 #include <smp/smp_call.h>82 80 #include <arch/arch.h> 83 81 #include <arch.h> … … 274 272 ARCH_OP(post_cpu_init); 275 273 276 smp_call_init();277 workq_global_init();278 274 clock_counter_init(); 279 275 timeout_init(); … … 381 377 void main_ap_separated_stack(void) 382 378 { 383 smp_call_init();384 385 379 /* 386 380 * Configure timeouts for this cpu. -
kernel/generic/src/proc/scheduler.c
r3875f106 r1a5fe4f 54 54 #include <atomic.h> 55 55 #include <synch/spinlock.h> 56 #include <synch/workqueue.h>57 #include <synch/rcu.h>58 56 #include <config.h> 59 57 #include <context.h> … … 90 88 { 91 89 before_thread_runs_arch(); 92 rcu_before_thread_runs();93 90 94 91 #ifdef CONFIG_FPU_LAZY … … 131 128 static void after_thread_ran(void) 132 129 { 133 workq_after_thread_ran();134 rcu_after_thread_ran();135 130 after_thread_ran_arch(); 136 131 } … … 430 425 431 426 case Exiting: 432 rcu_thread_exiting();433 427 repeat: 434 428 if (THREAD->detached) { -
kernel/generic/src/proc/task.c
r3875f106 r1a5fe4f 285 285 286 286 /* 287 * Free up dynamically allocated state.288 */289 futex_task_deinit(task);290 291 /*292 287 * Drop our reference to the address space. 293 288 */ -
kernel/generic/src/proc/the.c
r3875f106 r1a5fe4f 60 60 the->as = NULL; 61 61 the->magic = MAGIC; 62 #ifdef RCU_PREEMPT_A63 the->rcu_nesting = 0;64 #endif65 62 } 66 63 -
kernel/generic/src/proc/thread.c
r3875f106 r1a5fe4f 48 48 #include <synch/spinlock.h> 49 49 #include <synch/waitq.h> 50 #include <synch/workqueue.h>51 #include <synch/rcu.h>52 50 #include <cpu.h> 53 51 #include <str.h> … … 69 67 #include <syscall/copy.h> 70 68 #include <errno.h> 69 #include <debug.h> 71 70 72 71 /** Thread states */ … … 272 271 { 273 272 assert(irq_spinlock_locked(&thread->lock)); 274 workq_before_thread_is_ready(thread);275 273 } 276 274 … … 399 397 thread->task = task; 400 398 401 thread->workq = NULL;402 403 399 thread->fpu_context_exists = false; 404 400 thread->fpu_context_engaged = false; … … 414 410 /* Might depend on previous initialization */ 415 411 thread_create_arch(thread); 416 417 rcu_thread_init(thread);418 412 419 413 if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH) -
kernel/generic/src/synch/futex.c
r3875f106 r1a5fe4f 45 45 * encountered before). Futex object's lifetime is governed by 46 46 * a reference count that represents the number of all the different 47 * user space virtual addresses from all tasks that map to the 48 * physical address of the futex variable. A futex object is freed 47 * tasks that reference the futex variable. A futex object is freed 49 48 * when the last task having accessed the futex exits. 50 49 * … … 52 51 * of pointers (futex_ptr_t, task->futex_list) to the different futex 53 52 * objects. 54 *55 * To speed up translation of futex variables' virtual addresses56 * to their physical addresses, futex pointers accessed by the57 * task are furthermore stored in a concurrent hash table (CHT,58 * task->futexes->ht). A single lookup without locks or accesses59 * to the page table translates a futex variable's virtual address60 * into its futex kernel object.61 53 */ 62 54 … … 65 57 #include <synch/mutex.h> 66 58 #include <synch/spinlock.h> 67 #include <synch/rcu.h>68 59 #include <mm/frame.h> 69 60 #include <mm/page.h> … … 73 64 #include <genarch/mm/page_pt.h> 74 65 #include <genarch/mm/page_ht.h> 75 #include <adt/cht.h>76 66 #include <adt/hash.h> 77 67 #include <adt/hash_table.h> … … 84 74 /** Task specific pointer to a global kernel futex object. */ 85 75 typedef struct futex_ptr { 86 /** CHT link. */ 87 cht_link_t cht_link; 88 /** List of all futex pointers used by the task. */ 89 link_t all_link; 76 /** Link for the list of all futex pointers used by a task. */ 77 link_t task_link; 90 78 /** Kernel futex object. */ 91 79 futex_t *futex; 92 /** User space virtual address of the futex variable in the task. */93 uintptr_t uaddr;94 80 } futex_ptr_t; 95 96 static void destroy_task_cache(work_t *work);97 81 98 82 static void futex_initialize(futex_t *futex, uintptr_t paddr); … … 102 86 103 87 static futex_t *get_futex(uintptr_t uaddr); 104 static futex_t *find_cached_futex(uintptr_t uaddr);105 static futex_t *get_and_cache_futex(uintptr_t phys_addr, uintptr_t uaddr);106 88 static bool find_futex_paddr(uintptr_t uaddr, uintptr_t *phys_addr); 107 89 … … 110 92 static bool futex_ht_key_equal(void *key, const ht_link_t *item); 111 93 static void futex_ht_remove_callback(ht_link_t *item); 112 113 static size_t task_fut_ht_hash(const cht_link_t *link);114 static size_t task_fut_ht_key_hash(void *key);115 static bool task_fut_ht_equal(const cht_link_t *item1, const cht_link_t *item2);116 static bool task_fut_ht_key_equal(void *key, const cht_link_t *item);117 94 118 95 /** Mutex protecting the global futex hash table. … … 136 113 }; 137 114 138 /** Task futex cache CHT operations. */139 static cht_ops_t task_futex_ht_ops = {140 .hash = task_fut_ht_hash,141 .key_hash = task_fut_ht_key_hash,142 .equal = task_fut_ht_equal,143 .key_equal = task_fut_ht_key_equal,144 .remove_callback = NULL145 };146 147 115 /** Initialize futex subsystem. */ 148 116 void futex_init(void) … … 154 122 void futex_task_init(struct task *task) 155 123 { 156 task->futexes = nfmalloc(sizeof(struct futex_cache)); 157 158 cht_create(&task->futexes->ht, 0, 0, 0, true, &task_futex_ht_ops); 159 160 list_initialize(&task->futexes->list); 161 spinlock_initialize(&task->futexes->list_lock, "futex-list-lock"); 162 } 163 164 /** Destroys the futex structures for the dying task. */ 165 void futex_task_deinit(task_t *task) 166 { 167 /* Interrupts are disabled so we must not block (cannot run cht_destroy). */ 168 if (interrupts_disabled()) { 169 /* Invoke the blocking cht_destroy in the background. 
*/ 170 workq_global_enqueue_noblock(&task->futexes->destroy_work, 171 destroy_task_cache); 172 } else { 173 /* We can block. Invoke cht_destroy in this thread. */ 174 destroy_task_cache(&task->futexes->destroy_work); 175 } 176 } 177 178 /** Deallocates a task's CHT futex cache (must already be empty). */ 179 static void destroy_task_cache(work_t *work) 180 { 181 struct futex_cache *cache = 182 member_to_inst(work, struct futex_cache, destroy_work); 183 184 /* 185 * Destroy the cache before manually freeing items of the cache in case 186 * table resize is in progress. 187 */ 188 cht_destroy_unsafe(&cache->ht); 189 190 /* Manually free futex_ptr cache items. */ 191 list_foreach_safe(cache->list, cur_link, next_link) { 192 futex_ptr_t *fut_ptr = member_to_inst(cur_link, futex_ptr_t, all_link); 193 194 list_remove(cur_link); 195 free(fut_ptr); 196 } 197 198 free(cache); 124 list_initialize(&task->futex_list); 125 spinlock_initialize(&task->futex_list_lock, "futex-list-lock"); 199 126 } 200 127 … … 202 129 void futex_task_cleanup(void) 203 130 { 204 struct futex_cache *futexes = TASK->futexes;205 206 131 /* All threads of this task have terminated. This is the last thread. */ 207 spinlock_lock(&futexes->list_lock); 208 209 list_foreach_safe(futexes->list, cur_link, next_link) { 210 futex_ptr_t *fut_ptr = member_to_inst(cur_link, futex_ptr_t, all_link); 211 212 /* 213 * The function is free to free the futex. All other threads of this 214 * task have already terminated, so they have also definitely 215 * exited their CHT futex cache protecting rcu reader sections. 216 * Moreover release_ref() only frees the futex if this is the 217 * last task referencing the futex. Therefore, only threads 218 * of this task may have referenced the futex if it is to be freed. 219 */ 220 futex_release_ref_locked(fut_ptr->futex); 221 } 222 223 spinlock_unlock(&futexes->list_lock); 132 spinlock_lock(&TASK->futex_list_lock); 133 134 list_foreach_safe(TASK->futex_list, cur_link, next_link) { 135 futex_ptr_t *futex_ptr = member_to_inst(cur_link, futex_ptr_t, 136 task_link); 137 138 futex_release_ref_locked(futex_ptr->futex); 139 free(futex_ptr); 140 } 141 142 spinlock_unlock(&TASK->futex_list_lock); 224 143 } 225 144 … … 240 159 { 241 160 assert(spinlock_locked(&futex_ht_lock)); 242 assert( 0 < futex->refcount);161 assert(futex->refcount > 0); 243 162 ++futex->refcount; 244 163 } … … 248 167 { 249 168 assert(spinlock_locked(&futex_ht_lock)); 250 assert( 0 < futex->refcount);169 assert(futex->refcount > 0); 251 170 252 171 --futex->refcount; 253 172 254 if ( 0 == futex->refcount) {173 if (futex->refcount == 0) 255 174 hash_table_remove(&futex_ht, &futex->paddr); 256 }257 175 } 258 176 … … 268 186 static futex_t *get_futex(uintptr_t uaddr) 269 187 { 270 futex_t *futex = find_cached_futex(uaddr);271 272 if (futex)273 return futex;274 275 188 uintptr_t paddr; 276 189 277 if (!find_futex_paddr(uaddr, &paddr)) { 278 return 0; 279 } 280 281 return get_and_cache_futex(paddr, uaddr); 190 if (!find_futex_paddr(uaddr, &paddr)) 191 return NULL; 192 193 futex_t *futex = malloc(sizeof(futex_t)); 194 if (!futex) 195 return NULL; 196 197 futex_ptr_t *futex_ptr = malloc(sizeof(futex_ptr_t)); 198 if (!futex_ptr) { 199 free(futex); 200 return NULL; 201 } 202 203 /* 204 * Find the futex object in the global futex table (or insert it 205 * if it is not present). 
206 */ 207 spinlock_lock(&TASK->futex_list_lock); 208 spinlock_lock(&futex_ht_lock); 209 210 ht_link_t *fut_link = hash_table_find(&futex_ht, &paddr); 211 212 if (fut_link) { 213 free(futex); 214 futex = member_to_inst(fut_link, futex_t, ht_link); 215 216 /* 217 * See if the futex is already known to the TASK 218 */ 219 bool found = false; 220 list_foreach(TASK->futex_list, task_link, futex_ptr_t, fp) { 221 if (fp->futex->paddr == paddr) { 222 found = true; 223 break; 224 } 225 } 226 /* 227 * If not, put it on the TASK->futex_list and bump its reference 228 * count 229 */ 230 if (!found) { 231 list_append(&futex_ptr->task_link, &TASK->futex_list); 232 futex_add_ref(futex); 233 } else 234 free(futex_ptr); 235 } else { 236 futex_initialize(futex, paddr); 237 hash_table_insert(&futex_ht, &futex->ht_link); 238 239 /* 240 * This is a new futex, so it is not on the TASK->futex_list yet 241 */ 242 futex_ptr->futex = futex; 243 list_append(&futex_ptr->task_link, &TASK->futex_list); 244 } 245 246 spinlock_unlock(&futex_ht_lock); 247 spinlock_unlock(&TASK->futex_list_lock); 248 249 return futex; 282 250 } 283 251 … … 306 274 } 307 275 308 /** Returns the futex cached in this task with the virtual address uaddr. */309 static futex_t *find_cached_futex(uintptr_t uaddr)310 {311 cht_read_lock();312 313 futex_t *futex;314 cht_link_t *futex_ptr_link = cht_find_lazy(&TASK->futexes->ht, &uaddr);315 316 if (futex_ptr_link) {317 futex_ptr_t *futex_ptr =318 member_to_inst(futex_ptr_link, futex_ptr_t, cht_link);319 320 futex = futex_ptr->futex;321 } else {322 futex = NULL;323 }324 325 cht_read_unlock();326 327 return futex;328 }329 330 /**331 * Returns a kernel futex for the physical address @a phys_addr and caches332 * it in this task under the virtual address @a uaddr (if not already cached).333 */334 static futex_t *get_and_cache_futex(uintptr_t phys_addr, uintptr_t uaddr)335 {336 futex_t *futex = malloc(sizeof(futex_t));337 if (!futex)338 return NULL;339 340 /*341 * Find the futex object in the global futex table (or insert it342 * if it is not present).343 */344 spinlock_lock(&futex_ht_lock);345 346 ht_link_t *fut_link = hash_table_find(&futex_ht, &phys_addr);347 348 if (fut_link) {349 free(futex);350 futex = member_to_inst(fut_link, futex_t, ht_link);351 futex_add_ref(futex);352 } else {353 futex_initialize(futex, phys_addr);354 hash_table_insert(&futex_ht, &futex->ht_link);355 }356 357 spinlock_unlock(&futex_ht_lock);358 359 /*360 * Cache the link to the futex object for this task.361 */362 futex_ptr_t *fut_ptr = malloc(sizeof(futex_ptr_t));363 if (!fut_ptr) {364 spinlock_lock(&futex_ht_lock);365 futex_release_ref(futex);366 spinlock_unlock(&futex_ht_lock);367 return NULL;368 }369 cht_link_t *dup_link;370 371 fut_ptr->futex = futex;372 fut_ptr->uaddr = uaddr;373 374 cht_read_lock();375 376 /* Cache the mapping from the virtual address to the futex for this task. */377 if (cht_insert_unique(&TASK->futexes->ht, &fut_ptr->cht_link, &dup_link)) {378 spinlock_lock(&TASK->futexes->list_lock);379 list_append(&fut_ptr->all_link, &TASK->futexes->list);380 spinlock_unlock(&TASK->futexes->list_lock);381 } else {382 /* Another thread of this task beat us to it. Use that mapping instead.*/383 free(fut_ptr);384 futex_release_ref_locked(futex);385 386 futex_ptr_t *dup = member_to_inst(dup_link, futex_ptr_t, cht_link);387 futex = dup->futex;388 }389 390 cht_read_unlock();391 392 return futex;393 }394 395 276 /** Sleep in futex wait queue with a timeout. 
277 * 396 278 * If the sleep times out or is interrupted, the next wakeup is ignored. 397 279 * The userspace portion of the call must handle this condition. … … 477 359 } 478 360 479 /*480 * Operations of a task's CHT that caches mappings of futex user space481 * virtual addresses to kernel futex objects.482 */483 484 static size_t task_fut_ht_hash(const cht_link_t *link)485 {486 const futex_ptr_t *fut_ptr = member_to_inst(link, futex_ptr_t, cht_link);487 return fut_ptr->uaddr;488 }489 490 static size_t task_fut_ht_key_hash(void *key)491 {492 return *(uintptr_t *)key;493 }494 495 static bool task_fut_ht_equal(const cht_link_t *item1, const cht_link_t *item2)496 {497 const futex_ptr_t *fut_ptr1 = member_to_inst(item1, futex_ptr_t, cht_link);498 const futex_ptr_t *fut_ptr2 = member_to_inst(item2, futex_ptr_t, cht_link);499 500 return fut_ptr1->uaddr == fut_ptr2->uaddr;501 }502 503 static bool task_fut_ht_key_equal(void *key, const cht_link_t *item)504 {505 const futex_ptr_t *fut_ptr = member_to_inst(item, futex_ptr_t, cht_link);506 uintptr_t uaddr = *(uintptr_t *)key;507 508 return fut_ptr->uaddr == uaddr;509 }510 511 361 /** @} 512 362 */ -
kernel/generic/src/syscall/syscall.c
r3875f106 r1a5fe4f 48 48 #include <synch/futex.h> 49 49 #include <synch/smc.h> 50 #include <synch/smp_memory_barrier.h>51 50 #include <ddi/ddi.h> 52 51 #include <ipc/event.h> … … 140 139 [SYS_FUTEX_WAKEUP] = (syshandler_t) sys_futex_wakeup, 141 140 [SYS_SMC_COHERENCE] = (syshandler_t) sys_smc_coherence, 142 [SYS_SMP_MEMORY_BARRIER] = (syshandler_t) sys_smp_memory_barrier,143 141 144 142 /* Address space related syscalls. */ -
kernel/test/test.c
r3875f106 r1a5fe4f 42 42 #include <atomic/atomic1.def> 43 43 #include <btree/btree1.def> 44 #include <cht/cht1.def>45 44 #include <debug/mips1.def> 46 45 #include <fault/fault1.def> … … 52 51 #include <synch/semaphore1.def> 53 52 #include <synch/semaphore2.def> 54 #include <synch/rcu1.def>55 #include <synch/workqueue2.def>56 #include <synch/workqueue3.def>57 53 #include <print/print1.def> 58 54 #include <print/print2.def> … … 61 57 #include <print/print5.def> 62 58 #include <thread/thread1.def> 63 #include <smpcall/smpcall1.def>64 59 { 65 60 .name = NULL, -
kernel/test/test.h
r3875f106 r1a5fe4f 60 60 extern const char *test_avltree1(void); 61 61 extern const char *test_btree1(void); 62 extern const char *test_cht1(void);63 62 extern const char *test_mips1(void); 64 63 extern const char *test_fault1(void); … … 77 76 extern const char *test_print5(void); 78 77 extern const char *test_thread1(void); 79 extern const char *test_smpcall1(void);80 extern const char *test_workqueue_all(void);81 extern const char *test_workqueue3(void);82 extern const char *test_workqueue3quit(void);83 extern const char *test_rcu1(void);84 78 85 79 extern test_t tests[]; -
uspace/Makefile
r3875f106 r1a5fe4f 67 67 app/nterm \ 68 68 app/redir \ 69 app/rcutest \70 69 app/sbi \ 71 70 app/sportdmp \ -
uspace/lib/c/Makefile
r3875f106 r1a5fe4f 79 79 generic/pcb.c \ 80 80 generic/smc.c \ 81 generic/smp_memory_barrier.c \82 81 generic/task.c \ 83 82 generic/imath.c \ … … 134 133 generic/thread/tls.c \ 135 134 generic/thread/futex.c \ 136 generic/thread/rcu.c \137 135 generic/thread/mpsc.c \ 138 136 generic/sysinfo.c \ -
uspace/lib/c/include/barrier.h
r3875f106 r1a5fe4f 38 38 #include <stdatomic.h> 39 39 40 extern void smp_memory_barrier(void);41 42 40 static inline void compiler_barrier(void) 43 41 {
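Editorial note: with SYS_SMP_MEMORY_BARRIER and libc's smp_memory_barrier() wrapper gone, barrier.h is left with the <stdatomic.h>-based primitives. For purely local ordering the standard C11 fences remain available; the helpers below are only a hedged illustration of those fences. The names are made up (not part of libc), and a local fence is not claimed to be a drop-in replacement for the removed syscall-backed barrier.

#include <stdatomic.h>

/* Hypothetical helper, not part of libc: a full sequentially consistent
 * fence using only standard C11 <stdatomic.h>, which barrier.h still
 * includes after this change. */
static inline void full_fence(void)
{
	atomic_thread_fence(memory_order_seq_cst);
}

/* A compiler-only barrier expressed as a C11 signal fence; the actual
 * body of compiler_barrier() is not visible in the hunk above. */
static inline void compiler_only_fence(void)
{
	atomic_signal_fence(memory_order_seq_cst);
}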