Changes in / [1a5fe4f:3875f106] in mainline
Files: 34 added, 44 edited
Legend:
- Unmodified
- Added
- Removed
HelenOS.config
r1a5fe4f r3875f106 430 430 ! [COMPILER=gcc_cross] CONFIG_LTO (n/y) 431 431 432 % Kernel RCU algorithm 433 @ "PREEMPT_PODZIMEK" Preemptible Podzimek-RCU 434 @ "PREEMPT_A" Preemptible A-RCU 435 ! RCU (choice) 436 432 437 % Compress init data 433 438 ! CONFIG_COMPRESSED_INIT (y/n) -
abi/include/abi/syscall.h
r1a5fe4f r3875f106 54 54 SYS_FUTEX_WAKEUP, 55 55 SYS_SMC_COHERENCE, 56 SYS_SMP_MEMORY_BARRIER, 56 57 57 58 SYS_AS_AREA_CREATE, -
defaults/amd64/Makefile.config
r1a5fe4f r3875f106 44 44 CONFIG_TEST = y 45 45 46 # Kernel RCU implementation 47 RCU = PREEMPT_A 48 46 49 # Input device class 47 50 CONFIG_HID_IN = generic -
defaults/arm32/Makefile.config
r1a5fe4f r3875f106 32 32 CONFIG_TEST = y 33 33 34 # Kernel RCU implementation 35 RCU = PREEMPT_A 36 34 37 # What is your input device? 35 38 CONFIG_HID_IN = generic -
defaults/ia32/Makefile.config
r1a5fe4f r3875f106 47 47 CONFIG_TEST = y 48 48 49 # Kernel RCU implementation 50 RCU = PREEMPT_A 51 49 52 # Input device class 50 53 CONFIG_HID_IN = generic -
defaults/ia64/Makefile.config
r1a5fe4f r3875f106 44 44 CONFIG_TEST = y 45 45 46 # Kernel RCU implementation 47 RCU = PREEMPT_A 48 46 49 # Input device class 47 50 CONFIG_HID_IN = generic -
defaults/mips32/Makefile.config
r1a5fe4f r3875f106 38 38 CONFIG_TEST = y 39 39 40 # Kernel RCU implementation 41 RCU = PREEMPT_A 42 40 43 # Input device class 41 44 CONFIG_HID_IN = generic -
defaults/ppc32/Makefile.config
r1a5fe4f r3875f106 35 35 CONFIG_TEST = y 36 36 37 # Kernel RCU implementation 38 RCU = PREEMPT_A 39 37 40 # Input device class 38 41 CONFIG_HID_IN = generic -
defaults/riscv64/Makefile.config
r1a5fe4f r3875f106 32 32 CONFIG_TEST = y 33 33 34 # Kernel RCU implementation 35 RCU = PREEMPT_A 36 34 37 # What is your input device? 35 38 CONFIG_HID_IN = generic -
defaults/sparc64/Makefile.config
r1a5fe4f r3875f106 47 47 CONFIG_TEST = y 48 48 49 # Kernel RCU implementation 50 RCU = PREEMPT_A 51 49 52 # Input device class 50 53 CONFIG_HID_IN = generic -
defaults/special/Makefile.config
r1a5fe4f r3875f106 32 32 CONFIG_TEST = y 33 33 34 # Kernel RCU implementation 35 RCU = PREEMPT_A 36 34 37 # Optimization level 35 38 OPTIMIZATION = 3 -
kernel/Makefile
r1a5fe4f r3875f106 163 163 generic/src/adt/bitmap.c \ 164 164 generic/src/adt/btree.c \ 165 generic/src/adt/cht.c \ 165 166 generic/src/adt/hash_table.c \ 166 167 generic/src/adt/list.c \ … … 226 227 generic/src/synch/semaphore.c \ 227 228 generic/src/synch/smc.c \ 229 generic/src/synch/smp_memory_barrier.c \ 228 230 generic/src/synch/waitq.c \ 229 231 generic/src/synch/futex.c \ 232 generic/src/synch/workqueue.c \ 233 generic/src/synch/rcu.c \ 230 234 generic/src/smp/ipi.c \ 231 235 generic/src/smp/smp.c \ 236 generic/src/smp/smp_call.c \ 232 237 generic/src/ipc/ipc.c \ 233 238 generic/src/ipc/sysipc.c \ … … 279 284 test/atomic/atomic1.c \ 280 285 test/btree/btree1.c \ 286 test/cht/cht1.c \ 281 287 test/fault/fault1.c \ 282 288 test/mm/falloc1.c \ … … 287 293 test/synch/semaphore1.c \ 288 294 test/synch/semaphore2.c \ 295 test/synch/workqueue2.c \ 296 test/synch/workqueue3.c \ 297 test/synch/rcu1.c \ 289 298 test/print/print1.c \ 290 299 test/print/print2.c \ … … 292 301 test/print/print4.c \ 293 302 test/print/print5.c \ 294 test/thread/thread1.c 303 test/thread/thread1.c \ 304 test/smpcall/smpcall1.c 295 305 296 306 ifeq ($(KARCH),mips32) -
kernel/arch/abs32le/Makefile.inc
r1a5fe4f r3875f106 51 51 arch/$(KARCH)/src/cpu/cpu.c \ 52 52 arch/$(KARCH)/src/smp/smp.c \ 53 arch/$(KARCH)/src/smp/smp_call.c \ 53 54 arch/$(KARCH)/src/smp/ipi.c \ 54 55 arch/$(KARCH)/src/mm/km.c \ -
kernel/arch/amd64/Makefile.inc
r1a5fe4f r3875f106 85 85 arch/$(KARCH)/src/smp/ipi.c \ 86 86 arch/$(KARCH)/src/smp/mps.c \ 87 arch/$(KARCH)/src/smp/smp_call.c \ 87 88 arch/$(KARCH)/src/smp/smp.c 88 89 endif -
kernel/arch/amd64/include/arch/interrupt.h
r1a5fe4f r3875f106 81 81 #define VECTOR_TLB_SHOOTDOWN_IPI (IVT_FREEBASE + 1) 82 82 #define VECTOR_DEBUG_IPI (IVT_FREEBASE + 2) 83 #define VECTOR_SMP_CALL_IPI (IVT_FREEBASE + 3) 83 84 84 85 extern void (*disable_irqs_function)(uint16_t); -
kernel/arch/amd64/src/interrupt.c
r1a5fe4f r3875f106 53 53 #include <symtab.h> 54 54 #include <stacktrace.h> 55 #include <smp/smp_call.h> 55 56 56 57 /* … … 160 161 tlb_shootdown_ipi_recv(); 161 162 } 163 164 static void arch_smp_call_ipi_recv(unsigned int n, istate_t *istate) 165 { 166 trap_virtual_eoi(); 167 smp_call_ipi_recv(); 168 } 162 169 #endif 163 170 … … 222 229 exc_register(VECTOR_TLB_SHOOTDOWN_IPI, "tlb_shootdown", true, 223 230 (iroutine_t) tlb_shootdown_ipi); 231 exc_register(VECTOR_SMP_CALL_IPI, "smp_call", true, 232 (iroutine_t) arch_smp_call_ipi_recv); 224 233 #endif 225 234 } -
kernel/arch/ia32/Makefile.inc
r1a5fe4f r3875f106 76 76 arch/$(KARCH)/src/smp/mps.c \ 77 77 arch/$(KARCH)/src/smp/smp.c \ 78 arch/$(KARCH)/src/smp/smp_call.c \ 78 79 arch/$(KARCH)/src/atomic.S \ 79 80 arch/$(KARCH)/src/smp/ipi.c \ -
kernel/arch/ia32/include/arch/interrupt.h
r1a5fe4f r3875f106 85 85 #define VECTOR_TLB_SHOOTDOWN_IPI (IVT_FREEBASE + 1) 86 86 #define VECTOR_DEBUG_IPI (IVT_FREEBASE + 2) 87 #define VECTOR_SMP_CALL_IPI (IVT_FREEBASE + 3) 87 88 88 89 extern void (*disable_irqs_function)(uint16_t); -
kernel/arch/ia32/src/interrupt.c
r1a5fe4f r3875f106 55 55 #include <symtab.h> 56 56 #include <stacktrace.h> 57 #include <smp/smp_call.h> 57 58 #include <proc/task.h> 58 59 … … 182 183 tlb_shootdown_ipi_recv(); 183 184 } 185 186 static void arch_smp_call_ipi_recv(unsigned int n, istate_t *istate) 187 { 188 trap_virtual_eoi(); 189 smp_call_ipi_recv(); 190 } 184 191 #endif 185 192 … … 243 250 exc_register(VECTOR_TLB_SHOOTDOWN_IPI, "tlb_shootdown", true, 244 251 (iroutine_t) tlb_shootdown_ipi); 252 exc_register(VECTOR_SMP_CALL_IPI, "smp_call", true, 253 (iroutine_t) arch_smp_call_ipi_recv); 245 254 #endif 246 255 } -
kernel/arch/ia64/Makefile.inc
r1a5fe4f r3875f106 58 58 arch/$(KARCH)/src/smc.c \ 59 59 arch/$(KARCH)/src/smp/smp.c \ 60 arch/$(KARCH)/src/smp/smp_call.c \ 60 61 arch/$(KARCH)/src/drivers/it.c 61 62 -
kernel/arch/mips32/Makefile.inc
r1a5fe4f r3875f106 68 68 arch/$(KARCH)/src/smc.c \ 69 69 arch/$(KARCH)/src/smp/smp.c \ 70 arch/$(KARCH)/src/smp/smp_call.c \ 70 71 arch/$(KARCH)/src/machine_func.c 71 72 -
kernel/arch/sparc64/Makefile.inc
r1a5fe4f r3875f106 99 99 ARCH_SOURCES += \ 100 100 arch/$(KARCH)/src/smp/$(USARCH)/smp.c \ 101 arch/$(KARCH)/src/smp/$(USARCH)/smp_call.c \ 101 102 arch/$(KARCH)/src/smp/$(USARCH)/ipi.c 102 103 endif -
kernel/arch/sparc64/include/arch/interrupt.h
r1a5fe4f r3875f106 47 47 enum { 48 48 IPI_TLB_SHOOTDOWN = VECTOR_TLB_SHOOTDOWN_IPI, 49 IPI_SMP_CALL 49 50 }; 50 51 -
kernel/arch/sparc64/src/smp/sun4u/ipi.c
r1a5fe4f r3875f106 35 35 #include <smp/ipi.h> 36 36 #include <arch/barrier.h> 37 #include <arch/smp/sun4u/ipi.h> 37 38 #include <assert.h> 38 39 #include <cpu.h> … … 42 43 #include <config.h> 43 44 #include <mm/tlb.h> 45 #include <smp/smp_call.h> 44 46 #include <arch/interrupt.h> 45 47 #include <arch/trap/interrupt.h> … … 173 175 } 174 176 177 /* 178 * Deliver an IPI to the specified processors (except the current one). 179 * 180 * Interrupts must be disabled. 181 * 182 * @param cpu_id Destination cpu id (index into cpus array). Must not 183 * be the current cpu. 184 * @param ipi IPI number. 185 */ 186 void ipi_unicast_arch(unsigned int cpu_id, int ipi) 187 { 188 assert(&cpus[cpu_id] != CPU); 189 190 if (ipi == IPI_SMP_CALL) { 191 cross_call(cpus[cpu_id].arch.mid, smp_call_ipi_recv); 192 } else { 193 panic("Unknown IPI (%d).\n", ipi); 194 return; 195 } 196 } 197 175 198 /** @} 176 199 */ -
kernel/generic/include/arch.h
r1a5fe4f r3875f106 66 66 typedef struct { 67 67 size_t preemption; /**< Preemption disabled counter and flag. */ 68 #ifdef RCU_PREEMPT_A 69 size_t rcu_nesting; /**< RCU nesting count and flag. */ 70 #endif 68 71 struct thread *thread; /**< Current thread. */ 69 72 struct task *task; /**< Current task. */ -
kernel/generic/include/cpu.h
r1a5fe4f r3875f106 38 38 #include <mm/tlb.h> 39 39 #include <synch/spinlock.h> 40 #include <synch/rcu_types.h> 40 41 #include <proc/scheduler.h> 41 42 #include <arch/cpu.h> … … 98 99 99 100 /** 101 * SMP calls to invoke on this CPU. 102 */ 103 SPINLOCK_DECLARE(smp_calls_lock); 104 list_t smp_pending_calls; 105 106 /** RCU per-cpu data. Uses own locking. */ 107 rcu_cpu_data_t rcu; 108 109 /** 100 110 * Stack used by scheduler when there is no running thread. 101 111 */ -
kernel/generic/include/proc/task.h
r1a5fe4f r3875f106 43 43 #include <synch/mutex.h> 44 44 #include <synch/futex.h> 45 #include <synch/workqueue.h> 46 #include <adt/cht.h> 45 47 #include <adt/list.h> 46 48 #include <adt/odict.h> … … 128 130 task_arch_t arch; 129 131 130 /** Serializes access to futex_list.*/ 131 SPINLOCK_DECLARE(futex_list_lock); 132 /** List of all futexes accesses by this task. */ 133 list_t futex_list; 132 struct futex_cache { 133 /** CHT mapping virtual addresses of futex variables to futex objects.*/ 134 cht_t ht; 135 /** Serializes access to futex_list.*/ 136 SPINLOCK_DECLARE(list_lock); 137 /** List of all futexes accesses by this task. */ 138 list_t list; 139 work_t destroy_work; 140 } *futexes; 134 141 135 142 /** Accumulated accounting. */ -
kernel/generic/include/proc/thread.h
r1a5fe4f r3875f106 41 41 #include <cpu.h> 42 42 #include <synch/spinlock.h> 43 #include <synch/rcu_types.h> 43 44 #include <adt/odict.h> 44 45 #include <mm/slab.h> … … 193 194 thread_id_t tid; 194 195 196 /** Work queue this thread belongs to or NULL. Immutable. */ 197 struct work_queue *workq; 198 /** Links work queue threads. Protected by workq->lock. */ 199 link_t workq_link; 200 /** True if the worker was blocked and is not running. Use thread->lock. */ 201 bool workq_blocked; 202 /** True if the worker will block in order to become idle. Use workq->lock. */ 203 bool workq_idling; 204 205 /** RCU thread related data. Protected by its own locks. */ 206 rcu_thread_data_t rcu; 207 195 208 /** Architecture-specific data. */ 196 209 thread_arch_t arch; -
kernel/generic/include/synch/futex.h
r1a5fe4f r3875f106 58 58 extern void futex_task_cleanup(void); 59 59 extern void futex_task_init(struct task *); 60 extern void futex_task_deinit(struct task *); 60 61 61 62 #endif -
kernel/generic/src/console/cmd.c
r1a5fe4f r3875f106 70 70 #include <sysinfo/sysinfo.h> 71 71 #include <symtab.h> 72 #include <synch/workqueue.h> 73 #include <synch/rcu.h> 72 74 #include <errno.h> 73 75 … … 534 536 }; 535 537 538 /* Data and methods for the 'workq' command */ 539 static int cmd_workq(cmd_arg_t *argv); 540 static cmd_info_t workq_info = { 541 .name = "workq", 542 .description = "Show global workq information.", 543 .func = cmd_workq, 544 .argc = 0 545 }; 546 547 /* Data and methods for the 'workq' command */ 548 static int cmd_rcu(cmd_arg_t *argv); 549 static cmd_info_t rcu_info = { 550 .name = "rcu", 551 .description = "Show RCU run-time statistics.", 552 .func = cmd_rcu, 553 .argc = 0 554 }; 555 536 556 /* Data and methods for 'ipc' command */ 537 557 static int cmd_ipc(cmd_arg_t *argv); … … 598 618 &physmem_info, 599 619 &reboot_info, 620 &rcu_info, 600 621 &sched_info, 601 622 &set4_info, … … 607 628 &uptime_info, 608 629 &version_info, 630 &workq_info, 609 631 &zones_info, 610 632 &zone_info, … … 1280 1302 } 1281 1303 1304 /** Prints information about the global work queue. 1305 * 1306 * @param argv Ignores 1307 * 1308 * @return Always 1 1309 */ 1310 int cmd_workq(cmd_arg_t *argv) 1311 { 1312 workq_global_print_info(); 1313 return 1; 1314 } 1315 1316 /** Prints RCU statistics. 1317 * 1318 * @param argv Ignores 1319 * 1320 * @return Always 1 1321 */ 1322 int cmd_rcu(cmd_arg_t *argv) 1323 { 1324 rcu_print_stat(); 1325 return 1; 1326 } 1327 1282 1328 /** Command for listing memory zones 1283 1329 * -
kernel/generic/src/cpu/cpu.c
r1a5fe4f r3875f106 50 50 #include <sysinfo/sysinfo.h> 51 51 #include <arch/cycle.h> 52 #include <synch/rcu.h> 52 53 53 54 cpu_t *cpus; … … 107 108 cpu_identify(); 108 109 cpu_arch_init(); 110 rcu_cpu_init(); 109 111 } 110 112 -
kernel/generic/src/main/kinit.c
r1a5fe4f r3875f106 79 79 #include <synch/waitq.h> 80 80 #include <synch/spinlock.h> 81 #include <synch/workqueue.h> 82 #include <synch/rcu.h> 81 83 82 84 #define ALIVE_CHARS 4 … … 107 109 108 110 interrupts_disable(); 111 112 /* Start processing RCU callbacks. RCU is fully functional afterwards. */ 113 rcu_kinit_init(); 114 115 /* 116 * Start processing work queue items. Some may have been queued during boot. 117 */ 118 workq_global_worker_init(); 109 119 110 120 #ifdef CONFIG_SMP -
kernel/generic/src/main/main.c
r1a5fe4f r3875f106 78 78 #include <synch/waitq.h> 79 79 #include <synch/futex.h> 80 #include <synch/workqueue.h> 81 #include <smp/smp_call.h> 80 82 #include <arch/arch.h> 81 83 #include <arch.h> … … 272 274 ARCH_OP(post_cpu_init); 273 275 276 smp_call_init(); 277 workq_global_init(); 274 278 clock_counter_init(); 275 279 timeout_init(); … … 377 381 void main_ap_separated_stack(void) 378 382 { 383 smp_call_init(); 384 379 385 /* 380 386 * Configure timeouts for this cpu. -
kernel/generic/src/proc/scheduler.c
r1a5fe4f r3875f106 54 54 #include <atomic.h> 55 55 #include <synch/spinlock.h> 56 #include <synch/workqueue.h> 57 #include <synch/rcu.h> 56 58 #include <config.h> 57 59 #include <context.h> … … 88 90 { 89 91 before_thread_runs_arch(); 92 rcu_before_thread_runs(); 90 93 91 94 #ifdef CONFIG_FPU_LAZY … … 128 131 static void after_thread_ran(void) 129 132 { 133 workq_after_thread_ran(); 134 rcu_after_thread_ran(); 130 135 after_thread_ran_arch(); 131 136 } … … 425 430 426 431 case Exiting: 432 rcu_thread_exiting(); 427 433 repeat: 428 434 if (THREAD->detached) { -
kernel/generic/src/proc/task.c
r1a5fe4f r3875f106 285 285 286 286 /* 287 * Free up dynamically allocated state. 288 */ 289 futex_task_deinit(task); 290 291 /* 287 292 * Drop our reference to the address space. 288 293 */ -
kernel/generic/src/proc/the.c
r1a5fe4f r3875f106 60 60 the->as = NULL; 61 61 the->magic = MAGIC; 62 #ifdef RCU_PREEMPT_A 63 the->rcu_nesting = 0; 64 #endif 62 65 } 63 66 -
kernel/generic/src/proc/thread.c
r1a5fe4f r3875f106 48 48 #include <synch/spinlock.h> 49 49 #include <synch/waitq.h> 50 #include <synch/workqueue.h> 51 #include <synch/rcu.h> 50 52 #include <cpu.h> 51 53 #include <str.h> … … 67 69 #include <syscall/copy.h> 68 70 #include <errno.h> 69 #include <debug.h>70 71 71 72 /** Thread states */ … … 271 272 { 272 273 assert(irq_spinlock_locked(&thread->lock)); 274 workq_before_thread_is_ready(thread); 273 275 } 274 276 … … 397 399 thread->task = task; 398 400 401 thread->workq = NULL; 402 399 403 thread->fpu_context_exists = false; 400 404 thread->fpu_context_engaged = false; … … 410 414 /* Might depend on previous initialization */ 411 415 thread_create_arch(thread); 416 417 rcu_thread_init(thread); 412 418 413 419 if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH) -
kernel/generic/src/synch/futex.c
r1a5fe4f r3875f106 45 45 * encountered before). Futex object's lifetime is governed by 46 46 * a reference count that represents the number of all the different 47 * tasks that reference the futex variable. A futex object is freed 47 * user space virtual addresses from all tasks that map to the 48 * physical address of the futex variable. A futex object is freed 48 49 * when the last task having accessed the futex exits. 49 50 * … … 51 52 * of pointers (futex_ptr_t, task->futex_list) to the different futex 52 53 * objects. 54 * 55 * To speed up translation of futex variables' virtual addresses 56 * to their physical addresses, futex pointers accessed by the 57 * task are furthermore stored in a concurrent hash table (CHT, 58 * task->futexes->ht). A single lookup without locks or accesses 59 * to the page table translates a futex variable's virtual address 60 * into its futex kernel object. 53 61 */ 54 62 … … 57 65 #include <synch/mutex.h> 58 66 #include <synch/spinlock.h> 67 #include <synch/rcu.h> 59 68 #include <mm/frame.h> 60 69 #include <mm/page.h> … … 64 73 #include <genarch/mm/page_pt.h> 65 74 #include <genarch/mm/page_ht.h> 75 #include <adt/cht.h> 66 76 #include <adt/hash.h> 67 77 #include <adt/hash_table.h> … … 74 84 /** Task specific pointer to a global kernel futex object. */ 75 85 typedef struct futex_ptr { 76 /** Link for the list of all futex pointers used by a task. */ 77 link_t task_link; 86 /** CHT link. */ 87 cht_link_t cht_link; 88 /** List of all futex pointers used by the task. */ 89 link_t all_link; 78 90 /** Kernel futex object. */ 79 91 futex_t *futex; 92 /** User space virtual address of the futex variable in the task. 
*/ 93 uintptr_t uaddr; 80 94 } futex_ptr_t; 95 96 static void destroy_task_cache(work_t *work); 81 97 82 98 static void futex_initialize(futex_t *futex, uintptr_t paddr); … … 86 102 87 103 static futex_t *get_futex(uintptr_t uaddr); 104 static futex_t *find_cached_futex(uintptr_t uaddr); 105 static futex_t *get_and_cache_futex(uintptr_t phys_addr, uintptr_t uaddr); 88 106 static bool find_futex_paddr(uintptr_t uaddr, uintptr_t *phys_addr); 89 107 … … 92 110 static bool futex_ht_key_equal(void *key, const ht_link_t *item); 93 111 static void futex_ht_remove_callback(ht_link_t *item); 112 113 static size_t task_fut_ht_hash(const cht_link_t *link); 114 static size_t task_fut_ht_key_hash(void *key); 115 static bool task_fut_ht_equal(const cht_link_t *item1, const cht_link_t *item2); 116 static bool task_fut_ht_key_equal(void *key, const cht_link_t *item); 94 117 95 118 /** Mutex protecting the global futex hash table. … … 113 136 }; 114 137 138 /** Task futex cache CHT operations. */ 139 static cht_ops_t task_futex_ht_ops = { 140 .hash = task_fut_ht_hash, 141 .key_hash = task_fut_ht_key_hash, 142 .equal = task_fut_ht_equal, 143 .key_equal = task_fut_ht_key_equal, 144 .remove_callback = NULL 145 }; 146 115 147 /** Initialize futex subsystem. */ 116 148 void futex_init(void) … … 122 154 void futex_task_init(struct task *task) 123 155 { 124 list_initialize(&task->futex_list); 125 spinlock_initialize(&task->futex_list_lock, "futex-list-lock"); 156 task->futexes = nfmalloc(sizeof(struct futex_cache)); 157 158 cht_create(&task->futexes->ht, 0, 0, 0, true, &task_futex_ht_ops); 159 160 list_initialize(&task->futexes->list); 161 spinlock_initialize(&task->futexes->list_lock, "futex-list-lock"); 162 } 163 164 /** Destroys the futex structures for the dying task. */ 165 void futex_task_deinit(task_t *task) 166 { 167 /* Interrupts are disabled so we must not block (cannot run cht_destroy). 
*/ 168 if (interrupts_disabled()) { 169 /* Invoke the blocking cht_destroy in the background. */ 170 workq_global_enqueue_noblock(&task->futexes->destroy_work, 171 destroy_task_cache); 172 } else { 173 /* We can block. Invoke cht_destroy in this thread. */ 174 destroy_task_cache(&task->futexes->destroy_work); 175 } 176 } 177 178 /** Deallocates a task's CHT futex cache (must already be empty). */ 179 static void destroy_task_cache(work_t *work) 180 { 181 struct futex_cache *cache = 182 member_to_inst(work, struct futex_cache, destroy_work); 183 184 /* 185 * Destroy the cache before manually freeing items of the cache in case 186 * table resize is in progress. 187 */ 188 cht_destroy_unsafe(&cache->ht); 189 190 /* Manually free futex_ptr cache items. */ 191 list_foreach_safe(cache->list, cur_link, next_link) { 192 futex_ptr_t *fut_ptr = member_to_inst(cur_link, futex_ptr_t, all_link); 193 194 list_remove(cur_link); 195 free(fut_ptr); 196 } 197 198 free(cache); 126 199 } 127 200 … … 129 202 void futex_task_cleanup(void) 130 203 { 204 struct futex_cache *futexes = TASK->futexes; 205 131 206 /* All threads of this task have terminated. This is the last thread. */ 132 spinlock_lock(&TASK->futex_list_lock); 133 134 list_foreach_safe(TASK->futex_list, cur_link, next_link) { 135 futex_ptr_t *futex_ptr = member_to_inst(cur_link, futex_ptr_t, 136 task_link); 137 138 futex_release_ref_locked(futex_ptr->futex); 139 free(futex_ptr); 140 } 141 142 spinlock_unlock(&TASK->futex_list_lock); 207 spinlock_lock(&futexes->list_lock); 208 209 list_foreach_safe(futexes->list, cur_link, next_link) { 210 futex_ptr_t *fut_ptr = member_to_inst(cur_link, futex_ptr_t, all_link); 211 212 /* 213 * The function is free to free the futex. All other threads of this 214 * task have already terminated, so they have also definitely 215 * exited their CHT futex cache protecting rcu reader sections. 216 * Moreover release_ref() only frees the futex if this is the 217 * last task referencing the futex. 
Therefore, only threads 218 * of this task may have referenced the futex if it is to be freed. 219 */ 220 futex_release_ref_locked(fut_ptr->futex); 221 } 222 223 spinlock_unlock(&futexes->list_lock); 143 224 } 144 225 … … 159 240 { 160 241 assert(spinlock_locked(&futex_ht_lock)); 161 assert( futex->refcount > 0);242 assert(0 < futex->refcount); 162 243 ++futex->refcount; 163 244 } … … 167 248 { 168 249 assert(spinlock_locked(&futex_ht_lock)); 169 assert( futex->refcount > 0);250 assert(0 < futex->refcount); 170 251 171 252 --futex->refcount; 172 253 173 if ( futex->refcount == 0)254 if (0 == futex->refcount) { 174 255 hash_table_remove(&futex_ht, &futex->paddr); 256 } 175 257 } 176 258 … … 186 268 static futex_t *get_futex(uintptr_t uaddr) 187 269 { 270 futex_t *futex = find_cached_futex(uaddr); 271 272 if (futex) 273 return futex; 274 188 275 uintptr_t paddr; 189 276 190 if (!find_futex_paddr(uaddr, &paddr)) 191 return NULL; 192 193 futex_t *futex = malloc(sizeof(futex_t)); 194 if (!futex) 195 return NULL; 196 197 futex_ptr_t *futex_ptr = malloc(sizeof(futex_ptr_t)); 198 if (!futex_ptr) { 199 free(futex); 200 return NULL; 201 } 202 203 /* 204 * Find the futex object in the global futex table (or insert it 205 * if it is not present). 
206 */ 207 spinlock_lock(&TASK->futex_list_lock); 208 spinlock_lock(&futex_ht_lock); 209 210 ht_link_t *fut_link = hash_table_find(&futex_ht, &paddr); 211 212 if (fut_link) { 213 free(futex); 214 futex = member_to_inst(fut_link, futex_t, ht_link); 215 216 /* 217 * See if the futex is already known to the TASK 218 */ 219 bool found = false; 220 list_foreach(TASK->futex_list, task_link, futex_ptr_t, fp) { 221 if (fp->futex->paddr == paddr) { 222 found = true; 223 break; 224 } 225 } 226 /* 227 * If not, put it on the TASK->futex_list and bump its reference 228 * count 229 */ 230 if (!found) { 231 list_append(&futex_ptr->task_link, &TASK->futex_list); 232 futex_add_ref(futex); 233 } else 234 free(futex_ptr); 235 } else { 236 futex_initialize(futex, paddr); 237 hash_table_insert(&futex_ht, &futex->ht_link); 238 239 /* 240 * This is a new futex, so it is not on the TASK->futex_list yet 241 */ 242 futex_ptr->futex = futex; 243 list_append(&futex_ptr->task_link, &TASK->futex_list); 244 } 245 246 spinlock_unlock(&futex_ht_lock); 247 spinlock_unlock(&TASK->futex_list_lock); 248 249 return futex; 277 if (!find_futex_paddr(uaddr, &paddr)) { 278 return 0; 279 } 280 281 return get_and_cache_futex(paddr, uaddr); 250 282 } 251 283 … … 274 306 } 275 307 308 /** Returns the futex cached in this task with the virtual address uaddr. */ 309 static futex_t *find_cached_futex(uintptr_t uaddr) 310 { 311 cht_read_lock(); 312 313 futex_t *futex; 314 cht_link_t *futex_ptr_link = cht_find_lazy(&TASK->futexes->ht, &uaddr); 315 316 if (futex_ptr_link) { 317 futex_ptr_t *futex_ptr = 318 member_to_inst(futex_ptr_link, futex_ptr_t, cht_link); 319 320 futex = futex_ptr->futex; 321 } else { 322 futex = NULL; 323 } 324 325 cht_read_unlock(); 326 327 return futex; 328 } 329 330 /** 331 * Returns a kernel futex for the physical address @a phys_addr and caches 332 * it in this task under the virtual address @a uaddr (if not already cached). 
333 */ 334 static futex_t *get_and_cache_futex(uintptr_t phys_addr, uintptr_t uaddr) 335 { 336 futex_t *futex = malloc(sizeof(futex_t)); 337 if (!futex) 338 return NULL; 339 340 /* 341 * Find the futex object in the global futex table (or insert it 342 * if it is not present). 343 */ 344 spinlock_lock(&futex_ht_lock); 345 346 ht_link_t *fut_link = hash_table_find(&futex_ht, &phys_addr); 347 348 if (fut_link) { 349 free(futex); 350 futex = member_to_inst(fut_link, futex_t, ht_link); 351 futex_add_ref(futex); 352 } else { 353 futex_initialize(futex, phys_addr); 354 hash_table_insert(&futex_ht, &futex->ht_link); 355 } 356 357 spinlock_unlock(&futex_ht_lock); 358 359 /* 360 * Cache the link to the futex object for this task. 361 */ 362 futex_ptr_t *fut_ptr = malloc(sizeof(futex_ptr_t)); 363 if (!fut_ptr) { 364 spinlock_lock(&futex_ht_lock); 365 futex_release_ref(futex); 366 spinlock_unlock(&futex_ht_lock); 367 return NULL; 368 } 369 cht_link_t *dup_link; 370 371 fut_ptr->futex = futex; 372 fut_ptr->uaddr = uaddr; 373 374 cht_read_lock(); 375 376 /* Cache the mapping from the virtual address to the futex for this task. */ 377 if (cht_insert_unique(&TASK->futexes->ht, &fut_ptr->cht_link, &dup_link)) { 378 spinlock_lock(&TASK->futexes->list_lock); 379 list_append(&fut_ptr->all_link, &TASK->futexes->list); 380 spinlock_unlock(&TASK->futexes->list_lock); 381 } else { 382 /* Another thread of this task beat us to it. Use that mapping instead.*/ 383 free(fut_ptr); 384 futex_release_ref_locked(futex); 385 386 futex_ptr_t *dup = member_to_inst(dup_link, futex_ptr_t, cht_link); 387 futex = dup->futex; 388 } 389 390 cht_read_unlock(); 391 392 return futex; 393 } 394 276 395 /** Sleep in futex wait queue with a timeout. 277 *278 396 * If the sleep times out or is interrupted, the next wakeup is ignored. 279 397 * The userspace portion of the call must handle this condition. 
… … 359 477 } 360 478 479 /* 480 * Operations of a task's CHT that caches mappings of futex user space 481 * virtual addresses to kernel futex objects. 482 */ 483 484 static size_t task_fut_ht_hash(const cht_link_t *link) 485 { 486 const futex_ptr_t *fut_ptr = member_to_inst(link, futex_ptr_t, cht_link); 487 return fut_ptr->uaddr; 488 } 489 490 static size_t task_fut_ht_key_hash(void *key) 491 { 492 return *(uintptr_t *)key; 493 } 494 495 static bool task_fut_ht_equal(const cht_link_t *item1, const cht_link_t *item2) 496 { 497 const futex_ptr_t *fut_ptr1 = member_to_inst(item1, futex_ptr_t, cht_link); 498 const futex_ptr_t *fut_ptr2 = member_to_inst(item2, futex_ptr_t, cht_link); 499 500 return fut_ptr1->uaddr == fut_ptr2->uaddr; 501 } 502 503 static bool task_fut_ht_key_equal(void *key, const cht_link_t *item) 504 { 505 const futex_ptr_t *fut_ptr = member_to_inst(item, futex_ptr_t, cht_link); 506 uintptr_t uaddr = *(uintptr_t *)key; 507 508 return fut_ptr->uaddr == uaddr; 509 } 510 361 511 /** @} 362 512 */ -
kernel/generic/src/syscall/syscall.c
r1a5fe4f r3875f106 48 48 #include <synch/futex.h> 49 49 #include <synch/smc.h> 50 #include <synch/smp_memory_barrier.h> 50 51 #include <ddi/ddi.h> 51 52 #include <ipc/event.h> … … 139 140 [SYS_FUTEX_WAKEUP] = (syshandler_t) sys_futex_wakeup, 140 141 [SYS_SMC_COHERENCE] = (syshandler_t) sys_smc_coherence, 142 [SYS_SMP_MEMORY_BARRIER] = (syshandler_t) sys_smp_memory_barrier, 141 143 142 144 /* Address space related syscalls. */ -
kernel/test/test.c
r1a5fe4f r3875f106 42 42 #include <atomic/atomic1.def> 43 43 #include <btree/btree1.def> 44 #include <cht/cht1.def> 44 45 #include <debug/mips1.def> 45 46 #include <fault/fault1.def> … … 51 52 #include <synch/semaphore1.def> 52 53 #include <synch/semaphore2.def> 54 #include <synch/rcu1.def> 55 #include <synch/workqueue2.def> 56 #include <synch/workqueue3.def> 53 57 #include <print/print1.def> 54 58 #include <print/print2.def> … … 57 61 #include <print/print5.def> 58 62 #include <thread/thread1.def> 63 #include <smpcall/smpcall1.def> 59 64 { 60 65 .name = NULL, -
kernel/test/test.h
r1a5fe4f r3875f106 60 60 extern const char *test_avltree1(void); 61 61 extern const char *test_btree1(void); 62 extern const char *test_cht1(void); 62 63 extern const char *test_mips1(void); 63 64 extern const char *test_fault1(void); … … 76 77 extern const char *test_print5(void); 77 78 extern const char *test_thread1(void); 79 extern const char *test_smpcall1(void); 80 extern const char *test_workqueue_all(void); 81 extern const char *test_workqueue3(void); 82 extern const char *test_workqueue3quit(void); 83 extern const char *test_rcu1(void); 78 84 79 85 extern test_t tests[]; -
uspace/Makefile
r1a5fe4f r3875f106 67 67 app/nterm \ 68 68 app/redir \ 69 app/rcutest \ 69 70 app/sbi \ 70 71 app/sportdmp \ -
uspace/lib/c/Makefile
r1a5fe4f r3875f106 79 79 generic/pcb.c \ 80 80 generic/smc.c \ 81 generic/smp_memory_barrier.c \ 81 82 generic/task.c \ 82 83 generic/imath.c \ … … 133 134 generic/thread/tls.c \ 134 135 generic/thread/futex.c \ 136 generic/thread/rcu.c \ 135 137 generic/thread/mpsc.c \ 136 138 generic/sysinfo.c \ -
uspace/lib/c/include/barrier.h
r1a5fe4f r3875f106 38 38 #include <stdatomic.h> 39 39 40 extern void smp_memory_barrier(void); 41 40 42 static inline void compiler_barrier(void) 41 43 {
Note:
See TracChangeset
for help on using the changeset viewer.