Changeset 111b9b9 in mainline for kernel/generic/src/synch/waitq.c
- Timestamp:
- 2023-02-11T19:13:44Z (15 months ago)
- Branches:
- master, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 4777e02
- Parents:
- 76e17d7c
- git-author:
- Jiří Zárevúcky <zarevucky.jiri@…> (2022-08-15 17:46:39)
- git-committer:
- Jiří Zárevúcky <zarevucky.jiri@…> (2023-02-11 19:13:44)
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/synch/waitq.c
r76e17d7c r111b9b9 1 1 /* 2 2 * Copyright (c) 2001-2004 Jakub Jermar 3 * Copyright (c) 2022 Jiří Zárevúcky 3 4 * All rights reserved. 4 5 * … … 60 61 #include <mem.h> 61 62 62 static void waitq_sleep_timed_out(void *);63 static void waitq_complete_wakeup(waitq_t *);64 65 63 /** Initialize wait queue 66 64 * … … 77 75 } 78 76 77 /** 78 * Initialize wait queue with an initial number of queued wakeups 79 * (or a wakeup debt if negative). 80 */ 79 81 void waitq_initialize_with_count(waitq_t *wq, int count) 80 82 { 81 memsetb(wq, sizeof(*wq), 0); 82 irq_spinlock_initialize(&wq->lock, "wq.lock"); 83 list_initialize(&wq->sleepers); 84 wq->missed_wakeups = count; 85 } 86 87 /** Handle timeout during waitq_sleep_timeout() call 88 * 89 * This routine is called when waitq_sleep_timeout() times out. 90 * Interrupts are disabled. 91 * 92 * It is supposed to try to remove 'its' thread from the wait queue; 93 * it can eventually fail to achieve this goal when these two events 94 * overlap. In that case it behaves just as though there was no 95 * timeout at all. 96 * 97 * @param data Pointer to the thread that called waitq_sleep_timeout(). 
98 * 99 */ 100 void waitq_sleep_timed_out(void *data) 101 { 102 thread_t *thread = (thread_t *) data; 103 bool do_wakeup = false; 104 DEADLOCK_PROBE_INIT(p_wqlock); 105 106 irq_spinlock_lock(&threads_lock, false); 107 108 grab_locks: 109 irq_spinlock_lock(&thread->lock, false); 110 111 waitq_t *wq; 112 if ((wq = thread->sleep_queue)) { /* Assignment */ 113 if (!irq_spinlock_trylock(&wq->lock)) { 114 irq_spinlock_unlock(&thread->lock, false); 115 DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD); 116 /* Avoid deadlock */ 117 goto grab_locks; 118 } 119 120 list_remove(&thread->wq_link); 121 thread->saved_context = thread->sleep_timeout_context; 122 do_wakeup = true; 123 if (thread->sleep_composable) 124 wq->ignore_wakeups++; 125 thread->sleep_queue = NULL; 126 irq_spinlock_unlock(&wq->lock, false); 127 } 128 129 irq_spinlock_unlock(&thread->lock, false); 130 131 if (do_wakeup) 132 thread_ready(thread); 133 134 irq_spinlock_unlock(&threads_lock, false); 135 } 136 137 /** Interrupt sleeping thread. 138 * 139 * This routine attempts to interrupt a thread from its sleep in 140 * a waitqueue. If the thread is not found sleeping, no action 141 * is taken. 142 * 143 * The threads_lock must be already held and interrupts must be 144 * disabled upon calling this function. 145 * 146 * @param thread Thread to be interrupted. 147 * 148 */ 149 void waitq_interrupt_sleep(thread_t *thread) 150 { 151 bool do_wakeup = false; 152 DEADLOCK_PROBE_INIT(p_wqlock); 153 154 /* 155 * The thread is guaranteed to exist because 156 * threads_lock is held. 157 */ 158 159 grab_locks: 160 irq_spinlock_lock(&thread->lock, false); 161 162 waitq_t *wq; 163 if ((wq = thread->sleep_queue)) { /* Assignment */ 164 if (!(thread->sleep_interruptible)) { 165 /* 166 * The sleep cannot be interrupted. 
167 */ 168 irq_spinlock_unlock(&thread->lock, false); 169 return; 170 } 171 172 if (!irq_spinlock_trylock(&wq->lock)) { 173 /* Avoid deadlock */ 174 irq_spinlock_unlock(&thread->lock, false); 175 DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD); 176 goto grab_locks; 177 } 178 179 list_remove(&thread->wq_link); 180 thread->saved_context = thread->sleep_interruption_context; 181 if (thread->sleep_composable) 182 wq->ignore_wakeups++; 183 do_wakeup = true; 184 thread->sleep_queue = NULL; 185 irq_spinlock_unlock(&wq->lock, false); 186 } 187 188 irq_spinlock_unlock(&thread->lock, false); 189 190 if (do_wakeup) 191 thread_ready(thread); 83 waitq_initialize(wq); 84 wq->wakeup_balance = count; 192 85 } 193 86 … … 197 90 errno_t waitq_sleep(waitq_t *wq) 198 91 { 199 return waitq_sleep_timeout(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE, NULL); 92 return _waitq_sleep_timeout(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE); 93 } 94 95 errno_t waitq_sleep_timeout(waitq_t *wq, uint32_t usec) 96 { 97 return _waitq_sleep_timeout(wq, usec, SYNCH_FLAGS_NON_BLOCKING); 200 98 } 201 99 202 100 /** Sleep until either wakeup, timeout or interruption occurs 203 101 * 204 * This is a sleep implementation which allows itself to time out or to be205 * interrupted from the sleep, restoring a failover context.206 *207 102 * Sleepers are organised in a FIFO fashion in a structure called wait queue. 208 103 * 209 * This function is really basic in that other functions as waitq_sleep()210 * and all the *_timeout() functions use it.104 * Other functions as waitq_sleep() and all the *_timeout() functions are 105 * implemented using this function. 211 106 * 212 107 * @param wq Pointer to wait queue. … … 214 109 * @param flags Specify mode of the sleep. 215 110 * 216 * @param[out] blocked On return, regardless of the return code,217 * `*blocked` is set to `true` iff the thread went to218 * sleep.219 *220 111 * The sleep can be interrupted only if the 221 112 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags. 
… … 231 122 * call will immediately return, reporting either success or failure. 232 123 * 233 * @return EAGAIN, meaning that the sleep failed because it was requested 234 * as SYNCH_FLAGS_NON_BLOCKING, but there was no pending wakeup. 235 * @return ETIMEOUT, meaning that the sleep timed out. 236 * @return EINTR, meaning that somebody interrupted the sleeping 237 * thread. Check the value of `*blocked` to see if the thread slept, 238 * or if a pending interrupt forced it to return immediately. 124 * @return ETIMEOUT, meaning that the sleep timed out, or a nonblocking call 125 * returned unsuccessfully. 126 * @return EINTR, meaning that somebody interrupted the sleeping thread. 239 127 * @return EOK, meaning that none of the above conditions occurred, and the 240 * thread was woken up successfully by `waitq_wakeup()`. Check 241 * the value of `*blocked` to see if the thread slept or if 242 * the wakeup was already pending. 243 * 244 */ 245 errno_t waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked) 128 * thread was woken up successfully by `waitq_wake_*()`. 129 * 130 */ 131 errno_t _waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags) 246 132 { 247 133 assert((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec))); 248 249 ipl_t ipl = waitq_sleep_prepare(wq); 250 bool nblocked; 251 errno_t rc = waitq_sleep_timeout_unsafe(wq, usec, flags, &nblocked); 252 waitq_sleep_finish(wq, nblocked, ipl); 253 254 if (blocked != NULL) { 255 *blocked = nblocked; 256 } 257 return rc; 134 return waitq_sleep_timeout_unsafe(wq, usec, flags, waitq_sleep_prepare(wq)); 258 135 } 259 136 … … 268 145 * 269 146 */ 270 ipl_t waitq_sleep_prepare(waitq_t *wq)147 wait_guard_t waitq_sleep_prepare(waitq_t *wq) 271 148 { 272 149 ipl_t ipl = interrupts_disable(); 273 150 irq_spinlock_lock(&wq->lock, false); 274 return ipl; 275 } 276 277 /** Finish waiting in a wait queue. 
278 * 279 * This function restores interrupts to the state that existed prior 280 * to the call to waitq_sleep_prepare(). If necessary, the wait queue 281 * lock is released. 282 * 283 * @param wq Wait queue. 284 * @param blocked Out parameter of waitq_sleep_timeout_unsafe(). 285 * @param ipl Interrupt level returned by waitq_sleep_prepare(). 286 * 287 */ 288 void waitq_sleep_finish(waitq_t *wq, bool blocked, ipl_t ipl) 289 { 290 if (blocked) { 291 /* 292 * Wait for a waitq_wakeup() or waitq_unsleep() to complete 293 * before returning from waitq_sleep() to the caller. Otherwise 294 * the caller might expect that the wait queue is no longer used 295 * and deallocate it (although the wakeup on a another cpu has 296 * not yet completed and is using the wait queue). 297 * 298 * Note that we have to do this for EOK and EINTR, but not 299 * necessarily for ETIMEOUT where the timeout handler stops 300 * using the waitq before waking us up. To be on the safe side, 301 * ensure the waitq is not in use anymore in this case as well. 302 */ 303 waitq_complete_wakeup(wq); 304 } else { 305 irq_spinlock_unlock(&wq->lock, false); 306 } 307 308 interrupts_restore(ipl); 309 } 310 311 errno_t waitq_sleep_unsafe(waitq_t *wq, bool *blocked) 312 { 313 return waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE, blocked); 151 return (wait_guard_t) { 152 .ipl = ipl, 153 }; 154 } 155 156 errno_t waitq_sleep_unsafe(waitq_t *wq, wait_guard_t guard) 157 { 158 return waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE, guard); 314 159 } 315 160 … … 317 162 * 318 163 * This function implements logic of sleeping in a wait queue. 319 * This call must be preceded by a call to waitq_sleep_prepare() 320 * and followed by a call to waitq_sleep_finish(). 164 * This call must be preceded by a call to waitq_sleep_prepare(). 321 165 * 322 166 * @param wq See waitq_sleep_timeout(). 
… … 329 173 * 330 174 */ 331 errno_t waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked) 332 { 333 *blocked = false; 175 errno_t waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags, wait_guard_t guard) 176 { 177 errno_t rc; 178 179 /* 180 * If true, and this thread's sleep returns without a wakeup 181 * (timed out or interrupted), waitq ignores the next wakeup. 182 * This is necessary for futex to be able to handle those conditions. 183 */ 184 bool sleep_composable = (flags & SYNCH_FLAGS_FUTEX); 185 bool interruptible = (flags & SYNCH_FLAGS_INTERRUPTIBLE); 186 187 if (wq->closed) { 188 rc = EOK; 189 goto exit; 190 } 334 191 335 192 /* Checks whether to go to sleep at all */ 336 if (wq->missed_wakeups) { 337 wq->missed_wakeups--; 338 return EOK; 339 } else { 340 if (PARAM_NON_BLOCKING(flags, usec)) { 341 /* Return immediately instead of going to sleep */ 342 return EAGAIN; 193 if (wq->wakeup_balance > 0) { 194 wq->wakeup_balance--; 195 196 rc = EOK; 197 goto exit; 198 } 199 200 if (PARAM_NON_BLOCKING(flags, usec)) { 201 /* Return immediately instead of going to sleep */ 202 rc = ETIMEOUT; 203 goto exit; 204 } 205 206 /* Just for debugging output. */ 207 atomic_store_explicit(&THREAD->sleep_queue, wq, memory_order_relaxed); 208 209 /* 210 * This thread_t field is synchronized exclusively via 211 * waitq lock of the waitq currently listing it. 212 */ 213 list_append(&THREAD->wq_link, &wq->sleepers); 214 215 /* Needs to be run when interrupts are still disabled. */ 216 deadline_t deadline = usec > 0 ? 217 timeout_deadline_in_usec(usec) : DEADLINE_NEVER; 218 219 while (true) { 220 bool terminating = (thread_wait_start() == THREAD_TERMINATING); 221 if (terminating && interruptible) { 222 rc = EINTR; 223 goto exit; 343 224 } 344 } 345 346 /* 347 * Now we are firmly decided to go to sleep. 
348 * 349 */ 350 irq_spinlock_lock(&THREAD->lock, false); 351 352 timeout_t timeout; 353 timeout_initialize(&timeout); 354 355 THREAD->sleep_composable = (flags & SYNCH_FLAGS_FUTEX); 356 357 if (flags & SYNCH_FLAGS_INTERRUPTIBLE) { 225 226 irq_spinlock_unlock(&wq->lock, false); 227 228 bool timed_out = (thread_wait_finish(deadline) == THREAD_WAIT_TIMEOUT); 229 358 230 /* 359 * If the thread was already interrupted, 360 * don't go to sleep at all. 231 * We always need to re-lock the WQ, since concurrently running 232 * waitq_wakeup() may still not have exited. 233 * If we didn't always do this, we'd risk waitq_wakeup() that woke us 234 * up still running on another CPU even after this function returns, 235 * and that would be an issue if the waitq is allocated locally to 236 * wait for a one-off asynchronous event. We'd need more external 237 * synchronization in that case, and that would be a pain. 238 * 239 * On the plus side, always regaining a lock simplifies cleanup. 361 240 */ 362 if (THREAD->interrupted) { 363 irq_spinlock_unlock(&THREAD->lock, false); 364 return EINTR; 241 irq_spinlock_lock(&wq->lock, false); 242 243 if (!link_in_use(&THREAD->wq_link)) { 244 /* 245 * We were woken up by the desired event. Return success, 246 * regardless of any concurrent timeout or interruption. 247 */ 248 rc = EOK; 249 goto exit; 365 250 } 366 251 367 /* 368 * Set context that will be restored if the sleep 369 * of this thread is ever interrupted. 370 */ 371 THREAD->sleep_interruptible = true; 372 if (!context_save(&THREAD->sleep_interruption_context)) { 373 /* Short emulation of scheduler() return code. */ 374 THREAD->last_cycle = get_cycle(); 375 irq_spinlock_unlock(&THREAD->lock, false); 376 if (usec) { 377 timeout_unregister(&timeout); 378 } 379 return EINTR; 252 if (timed_out) { 253 rc = ETIMEOUT; 254 goto exit; 380 255 } 381 } else 382 THREAD->sleep_interruptible = false; 383 384 if (usec) { 385 /* We use the timeout variant. 
*/ 386 if (!context_save(&THREAD->sleep_timeout_context)) { 387 /* Short emulation of scheduler() return code. */ 388 THREAD->last_cycle = get_cycle(); 389 irq_spinlock_unlock(&THREAD->lock, false); 390 return ETIMEOUT; 391 } 392 393 timeout_register(&timeout, (uint64_t) usec, waitq_sleep_timed_out, THREAD); 394 } 395 396 list_append(&THREAD->wq_link, &wq->sleepers); 397 398 /* 399 * Suspend execution. 400 * 401 */ 402 THREAD->state = Sleeping; 403 THREAD->sleep_queue = wq; 404 405 /* 406 * Must be before entry to scheduler, because there are multiple 407 * return vectors. 408 */ 409 *blocked = true; 410 411 irq_spinlock_unlock(&THREAD->lock, false); 412 413 /* wq->lock is released in scheduler_separated_stack() */ 414 scheduler(); 415 416 if (usec) { 417 timeout_unregister(&timeout); 418 } 419 420 return EOK; 421 } 422 423 /** Wake up first thread sleeping in a wait queue 424 * 425 * Wake up first thread sleeping in a wait queue. This is the SMP- and IRQ-safe 426 * wrapper meant for general use. 427 * 428 * Besides its 'normal' wakeup operation, it attempts to unregister possible 429 * timeout. 430 * 431 * @param wq Pointer to wait queue. 432 * @param mode Wakeup mode. 433 * 434 */ 435 void waitq_wakeup(waitq_t *wq, wakeup_mode_t mode) 256 257 /* Interrupted for some other reason. */ 258 } 259 260 exit: 261 if (THREAD) 262 list_remove(&THREAD->wq_link); 263 264 if (rc != EOK && sleep_composable) 265 wq->wakeup_balance--; 266 267 if (THREAD) 268 atomic_store_explicit(&THREAD->sleep_queue, NULL, memory_order_relaxed); 269 270 irq_spinlock_unlock(&wq->lock, false); 271 interrupts_restore(guard.ipl); 272 return rc; 273 } 274 275 static void _wake_one(waitq_t *wq) 276 { 277 /* Pop one thread from the queue and wake it up. */ 278 thread_t *thread = list_get_instance(list_first(&wq->sleepers), thread_t, wq_link); 279 list_remove(&thread->wq_link); 280 thread_wakeup(thread); 281 } 282 283 /** 284 * Meant for implementing condvar signal. 
285 * Always wakes one thread if there are any sleeping, 286 * has no effect if no threads are waiting for wakeup. 287 */ 288 void waitq_signal(waitq_t *wq) 436 289 { 437 290 irq_spinlock_lock(&wq->lock, true); 438 _waitq_wakeup_unsafe(wq, mode); 291 292 if (!list_empty(&wq->sleepers)) 293 _wake_one(wq); 294 439 295 irq_spinlock_unlock(&wq->lock, true); 440 296 } 441 297 442 /** If there is a wakeup in progress actively waits for it to complete. 443 * 444 * The function returns once the concurrently running waitq_wakeup() 445 * exits. It returns immediately if there are no concurrent wakeups 446 * at the time. 447 * 448 * Interrupts must be disabled. 449 * 450 * Example usage: 451 * @code 452 * void callback(waitq *wq) 453 * { 454 * // Do something and notify wait_for_completion() that we're done. 455 * waitq_wakeup(wq); 456 * } 457 * void wait_for_completion(void) 458 * { 459 * waitq wg; 460 * waitq_initialize(&wq); 461 * // Run callback() in the background, pass it wq. 462 * do_asynchronously(callback, &wq); 463 * // Wait for callback() to complete its work. 464 * waitq_sleep(&wq); 465 * // callback() completed its work, but it may still be accessing 466 * // wq in waitq_wakeup(). Therefore it is not yet safe to return 467 * // from waitq_sleep() or it would clobber up our stack (where wq 468 * // is stored). waitq_sleep() ensures the wait queue is no longer 469 * // in use by invoking waitq_complete_wakeup() internally. 470 * 471 * // waitq_sleep() returned, it is safe to free wq. 472 * } 473 * @endcode 474 * 475 * @param wq Pointer to a wait queue. 476 */ 477 static void waitq_complete_wakeup(waitq_t *wq) 478 { 479 assert(interrupts_disabled()); 480 481 irq_spinlock_lock(&wq->lock, false); 482 irq_spinlock_unlock(&wq->lock, false); 483 } 484 485 /** Internal SMP- and IRQ-unsafe version of waitq_wakeup() 486 * 487 * This is the internal SMP- and IRQ-unsafe version of waitq_wakeup(). It 488 * assumes wq->lock is already locked and interrupts are already disabled. 
489 * 490 * @param wq Pointer to wait queue. 491 * @param mode If mode is WAKEUP_FIRST, then the longest waiting 492 * thread, if any, is woken up. If mode is WAKEUP_ALL, then 493 * all waiting threads, if any, are woken up. If there are 494 * no waiting threads to be woken up, the missed wakeup is 495 * recorded in the wait queue. 496 * 497 */ 498 void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode) 499 { 500 size_t count = 0; 501 502 assert(interrupts_disabled()); 503 assert(irq_spinlock_locked(&wq->lock)); 504 505 if (wq->ignore_wakeups > 0) { 506 if (mode == WAKEUP_FIRST) { 507 wq->ignore_wakeups--; 508 return; 509 } 510 wq->ignore_wakeups = 0; 511 } 512 513 loop: 514 if (list_empty(&wq->sleepers)) { 515 if (mode == WAKEUP_CLOSE) { 516 // FIXME: this can technically fail if we get two billion sleeps after the wakeup call. 517 wq->missed_wakeups = INT_MAX; 518 } else if (mode != WAKEUP_ALL) { 519 wq->missed_wakeups++; 520 } 521 522 return; 523 } 524 525 count++; 526 thread_t *thread = list_get_instance(list_first(&wq->sleepers), 527 thread_t, wq_link); 528 529 /* 530 * Lock the thread prior to removing it from the wq. 531 * This is not necessary because of mutual exclusion 532 * (the link belongs to the wait queue), but because 533 * of synchronization with waitq_sleep_timed_out() 534 * and thread_interrupt_sleep(). 535 * 536 * In order for these two functions to work, the following 537 * invariant must hold: 538 * 539 * thread->sleep_queue != NULL <=> thread sleeps in a wait queue 540 * 541 * For an observer who locks the thread, the invariant 542 * holds only when the lock is held prior to removing 543 * it from the wait queue. 544 * 545 */ 546 irq_spinlock_lock(&thread->lock, false); 547 list_remove(&thread->wq_link); 548 549 thread->sleep_queue = NULL; 550 irq_spinlock_unlock(&thread->lock, false); 551 552 thread_ready(thread); 553 554 if (mode == WAKEUP_ALL) 555 goto loop; 298 /** 299 * Wakes up one thread sleeping on this waitq. 
300 * If there are no threads waiting, saves the wakeup so that the next sleep 301 * returns immediately. If a previous failure in sleep created a wakeup debt 302 * (see SYNCH_FLAGS_FUTEX) this debt is annulled and no thread is woken up. 303 */ 304 void waitq_wake_one(waitq_t *wq) 305 { 306 irq_spinlock_lock(&wq->lock, true); 307 308 if (!wq->closed) { 309 if (wq->wakeup_balance < 0 || list_empty(&wq->sleepers)) 310 wq->wakeup_balance++; 311 else 312 _wake_one(wq); 313 } 314 315 irq_spinlock_unlock(&wq->lock, true); 316 } 317 318 static void _wake_all(waitq_t *wq) 319 { 320 while (!list_empty(&wq->sleepers)) 321 _wake_one(wq); 322 } 323 324 /** 325 * Wakes up all threads currently waiting on this waitq 326 * and makes all future sleeps return instantly. 327 */ 328 void waitq_close(waitq_t *wq) 329 { 330 irq_spinlock_lock(&wq->lock, true); 331 wq->wakeup_balance = 0; 332 wq->closed = true; 333 _wake_all(wq); 334 irq_spinlock_unlock(&wq->lock, true); 335 } 336 337 /** 338 * Wakes up all threads currently waiting on this waitq 339 */ 340 void waitq_wake_all(waitq_t *wq) 341 { 342 irq_spinlock_lock(&wq->lock, true); 343 wq->wakeup_balance = 0; 344 _wake_all(wq); 345 irq_spinlock_unlock(&wq->lock, true); 556 346 } 557 347
Note:
See TracChangeset
for help on using the changeset viewer.