Changes in kernel/generic/src/synch/waitq.c [e88eb48:597fa24] in mainline
Files: 1 edited
Legend:
  unchanged lines are shown with a leading space
  added lines are prefixed with +
  removed lines are prefixed with -
kernel/generic/src/synch/waitq.c
--- kernel/generic/src/synch/waitq.c (e88eb48)
+++ kernel/generic/src/synch/waitq.c (597fa24)
 /*
  * Copyright (c) 2001-2004 Jakub Jermar
+ * Copyright (c) 2022 Jiří Zárevúcky
  * All rights reserved.
  *
…
 #include <synch/waitq.h>
 #include <synch/spinlock.h>
+#include <preemption.h>
 #include <proc/thread.h>
 #include <proc/scheduler.h>
…
 #include <adt/list.h>
 #include <arch/cycle.h>
-#include <mem.h>
-
-static void waitq_sleep_timed_out(void *);
-static void waitq_complete_wakeup(waitq_t *);
+#include <memw.h>

 /** Initialize wait queue
…
 void waitq_initialize(waitq_t *wq)
 {
-    memsetb(wq, sizeof(*wq), 0);
-    irq_spinlock_initialize(&wq->lock, "wq.lock");
-    list_initialize(&wq->sleepers);
-}
-
-/** Handle timeout during waitq_sleep_timeout() call
- *
- * This routine is called when waitq_sleep_timeout() times out.
- * Interrupts are disabled.
- *
- * It is supposed to try to remove 'its' thread from the wait queue;
- * it can eventually fail to achieve this goal when these two events
- * overlap. In that case it behaves just as though there was no
- * timeout at all.
- *
- * @param data Pointer to the thread that called waitq_sleep_timeout().
- *
- */
-void waitq_sleep_timed_out(void *data)
-{
-    thread_t *thread = (thread_t *) data;
-    bool do_wakeup = false;
-    DEADLOCK_PROBE_INIT(p_wqlock);
-
-    irq_spinlock_lock(&threads_lock, false);
-    if (!thread_exists(thread))
-        goto out;
-
-grab_locks:
-    irq_spinlock_lock(&thread->lock, false);
-
-    waitq_t *wq;
-    if ((wq = thread->sleep_queue)) { /* Assignment */
-        if (!irq_spinlock_trylock(&wq->lock)) {
-            irq_spinlock_unlock(&thread->lock, false);
-            DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
-            /* Avoid deadlock */
-            goto grab_locks;
-        }
-
-        list_remove(&thread->wq_link);
-        thread->saved_context = thread->sleep_timeout_context;
-        do_wakeup = true;
-        if (thread->sleep_composable)
-            wq->ignore_wakeups++;
-        thread->sleep_queue = NULL;
-        irq_spinlock_unlock(&wq->lock, false);
-    }
-
-    thread->timeout_pending = false;
-    irq_spinlock_unlock(&thread->lock, false);
-
-    if (do_wakeup)
-        thread_ready(thread);
-
-out:
-    irq_spinlock_unlock(&threads_lock, false);
-}
-
-/** Interrupt sleeping thread.
- *
- * This routine attempts to interrupt a thread from its sleep in
- * a waitqueue. If the thread is not found sleeping, no action
- * is taken.
- *
- * The threads_lock must be already held and interrupts must be
- * disabled upon calling this function.
- *
- * @param thread Thread to be interrupted.
- *
- */
-void waitq_interrupt_sleep(thread_t *thread)
-{
-    bool do_wakeup = false;
-    DEADLOCK_PROBE_INIT(p_wqlock);
-
-    /*
-     * The thread is quaranteed to exist because
-     * threads_lock is held.
-     */
-
-grab_locks:
-    irq_spinlock_lock(&thread->lock, false);
-
-    waitq_t *wq;
-    if ((wq = thread->sleep_queue)) { /* Assignment */
-        if (!(thread->sleep_interruptible)) {
-            /*
-             * The sleep cannot be interrupted.
-             */
-            irq_spinlock_unlock(&thread->lock, false);
-            return;
-        }
-
-        if (!irq_spinlock_trylock(&wq->lock)) {
-            /* Avoid deadlock */
-            irq_spinlock_unlock(&thread->lock, false);
-            DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
-            goto grab_locks;
-        }
-
-        if ((thread->timeout_pending) &&
-            (timeout_unregister(&thread->sleep_timeout)))
-            thread->timeout_pending = false;
-
-        list_remove(&thread->wq_link);
-        thread->saved_context = thread->sleep_interruption_context;
-        if (thread->sleep_composable)
-            wq->ignore_wakeups++;
-        do_wakeup = true;
-        thread->sleep_queue = NULL;
-        irq_spinlock_unlock(&wq->lock, false);
-    }
-
-    irq_spinlock_unlock(&thread->lock, false);
-
-    if (do_wakeup)
-        thread_ready(thread);
+    *wq = WAITQ_INITIALIZER(*wq);
+}
+
+/**
+ * Initialize wait queue with an initial number of queued wakeups
+ * (or a wakeup debt if negative).
+ */
+void waitq_initialize_with_count(waitq_t *wq, int count)
+{
+    *wq = WAITQ_INITIALIZER_WITH_COUNT(*wq, count);
 }

…
     (((flags) & SYNCH_FLAGS_NON_BLOCKING) && ((usec) == 0))

+errno_t waitq_sleep(waitq_t *wq)
+{
+    return _waitq_sleep_timeout(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
+}
+
+errno_t waitq_sleep_timeout(waitq_t *wq, uint32_t usec)
+{
+    return _waitq_sleep_timeout(wq, usec, SYNCH_FLAGS_NON_BLOCKING);
+}
+
 /** Sleep until either wakeup, timeout or interruption occurs
  *
- * This is a sleep implementation which allows itself to time out or to be
- * interrupted from the sleep, restoring a failover context.
- *
  * Sleepers are organised in a FIFO fashion in a structure called wait queue.
  *
- * This function is really basic in that other functions as waitq_sleep()
- * and all the *_timeout() functions use it.
+ * Other functions as waitq_sleep() and all the *_timeout() functions are
+ * implemented using this function.
  *
  * @param wq Pointer to wait queue.
…
  * @param flags Specify mode of the sleep.
  *
- * @param[out] blocked On return, regardless of the return code,
- *                     `*blocked` is set to `true` iff the thread went to
- *                     sleep.
- *
  * The sleep can be interrupted only if the
  * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
…
  * call will immediately return, reporting either success or failure.
  *
- * @return EAGAIN, meaning that the sleep failed because it was requested
- *                 as SYNCH_FLAGS_NON_BLOCKING, but there was no pending wakeup.
- * @return ETIMEOUT, meaning that the sleep timed out.
- * @return EINTR, meaning that somebody interrupted the sleeping
- *                thread. Check the value of `*blocked` to see if the thread slept,
- *                or if a pending interrupt forced it to return immediately.
+ * @return ETIMEOUT, meaning that the sleep timed out, or a nonblocking call
+ *                   returned unsuccessfully.
+ * @return EINTR, meaning that somebody interrupted the sleeping thread.
  * @return EOK, meaning that none of the above conditions occured, and the
- *              thread was woken up successfuly by `waitq_wakeup()`. Check
- *              the value of `*blocked` to see if the thread slept or if
- *              the wakeup was already pending.
- *
- */
-errno_t waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked)
+ *              thread was woken up successfuly by `waitq_wake_*()`.
+ *
+ */
+errno_t _waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags)
 {
     assert((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));
-
-    ipl_t ipl = waitq_sleep_prepare(wq);
-    bool nblocked;
-    errno_t rc = waitq_sleep_timeout_unsafe(wq, usec, flags, &nblocked);
-    waitq_sleep_finish(wq, nblocked, ipl);
-
-    if (blocked != NULL) {
-        *blocked = nblocked;
-    }
-    return rc;
+    return waitq_sleep_timeout_unsafe(wq, usec, flags, waitq_sleep_prepare(wq));
 }

…
  *
  */
-ipl_t waitq_sleep_prepare(waitq_t *wq)
-{
-    ipl_t ipl;
-
-restart:
-    ipl = interrupts_disable();
-
-    if (THREAD) { /* Needed during system initiailzation */
-        /*
-         * Busy waiting for a delayed timeout.
-         * This is an important fix for the race condition between
-         * a delayed timeout and a next call to waitq_sleep_timeout().
-         * Simply, the thread is not allowed to go to sleep if
-         * there are timeouts in progress.
-         *
-         */
-        irq_spinlock_lock(&THREAD->lock, false);
-
-        if (THREAD->timeout_pending) {
-            irq_spinlock_unlock(&THREAD->lock, false);
-            interrupts_restore(ipl);
-            goto restart;
-        }
-
-        irq_spinlock_unlock(&THREAD->lock, false);
-    }
-
+wait_guard_t waitq_sleep_prepare(waitq_t *wq)
+{
+    ipl_t ipl = interrupts_disable();
     irq_spinlock_lock(&wq->lock, false);
-    return ipl;
-}
-
-/** Finish waiting in a wait queue.
- *
- * This function restores interrupts to the state that existed prior
- * to the call to waitq_sleep_prepare(). If necessary, the wait queue
- * lock is released.
- *
- * @param wq      Wait queue.
- * @param blocked Out parameter of waitq_sleep_timeout_unsafe().
- * @param ipl     Interrupt level returned by waitq_sleep_prepare().
- *
- */
-void waitq_sleep_finish(waitq_t *wq, bool blocked, ipl_t ipl)
-{
-    if (blocked) {
-        /*
-         * Wait for a waitq_wakeup() or waitq_unsleep() to complete
-         * before returning from waitq_sleep() to the caller. Otherwise
-         * the caller might expect that the wait queue is no longer used
-         * and deallocate it (although the wakeup on a another cpu has
-         * not yet completed and is using the wait queue).
-         *
-         * Note that we have to do this for EOK and EINTR, but not
-         * necessarily for ETIMEOUT where the timeout handler stops
-         * using the waitq before waking us up. To be on the safe side,
-         * ensure the waitq is not in use anymore in this case as well.
-         */
-        waitq_complete_wakeup(wq);
-    } else {
-        irq_spinlock_unlock(&wq->lock, false);
-    }
-
-    interrupts_restore(ipl);
+    return (wait_guard_t) {
+        .ipl = ipl,
+    };
+}
+
+errno_t waitq_sleep_unsafe(waitq_t *wq, wait_guard_t guard)
+{
+    return waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE, guard);
 }

…
  *
  * This function implements logic of sleeping in a wait queue.
- * This call must be preceded by a call to waitq_sleep_prepare()
- * and followed by a call to waitq_sleep_finish().
+ * This call must be preceded by a call to waitq_sleep_prepare().
  *
  * @param wq See waitq_sleep_timeout().
…
  *
  */
-errno_t waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked)
-{
-    *blocked = false;
+errno_t waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags, wait_guard_t guard)
+{
+    errno_t rc;
+
+    /*
+     * If true, and this thread's sleep returns without a wakeup
+     * (timed out or interrupted), waitq ignores the next wakeup.
+     * This is necessary for futex to be able to handle those conditions.
+     */
+    bool sleep_composable = (flags & SYNCH_FLAGS_FUTEX);
+    bool interruptible = (flags & SYNCH_FLAGS_INTERRUPTIBLE);
+
+    if (wq->closed) {
+        rc = EOK;
+        goto exit;
+    }

     /* Checks whether to go to sleep at all */
-    if (wq->missed_wakeups) {
-        wq->missed_wakeups--;
-        return EOK;
-    } else {
-        if (PARAM_NON_BLOCKING(flags, usec)) {
-            /* Return immediately instead of going to sleep */
-            return EAGAIN;
+    if (wq->wakeup_balance > 0) {
+        wq->wakeup_balance--;
+
+        rc = EOK;
+        goto exit;
+    }
+
+    if (PARAM_NON_BLOCKING(flags, usec)) {
+        /* Return immediately instead of going to sleep */
+        rc = ETIMEOUT;
+        goto exit;
+    }
+
+    /* Just for debugging output. */
+    atomic_store_explicit(&THREAD->sleep_queue, wq, memory_order_relaxed);
+
+    /*
+     * This thread_t field is synchronized exclusively via
+     * waitq lock of the waitq currently listing it.
+     */
+    list_append(&THREAD->wq_link, &wq->sleepers);
+
+    /* Needs to be run when interrupts are still disabled. */
+    deadline_t deadline = usec > 0 ?
+        timeout_deadline_in_usec(usec) : DEADLINE_NEVER;
+
+    while (true) {
+        bool terminating = (thread_wait_start() == THREAD_TERMINATING);
+        if (terminating && interruptible) {
+            rc = EINTR;
+            goto exit;
-        }
-    }
-
-    /*
-     * Now we are firmly decided to go to sleep.
-     *
-     */
-    irq_spinlock_lock(&THREAD->lock, false);
-
-    THREAD->sleep_composable = (flags & SYNCH_FLAGS_FUTEX);
-
-    if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {
+        }
+
+        irq_spinlock_unlock(&wq->lock, false);
+
+        bool timed_out = (thread_wait_finish(deadline) == THREAD_WAIT_TIMEOUT);
+
         /*
-         * If the thread was already interrupted,
-         * don't go to sleep at all.
+         * We always need to re-lock the WQ, since concurrently running
+         * waitq_wakeup() may still not have exitted.
+         * If we didn't always do this, we'd risk waitq_wakeup() that woke us
+         * up still running on another CPU even after this function returns,
+         * and that would be an issue if the waitq is allocated locally to
+         * wait for a one-off asynchronous event. We'd need more external
+         * synchronization in that case, and that would be a pain.
+         *
+         * On the plus side, always regaining a lock simplifies cleanup.
         */
-        if (THREAD->interrupted) {
-            irq_spinlock_unlock(&THREAD->lock, false);
-            return EINTR;
+        irq_spinlock_lock(&wq->lock, false);
+
+        if (!link_in_use(&THREAD->wq_link)) {
+            /*
+             * We were woken up by the desired event. Return success,
+             * regardless of any concurrent timeout or interruption.
+             */
+            rc = EOK;
+            goto exit;
         }

-        /*
-         * Set context that will be restored if the sleep
-         * of this thread is ever interrupted.
-         */
-        THREAD->sleep_interruptible = true;
-        if (!context_save(&THREAD->sleep_interruption_context)) {
-            /* Short emulation of scheduler() return code. */
-            THREAD->last_cycle = get_cycle();
-            irq_spinlock_unlock(&THREAD->lock, false);
-            return EINTR;
+        if (timed_out) {
+            rc = ETIMEOUT;
+            goto exit;
         }
-    } else
-        THREAD->sleep_interruptible = false;
-
-    if (usec) {
-        /* We use the timeout variant. */
-        if (!context_save(&THREAD->sleep_timeout_context)) {
-            /* Short emulation of scheduler() return code. */
-            THREAD->last_cycle = get_cycle();
-            irq_spinlock_unlock(&THREAD->lock, false);
-            return ETIMEOUT;
-        }
-
-        THREAD->timeout_pending = true;
-        timeout_register(&THREAD->sleep_timeout, (uint64_t) usec,
-            waitq_sleep_timed_out, THREAD);
-    }
-
-    list_append(&THREAD->wq_link, &wq->sleepers);
-
-    /*
-     * Suspend execution.
-     *
-     */
-    THREAD->state = Sleeping;
-    THREAD->sleep_queue = wq;
-
-    /*
-     * Must be before entry to scheduler, because there are multiple
-     * return vectors.
-     */
-    *blocked = true;
-
-    irq_spinlock_unlock(&THREAD->lock, false);
-
-    /* wq->lock is released in scheduler_separated_stack() */
-    scheduler();
-
-    return EOK;
-}
-
-/** Wake up first thread sleeping in a wait queue
- *
- * Wake up first thread sleeping in a wait queue. This is the SMP- and IRQ-safe
- * wrapper meant for general use.
- *
- * Besides its 'normal' wakeup operation, it attempts to unregister possible
- * timeout.
- *
- * @param wq   Pointer to wait queue.
- * @param mode Wakeup mode.
- *
- */
-void waitq_wakeup(waitq_t *wq, wakeup_mode_t mode)
+
+        /* Interrupted for some other reason. */
+    }
+
+exit:
+    if (THREAD)
+        list_remove(&THREAD->wq_link);
+
+    if (rc != EOK && sleep_composable)
+        wq->wakeup_balance--;
+
+    if (THREAD)
+        atomic_store_explicit(&THREAD->sleep_queue, NULL, memory_order_relaxed);
+
+    irq_spinlock_unlock(&wq->lock, false);
+    interrupts_restore(guard.ipl);
+    return rc;
+}
+
+static void _wake_one(waitq_t *wq)
+{
+    /* Pop one thread from the queue and wake it up. */
+    thread_t *thread = list_get_instance(list_first(&wq->sleepers), thread_t, wq_link);
+    list_remove(&thread->wq_link);
+    thread_wakeup(thread);
+}
+
+/**
+ * Meant for implementing condvar signal.
+ * Always wakes one thread if there are any sleeping,
+ * has no effect if no threads are waiting for wakeup.
+ */
+void waitq_signal(waitq_t *wq)
 {
     irq_spinlock_lock(&wq->lock, true);
-    _waitq_wakeup_unsafe(wq, mode);
+
+    if (!list_empty(&wq->sleepers))
+        _wake_one(wq);
+
     irq_spinlock_unlock(&wq->lock, true);
 }

-/** If there is a wakeup in progress actively waits for it to complete.
- *
- * The function returns once the concurrently running waitq_wakeup()
- * exits. It returns immediately if there are no concurrent wakeups
- * at the time.
- *
- * Interrupts must be disabled.
- *
- * Example usage:
- * @code
- * void callback(waitq *wq)
- * {
- *     // Do something and notify wait_for_completion() that we're done.
- *     waitq_wakeup(wq);
- * }
- * void wait_for_completion(void)
- * {
- *     waitq wg;
- *     waitq_initialize(&wq);
- *     // Run callback() in the background, pass it wq.
- *     do_asynchronously(callback, &wq);
- *     // Wait for callback() to complete its work.
- *     waitq_sleep(&wq);
- *     // callback() completed its work, but it may still be accessing
- *     // wq in waitq_wakeup(). Therefore it is not yet safe to return
- *     // from waitq_sleep() or it would clobber up our stack (where wq
- *     // is stored). waitq_sleep() ensures the wait queue is no longer
- *     // in use by invoking waitq_complete_wakeup() internally.
- *
- *     // waitq_sleep() returned, it is safe to free wq.
- * }
- * @endcode
- *
- * @param wq Pointer to a wait queue.
- */
-static void waitq_complete_wakeup(waitq_t *wq)
-{
-    assert(interrupts_disabled());
-
-    irq_spinlock_lock(&wq->lock, false);
-    irq_spinlock_unlock(&wq->lock, false);
-}
-
-/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
- *
- * This is the internal SMP- and IRQ-unsafe version of waitq_wakeup(). It
- * assumes wq->lock is already locked and interrupts are already disabled.
- *
- * @param wq   Pointer to wait queue.
- * @param mode If mode is WAKEUP_FIRST, then the longest waiting
- *             thread, if any, is woken up. If mode is WAKEUP_ALL, then
- *             all waiting threads, if any, are woken up. If there are
- *             no waiting threads to be woken up, the missed wakeup is
- *             recorded in the wait queue.
- *
- */
-void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode)
-{
-    size_t count = 0;
-
-    assert(interrupts_disabled());
-    assert(irq_spinlock_locked(&wq->lock));
-
-    if (wq->ignore_wakeups > 0) {
-        if (mode == WAKEUP_FIRST) {
-            wq->ignore_wakeups--;
-            return;
-        }
-        wq->ignore_wakeups = 0;
-    }
-
-loop:
-    if (list_empty(&wq->sleepers)) {
-        wq->missed_wakeups++;
-        if ((count) && (mode == WAKEUP_ALL))
-            wq->missed_wakeups--;
-
-        return;
-    }
-
-    count++;
-    thread_t *thread = list_get_instance(list_first(&wq->sleepers),
-        thread_t, wq_link);
-
-    /*
-     * Lock the thread prior to removing it from the wq.
-     * This is not necessary because of mutual exclusion
-     * (the link belongs to the wait queue), but because
-     * of synchronization with waitq_sleep_timed_out()
-     * and thread_interrupt_sleep().
-     *
-     * In order for these two functions to work, the following
-     * invariant must hold:
-     *
-     * thread->sleep_queue != NULL <=> thread sleeps in a wait queue
-     *
-     * For an observer who locks the thread, the invariant
-     * holds only when the lock is held prior to removing
-     * it from the wait queue.
-     *
-     */
-    irq_spinlock_lock(&thread->lock, false);
-    list_remove(&thread->wq_link);
-
-    if ((thread->timeout_pending) &&
-        (timeout_unregister(&thread->sleep_timeout)))
-        thread->timeout_pending = false;
-
-    thread->sleep_queue = NULL;
-    irq_spinlock_unlock(&thread->lock, false);
-
-    thread_ready(thread);
-
-    if (mode == WAKEUP_ALL)
-        goto loop;
-}
-
-/** Get the missed wakeups count.
- *
- * @param wq Pointer to wait queue.
- * @return The wait queue's missed_wakeups count.
- */
-int waitq_count_get(waitq_t *wq)
-{
-    int cnt;
-
+/**
+ * Wakes up one thread sleeping on this waitq.
+ * If there are no threads waiting, saves the wakeup so that the next sleep
+ * returns immediately. If a previous failure in sleep created a wakeup debt
+ * (see SYNCH_FLAGS_FUTEX) this debt is annulled and no thread is woken up.
+ */
+void waitq_wake_one(waitq_t *wq)
+{
     irq_spinlock_lock(&wq->lock, true);
-    cnt = wq->missed_wakeups;
+
+    if (!wq->closed) {
+        if (wq->wakeup_balance < 0 || list_empty(&wq->sleepers))
+            wq->wakeup_balance++;
+        else
+            _wake_one(wq);
+    }
+
     irq_spinlock_unlock(&wq->lock, true);
-
-    return cnt;
-}
-
-/** Set the missed wakeups count.
- *
- * @param wq  Pointer to wait queue.
- * @param val New value of the missed_wakeups count.
- */
-void waitq_count_set(waitq_t *wq, int val)
+}
+
+static void _wake_all(waitq_t *wq)
+{
+    while (!list_empty(&wq->sleepers))
+        _wake_one(wq);
+}
+
+/**
+ * Wakes up all threads currently waiting on this waitq
+ * and makes all future sleeps return instantly.
+ */
+void waitq_close(waitq_t *wq)
 {
     irq_spinlock_lock(&wq->lock, true);
-    wq->missed_wakeups = val;
+    wq->wakeup_balance = 0;
+    wq->closed = true;
+    _wake_all(wq);
     irq_spinlock_unlock(&wq->lock, true);
 }

+/**
+ * Wakes up all threads currently waiting on this waitq
+ */
+void waitq_wake_all(waitq_t *wq)
+{
+    irq_spinlock_lock(&wq->lock, true);
+    wq->wakeup_balance = 0;
+    _wake_all(wq);
+    irq_spinlock_unlock(&wq->lock, true);
+}
+
 /** @}
  */
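The removed waitq_complete_wakeup() comment carried a usage example that the new code no longer spells out: waiting on a stack-allocated wait queue for a one-off asynchronous event. Because waitq_sleep_timeout_unsafe() now always re-acquires wq->lock before returning (see the comment block in the diff above), the same pattern remains safe with the new interface. A minimal sketch, assuming a HelenOS kernel thread context; run_in_background() is a hypothetical helper, while waitq_t, waitq_initialize(), waitq_sleep() and waitq_wake_one() are the interfaces touched by this changeset:

#include <synch/waitq.h>

static void callback(void *arg)
{
    waitq_t *wq = arg;

    /* Do the work, then notify the waiting thread. */
    waitq_wake_one(wq);
}

static void wait_for_completion(void)
{
    waitq_t wq;
    waitq_initialize(&wq);

    /* Run callback() in the background (hypothetical helper). */
    run_in_background(callback, &wq);

    /* Sleep until callback() calls waitq_wake_one(). */
    waitq_sleep(&wq);

    /*
     * waitq_sleep() does not return until the waker has released
     * wq->lock, so the stack-allocated queue can safely go out of
     * scope here.
     */
}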
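The single wakeup_balance counter replaces both counters of the old code: missed_wakeups (wakeups recorded while nobody slept) and ignore_wakeups (wakeups to be swallowed after a composable sleep failed). A positive balance is a stock of queued wakeups; a negative balance is a debt left behind by a SYNCH_FLAGS_FUTEX sleep that timed out or was interrupted. A sketch of the bookkeeping, assuming the calls run in order with no concurrent users of wq; the balance values in the comments follow from the diffed code:

#include <synch/waitq.h>

static void wakeup_balance_demo(void)
{
    waitq_t wq;
    waitq_initialize(&wq);  /* wakeup_balance == 0 */

    waitq_wake_one(&wq);    /* no sleepers: balance becomes +1 */
    waitq_sleep(&wq);       /* consumes the stored wakeup, returns EOK at once */

    /* A composable (futex-style) sleep that times out leaves a debt behind. */
    errno_t rc = _waitq_sleep_timeout(&wq, 1000, SYNCH_FLAGS_FUTEX);
    /* rc == ETIMEOUT and the balance is now -1. */
    (void) rc;

    waitq_wake_one(&wq);    /* annuls the debt: balance back to 0, nobody woken */
}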
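The ipl_t that the old waitq_sleep_prepare() returned is now wrapped in a wait_guard_t, and waitq_sleep_finish() is gone entirely: the unlock and interrupt restore happen inside waitq_sleep_timeout_unsafe() itself. Callers that must publish some state atomically with going to sleep (condition variables are the typical case) can still split the operation. A sketch under the assumption that unlock_caller_state() stands for whatever caller-side lock has to be dropped once the wait queue lock is held:

#include <synch/waitq.h>

static errno_t wait_on(waitq_t *wq)
{
    /* Disables interrupts and acquires wq->lock. */
    wait_guard_t guard = waitq_sleep_prepare(wq);

    /*
     * Any wakeup posted from this point on has to wait for wq->lock,
     * so it cannot be lost; caller-side locks may be released now.
     */
    unlock_caller_state();

    /* Releases wq->lock and restores interrupts before returning. */
    return waitq_sleep_unsafe(wq, guard);
}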