/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * Copyright (c) 2022 Jiří Zárevúcky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_sync
 * @{
 */

/**
 * @file
 * @brief Wait queue.
 *
 * Wait queue is the basic synchronization primitive upon which all
 * other synchronization primitives build.
 *
 * It allows threads to wait for an event in first-come, first-served
 * fashion. Conditional operation as well as timeouts and interruptions
 * are supported.
 *
 */
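
/*
 * A minimal usage sketch (illustrative only; real call sites are spread
 * across the kernel and typically wrap the wait queue in a higher-level
 * primitive):
 *
 *     waitq_t wq;
 *     waitq_initialize(&wq);
 *
 *     // waiting thread
 *     errno_t rc = waitq_sleep(&wq);
 *     // rc == EOK after a matching waitq_wake_one()/waitq_wake_all()
 *
 *     // waking thread
 *     waitq_wake_one(&wq);
 */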

#include <assert.h>
#include <errno.h>
#include <synch/waitq.h>
#include <synch/spinlock.h>
#include <preemption.h>
#include <proc/thread.h>
#include <proc/scheduler.h>
#include <arch/asm.h>
#include <typedefs.h>
#include <time/timeout.h>
#include <arch.h>
#include <context.h>
#include <adt/list.h>
#include <arch/cycle.h>
#include <memw.h>

/** Initialize wait queue
 *
 * Initialize wait queue.
 *
 * @param wq Pointer to wait queue to be initialized.
 *
 */
void waitq_initialize(waitq_t *wq)
{
	memsetb(wq, sizeof(*wq), 0);
	irq_spinlock_initialize(&wq->lock, "wq.lock");
	list_initialize(&wq->sleepers);
}

/**
 * Initialize wait queue with an initial number of queued wakeups
 * (or a wakeup debt if negative).
 */
void waitq_initialize_with_count(waitq_t *wq, int count)
{
	waitq_initialize(wq);
	wq->wakeup_balance = count;
}
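
/*
 * A hedged sketch of the counted initialization: with a positive count,
 * the first `count` sleepers pass through without blocking (semaphore-like
 * behaviour). The count used below is purely illustrative.
 *
 *     waitq_t wq;
 *     waitq_initialize_with_count(&wq, 2);
 *
 *     waitq_sleep(&wq);  // returns EOK immediately, balance 2 -> 1
 *     waitq_sleep(&wq);  // returns EOK immediately, balance 1 -> 0
 *     waitq_sleep(&wq);  // blocks until a waitq_wake_*() call
 */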

#define PARAM_NON_BLOCKING(flags, usec) \
	(((flags) & SYNCH_FLAGS_NON_BLOCKING) && ((usec) == 0))

errno_t waitq_sleep(waitq_t *wq)
{
	return _waitq_sleep_timeout(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
}

errno_t waitq_sleep_timeout(waitq_t *wq, uint32_t usec)
{
	return _waitq_sleep_timeout(wq, usec, SYNCH_FLAGS_NON_BLOCKING);
}

/** Sleep until either wakeup, timeout or interruption occurs
 *
 * Sleepers are organised in a FIFO fashion in a structure called wait queue.
 *
 * Other functions such as waitq_sleep() and all the *_timeout() functions
 * are implemented using this function.
 *
 * @param wq    Pointer to wait queue.
 * @param usec  Timeout in microseconds.
 * @param flags Specify mode of the sleep.
 *
 * The sleep can be interrupted only if the
 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
 *
 * If usec is greater than zero, regardless of the value of the
 * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either
 * timeout, interruption or wakeup comes.
 *
 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags,
 * the call will not return until wakeup or interruption comes.
 *
 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the
 * call will immediately return, reporting either success or failure.
 *
 * @return ETIMEOUT, meaning that the sleep timed out, or a nonblocking call
 *         returned unsuccessfully.
 * @return EINTR, meaning that somebody interrupted the sleeping thread.
 * @return EOK, meaning that none of the above conditions occurred, and the
 *         thread was woken up successfully by `waitq_wake_*()`.
 *
 */
errno_t _waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags)
{
	assert((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));
	return waitq_sleep_timeout_unsafe(wq, usec, flags, waitq_sleep_prepare(wq));
}
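
/*
 * An illustrative sketch of the timeout/flags semantics documented above
 * (the timeout value is hypothetical):
 *
 *     // Wait at most 10 ms, and allow the sleep to be interrupted:
 *     errno_t rc = _waitq_sleep_timeout(&wq, 10000, SYNCH_FLAGS_INTERRUPTIBLE);
 *     if (rc == EOK) {
 *         // woken up by waitq_wake_*()
 *     } else if (rc == ETIMEOUT) {
 *         // 10 ms elapsed without a wakeup
 *     } else if (rc == EINTR) {
 *         // the sleeping thread was interrupted
 *     }
 */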

/** Prepare to sleep in a waitq.
 *
 * This function will return holding the lock of the wait queue
 * and interrupts disabled.
 *
 * @param wq Wait queue.
 *
 * @return Wait guard storing the interrupt level as it existed on entry
 *         to this function.
 *
 */
wait_guard_t waitq_sleep_prepare(waitq_t *wq)
{
	ipl_t ipl = interrupts_disable();
	irq_spinlock_lock(&wq->lock, false);
	return (wait_guard_t) {
		.ipl = ipl,
	};
}

errno_t waitq_sleep_unsafe(waitq_t *wq, wait_guard_t guard)
{
	return waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE, guard);
}
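
/*
 * A hedged sketch of the two-step prepare/sleep pattern: the split lets a
 * caller atomically check its own condition (and drop any outer locks)
 * after the wait queue is locked but before actually going to sleep.
 * The outer lock mentioned below is hypothetical.
 *
 *     wait_guard_t guard = waitq_sleep_prepare(&wq);
 *     // ... release an outer lock here; a wakeup cannot be lost, because
 *     // the waitq lock is already held ...
 *     errno_t rc = waitq_sleep_unsafe(&wq, guard);
 */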

/** Internal implementation of waitq_sleep_timeout().
 *
 * This function implements the logic of sleeping in a wait queue.
 * This call must be preceded by a call to waitq_sleep_prepare().
 *
 * @param wq    See waitq_sleep_timeout().
 * @param usec  See waitq_sleep_timeout().
 * @param flags See waitq_sleep_timeout().
 * @param guard Guard obtained from waitq_sleep_prepare().
 *
 * @return See waitq_sleep_timeout().
 *
 */
errno_t waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags, wait_guard_t guard)
{
	errno_t rc;

	/*
	 * If true, and this thread's sleep returns without a wakeup
	 * (timed out or interrupted), waitq ignores the next wakeup.
	 * This is necessary for futex to be able to handle those conditions.
	 */
	bool sleep_composable = (flags & SYNCH_FLAGS_FUTEX);
	bool interruptible = (flags & SYNCH_FLAGS_INTERRUPTIBLE);

	if (wq->closed) {
		rc = EOK;
		goto exit;
	}

	/* Check whether to go to sleep at all. */
	if (wq->wakeup_balance > 0) {
		wq->wakeup_balance--;

		rc = EOK;
		goto exit;
	}

	if (PARAM_NON_BLOCKING(flags, usec)) {
		/* Return immediately instead of going to sleep. */
		rc = ETIMEOUT;
		goto exit;
	}

	/* Just for debugging output. */
	atomic_store_explicit(&THREAD->sleep_queue, wq, memory_order_relaxed);

	/*
	 * This thread_t field is synchronized exclusively via
	 * waitq lock of the waitq currently listing it.
	 */
	list_append(&THREAD->wq_link, &wq->sleepers);

	/* Needs to be run when interrupts are still disabled. */
	deadline_t deadline = usec > 0 ?
	    timeout_deadline_in_usec(usec) : DEADLINE_NEVER;

	while (true) {
		bool terminating = (thread_wait_start() == THREAD_TERMINATING);
		if (terminating && interruptible) {
			rc = EINTR;
			goto exit;
		}

		irq_spinlock_unlock(&wq->lock, false);

		bool timed_out = (thread_wait_finish(deadline) == THREAD_WAIT_TIMEOUT);

		/*
		 * We always need to re-lock the WQ, since a concurrently running
		 * waitq_wake_*() call may still not have exited.
		 * If we didn't always do this, we'd risk the waitq_wake_*() call that
		 * woke us up still running on another CPU even after this function
		 * returns, and that would be an issue if the waitq is allocated
		 * locally to wait for a one-off asynchronous event. We'd need more
		 * external synchronization in that case, and that would be a pain.
		 *
		 * On the plus side, always regaining the lock simplifies cleanup.
		 */
		irq_spinlock_lock(&wq->lock, false);

		if (!link_in_use(&THREAD->wq_link)) {
			/*
			 * We were woken up by the desired event. Return success,
			 * regardless of any concurrent timeout or interruption.
			 */
			rc = EOK;
			goto exit;
		}

		if (timed_out) {
			rc = ETIMEOUT;
			goto exit;
		}

		/* Interrupted for some other reason. */
	}

exit:
	if (THREAD)
		list_remove(&THREAD->wq_link);

	if (rc != EOK && sleep_composable)
		wq->wakeup_balance--;

	if (THREAD)
		atomic_store_explicit(&THREAD->sleep_queue, NULL, memory_order_relaxed);

	irq_spinlock_unlock(&wq->lock, false);
	interrupts_restore(guard.ipl);
	return rc;
}

static void _wake_one(waitq_t *wq)
{
	/* Pop one thread from the queue and wake it up. */
	thread_t *thread = list_get_instance(list_first(&wq->sleepers), thread_t, wq_link);
	list_remove(&thread->wq_link);
	thread_wakeup(thread);
}

/**
 * Meant for implementing condvar signal.
 * Always wakes one thread if there are any sleeping;
 * has no effect if no threads are waiting for a wakeup.
 */
void waitq_signal(waitq_t *wq)
{
	irq_spinlock_lock(&wq->lock, true);

	if (!list_empty(&wq->sleepers))
		_wake_one(wq);

	irq_spinlock_unlock(&wq->lock, true);
}

/**
 * Wakes up one thread sleeping on this waitq.
 * If there are no threads waiting, saves the wakeup so that the next sleep
 * returns immediately. If a previous failure in sleep created a wakeup debt
 * (see SYNCH_FLAGS_FUTEX), the debt is reduced by one and no thread is
 * woken up.
 */
void waitq_wake_one(waitq_t *wq)
{
	irq_spinlock_lock(&wq->lock, true);

	if (!wq->closed) {
		if (wq->wakeup_balance < 0 || list_empty(&wq->sleepers))
			wq->wakeup_balance++;
		else
			_wake_one(wq);
	}

	irq_spinlock_unlock(&wq->lock, true);
}
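
/*
 * A hedged illustration of the stored-wakeup semantics when the wakeup
 * arrives before the sleeper (the interleaving shown is one possibility):
 *
 *     waitq_wake_one(&wq);            // no sleeper yet: wakeup_balance becomes 1
 *     errno_t rc = waitq_sleep(&wq);  // consumes the stored wakeup, rc == EOK
 */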

static void _wake_all(waitq_t *wq)
{
	while (!list_empty(&wq->sleepers))
		_wake_one(wq);
}

/**
 * Wakes up all threads currently waiting on this waitq
 * and makes all future sleeps return instantly.
 */
void waitq_close(waitq_t *wq)
{
	irq_spinlock_lock(&wq->lock, true);
	wq->wakeup_balance = 0;
	wq->closed = true;
	_wake_all(wq);
	irq_spinlock_unlock(&wq->lock, true);
}

/**
 * Wakes up all threads currently waiting on this waitq,
 * and discards any stored wakeups or wakeup debt.
 */
void waitq_wake_all(waitq_t *wq)
{
	irq_spinlock_lock(&wq->lock, true);
	wq->wakeup_balance = 0;
	_wake_all(wq);
	irq_spinlock_unlock(&wq->lock, true);
}

/** @}
 */