source: mainline/kernel/generic/src/synch/condvar.c@ 269bc459

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 269bc459 was e88eb48, checked in by Jakub Jermar <jakub@…>, 7 years ago

Prefix remaining kernel doxygroups with 'kernel_'

  • Property mode set to 100644
File size: 5.8 KB
RevLine 
[f761f1eb]1/*
[df4ed85]2 * Copyright (c) 2001-2004 Jakub Jermar
[f761f1eb]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[e88eb48]29/** @addtogroup kernel_sync
[b45c443]30 * @{
31 */
32
[cf26ba9]33/**
[b45c443]34 * @file
[cf26ba9]35 * @brief Condition variables.
36 */
37
[f761f1eb]38#include <synch/condvar.h>
39#include <synch/mutex.h>
[46a5b37]40#include <synch/spinlock.h>
[f761f1eb]41#include <synch/waitq.h>
[c0bc189]42#include <arch.h>
[f761f1eb]43
[c0bc189]44/** Initialize condition variable.
[df364582]45 *
[08a19ba]46 * @param cv Condition variable.
[df364582]47 */
[f761f1eb]48void condvar_initialize(condvar_t *cv)
49{
50 waitq_initialize(&cv->wq);
51}
52
[08a19ba]53/** Signal the condition has become true to the first waiting thread by waking
54 * it up.
[df364582]55 *
[08a19ba]56 * @param cv Condition variable.
[df364582]57 */
[f761f1eb]58void condvar_signal(condvar_t *cv)
59{
60 waitq_wakeup(&cv->wq, WAKEUP_FIRST);
61}
62
[08a19ba]63/** Signal the condition has become true to all waiting threads by waking
64 * them up.
[df364582]65 *
[08a19ba]66 * @param cv Condition variable.
[df364582]67 */
[f761f1eb]68void condvar_broadcast(condvar_t *cv)
69{
70 waitq_wakeup(&cv->wq, WAKEUP_ALL);
71}
72
/** Wait for the condition becoming true.
 *
 * The caller must hold @a mtx on entry; it is atomically released for the
 * duration of the sleep (with respect to wakeups) and re-acquired before
 * returning, regardless of the outcome of the wait.
 *
 * @param cv Condition variable.
 * @param mtx Mutex held by the caller; released while sleeping.
 * @param usec Timeout value in microseconds.
 * @param flags Select mode of operation.
 *
 * For exact description of meaning of possible combinations of usec and flags,
 * see comment for waitq_sleep_timeout(). Note that when
 * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
 * returned.
 *
 * @return See comment for waitq_sleep_timeout().
 */
errno_t _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec, int flags)
{
	errno_t rc;
	ipl_t ipl;
	bool blocked;

	/* Lock the waitq (and disable interrupts) before dropping the mutex. */
	ipl = waitq_sleep_prepare(&cv->wq);
	/* Unlock only after the waitq is locked so we don't miss a wakeup. */
	mutex_unlock(mtx);

	/*
	 * Discard wakeups that arrived before this wait started; a condvar
	 * wait must block until a signal that follows it, unlike a plain
	 * waitq sleep which would consume a prior missed wakeup immediately.
	 */
	cv->wq.missed_wakeups = 0; /* Enforce blocking. */
	rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, &blocked);
	/* We either went to sleep, or failed with a non-success error code. */
	assert(blocked || rc != EOK);

	waitq_sleep_finish(&cv->wq, blocked, ipl);
	/* Lock only after releasing the waitq to avoid a possible deadlock. */
	mutex_lock(mtx);

	return rc;
}
[b45c443]107
/** Wait for the condition to become true with a locked spinlock.
 *
 * Same protocol as _condvar_wait_timeout(), but the caller protects the
 * condition with a raw spinlock instead of a mutex. The lock is released
 * only after the waitq is locked (no lost wakeups) and re-acquired only
 * after the waitq is released (no lock-order deadlock).
 *
 * The function is not aware of irq_spinlock. Therefore do not even
 * try passing irq_spinlock_t to it. Use _condvar_wait_timeout_irq_spinlock()
 * instead.
 *
 * NOTE(review): callers use the name _condvar_wait_timeout_spinlock()
 * (see _condvar_wait_timeout_irq_spinlock() below) — presumably a macro
 * in the condvar header forwards to this _impl; verify against the header.
 *
 * @param cv Condition variable.
 * @param lock Locked spinlock.
 * @param usec Timeout value in microseconds.
 * @param flags Select mode of operation.
 *
 * For exact description of meaning of possible combinations of usec and flags,
 * see comment for waitq_sleep_timeout(). Note that when
 * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
 * returned.
 *
 * @return See comment for waitq_sleep_timeout().
 */
errno_t _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
    uint32_t usec, int flags)
{
	errno_t rc;
	ipl_t ipl;
	bool blocked;

	/* Lock the waitq (and disable interrupts) before dropping the lock. */
	ipl = waitq_sleep_prepare(&cv->wq);

	/* Unlock only after the waitq is locked so we don't miss a wakeup. */
	spinlock_unlock(lock);

	/* Ignore wakeups delivered before this wait began; see
	 * _condvar_wait_timeout() for the rationale. */
	cv->wq.missed_wakeups = 0; /* Enforce blocking. */
	rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, &blocked);
	/* We either went to sleep, or failed with a non-success error code. */
	assert(blocked || rc != EOK);

	waitq_sleep_finish(&cv->wq, blocked, ipl);
	/* Lock only after releasing the waitq to avoid a possible deadlock. */
	spinlock_lock(lock);

	return rc;
}
148
/** Wait for the condition to become true with a locked irq spinlock.
 *
 * Adapter around the raw-spinlock wait: an irq_spinlock_t carries extra
 * state (saved ipl and the guard flag) on top of its embedded raw lock,
 * so that state is stashed here, the guard is dropped while the raw lock
 * is handed to the spinlock-based wait, and both are restored afterwards.
 *
 * @param cv Condition variable.
 * @param irq_lock Locked irq spinlock.
 * @param usec Timeout value in microseconds.
 * @param flags Select mode of operation.
 *
 * For exact description of meaning of possible combinations of usec and flags,
 * see comment for waitq_sleep_timeout(). Note that when
 * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
 * returned.
 *
 * @return See comment for waitq_sleep_timeout().
 */
errno_t _condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock,
    uint32_t usec, int flags)
{
	errno_t rc;
	/* Save spinlock's state so we can restore it correctly later on. */
	ipl_t ipl = irq_lock->ipl;
	bool guard = irq_lock->guard;

	/* The inner wait unlocks/relocks the raw lock; it must not look guarded. */
	irq_lock->guard = false;

	/*
	 * waitq_sleep_prepare() restores interrupts to the current state,
	 * ie disabled. Therefore, interrupts will remain disabled while
	 * it spins waiting for a pending timeout handler to complete.
	 * Although it spins with interrupts disabled there can only
	 * be a pending timeout if we failed to cancel an imminent
	 * timeout (on another cpu) during a wakeup. As a result the
	 * timeout handler is guaranteed to run (it is most likely already
	 * running) and there is no danger of a deadlock.
	 */
	rc = _condvar_wait_timeout_spinlock(cv, &irq_lock->lock, usec, flags);

	/* Re-establish the irq_spinlock bookkeeping saved above. */
	irq_lock->guard = guard;
	irq_lock->ipl = ipl;

	return rc;
}
190
[cc73a8a1]191/** @}
[b45c443]192 */
Note: See TracBrowser for help on using the repository browser.