source: mainline/kernel/generic/src/synch/condvar.c@ daadfa6

ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since daadfa6 was 5110d0a, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 3 years ago

Turn a bunch of macros into regular functions

  • Property mode set to 100644
File size: 6.1 KB
Line 
1/*
2 * Copyright (c) 2001-2004 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup kernel_sync
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Condition variables.
36 */
37
#include <assert.h>
#include <synch/condvar.h>
#include <synch/mutex.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <arch.h>
43
44/** Initialize condition variable.
45 *
46 * @param cv Condition variable.
47 */
/** Initialize condition variable.
 *
 * Sets up the embedded wait queue; no other state is kept in condvar_t.
 * Must be called before any other condvar operation on @a cv.
 *
 * @param cv Condition variable.
 */
void condvar_initialize(condvar_t *cv)
{
	waitq_initialize(&cv->wq);
}
52
53/** Signal the condition has become true to the first waiting thread by waking
54 * it up.
55 *
56 * @param cv Condition variable.
57 */
/** Signal the condition has become true to the first waiting thread by waking
 * it up.
 *
 * If no thread is waiting, the wakeup is recorded by the wait queue
 * (as a missed wakeup); note that condvar_wait*() clears missed wakeups
 * before sleeping, so signals do not accumulate across waits.
 *
 * @param cv Condition variable.
 */
void condvar_signal(condvar_t *cv)
{
	waitq_wakeup(&cv->wq, WAKEUP_FIRST);
}
62
63/** Signal the condition has become true to all waiting threads by waking
64 * them up.
65 *
66 * @param cv Condition variable.
67 */
/** Signal the condition has become true to all waiting threads by waking
 * them up.
 *
 * Threads that are not currently sleeping on the wait queue are not
 * affected (see condvar_signal() regarding missed wakeups).
 *
 * @param cv Condition variable.
 */
void condvar_broadcast(condvar_t *cv)
{
	waitq_wakeup(&cv->wq, WAKEUP_ALL);
}
72
73/** Wait for the condition becoming true.
74 *
75 * @param cv Condition variable.
76 * @param mtx Mutex.
77 * @param usec Timeout value in microseconds.
78 *
79 * @return See comment for waitq_sleep_timeout().
80 */
/** Wait for the condition becoming true.
 *
 * The caller must hold @a mtx. The mutex is released only after the wait
 * queue has been locked by waitq_sleep_prepare(), so a signal issued
 * between the unlock and the sleep cannot be lost. The mutex is
 * re-acquired before returning, regardless of the outcome of the sleep.
 *
 * @param cv   Condition variable.
 * @param mtx  Mutex locked by the caller; re-locked on return.
 * @param usec Timeout value in microseconds.
 *
 * @return See comment for waitq_sleep_timeout().
 */
errno_t condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec)
{
	errno_t rc;
	ipl_t ipl;
	bool blocked;

	ipl = waitq_sleep_prepare(&cv->wq);
	/* Unlock only after the waitq is locked so we don't miss a wakeup. */
	mutex_unlock(mtx);

	/*
	 * Enforce blocking: unlike a semaphore, a condition variable must not
	 * remember wakeups that happened before this wait started, so any
	 * pending missed wakeup is discarded here (the waitq is locked).
	 */
	cv->wq.missed_wakeups = 0; /* Enforce blocking. */
	/*
	 * NOTE(review): SYNCH_FLAGS_NON_BLOCKING is passed although blocking
	 * is enforced above; presumably the flag only affects the
	 * missed-wakeup fast path just cleared — confirm against
	 * waitq_sleep_timeout_unsafe().
	 */
	rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, SYNCH_FLAGS_NON_BLOCKING, &blocked);
	/* With missed wakeups cleared, a non-blocked return must be an error. */
	assert(blocked || rc != EOK);

	waitq_sleep_finish(&cv->wq, blocked, ipl);
	/* Lock only after releasing the waitq to avoid a possible deadlock. */
	mutex_lock(mtx);

	return rc;
}
101
/** Wait for the condition becoming true, without a timeout.
 *
 * Same protocol as condvar_wait_timeout(): the caller must hold @a mtx,
 * which is released only after the wait queue is locked (so no wakeup is
 * missed) and re-acquired before returning.
 *
 * @param cv  Condition variable.
 * @param mtx Mutex locked by the caller; re-locked on return.
 *
 * @return See comment for waitq_sleep_timeout().
 */
errno_t condvar_wait(condvar_t *cv, mutex_t *mtx)
{
	errno_t rc;
	ipl_t ipl;
	bool blocked;

	ipl = waitq_sleep_prepare(&cv->wq);
	/* Unlock only after the waitq is locked so we don't miss a wakeup. */
	mutex_unlock(mtx);

	/*
	 * Enforce blocking: discard any wakeup recorded before this wait
	 * started; condition variables must not remember past signals.
	 */
	cv->wq.missed_wakeups = 0; /* Enforce blocking. */
	rc = waitq_sleep_unsafe(&cv->wq, &blocked);
	/* With missed wakeups cleared, a non-blocked return must be an error. */
	assert(blocked || rc != EOK);

	waitq_sleep_finish(&cv->wq, blocked, ipl);
	/* Lock only after releasing the waitq to avoid a possible deadlock. */
	mutex_lock(mtx);

	return rc;
}
122
123/** Wait for the condition to become true with a locked spinlock.
124 *
125 * The function is not aware of irq_spinlock. Therefore do not even
126 * try passing irq_spinlock_t to it. Use _condvar_wait_timeout_irq_spinlock()
127 * instead.
128 *
129 * @param cv Condition variable.
130 * @param lock Locked spinlock.
131 * @param usec Timeout value in microseconds.
132 * @param flags Select mode of operation.
133 *
134 * For exact description of meaning of possible combinations of usec and flags,
135 * see comment for waitq_sleep_timeout(). Note that when
136 * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
137 * returned.
138 *
139 * @return See comment for waitq_sleep_timeout().
140 */
/** Wait for the condition to become true with a locked spinlock.
 *
 * The function is not aware of irq_spinlock. Therefore do not even
 * try passing irq_spinlock_t to it. Use _condvar_wait_timeout_irq_spinlock()
 * instead.
 *
 * Protocol mirrors condvar_wait_timeout(), with the plain spinlock taking
 * the place of the mutex: unlock only after the waitq is locked, re-lock
 * only after the waitq is released.
 *
 * @param cv    Condition variable.
 * @param lock  Locked spinlock; re-locked on return.
 * @param usec  Timeout value in microseconds.
 * @param flags Select mode of operation.
 *
 * For exact description of meaning of possible combinations of usec and flags,
 * see comment for waitq_sleep_timeout(). Note that when
 * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
 * returned.
 *
 * @return See comment for waitq_sleep_timeout().
 */
errno_t _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
    uint32_t usec, int flags)
{
	errno_t rc;
	ipl_t ipl;
	bool blocked;

	ipl = waitq_sleep_prepare(&cv->wq);

	/* Unlock only after the waitq is locked so we don't miss a wakeup. */
	spinlock_unlock(lock);

	/* Discard wakeups recorded before this wait started. */
	cv->wq.missed_wakeups = 0; /* Enforce blocking. */
	rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, &blocked);
	/* With missed wakeups cleared, a non-blocked return must be an error. */
	assert(blocked || rc != EOK);

	waitq_sleep_finish(&cv->wq, blocked, ipl);
	/* Lock only after releasing the waitq to avoid a possible deadlock. */
	spinlock_lock(lock);

	return rc;
}
163
164/** Wait for the condition to become true with a locked irq spinlock.
165 *
166 * @param cv Condition variable.
167 * @param lock Locked irq spinlock.
168 * @param usec Timeout value in microseconds.
169 * @param flags Select mode of operation.
170 *
171 * For exact description of meaning of possible combinations of usec and flags,
172 * see comment for waitq_sleep_timeout(). Note that when
173 * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
174 * returned.
175 *
176 * @return See comment for waitq_sleep_timeout().
177 */
/** Wait for the condition to become true with a locked irq spinlock.
 *
 * The irq_spinlock's bookkeeping (saved ipl and guard flag) is stashed
 * locally and the guard is cleared so that the underlying plain spinlock
 * can be unlocked/relocked by the waiting code without tripping the
 * irq_spinlock invariants; both fields are restored before returning.
 *
 * @param cv       Condition variable.
 * @param irq_lock Locked irq spinlock; re-locked on return.
 * @param usec     Timeout value in microseconds.
 * @param flags    Select mode of operation.
 *
 * For exact description of meaning of possible combinations of usec and flags,
 * see comment for waitq_sleep_timeout(). Note that when
 * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
 * returned.
 *
 * @return See comment for waitq_sleep_timeout().
 */
errno_t _condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock,
    uint32_t usec, int flags)
{
	errno_t rc;
	/* Save spinlock's state so we can restore it correctly later on. */
	ipl_t ipl = irq_lock->ipl;
	bool guard = irq_lock->guard;

	/* Temporarily drop the guard; we operate on the inner plain lock. */
	irq_lock->guard = false;

	/*
	 * waitq_prepare() restores interrupts to the current state,
	 * ie disabled. Therefore, interrupts will remain disabled while
	 * it spins waiting for a pending timeout handler to complete.
	 * Although it spins with interrupts disabled there can only
	 * be a pending timeout if we failed to cancel an imminent
	 * timeout (on another cpu) during a wakeup. As a result the
	 * timeout handler is guaranteed to run (it is most likely already
	 * running) and there is no danger of a deadlock.
	 */
	/*
	 * NOTE(review): _condvar_wait_timeout_spinlock is presumably a
	 * wrapper (macro or inline in condvar.h) over
	 * _condvar_wait_timeout_spinlock_impl() defined above — verify.
	 */
	rc = _condvar_wait_timeout_spinlock(cv, &irq_lock->lock, usec, flags);

	/* Restore the irq_spinlock bookkeeping saved on entry. */
	irq_lock->guard = guard;
	irq_lock->ipl = ipl;

	return rc;
}
205
206/** @}
207 */
Note: See TracBrowser for help on using the repository browser.