source: mainline/kernel/generic/src/synch/condvar.c@ cc106e4

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since cc106e4 was cc106e4, checked in by Adam Hraska <adam.hraska+hos@…>, 13 years ago

Fixed build for sparc64/ultra, sparc64/niagara, ppc32, mips32/GXemul.

  • Property mode set to 100644
File size: 5.4 KB
RevLine 
[f761f1eb]1/*
[df4ed85]2 * Copyright (c) 2001-2004 Jakub Jermar
[f761f1eb]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[cc73a8a1]29/** @addtogroup sync
[b45c443]30 * @{
31 */
32
[cf26ba9]33/**
[b45c443]34 * @file
[cf26ba9]35 * @brief Condition variables.
36 */
37
[f761f1eb]38#include <synch/condvar.h>
39#include <synch/mutex.h>
[46a5b37]40#include <synch/spinlock.h>
[f761f1eb]41#include <synch/waitq.h>
[c0bc189]42#include <arch.h>
[f761f1eb]43
/** Initialize condition variable.
 *
 * Sets up the underlying wait queue; must be called before any other
 * condvar operation on @a cv.
 *
 * @param cv Condition variable.
 */
void condvar_initialize(condvar_t *cv)
{
	waitq_initialize(&cv->wq);
}
52
/** Signal the condition has become true to the first waiting thread by waking
 * it up.
 *
 * Delegates to waitq_wakeup() with WAKEUP_FIRST, i.e. at most one sleeping
 * thread is released.
 *
 * @param cv Condition variable.
 */
void condvar_signal(condvar_t *cv)
{
	waitq_wakeup(&cv->wq, WAKEUP_FIRST);
}
62
/** Signal the condition has become true to all waiting threads by waking
 * them up.
 *
 * Delegates to waitq_wakeup() with WAKEUP_ALL, i.e. every thread currently
 * sleeping on the condvar's wait queue is released.
 *
 * @param cv Condition variable.
 */
void condvar_broadcast(condvar_t *cv)
{
	waitq_wakeup(&cv->wq, WAKEUP_ALL);
}
72
/** Wait for the condition becoming true.
 *
 * @param cv Condition variable.
 * @param mtx Mutex.
 * @param usec Timeout value in microseconds.
 * @param flags Select mode of operation.
 *
 * For exact description of meaning of possible combinations of usec and flags,
 * see comment for waitq_sleep_timeout(). Note that when
 * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
 * returned.
 *
 * @return See comment for waitq_sleep_timeout().
 */
int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec, int flags)
{
	int rc;
	ipl_t ipl;

	/*
	 * The ordering here is the correctness argument: the wait queue is
	 * prepared (via waitq_sleep_prepare()) *before* the mutex is
	 * released, so a wakeup issued by another thread between
	 * mutex_unlock() and going to sleep cannot be lost.
	 */
	ipl = waitq_sleep_prepare(&cv->wq);
	mutex_unlock(mtx);

	/*
	 * Discard any stale missed wakeups so the sleep below actually
	 * blocks instead of returning immediately.
	 */
	cv->wq.missed_wakeups = 0;	/* Enforce blocking. */
	rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);

	/* Reacquire the mutex before returning, per the condvar contract. */
	mutex_lock(mtx);
	waitq_sleep_finish(&cv->wq, rc, ipl);

	return rc;
}
[b45c443]103
/** Wait for the condition to become true with a locked spinlock.
 *
 * The function is not aware of irq_spinlock. Therefore do not even
 * try passing irq_spinlock_t to it. Use _condvar_wait_timeout_irq_spinlock()
 * instead.
 *
 * @param cv Condition variable.
 * @param lock Locked spinlock.
 * @param usec Timeout value in microseconds.
 * @param flags Select mode of operation.
 *
 * For exact description of meaning of possible combinations of usec and flags,
 * see comment for waitq_sleep_timeout(). Note that when
 * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
 * returned.
 *
 * @return See comment for waitq_sleep_timeout().
 */
int _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
	uint32_t usec, int flags)
{
	int rc;
	ipl_t ipl;

	/*
	 * Same lost-wakeup-free ordering as _condvar_wait_timeout(), but
	 * with a raw spinlock as the user-supplied lock: prepare the wait
	 * queue first, only then drop the caller's lock.
	 */
	ipl = waitq_sleep_prepare(&cv->wq);

	spinlock_unlock(lock);

	/* Discard stale missed wakeups so the sleep really blocks. */
	cv->wq.missed_wakeups = 0;	/* Enforce blocking. */
	rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);

	waitq_sleep_finish(&cv->wq, rc, ipl);

	/* Reacquire the caller's spinlock before returning. */
	spinlock_lock(lock);

	return rc;
}
141
/** Wait for the condition to become true with a locked irq spinlock.
 *
 * @param cv Condition variable.
 * @param irq_lock Locked irq spinlock.
 * @param usec Timeout value in microseconds.
 * @param flags Select mode of operation.
 *
 * For exact description of meaning of possible combinations of usec and flags,
 * see comment for waitq_sleep_timeout(). Note that when
 * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
 * returned.
 *
 * @return See comment for waitq_sleep_timeout().
 */
int _condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock,
	uint32_t usec, int flags)
{
	int rc;
	/* Save spinlock's state so we can restore it correctly later on. */
	ipl_t ipl = irq_lock->ipl;
	bool guard = irq_lock->guard;

	/*
	 * Temporarily clear the guard so the plain-spinlock wait below can
	 * unlock/relock the inner lock without tripping irq_spinlock's
	 * bookkeeping; both fields are restored after the wait.
	 */
	irq_lock->guard = false;

	/*
	 * waitq_sleep_prepare() restores interrupts to the current state,
	 * i.e. disabled. Therefore, interrupts will remain disabled while
	 * it spins waiting for a pending timeout handler to complete.
	 * Although it spins with interrupts disabled there can only
	 * be a pending timeout if we failed to cancel an imminent
	 * timeout (on another cpu) during a wakeup. As a result the
	 * timeout handler is guaranteed to run (it is most likely already
	 * running) and there is no danger of a deadlock.
	 */
	/*
	 * NOTE(review): _condvar_wait_timeout_spinlock is presumably a
	 * macro/alias for _condvar_wait_timeout_spinlock_impl declared in
	 * the header — confirm against synch/condvar.h.
	 */
	rc = _condvar_wait_timeout_spinlock(cv, &irq_lock->lock, usec, flags);

	irq_lock->guard = guard;
	irq_lock->ipl = ipl;

	return rc;
}
183
184
[cc73a8a1]185/** @}
[b45c443]186 */
Note: See TracBrowser for help on using the repository browser.