source: mainline/kernel/generic/src/synch/waitq.c@b3f8fb7

Last change on this file since b3f8fb7 was b3f8fb7, checked in by Martin Decky <martin@…>, 18 years ago

huge type system cleanup
remove cyclical type dependencies across multiple header files
many minor coding style fixes

  • Property mode set to 100644
File size: 10.8 KB
/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sync
 * @{
 */

/**
 * @file
 * @brief Wait queue.
 *
 * Wait queue is the basic synchronization primitive upon which all
 * other synchronization primitives are built.
 *
 * It allows threads to wait for an event in first-come, first-served
 * fashion. Conditional operation as well as timeouts and interruptions
 * are supported.
 */
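
/*
 * A minimal usage sketch (illustrative only; the queue name and the
 * timeout value are hypothetical, and SYNCH_FLAGS_NONE is assumed to be
 * the no-flags constant from <synch/synch.h>):
 *
 * @code
 * static waitq_t wq;
 *
 * // once, during initialization
 * waitq_initialize(&wq);
 *
 * // consumer: wait for the event, giving up after 100000 microseconds
 * int rc = waitq_sleep_timeout(&wq, 100000, SYNCH_FLAGS_NONE);
 *
 * // producer: wake up the first sleeper, if any
 * waitq_wakeup(&wq, false);
 * @endcode
 */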

#include <synch/waitq.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <proc/thread.h>
#include <proc/scheduler.h>
#include <arch/asm.h>
#include <arch/types.h>
#include <time/timeout.h>
#include <arch.h>
#include <context.h>
#include <adt/list.h>

static void waitq_timeouted_sleep(void *data);

/** Initialize wait queue
 *
 * @param wq Pointer to wait queue to be initialized.
 */
void waitq_initialize(waitq_t *wq)
{
    spinlock_initialize(&wq->lock, "waitq_lock");
    list_initialize(&wq->head);
    wq->missed_wakeups = 0;
}

/** Handle timeout during waitq_sleep_timeout() call
 *
 * This routine is called when waitq_sleep_timeout() times out.
 * Interrupts are disabled.
 *
 * It is supposed to try to remove 'its' thread from the wait queue;
 * it may, however, fail to achieve this goal when the timeout and a
 * regular wakeup overlap. In that case it behaves just as though there
 * was no timeout at all.
 *
 * @param data Pointer to the thread that called waitq_sleep_timeout().
 */
void waitq_timeouted_sleep(void *data)
{
    thread_t *t = (thread_t *) data;
    waitq_t *wq;
    bool do_wakeup = false;

    spinlock_lock(&threads_lock);
    if (!thread_exists(t))
        goto out;

grab_locks:
    spinlock_lock(&t->lock);
    if ((wq = t->sleep_queue)) {    /* assignment */
        if (!spinlock_trylock(&wq->lock)) {
            spinlock_unlock(&t->lock);
            goto grab_locks;    /* avoid deadlock */
        }

        list_remove(&t->wq_link);
        t->saved_context = t->sleep_timeout_context;
        do_wakeup = true;
        t->sleep_queue = NULL;
        spinlock_unlock(&wq->lock);
    }

    t->timeout_pending = false;
    spinlock_unlock(&t->lock);

    if (do_wakeup)
        thread_ready(t);

out:
    spinlock_unlock(&threads_lock);
}
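
/*
 * For reference, this callback is armed by waitq_sleep_timeout_unsafe()
 * below; the registration pairs with the cleanup performed above
 * (a condensed excerpt of the code later in this file):
 *
 * @code
 * THREAD->timeout_pending = true;
 * timeout_register(&THREAD->sleep_timeout, (uint64_t) usec,
 *     waitq_timeouted_sleep, THREAD);
 * @endcode
 */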

/** Sleep until either wakeup, timeout or interruption occurs
 *
 * This is a sleep implementation which allows itself to time out or to be
 * interrupted from the sleep, restoring a failover context.
 *
 * Sleepers are organised in a FIFO fashion in a structure called wait queue.
 *
 * This function is really basic in that other functions such as waitq_sleep()
 * and all the *_timeout() functions use it.
 *
 * @param wq Pointer to wait queue.
 * @param usec Timeout in microseconds.
 * @param flags Specify mode of the sleep.
 *
 * The sleep can be interrupted only if the
 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
 *
 * If usec is greater than zero, regardless of the value of the
 * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either
 * timeout, interruption or wakeup comes.
 *
 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags,
 * the call will not return until wakeup or interruption comes.
 *
 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the
 * call will immediately return, reporting either success or failure.
 *
 * @return One of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, ESYNCH_INTERRUPTED,
 * ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED.
 *
 * @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time of
 * the call there was no pending wakeup.
 *
 * @li ESYNCH_TIMEOUT means that the sleep timed out.
 *
 * @li ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread.
 *
 * @li ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was
 * a pending wakeup at the time of the call. The caller was not put
 * asleep at all.
 *
 * @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was
 * attempted.
 */
int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, int flags)
{
    ipl_t ipl;
    int rc;

    ipl = waitq_sleep_prepare(wq);
    rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
    waitq_sleep_finish(wq, rc, ipl);
    return rc;
}
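
/*
 * A sketch of how a caller might act on the return codes documented
 * above (the queue, timeout and flags are illustrative only):
 *
 * @code
 * switch (waitq_sleep_timeout(&wq, 1000, SYNCH_FLAGS_INTERRUPTIBLE)) {
 * case ESYNCH_OK_ATOMIC:
 * case ESYNCH_OK_BLOCKED:
 *     // wakeup received, either atomically or after sleeping
 *     break;
 * case ESYNCH_TIMEOUT:
 *     // no wakeup within 1000 microseconds
 *     break;
 * case ESYNCH_INTERRUPTED:
 *     // somebody interrupted the sleeping thread
 *     break;
 * }
 * @endcode
 */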

/** Prepare to sleep in a waitq.
 *
 * This function will return holding the lock of the wait queue
 * and interrupts disabled.
 *
 * @param wq Wait queue.
 *
 * @return Interrupt level as it existed on entry to this function.
 */
ipl_t waitq_sleep_prepare(waitq_t *wq)
{
    ipl_t ipl;

restart:
    ipl = interrupts_disable();

    if (THREAD) {   /* needed during system initialization */
        /*
         * Busy waiting for a delayed timeout.
         * This is an important fix for the race condition between
         * a delayed timeout and a subsequent call to
         * waitq_sleep_timeout(). Simply put, the thread is not
         * allowed to go to sleep if there are timeouts in progress.
         */
        spinlock_lock(&THREAD->lock);
        if (THREAD->timeout_pending) {
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(ipl);
            goto restart;
        }
        spinlock_unlock(&THREAD->lock);
    }

    spinlock_lock(&wq->lock);
    return ipl;
}

/** Finish waiting in a wait queue.
 *
 * This function restores interrupts to the state that existed prior
 * to the call to waitq_sleep_prepare(). If necessary, the wait queue
 * lock is released.
 *
 * @param wq Wait queue.
 * @param rc Return code of waitq_sleep_timeout_unsafe().
 * @param ipl Interrupt level returned by waitq_sleep_prepare().
 */
void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
{
    switch (rc) {
    case ESYNCH_WOULD_BLOCK:
    case ESYNCH_OK_ATOMIC:
        spinlock_unlock(&wq->lock);
        break;
    default:
        break;
    }
    interrupts_restore(ipl);
}
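
/*
 * The prepare/unsafe/finish triplet lets a caller run its own code while
 * wq->lock is held and interrupts are disabled, e.g. to re-check a wait
 * condition atomically. A sketch mirroring waitq_sleep_timeout() above:
 *
 * @code
 * ipl_t ipl = waitq_sleep_prepare(&wq);
 * // wq->lock held, interrupts disabled: safe to inspect shared state
 * int rc = waitq_sleep_timeout_unsafe(&wq, usec, flags);
 * waitq_sleep_finish(&wq, rc, ipl);
 * @endcode
 */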

/** Internal implementation of waitq_sleep_timeout().
 *
 * This function implements the logic of sleeping in a wait queue.
 * This call must be preceded by a call to waitq_sleep_prepare()
 * and followed by a call to waitq_sleep_finish().
 *
 * @param wq See waitq_sleep_timeout().
 * @param usec See waitq_sleep_timeout().
 * @param flags See waitq_sleep_timeout().
 *
 * @return See waitq_sleep_timeout().
 */
int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, int flags)
{
    /* check whether to go to sleep at all */
    if (wq->missed_wakeups) {
        wq->missed_wakeups--;
        return ESYNCH_OK_ATOMIC;
    } else {
        if ((flags & SYNCH_FLAGS_NON_BLOCKING) && (usec == 0)) {
            /* return immediately instead of going to sleep */
            return ESYNCH_WOULD_BLOCK;
        }
    }

    /*
     * Now we are firmly decided to go to sleep.
     */
    spinlock_lock(&THREAD->lock);

    if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {

        /*
         * If the thread was already interrupted,
         * don't go to sleep at all.
         */
        if (THREAD->interrupted) {
            spinlock_unlock(&THREAD->lock);
            spinlock_unlock(&wq->lock);
            return ESYNCH_INTERRUPTED;
        }

        /*
         * Set context that will be restored if the sleep
         * of this thread is ever interrupted.
         */
        THREAD->sleep_interruptible = true;
        if (!context_save(&THREAD->sleep_interruption_context)) {
            /* Short emulation of scheduler() return code. */
            spinlock_unlock(&THREAD->lock);
            return ESYNCH_INTERRUPTED;
        }

    } else {
        THREAD->sleep_interruptible = false;
    }

    if (usec) {
        /* We use the timeout variant. */
        if (!context_save(&THREAD->sleep_timeout_context)) {
            /* Short emulation of scheduler() return code. */
            spinlock_unlock(&THREAD->lock);
            return ESYNCH_TIMEOUT;
        }
        THREAD->timeout_pending = true;
        timeout_register(&THREAD->sleep_timeout, (uint64_t) usec,
            waitq_timeouted_sleep, THREAD);
    }

    list_append(&THREAD->wq_link, &wq->head);

    /*
     * Suspend execution.
     */
    THREAD->state = Sleeping;
    THREAD->sleep_queue = wq;

    spinlock_unlock(&THREAD->lock);

    /* wq->lock is released in scheduler_separated_stack() */
    scheduler();

    return ESYNCH_OK_BLOCKED;
}

/** Wake up first thread sleeping in a wait queue
 *
 * Wake up first thread sleeping in a wait queue. This is the SMP- and IRQ-safe
 * wrapper meant for general use.
 *
 * Besides its 'normal' wakeup operation, it attempts to unregister a possible
 * timeout.
 *
 * @param wq Pointer to wait queue.
 * @param all If this is non-zero, all sleeping threads will be woken up and
 * the missed wakeups count will be zeroed.
 */
void waitq_wakeup(waitq_t *wq, bool all)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&wq->lock);

    _waitq_wakeup_unsafe(wq, all);

    spinlock_unlock(&wq->lock);
    interrupts_restore(ipl);
}

/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
 *
 * This is the internal SMP- and IRQ-unsafe version of waitq_wakeup(). It
 * assumes wq->lock is already locked and interrupts are already disabled.
 *
 * @param wq Pointer to wait queue.
 * @param all If this is non-zero, all sleeping threads will be woken up and
 * the missed wakeups count will be zeroed.
 */
void _waitq_wakeup_unsafe(waitq_t *wq, bool all)
{
    thread_t *t;

loop:
    if (list_empty(&wq->head)) {
        wq->missed_wakeups++;
        if (all)
            wq->missed_wakeups = 0;
        return;
    }

    t = list_get_instance(wq->head.next, thread_t, wq_link);

    /*
     * Lock the thread prior to removing it from the wq.
     * This is not necessary for mutual exclusion (the link
     * belongs to the wait queue), but for synchronization with
     * waitq_timeouted_sleep() and thread_interrupt_sleep().
     *
     * In order for these two functions to work, the following
     * invariant must hold:
     *
     * t->sleep_queue != NULL <=> t sleeps in a wait queue
     *
     * For an observer who locks the thread, the invariant
     * holds only if the thread is locked prior to its removal
     * from the wait queue.
     */
    spinlock_lock(&t->lock);
    list_remove(&t->wq_link);

    if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
        t->timeout_pending = false;
    t->sleep_queue = NULL;
    spinlock_unlock(&t->lock);

    thread_ready(t);

    if (all)
        goto loop;
}

/** @}
 */