source: mainline/kernel/generic/src/synch/waitq.c@ 4039c77

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 4039c77 was 4039c77, checked in by Jakub Jermar <jakub@…>, 16 years ago

Add assertion to detect attempts to block when holding a spinlock.

  • Property mode set to 100644
File size: 13.3 KB
RevLine 
[f761f1eb]1/*
[df4ed85]2 * Copyright (c) 2001-2004 Jakub Jermar
[f761f1eb]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[cc73a8a1]29/** @addtogroup sync
[b45c443]30 * @{
31 */
32
[9179d0a]33/**
[b45c443]34 * @file
[9179d0a]35 * @brief Wait queue.
36 *
[e3c762cd]37 * Wait queue is the basic synchronization primitive upon which all
[9179d0a]38 * other synchronization primitives build.
39 *
40 * It allows threads to wait for an event in first-come, first-served
41 * fashion. Conditional operation as well as timeouts and interruptions
42 * are supported.
43 */
44
[f761f1eb]45#include <synch/waitq.h>
[922c7ce]46#include <synch/synch.h>
[f761f1eb]47#include <synch/spinlock.h>
[922c7ce]48#include <proc/thread.h>
[4b2c872d]49#include <proc/scheduler.h>
[f761f1eb]50#include <arch/asm.h>
[d99c1d2]51#include <typedefs.h>
[922c7ce]52#include <time/timeout.h>
[f761f1eb]53#include <arch.h>
[922c7ce]54#include <context.h>
[5c9a08b]55#include <adt/list.h>
[6ec34bb]56#include <arch/cycle.h>
[f761f1eb]57
[929ce92]58static void waitq_sleep_timed_out(void *data);
[203f4c3]59
[922c7ce]60/** Initialize wait queue
61 *
62 * Initialize wait queue.
63 *
[ace9358]64 * @param wq Pointer to wait queue to be initialized.
[922c7ce]65 */
[f761f1eb]66void waitq_initialize(waitq_t *wq)
67{
[2d93f1f9]68 spinlock_initialize(&wq->lock, "waitq_lock");
[f761f1eb]69 list_initialize(&wq->head);
70 wq->missed_wakeups = 0;
71}
72
/** Handle timeout during waitq_sleep_timeout() call
 *
 * This routine is called when waitq_sleep_timeout() times out.
 * Interrupts are disabled.
 *
 * It is supposed to try to remove 'its' thread from the wait queue;
 * it can eventually fail to achieve this goal when these two events
 * overlap. In that case it behaves just as though there was no
 * timeout at all.
 *
 * @param data Pointer to the thread that called waitq_sleep_timeout().
 */
void waitq_sleep_timed_out(void *data)
{
	thread_t *t = (thread_t *) data;
	waitq_t *wq;
	bool do_wakeup = false;
	DEADLOCK_PROBE_INIT(p_wqlock);

	/* The thread may have ceased to exist; check under threads_lock. */
	spinlock_lock(&threads_lock);
	if (!thread_exists(t))
		goto out;

grab_locks:
	spinlock_lock(&t->lock);
	if ((wq = t->sleep_queue)) {		/* assignment */
		/*
		 * The wakeup path takes wq->lock before t->lock (see
		 * _waitq_wakeup_unsafe()), so only trylock is safe here;
		 * on failure drop t->lock and start over.
		 */
		if (!spinlock_trylock(&wq->lock)) {
			spinlock_unlock(&t->lock);
			DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
			goto grab_locks;	/* avoid deadlock */
		}

		list_remove(&t->wq_link);
		/*
		 * Resume the thread via its timeout context so that
		 * waitq_sleep_timeout_unsafe() reports ESYNCH_TIMEOUT.
		 */
		t->saved_context = t->sleep_timeout_context;
		do_wakeup = true;
		t->sleep_queue = NULL;
		spinlock_unlock(&wq->lock);
	}

	t->timeout_pending = false;
	spinlock_unlock(&t->lock);

	/* Defer thread_ready() until all thread/waitq locks are dropped. */
	if (do_wakeup)
		thread_ready(t);

out:
	spinlock_unlock(&threads_lock);
}
121
/** Interrupt sleeping thread.
 *
 * This routine attempts to interrupt a thread from its sleep in a waitqueue.
 * If the thread is not found sleeping, no action is taken.
 *
 * @param t Thread to be interrupted.
 */
void waitq_interrupt_sleep(thread_t *t)
{
	waitq_t *wq;
	bool do_wakeup = false;
	ipl_t ipl;
	DEADLOCK_PROBE_INIT(p_wqlock);

	ipl = interrupts_disable();
	/* The thread may have ceased to exist; check under threads_lock. */
	spinlock_lock(&threads_lock);
	if (!thread_exists(t))
		goto out;

grab_locks:
	spinlock_lock(&t->lock);
	if ((wq = t->sleep_queue)) {		/* assignment */
		if (!(t->sleep_interruptible)) {
			/*
			 * The sleep cannot be interrupted.
			 */
			spinlock_unlock(&t->lock);
			goto out;
		}

		/*
		 * The wakeup path takes wq->lock before t->lock (see
		 * _waitq_wakeup_unsafe()), so only trylock is safe here;
		 * on failure drop t->lock and start over.
		 */
		if (!spinlock_trylock(&wq->lock)) {
			spinlock_unlock(&t->lock);
			DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
			goto grab_locks;	/* avoid deadlock */
		}

		/* Cancel a pending sleep timeout, if any. */
		if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
			t->timeout_pending = false;

		list_remove(&t->wq_link);
		/*
		 * Resume the thread via its interruption context so that
		 * waitq_sleep_timeout_unsafe() reports ESYNCH_INTERRUPTED.
		 */
		t->saved_context = t->sleep_interruption_context;
		do_wakeup = true;
		t->sleep_queue = NULL;
		spinlock_unlock(&wq->lock);
	}
	spinlock_unlock(&t->lock);

	/* Defer thread_ready() until all thread/waitq locks are dropped. */
	if (do_wakeup)
		thread_ready(t);

out:
	spinlock_unlock(&threads_lock);
	interrupts_restore(ipl);
}
[203f4c3]176
/** Interrupt the first thread sleeping in the wait queue.
 *
 * Note that the caller somehow needs to know that the thread to be interrupted
 * is sleeping interruptibly.
 *
 * @param wq Pointer to wait queue.
 */
void waitq_unsleep(waitq_t *wq)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&wq->lock);

	if (!list_empty(&wq->head)) {
		thread_t *t;

		/* The head of the list is the longest-waiting thread. */
		t = list_get_instance(wq->head.next, thread_t, wq_link);
		spinlock_lock(&t->lock);
		ASSERT(t->sleep_interruptible);
		/* Cancel a pending sleep timeout, if any. */
		if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
			t->timeout_pending = false;
		list_remove(&t->wq_link);
		/*
		 * Resume the thread via its interruption context so that
		 * waitq_sleep_timeout_unsafe() reports ESYNCH_INTERRUPTED.
		 */
		t->saved_context = t->sleep_interruption_context;
		t->sleep_queue = NULL;
		spinlock_unlock(&t->lock);
		thread_ready(t);
	}

	spinlock_unlock(&wq->lock);
	interrupts_restore(ipl);
}
209
/*
 * True if the caller requested a strictly non-blocking operation:
 * the SYNCH_FLAGS_NON_BLOCKING flag is set and no timeout is given.
 */
#define PARAM_NON_BLOCKING(flags, usec) \
	(((flags) & SYNCH_FLAGS_NON_BLOCKING) && ((usec) == 0))
212
[203f4c3]213/** Sleep until either wakeup, timeout or interruption occurs
[922c7ce]214 *
[116d1ef4]215 * This is a sleep implementation which allows itself to time out or to be
[f761f1eb]216 * interrupted from the sleep, restoring a failover context.
217 *
[c0bc189]218 * Sleepers are organised in a FIFO fashion in a structure called wait queue.
[922c7ce]219 *
[f761f1eb]220 * This function is really basic in that other functions as waitq_sleep()
221 * and all the *_timeout() functions use it.
222 *
[929ce92]223 * @param wq Pointer to wait queue.
224 * @param usec Timeout in microseconds.
225 * @param flags Specify mode of the sleep.
[922c7ce]226 *
[116d1ef4]227 * The sleep can be interrupted only if the
228 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
[6f4495f5]229 *
[116d1ef4]230 * If usec is greater than zero, regardless of the value of the
[4e33b6b]231 * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either
232 * timeout, interruption or wakeup comes.
[f761f1eb]233 *
[4e33b6b]234 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags,
235 * the call will not return until wakeup or interruption comes.
[a783ca4]236 *
[4e33b6b]237 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the
238 * call will immediately return, reporting either success or failure.
[f761f1eb]239 *
[929ce92]240 * @return Returns one of ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT,
241 * ESYNCH_INTERRUPTED, ESYNCH_OK_ATOMIC and
242 * ESYNCH_OK_BLOCKED.
[922c7ce]243 *
[929ce92]244 * @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time of
245 * the call there was no pending wakeup.
[a783ca4]246 *
[929ce92]247 * @li ESYNCH_TIMEOUT means that the sleep timed out.
[922c7ce]248 *
[929ce92]249 * @li ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread.
[203f4c3]250 *
[929ce92]251 * @li ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was
252 * a pending wakeup at the time of the call. The caller was not put
253 * asleep at all.
[a783ca4]254 *
[929ce92]255 * @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was
256 * attempted.
[f761f1eb]257 */
[7f1c620]258int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, int flags)
[f761f1eb]259{
[c0bc189]260 ipl_t ipl;
261 int rc;
[4039c77]262
263 ASSERT(!PREEMPTION_DISABLED || PARAM_NON_BLOCKING(flags, usec));
[f761f1eb]264
[c0bc189]265 ipl = waitq_sleep_prepare(wq);
[116d1ef4]266 rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
[c0bc189]267 waitq_sleep_finish(wq, rc, ipl);
268 return rc;
269}
270
/** Prepare to sleep in a waitq.
 *
 * This function will return holding the lock of the wait queue
 * and interrupts disabled.
 *
 * @param wq Wait queue.
 *
 * @return Interrupt level as it existed on entry to this function.
 */
ipl_t waitq_sleep_prepare(waitq_t *wq)
{
	ipl_t ipl;

restart:
	ipl = interrupts_disable();

	if (THREAD) {	/* needed during system initialization */
		/*
		 * Busy waiting for a delayed timeout.
		 * This is an important fix for the race condition between
		 * a delayed timeout and a next call to waitq_sleep_timeout().
		 * Simply, the thread is not allowed to go to sleep if
		 * there are timeouts in progress.
		 */
		spinlock_lock(&THREAD->lock);
		if (THREAD->timeout_pending) {
			/* Release everything and spin until the timeout fires. */
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(ipl);
			goto restart;
		}
		spinlock_unlock(&THREAD->lock);
	}

	spinlock_lock(&wq->lock);
	return ipl;
}
307
308/** Finish waiting in a wait queue.
309 *
310 * This function restores interrupts to the state that existed prior
311 * to the call to waitq_sleep_prepare(). If necessary, the wait queue
312 * lock is released.
313 *
[ace9358]314 * @param wq Wait queue.
315 * @param rc Return code of waitq_sleep_timeout_unsafe().
316 * @param ipl Interrupt level returned by waitq_sleep_prepare().
[c0bc189]317 */
318void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
319{
320 switch (rc) {
321 case ESYNCH_WOULD_BLOCK:
322 case ESYNCH_OK_ATOMIC:
323 spinlock_unlock(&wq->lock);
324 break;
325 default:
326 break;
327 }
328 interrupts_restore(ipl);
329}
330
/** Internal implementation of waitq_sleep_timeout().
 *
 * This function implements logic of sleeping in a wait queue.
 * This call must be preceded by a call to waitq_sleep_prepare()
 * and followed by a call to waitq_sleep_finish().
 *
 * @param wq See waitq_sleep_timeout().
 * @param usec See waitq_sleep_timeout().
 * @param flags See waitq_sleep_timeout().
 *
 * @return See waitq_sleep_timeout().
 */
int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, int flags)
{
	/* checks whether to go to sleep at all */
	if (wq->missed_wakeups) {
		/* Consume a recorded wakeup; no need to block at all. */
		wq->missed_wakeups--;
		return ESYNCH_OK_ATOMIC;
	}
	else {
		if (PARAM_NON_BLOCKING(flags, usec)) {
			/* return immediatelly instead of going to sleep */
			return ESYNCH_WOULD_BLOCK;
		}
	}

	/*
	 * Now we are firmly decided to go to sleep.
	 */
	spinlock_lock(&THREAD->lock);

	if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {

		/*
		 * If the thread was already interrupted,
		 * don't go to sleep at all.
		 */
		if (THREAD->interrupted) {
			spinlock_unlock(&THREAD->lock);
			spinlock_unlock(&wq->lock);
			return ESYNCH_INTERRUPTED;
		}

		/*
		 * Set context that will be restored if the sleep
		 * of this thread is ever interrupted.
		 * context_save() also returns a second time, reporting
		 * failure, when an interrupter restores this context
		 * (see waitq_interrupt_sleep() and waitq_unsleep()).
		 */
		THREAD->sleep_interruptible = true;
		if (!context_save(&THREAD->sleep_interruption_context)) {
			/* Short emulation of scheduler() return code. */
			THREAD->last_cycle = get_cycle();
			spinlock_unlock(&THREAD->lock);
			return ESYNCH_INTERRUPTED;
		}

	} else {
		THREAD->sleep_interruptible = false;
	}

	if (usec) {
		/*
		 * We use the timeout variant. As above, the second return
		 * from context_save() happens when waitq_sleep_timed_out()
		 * restores this context after the timeout fires.
		 */
		if (!context_save(&THREAD->sleep_timeout_context)) {
			/* Short emulation of scheduler() return code. */
			THREAD->last_cycle = get_cycle();
			spinlock_unlock(&THREAD->lock);
			return ESYNCH_TIMEOUT;
		}
		THREAD->timeout_pending = true;
		timeout_register(&THREAD->sleep_timeout, (uint64_t) usec,
		    waitq_sleep_timed_out, THREAD);
	}

	/* Enqueue at the tail: sleepers are served first-come, first-served. */
	list_append(&THREAD->wq_link, &wq->head);

	/*
	 * Suspend execution.
	 */
	THREAD->state = Sleeping;
	THREAD->sleep_queue = wq;

	spinlock_unlock(&THREAD->lock);

	/* wq->lock is released in scheduler_separated_stack() */
	scheduler();

	/* Normal path: we were woken up by _waitq_wakeup_unsafe(). */
	return ESYNCH_OK_BLOCKED;
}
418
419
[922c7ce]420/** Wake up first thread sleeping in a wait queue
421 *
[4e33b6b]422 * Wake up first thread sleeping in a wait queue. This is the SMP- and IRQ-safe
423 * wrapper meant for general use.
[922c7ce]424 *
[4e33b6b]425 * Besides its 'normal' wakeup operation, it attempts to unregister possible
426 * timeout.
[922c7ce]427 *
[ace9358]428 * @param wq Pointer to wait queue.
429 * @param mode Wakeup mode.
[f761f1eb]430 */
[5c8ba05]431void waitq_wakeup(waitq_t *wq, wakeup_mode_t mode)
[f761f1eb]432{
[22f7769]433 ipl_t ipl;
[f761f1eb]434
[22f7769]435 ipl = interrupts_disable();
[f761f1eb]436 spinlock_lock(&wq->lock);
437
[5c8ba05]438 _waitq_wakeup_unsafe(wq, mode);
[f761f1eb]439
[5c8ba05]440 spinlock_unlock(&wq->lock);
441 interrupts_restore(ipl);
[f761f1eb]442}
443
/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
 *
 * This is the internal SMP- and IRQ-unsafe version of waitq_wakeup(). It
 * assumes wq->lock is already locked and interrupts are already disabled.
 *
 * @param wq Pointer to wait queue.
 * @param mode If mode is WAKEUP_FIRST, then the longest waiting
 * thread, if any, is woken up. If mode is WAKEUP_ALL, then
 * all waiting threads, if any, are woken up. If there are
 * no waiting threads to be woken up, the missed wakeup is
 * recorded in the wait queue.
 */
void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode)
{
	thread_t *t;
	size_t count = 0;	/* number of threads woken so far */

loop:
	if (list_empty(&wq->head)) {
		wq->missed_wakeups++;
		/*
		 * A WAKEUP_ALL that already woke somebody must not also
		 * record a missed wakeup; undo the increment in that case.
		 */
		if (count && mode == WAKEUP_ALL)
			wq->missed_wakeups--;
		return;
	}

	count++;
	/* The head of the list is the longest-waiting thread. */
	t = list_get_instance(wq->head.next, thread_t, wq_link);

	/*
	 * Lock the thread prior to removing it from the wq.
	 * This is not necessary because of mutual exclusion
	 * (the link belongs to the wait queue), but because
	 * of synchronization with waitq_sleep_timed_out()
	 * and thread_interrupt_sleep().
	 *
	 * In order for these two functions to work, the following
	 * invariant must hold:
	 *
	 * t->sleep_queue != NULL <=> t sleeps in a wait queue
	 *
	 * For an observer who locks the thread, the invariant
	 * holds only when the lock is held prior to removing
	 * it from the wait queue.
	 */
	spinlock_lock(&t->lock);
	list_remove(&t->wq_link);

	/* Cancel a pending sleep timeout, if any. */
	if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
		t->timeout_pending = false;
	t->sleep_queue = NULL;
	spinlock_unlock(&t->lock);

	thread_ready(t);

	/* WAKEUP_ALL keeps draining the queue until it is empty. */
	if (mode == WAKEUP_ALL)
		goto loop;
}
[b45c443]501
[cc73a8a1]502/** @}
[b45c443]503 */
Note: See TracBrowser for help on using the repository browser.