source: mainline/kernel/generic/src/synch/waitq.c@dec16a2

Last change on this file since dec16a2 was 6ec34bb, checked in by Stanislav Kozina <stanislav.kozina@…>, 16 years ago:

bugfix: synch/waitq.c saves THREAD->last_cycle after context restored.

/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sync
 * @{
 */

/**
 * @file
 * @brief Wait queue.
 *
 * Wait queue is the basic synchronization primitive upon which all
 * other synchronization primitives build.
 *
 * It allows threads to wait for an event in first-come, first-served
 * fashion. Conditional operation as well as timeouts and interruptions
 * are supported.
 */
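
/*
 * Illustrative usage sketch: a bare event built directly on the wait
 * queue API. One thread blocks in waitq_sleep_timeout() while another
 * signals it with waitq_wakeup(). The functions waiter() and signaller()
 * are hypothetical; SYNCH_NO_TIMEOUT and SYNCH_FLAGS_NONE are assumed to
 * be the usual constants from <synch/synch.h>, and event_wq is assumed
 * to have been set up with waitq_initialize(&event_wq).
 *
 *     static waitq_t event_wq;
 *
 *     static void waiter(void)
 *     {
 *         // Block until a wakeup arrives; no timeout, not interruptible.
 *         int rc = waitq_sleep_timeout(&event_wq, SYNCH_NO_TIMEOUT,
 *             SYNCH_FLAGS_NONE);
 *         if (rc == ESYNCH_OK_BLOCKED || rc == ESYNCH_OK_ATOMIC) {
 *             // The event was signalled.
 *         }
 *     }
 *
 *     static void signaller(void)
 *     {
 *         // Ready the longest waiting thread, or record a missed
 *         // wakeup if nobody sleeps in event_wq yet.
 *         waitq_wakeup(&event_wq, WAKEUP_FIRST);
 *     }
 */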

#include <synch/waitq.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <proc/thread.h>
#include <proc/scheduler.h>
#include <arch/asm.h>
#include <typedefs.h>
#include <time/timeout.h>
#include <arch.h>
#include <context.h>
#include <adt/list.h>
#include <arch/cycle.h>

static void waitq_sleep_timed_out(void *data);

/** Initialize wait queue
 *
 * Initialize the wait queue: its spinlock, the list of sleeping threads
 * and the missed wakeups counter.
 *
 * @param wq Pointer to wait queue to be initialized.
 */
void waitq_initialize(waitq_t *wq)
{
    spinlock_initialize(&wq->lock, "waitq_lock");
    list_initialize(&wq->head);
    wq->missed_wakeups = 0;
}

/** Handle timeout during waitq_sleep_timeout() call
 *
 * This routine is called when waitq_sleep_timeout() times out.
 * Interrupts are disabled.
 *
 * It is supposed to try to remove 'its' thread from the wait queue;
 * it can fail to achieve this goal when the timeout and a regular
 * wakeup overlap. In that case it behaves just as though there was
 * no timeout at all.
 *
 * @param data Pointer to the thread that called waitq_sleep_timeout().
 */
void waitq_sleep_timed_out(void *data)
{
    thread_t *t = (thread_t *) data;
    waitq_t *wq;
    bool do_wakeup = false;
    DEADLOCK_PROBE_INIT(p_wqlock);

    spinlock_lock(&threads_lock);
    if (!thread_exists(t))
        goto out;

grab_locks:
    spinlock_lock(&t->lock);
    if ((wq = t->sleep_queue)) {    /* assignment */
        if (!spinlock_trylock(&wq->lock)) {
            spinlock_unlock(&t->lock);
            DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
            goto grab_locks;    /* avoid deadlock */
        }

        list_remove(&t->wq_link);
        t->saved_context = t->sleep_timeout_context;
        do_wakeup = true;
        t->sleep_queue = NULL;
        spinlock_unlock(&wq->lock);
    }

    t->timeout_pending = false;
    spinlock_unlock(&t->lock);

    if (do_wakeup)
        thread_ready(t);

out:
    spinlock_unlock(&threads_lock);
}

/** Interrupt sleeping thread.
 *
 * This routine attempts to interrupt a thread from its sleep in a wait
 * queue. If the thread is not found sleeping, no action is taken.
 *
 * @param t Thread to be interrupted.
 */
void waitq_interrupt_sleep(thread_t *t)
{
    waitq_t *wq;
    bool do_wakeup = false;
    ipl_t ipl;
    DEADLOCK_PROBE_INIT(p_wqlock);

    ipl = interrupts_disable();
    spinlock_lock(&threads_lock);
    if (!thread_exists(t))
        goto out;

grab_locks:
    spinlock_lock(&t->lock);
    if ((wq = t->sleep_queue)) {    /* assignment */
        if (!(t->sleep_interruptible)) {
            /*
             * The sleep cannot be interrupted.
             */
            spinlock_unlock(&t->lock);
            goto out;
        }

        if (!spinlock_trylock(&wq->lock)) {
            spinlock_unlock(&t->lock);
            DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
            goto grab_locks;    /* avoid deadlock */
        }

        if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
            t->timeout_pending = false;

        list_remove(&t->wq_link);
        t->saved_context = t->sleep_interruption_context;
        do_wakeup = true;
        t->sleep_queue = NULL;
        spinlock_unlock(&wq->lock);
    }
    spinlock_unlock(&t->lock);

    if (do_wakeup)
        thread_ready(t);

out:
    spinlock_unlock(&threads_lock);
    interrupts_restore(ipl);
}
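
/*
 * Sketch of the two sides of an interruption (illustrative; the caller
 * shown here is hypothetical). The sleeper must have passed
 * SYNCH_FLAGS_INTERRUPTIBLE, otherwise waitq_interrupt_sleep() is a
 * no-op for it; SYNCH_NO_TIMEOUT is assumed to come from <synch/synch.h>.
 *
 *     // In the sleeping thread:
 *     int rc = waitq_sleep_timeout(&wq, SYNCH_NO_TIMEOUT,
 *         SYNCH_FLAGS_INTERRUPTIBLE);
 *     if (rc == ESYNCH_INTERRUPTED) {
 *         // Someone called waitq_interrupt_sleep() on this thread and
 *         // the sleep_interruption_context was restored.
 *     }
 *
 *     // In another thread, holding a valid thread_t *t:
 *     waitq_interrupt_sleep(t);
 */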

/** Interrupt the first thread sleeping in the wait queue.
 *
 * Note that the caller somehow needs to know that the thread to be
 * interrupted is sleeping interruptibly.
 *
 * @param wq Pointer to wait queue.
 */
void waitq_unsleep(waitq_t *wq)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&wq->lock);

    if (!list_empty(&wq->head)) {
        thread_t *t;

        t = list_get_instance(wq->head.next, thread_t, wq_link);
        spinlock_lock(&t->lock);
        ASSERT(t->sleep_interruptible);
        if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
            t->timeout_pending = false;
        list_remove(&t->wq_link);
        t->saved_context = t->sleep_interruption_context;
        t->sleep_queue = NULL;
        spinlock_unlock(&t->lock);
        thread_ready(t);
    }

    spinlock_unlock(&wq->lock);
    interrupts_restore(ipl);
}

/** Sleep until either wakeup, timeout or interruption occurs
 *
 * This is a sleep implementation which allows itself to time out or to be
 * interrupted from the sleep, restoring a failover context.
 *
 * Sleepers are organised in a FIFO fashion in a structure called wait queue.
 *
 * This function is really basic in that other functions such as waitq_sleep()
 * and all the *_timeout() functions use it.
 *
 * @param wq Pointer to wait queue.
 * @param usec Timeout in microseconds.
 * @param flags Specify mode of the sleep.
 *
 * The sleep can be interrupted only if the
 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
 *
 * If usec is greater than zero, regardless of the value of the
 * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either
 * timeout, interruption or wakeup comes.
 *
 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags,
 * the call will not return until wakeup or interruption comes.
 *
 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the
 * call will immediately return, reporting either success or failure.
 *
 * @return One of ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT,
 * ESYNCH_INTERRUPTED, ESYNCH_OK_ATOMIC and ESYNCH_OK_BLOCKED.
 *
 * @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time of
 * the call there was no pending wakeup.
 *
 * @li ESYNCH_TIMEOUT means that the sleep timed out.
 *
 * @li ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread.
 *
 * @li ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was
 * a pending wakeup at the time of the call. The caller was not put
 * asleep at all.
 *
 * @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was
 * attempted.
 */
int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, int flags)
{
    ipl_t ipl;
    int rc;

    ipl = waitq_sleep_prepare(wq);
    rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
    waitq_sleep_finish(wq, rc, ipl);
    return rc;
}
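
/*
 * Sketch (illustrative, not from this file): higher level primitives
 * typically reduce to little more than this call. A counting-semaphore
 * "down" with a timeout can simply sleep in an embedded wait queue;
 * my_sem_t and its field names are hypothetical.
 *
 *     typedef struct {
 *         waitq_t wq;
 *     } my_sem_t;
 *
 *     static int my_sem_down_timeout(my_sem_t *s, uint32_t usec, int flags)
 *     {
 *         // Each waitq_wakeup(&s->wq, WAKEUP_FIRST) done by the "up"
 *         // operation either readies one sleeper or bumps missed_wakeups,
 *         // which a later sleeper consumes as ESYNCH_OK_ATOMIC.
 *         return waitq_sleep_timeout(&s->wq, usec, flags);
 *     }
 */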

/** Prepare to sleep in a waitq.
 *
 * This function will return holding the lock of the wait queue
 * and interrupts disabled.
 *
 * @param wq Wait queue.
 *
 * @return Interrupt level as it existed on entry to this function.
 */
ipl_t waitq_sleep_prepare(waitq_t *wq)
{
    ipl_t ipl;

restart:
    ipl = interrupts_disable();

    if (THREAD) {    /* needed during system initialization */
        /*
         * Busy waiting for a delayed timeout.
         * This is an important fix for the race condition between
         * a delayed timeout and a next call to waitq_sleep_timeout().
         * Simply put, the thread is not allowed to go to sleep if
         * there are timeouts in progress.
         */
        spinlock_lock(&THREAD->lock);
        if (THREAD->timeout_pending) {
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(ipl);
            goto restart;
        }
        spinlock_unlock(&THREAD->lock);
    }

    spinlock_lock(&wq->lock);
    return ipl;
}

/** Finish waiting in a wait queue.
 *
 * This function restores interrupts to the state that existed prior
 * to the call to waitq_sleep_prepare(). If necessary, the wait queue
 * lock is released.
 *
 * @param wq Wait queue.
 * @param rc Return code of waitq_sleep_timeout_unsafe().
 * @param ipl Interrupt level returned by waitq_sleep_prepare().
 */
void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
{
    switch (rc) {
    case ESYNCH_WOULD_BLOCK:
    case ESYNCH_OK_ATOMIC:
        spinlock_unlock(&wq->lock);
        break;
    default:
        break;
    }
    interrupts_restore(ipl);
}
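
/*
 * Sketch of why the prepare/unsafe/finish split exists (illustrative,
 * assuming an outer lock the caller must drop atomically with going to
 * sleep, as a condition-variable style primitive would; my_cv_wait() and
 * the mutex parameter are hypothetical names). Because wq->lock is held
 * after waitq_sleep_prepare(), a concurrent waitq_wakeup() cannot slip
 * in between releasing the outer lock and going to sleep.
 *
 *     static int my_cv_wait(waitq_t *wq, mutex_t *mtx, uint32_t usec,
 *         int flags)
 *     {
 *         ipl_t ipl = waitq_sleep_prepare(wq);
 *
 *         // Safe: any wakeup spins on wq->lock until we sleep.
 *         mutex_unlock(mtx);
 *
 *         int rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
 *
 *         waitq_sleep_finish(wq, rc, ipl);
 *         mutex_lock(mtx);
 *         return rc;
 *     }
 */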

/** Internal implementation of waitq_sleep_timeout().
 *
 * This function implements the logic of sleeping in a wait queue.
 * This call must be preceded by a call to waitq_sleep_prepare()
 * and followed by a call to waitq_sleep_finish().
 *
 * @param wq See waitq_sleep_timeout().
 * @param usec See waitq_sleep_timeout().
 * @param flags See waitq_sleep_timeout().
 *
 * @return See waitq_sleep_timeout().
 */
int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, int flags)
{
    /* Check whether to go to sleep at all. */
    if (wq->missed_wakeups) {
        wq->missed_wakeups--;
        return ESYNCH_OK_ATOMIC;
    } else {
        if ((flags & SYNCH_FLAGS_NON_BLOCKING) && (usec == 0)) {
            /* Return immediately instead of going to sleep. */
            return ESYNCH_WOULD_BLOCK;
        }
    }

    /*
     * Now we are firmly decided to go to sleep.
     */
    spinlock_lock(&THREAD->lock);

    if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {
        /*
         * If the thread was already interrupted,
         * don't go to sleep at all.
         */
        if (THREAD->interrupted) {
            spinlock_unlock(&THREAD->lock);
            spinlock_unlock(&wq->lock);
            return ESYNCH_INTERRUPTED;
        }

        /*
         * Set context that will be restored if the sleep
         * of this thread is ever interrupted.
         */
        THREAD->sleep_interruptible = true;
        if (!context_save(&THREAD->sleep_interruption_context)) {
            /* Short emulation of scheduler() return code. */
            THREAD->last_cycle = get_cycle();
            spinlock_unlock(&THREAD->lock);
            return ESYNCH_INTERRUPTED;
        }
    } else {
        THREAD->sleep_interruptible = false;
    }

    if (usec) {
        /* We use the timeout variant. */
        if (!context_save(&THREAD->sleep_timeout_context)) {
            /* Short emulation of scheduler() return code. */
            THREAD->last_cycle = get_cycle();
            spinlock_unlock(&THREAD->lock);
            return ESYNCH_TIMEOUT;
        }
        THREAD->timeout_pending = true;
        timeout_register(&THREAD->sleep_timeout, (uint64_t) usec,
            waitq_sleep_timed_out, THREAD);
    }

    list_append(&THREAD->wq_link, &wq->head);

    /*
     * Suspend execution.
     */
    THREAD->state = Sleeping;
    THREAD->sleep_queue = wq;

    spinlock_unlock(&THREAD->lock);

    /* wq->lock is released in scheduler_separated_stack() */
    scheduler();

    return ESYNCH_OK_BLOCKED;
}
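
/*
 * Note on the context_save() calls in waitq_sleep_timeout_unsafe()
 * (explanatory, based on the setjmp-like behaviour these primitives have
 * elsewhere in the kernel): context_save() returns nonzero when it saves
 * the context and zero when control comes back through context_restore().
 * The branches guarded by !context_save(...) therefore run only after
 * waitq_sleep_timed_out() or waitq_interrupt_sleep() has restored the
 * saved context, which is why they refresh THREAD->last_cycle exactly as
 * the scheduler would after a regular wakeup.
 */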

/** Wake up first thread sleeping in a wait queue
 *
 * Wake up first thread sleeping in a wait queue. This is the SMP- and IRQ-safe
 * wrapper meant for general use.
 *
 * Besides its 'normal' wakeup operation, it attempts to unregister a possible
 * timeout.
 *
 * @param wq Pointer to wait queue.
 * @param mode Wakeup mode.
 */
void waitq_wakeup(waitq_t *wq, wakeup_mode_t mode)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&wq->lock);

    _waitq_wakeup_unsafe(wq, mode);

    spinlock_unlock(&wq->lock);
    interrupts_restore(ipl);
}
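
/*
 * Illustration: waitq_wakeup() never loses a wakeup. If it runs before
 * anybody sleeps, the wakeup is recorded and the next sleeper returns
 * immediately. SYNCH_NO_TIMEOUT and SYNCH_FLAGS_NONE are assumed to be
 * the usual constants from <synch/synch.h>.
 *
 *     waitq_t wq;
 *     waitq_initialize(&wq);
 *
 *     waitq_wakeup(&wq, WAKEUP_FIRST);
 *     // No sleeper yet: the wakeup is recorded in wq.missed_wakeups.
 *
 *     int rc = waitq_sleep_timeout(&wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
 *     // rc == ESYNCH_OK_ATOMIC: the pending wakeup was consumed without
 *     // the caller ever going to sleep.
 */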

/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
 *
 * This is the internal SMP- and IRQ-unsafe version of waitq_wakeup(). It
 * assumes wq->lock is already locked and interrupts are already disabled.
 *
 * @param wq Pointer to wait queue.
 * @param mode If mode is WAKEUP_FIRST, then the longest waiting
 * thread, if any, is woken up. If mode is WAKEUP_ALL, then
 * all waiting threads, if any, are woken up. If there are
 * no waiting threads to be woken up, the missed wakeup is
 * recorded in the wait queue.
 */
void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode)
{
    thread_t *t;
    size_t count = 0;

loop:
    if (list_empty(&wq->head)) {
        wq->missed_wakeups++;
        if (count && mode == WAKEUP_ALL)
            wq->missed_wakeups--;
        return;
    }

    count++;
    t = list_get_instance(wq->head.next, thread_t, wq_link);

    /*
     * Lock the thread prior to removing it from the wq.
     * This is not necessary because of mutual exclusion
     * (the link belongs to the wait queue), but because
     * of synchronization with waitq_sleep_timed_out()
     * and thread_interrupt_sleep().
     *
     * In order for these two functions to work, the following
     * invariant must hold:
     *
     * t->sleep_queue != NULL <=> t sleeps in a wait queue
     *
     * For an observer who locks the thread, the invariant
     * holds only when the lock is held prior to removing
     * it from the wait queue.
     */
    spinlock_lock(&t->lock);
    list_remove(&t->wq_link);

    if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
        t->timeout_pending = false;
    t->sleep_queue = NULL;
    spinlock_unlock(&t->lock);

    thread_ready(t);

    if (mode == WAKEUP_ALL)
        goto loop;
}

/** @}
 */