source: mainline/kernel/generic/src/synch/waitq.c@e2fcdb1

Last change on this file since e2fcdb1 was 4039c77, checked in by Jakub Jermar <jakub@…>, 15 years ago

Add assertion to detect attempts to block when holding a spinlock.

/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sync
 * @{
 */

/**
 * @file
 * @brief Wait queue.
 *
 * Wait queue is the basic synchronization primitive upon which all
 * other synchronization primitives build.
 *
 * It allows threads to wait for an event in first-come, first-served
 * fashion. Conditional operation as well as timeouts and interruptions
 * are supported.
 */

#include <synch/waitq.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <proc/thread.h>
#include <proc/scheduler.h>
#include <arch/asm.h>
#include <typedefs.h>
#include <time/timeout.h>
#include <arch.h>
#include <context.h>
#include <adt/list.h>
#include <arch/cycle.h>

static void waitq_sleep_timed_out(void *data);

/** Initialize wait queue
 *
 * Initialize the wait queue: empty its list of sleeping threads and
 * reset its count of missed wakeups.
 *
 * @param wq Pointer to the wait queue to be initialized.
 */
void waitq_initialize(waitq_t *wq)
{
    spinlock_initialize(&wq->lock, "waitq_lock");
    list_initialize(&wq->head);
    wq->missed_wakeups = 0;
}
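
/*
 * Example (illustration only, not part of the original file): a wait queue
 * is typically embedded in a higher-level primitive and initialized along
 * with it. A minimal sketch; my_event_t and my_event_initialize() are
 * hypothetical names used purely for illustration.
 */
typedef struct {
    waitq_t wq;   /* threads blocked waiting for the event */
    bool posted;  /* event state, guarded by the owner's own locking */
} my_event_t;

static void my_event_initialize(my_event_t *ev)
{
    /* Prepare the embedded wait queue before first use. */
    waitq_initialize(&ev->wq);
    ev->posted = false;
}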

/** Handle timeout during waitq_sleep_timeout() call
 *
 * This routine is called when waitq_sleep_timeout() times out.
 * Interrupts are disabled.
 *
 * It tries to remove 'its' thread from the wait queue; it may fail
 * to do so when the timeout races with a concurrent wakeup. In that
 * case it behaves just as though there was no timeout at all.
 *
 * @param data Pointer to the thread that called waitq_sleep_timeout().
 */
void waitq_sleep_timed_out(void *data)
{
    thread_t *t = (thread_t *) data;
    waitq_t *wq;
    bool do_wakeup = false;
    DEADLOCK_PROBE_INIT(p_wqlock);

    spinlock_lock(&threads_lock);
    if (!thread_exists(t))
        goto out;

grab_locks:
    spinlock_lock(&t->lock);
    if ((wq = t->sleep_queue)) {  /* assignment */
        if (!spinlock_trylock(&wq->lock)) {
            spinlock_unlock(&t->lock);
            DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
            goto grab_locks;  /* avoid deadlock */
        }

        list_remove(&t->wq_link);
        t->saved_context = t->sleep_timeout_context;
        do_wakeup = true;
        t->sleep_queue = NULL;
        spinlock_unlock(&wq->lock);
    }

    t->timeout_pending = false;
    spinlock_unlock(&t->lock);

    if (do_wakeup)
        thread_ready(t);

out:
    spinlock_unlock(&threads_lock);
}
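
/*
 * Aside (illustration only, not from the original file): the trylock-and-
 * retry loop above is an instance of a generic technique for acquiring two
 * spinlocks when they must be taken in an order that could deadlock with
 * another code path taking them in the opposite order. A minimal sketch of
 * the pattern, with hypothetical locks a and b:
 */
static void lock_both_avoiding_deadlock(spinlock_t *a, spinlock_t *b)
{
    for (;;) {
        spinlock_lock(a);
        if (spinlock_trylock(b))
            return;  /* both locks are now held */
        /* Someone holds b and may be spinning on a; back off and retry. */
        spinlock_unlock(a);
    }
}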

/** Interrupt sleeping thread.
 *
 * This routine attempts to interrupt a thread from its sleep in a wait
 * queue. If the thread is not found sleeping, no action is taken.
 *
 * @param t Thread to be interrupted.
 */
void waitq_interrupt_sleep(thread_t *t)
{
    waitq_t *wq;
    bool do_wakeup = false;
    ipl_t ipl;
    DEADLOCK_PROBE_INIT(p_wqlock);

    ipl = interrupts_disable();
    spinlock_lock(&threads_lock);
    if (!thread_exists(t))
        goto out;

grab_locks:
    spinlock_lock(&t->lock);
    if ((wq = t->sleep_queue)) {  /* assignment */
        if (!(t->sleep_interruptible)) {
            /*
             * The sleep cannot be interrupted.
             */
            spinlock_unlock(&t->lock);
            goto out;
        }

        if (!spinlock_trylock(&wq->lock)) {
            spinlock_unlock(&t->lock);
            DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
            goto grab_locks;  /* avoid deadlock */
        }

        if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
            t->timeout_pending = false;

        list_remove(&t->wq_link);
        t->saved_context = t->sleep_interruption_context;
        do_wakeup = true;
        t->sleep_queue = NULL;
        spinlock_unlock(&wq->lock);
    }
    spinlock_unlock(&t->lock);

    if (do_wakeup)
        thread_ready(t);

out:
    spinlock_unlock(&threads_lock);
    interrupts_restore(ipl);
}

/** Interrupt the first thread sleeping in the wait queue.
 *
 * Note that the caller must somehow already know that the thread to be
 * interrupted is sleeping interruptibly.
 *
 * @param wq Pointer to wait queue.
 */
void waitq_unsleep(waitq_t *wq)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&wq->lock);

    if (!list_empty(&wq->head)) {
        thread_t *t;

        t = list_get_instance(wq->head.next, thread_t, wq_link);
        spinlock_lock(&t->lock);
        ASSERT(t->sleep_interruptible);
        if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
            t->timeout_pending = false;
        list_remove(&t->wq_link);
        t->saved_context = t->sleep_interruption_context;
        t->sleep_queue = NULL;
        spinlock_unlock(&t->lock);
        thread_ready(t);
    }

    spinlock_unlock(&wq->lock);
    interrupts_restore(ipl);
}

#define PARAM_NON_BLOCKING(flags, usec) \
    (((flags) & SYNCH_FLAGS_NON_BLOCKING) && ((usec) == 0))

/** Sleep until either wakeup, timeout or interruption occurs
 *
 * This is a sleep implementation which allows itself to time out or to be
 * interrupted from the sleep, restoring a failover context.
 *
 * Sleepers are organised in a FIFO fashion in a structure called wait queue.
 *
 * This function is the basic building block: waitq_sleep() and all of the
 * *_timeout() functions are implemented in terms of it.
 *
 * @param wq Pointer to wait queue.
 * @param usec Timeout in microseconds.
 * @param flags Specify mode of the sleep.
 *
 * The sleep can be interrupted only if the
 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
 *
 * If usec is greater than zero, regardless of the value of the
 * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either
 * timeout, interruption or wakeup comes.
 *
 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags,
 * the call will not return until wakeup or interruption comes.
 *
 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the
 * call will immediately return, reporting either success or failure.
 *
 * @return One of ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, ESYNCH_INTERRUPTED,
 * ESYNCH_OK_ATOMIC and ESYNCH_OK_BLOCKED.
 *
 * @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time of
 * the call there was no pending wakeup.
 *
 * @li ESYNCH_TIMEOUT means that the sleep timed out.
 *
 * @li ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread.
 *
 * @li ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was
 * a pending wakeup at the time of the call. The caller was not put
 * asleep at all.
 *
 * @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was
 * attempted.
 */
int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, int flags)
{
    ipl_t ipl;
    int rc;

    ASSERT(!PREEMPTION_DISABLED || PARAM_NON_BLOCKING(flags, usec));

    ipl = waitq_sleep_prepare(wq);
    rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
    waitq_sleep_finish(wq, rc, ipl);
    return rc;
}
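
/*
 * Example (illustration only, not from the original file): the three
 * sleeping modes described above, for a wait queue wq that has already
 * been initialized. SYNCH_FLAGS_NONE is assumed to denote "no flags set";
 * it does not appear in this file.
 */
static void sleep_mode_examples(waitq_t *wq)
{
    int rc;

    /* Non-blocking attempt: consume a pending wakeup or fail at once. */
    rc = waitq_sleep_timeout(wq, 0, SYNCH_FLAGS_NON_BLOCKING);
    if (rc == ESYNCH_WOULD_BLOCK) {
        /* No wakeup was pending at the time of the call. */
    }

    /* Timed sleep: give up after roughly one second. */
    rc = waitq_sleep_timeout(wq, 1000000, SYNCH_FLAGS_NONE);
    if (rc == ESYNCH_TIMEOUT) {
        /* The timeout fired before any wakeup arrived. */
    }

    /* Indefinite, interruptible sleep. */
    rc = waitq_sleep_timeout(wq, 0, SYNCH_FLAGS_INTERRUPTIBLE);
    if (rc == ESYNCH_INTERRUPTED) {
        /* Somebody called waitq_interrupt_sleep() on this thread. */
    }
}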

/** Prepare to sleep in a waitq.
 *
 * This function will return holding the lock of the wait queue
 * and interrupts disabled.
 *
 * @param wq Wait queue.
 *
 * @return Interrupt level as it existed on entry to this function.
 */
ipl_t waitq_sleep_prepare(waitq_t *wq)
{
    ipl_t ipl;

restart:
    ipl = interrupts_disable();

    if (THREAD) {  /* needed during system initialization */
        /*
         * Busy waiting for a delayed timeout.
         * This is an important fix for the race condition between
         * a delayed timeout and a next call to waitq_sleep_timeout().
         * Simply put, the thread is not allowed to go to sleep if
         * there are timeouts in progress.
         */
        spinlock_lock(&THREAD->lock);
        if (THREAD->timeout_pending) {
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(ipl);
            goto restart;
        }
        spinlock_unlock(&THREAD->lock);
    }

    spinlock_lock(&wq->lock);
    return ipl;
}

/** Finish waiting in a wait queue.
 *
 * This function restores interrupts to the state that existed prior
 * to the call to waitq_sleep_prepare(). If necessary, the wait queue
 * lock is released.
 *
 * @param wq Wait queue.
 * @param rc Return code of waitq_sleep_timeout_unsafe().
 * @param ipl Interrupt level returned by waitq_sleep_prepare().
 */
void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
{
    switch (rc) {
    case ESYNCH_WOULD_BLOCK:
    case ESYNCH_OK_ATOMIC:
        spinlock_unlock(&wq->lock);
        break;
    default:
        break;
    }
    interrupts_restore(ipl);
}
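
/*
 * Example (illustration only, not from the original file): the
 * prepare/unsafe/finish decomposition lets a caller drop another lock
 * after committing to sleep but before actually sleeping, which is the
 * classic condition-variable pattern. A minimal sketch, assuming an outer
 * lock that must not be held while sleeping; my_unlock() and my_lock()
 * are hypothetical helpers.
 */
static int wait_for_condition(waitq_t *wq)
{
    ipl_t ipl;
    int rc;

    ipl = waitq_sleep_prepare(wq);  /* wq->lock held, interrupts off */

    /*
     * Safe to release the outer lock now: a wakeup can no longer be
     * missed, because wq->lock is already held.
     */
    my_unlock();

    rc = waitq_sleep_timeout_unsafe(wq, 0, SYNCH_FLAGS_NONE);
    waitq_sleep_finish(wq, rc, ipl);

    my_lock();  /* reacquire the outer lock before returning */
    return rc;
}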

/** Internal implementation of waitq_sleep_timeout().
 *
 * This function implements the logic of sleeping in a wait queue.
 * This call must be preceded by a call to waitq_sleep_prepare()
 * and followed by a call to waitq_sleep_finish().
 *
 * @param wq See waitq_sleep_timeout().
 * @param usec See waitq_sleep_timeout().
 * @param flags See waitq_sleep_timeout().
 *
 * @return See waitq_sleep_timeout().
 */
int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, int flags)
{
    /* Check whether to go to sleep at all. */
    if (wq->missed_wakeups) {
        wq->missed_wakeups--;
        return ESYNCH_OK_ATOMIC;
    } else {
        if (PARAM_NON_BLOCKING(flags, usec)) {
            /* Return immediately instead of going to sleep. */
            return ESYNCH_WOULD_BLOCK;
        }
    }

    /*
     * Now we are firmly decided to go to sleep.
     */
    spinlock_lock(&THREAD->lock);

    if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {
        /*
         * If the thread was already interrupted,
         * don't go to sleep at all.
         */
        if (THREAD->interrupted) {
            spinlock_unlock(&THREAD->lock);
            spinlock_unlock(&wq->lock);
            return ESYNCH_INTERRUPTED;
        }

        /*
         * Set context that will be restored if the sleep
         * of this thread is ever interrupted.
         */
        THREAD->sleep_interruptible = true;
        if (!context_save(&THREAD->sleep_interruption_context)) {
            /* Short emulation of scheduler() return code. */
            THREAD->last_cycle = get_cycle();
            spinlock_unlock(&THREAD->lock);
            return ESYNCH_INTERRUPTED;
        }
    } else {
        THREAD->sleep_interruptible = false;
    }

    if (usec) {
        /* We use the timeout variant. */
        if (!context_save(&THREAD->sleep_timeout_context)) {
            /* Short emulation of scheduler() return code. */
            THREAD->last_cycle = get_cycle();
            spinlock_unlock(&THREAD->lock);
            return ESYNCH_TIMEOUT;
        }
        THREAD->timeout_pending = true;
        timeout_register(&THREAD->sleep_timeout, (uint64_t) usec,
            waitq_sleep_timed_out, THREAD);
    }

    list_append(&THREAD->wq_link, &wq->head);

    /*
     * Suspend execution.
     */
    THREAD->state = Sleeping;
    THREAD->sleep_queue = wq;

    spinlock_unlock(&THREAD->lock);

    /* wq->lock is released in scheduler_separated_stack() */
    scheduler();

    return ESYNCH_OK_BLOCKED;
}

/** Wake up first thread sleeping in a wait queue
 *
 * Wake up first thread sleeping in a wait queue. This is the SMP- and IRQ-safe
 * wrapper meant for general use.
 *
 * Besides its 'normal' wakeup operation, it attempts to unregister a possible
 * timeout.
 *
 * @param wq Pointer to wait queue.
 * @param mode Wakeup mode.
 */
void waitq_wakeup(waitq_t *wq, wakeup_mode_t mode)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&wq->lock);

    _waitq_wakeup_unsafe(wq, mode);

    spinlock_unlock(&wq->lock);
    interrupts_restore(ipl);
}
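
/*
 * Example (illustration only, not from the original file): signalling an
 * event from one thread while another sleeps on it, using the hypothetical
 * my_event_t from the earlier sketch.
 */
static void my_event_wait(my_event_t *ev)
{
    /* Block until my_event_post() wakes us, or consume a missed wakeup. */
    (void) waitq_sleep_timeout(&ev->wq, 0, SYNCH_FLAGS_NONE);
}

static void my_event_post(my_event_t *ev)
{
    /*
     * Wake the longest-sleeping waiter; if nobody is sleeping, the
     * wakeup is recorded in ev->wq.missed_wakeups and consumed later.
     */
    waitq_wakeup(&ev->wq, WAKEUP_FIRST);
}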

/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
 *
 * This is the internal SMP- and IRQ-unsafe version of waitq_wakeup(). It
 * assumes wq->lock is already locked and interrupts are already disabled.
 *
 * @param wq Pointer to wait queue.
 * @param mode If mode is WAKEUP_FIRST, then the longest waiting
 * thread, if any, is woken up. If mode is WAKEUP_ALL, then
 * all waiting threads, if any, are woken up. If there are
 * no waiting threads to be woken up, the missed wakeup is
 * recorded in the wait queue.
 */
void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode)
{
    thread_t *t;
    size_t count = 0;

loop:
    if (list_empty(&wq->head)) {
        wq->missed_wakeups++;
        if (count && mode == WAKEUP_ALL)
            wq->missed_wakeups--;
        return;
    }

    count++;
    t = list_get_instance(wq->head.next, thread_t, wq_link);

    /*
     * Lock the thread prior to removing it from the wq.
     * This is not necessary because of mutual exclusion
     * (the link belongs to the wait queue), but because
     * of synchronization with waitq_sleep_timed_out()
     * and waitq_interrupt_sleep().
     *
     * In order for these two functions to work, the following
     * invariant must hold:
     *
     * t->sleep_queue != NULL <=> t sleeps in a wait queue
     *
     * For an observer who locks the thread, the invariant
     * holds only when the lock is held prior to removing
     * it from the wait queue.
     */
    spinlock_lock(&t->lock);
    list_remove(&t->wq_link);

    if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
        t->timeout_pending = false;
    t->sleep_queue = NULL;
    spinlock_unlock(&t->lock);

    thread_ready(t);

    if (mode == WAKEUP_ALL)
        goto loop;
}
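
/*
 * Example (illustration only, not from the original file): the
 * missed_wakeups counter is what makes wakeup-before-sleep safe. If the
 * wakeup side runs first, the subsequent sleep returns immediately with
 * ESYNCH_OK_ATOMIC instead of blocking forever.
 */
static void missed_wakeup_demo(waitq_t *wq)
{
    /* Nobody is sleeping yet, so this increments wq->missed_wakeups. */
    waitq_wakeup(wq, WAKEUP_FIRST);

    /* Consumes the recorded wakeup; no actual blocking happens. */
    int rc = waitq_sleep_timeout(wq, 0, SYNCH_FLAGS_NONE);
    ASSERT(rc == ESYNCH_OK_ATOMIC);
}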

/** @}
 */