source: mainline/generic/src/synch/waitq.c@ 7f1c620

Last change on this file since 7f1c620 was 7f1c620, checked in by Jakub Jermar <jakub@…>, 19 years ago

Replace old u?? types with respective C99 variants (e.g. uint32_t, int64_t, uintptr_t etc.).

/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sync
 * @{
 */

/**
 * @file
 * @brief Wait queue.
 *
 * The wait queue is the basic synchronization primitive upon which all
 * other synchronization primitives are built.
 *
 * It allows threads to wait for an event in first-come, first-served
 * fashion. Conditional operation as well as timeouts and interruptions
 * are supported.
 */

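/*
 * A minimal usage sketch (illustrative only, not part of this file;
 * waitq_sleep() is the plain blocking wrapper around waitq_sleep_timeout()
 * and wq is assumed to be shared by both sides):
 *
 *	waitq_t wq;
 *	waitq_initialize(&wq);
 *
 *	// consumer: block until the event is signalled
 *	(void) waitq_sleep(&wq);
 *
 *	// producer: wake up the first sleeper, if any
 *	waitq_wakeup(&wq, false);
 */
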
#include <synch/waitq.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <proc/thread.h>
#include <proc/scheduler.h>
#include <arch/asm.h>
#include <arch/types.h>
#include <typedefs.h>
#include <time/timeout.h>
#include <arch.h>
#include <context.h>
#include <adt/list.h>

static void waitq_timeouted_sleep(void *data);

/** Initialize wait queue
 *
 * Initialize the wait queue's spinlock, its list of sleeping threads
 * and its missed wakeups counter.
 *
 * @param wq Pointer to wait queue to be initialized.
 */
void waitq_initialize(waitq_t *wq)
{
	spinlock_initialize(&wq->lock, "waitq_lock");
	list_initialize(&wq->head);
	wq->missed_wakeups = 0;
}

/** Handle timeout during waitq_sleep_timeout() call
 *
 * This routine is called when waitq_sleep_timeout() times out.
 * Interrupts are disabled.
 *
 * It tries to remove 'its' thread from the wait queue; it may fail
 * to do so when the timeout and a regular wakeup overlap. In that
 * case it behaves just as though there was no timeout at all.
 *
 * @param data Pointer to the thread that called waitq_sleep_timeout().
 */
void waitq_timeouted_sleep(void *data)
{
	thread_t *t = (thread_t *) data;
	waitq_t *wq;
	bool do_wakeup = false;

	spinlock_lock(&threads_lock);
	if (!thread_exists(t))
		goto out;

grab_locks:
	spinlock_lock(&t->lock);
	if ((wq = t->sleep_queue)) {		/* assignment */
		if (!spinlock_trylock(&wq->lock)) {
			spinlock_unlock(&t->lock);
			goto grab_locks;	/* avoid deadlock */
		}

		list_remove(&t->wq_link);
		t->saved_context = t->sleep_timeout_context;
		do_wakeup = true;
		t->sleep_queue = NULL;
		spinlock_unlock(&wq->lock);
	}

	t->timeout_pending = false;
	spinlock_unlock(&t->lock);

	if (do_wakeup)
		thread_ready(t);

out:
	spinlock_unlock(&threads_lock);
}

/** Interrupt sleeping thread.
 *
 * This routine attempts to interrupt a thread from its sleep in a wait
 * queue. If the thread is not found sleeping, no action is taken.
 *
 * @param t Thread to be interrupted.
 */
void waitq_interrupt_sleep(thread_t *t)
{
	waitq_t *wq;
	bool do_wakeup = false;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&threads_lock);
	if (!thread_exists(t))
		goto out;

grab_locks:
	spinlock_lock(&t->lock);
	if ((wq = t->sleep_queue)) {		/* assignment */
		if (!(t->sleep_interruptible)) {
			/*
			 * The sleep cannot be interrupted.
			 */
			spinlock_unlock(&t->lock);
			goto out;
		}

		if (!spinlock_trylock(&wq->lock)) {
			spinlock_unlock(&t->lock);
			goto grab_locks;	/* avoid deadlock */
		}

		if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
			t->timeout_pending = false;

		list_remove(&t->wq_link);
		t->saved_context = t->sleep_interruption_context;
		do_wakeup = true;
		t->sleep_queue = NULL;
		spinlock_unlock(&wq->lock);
	}
	spinlock_unlock(&t->lock);

	if (do_wakeup)
		thread_ready(t);

out:
	spinlock_unlock(&threads_lock);
	interrupts_restore(ipl);
}
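
/*
 * Usage sketch (hypothetical caller, not part of this file). The
 * interruption takes effect only if t went to sleep with
 * SYNCH_FLAGS_INTERRUPTIBLE, in which case the sleeper's
 * waitq_sleep_timeout() call returns ESYNCH_INTERRUPTED:
 *
 *	thread_t *t;	// looked up and validated elsewhere
 *	waitq_interrupt_sleep(t);
 */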

/** Sleep until either wakeup, timeout or interruption occurs
 *
 * This is a sleep implementation which allows itself to time out or to be
 * interrupted from the sleep, restoring a failover context.
 *
 * Sleepers are organised in a FIFO fashion in a structure called wait queue.
 *
 * This function is the basic primitive on which other functions such as
 * waitq_sleep() and all the *_timeout() functions are built.
 *
 * @param wq Pointer to wait queue.
 * @param usec Timeout in microseconds.
 * @param flags Specify mode of the sleep.
 *
 * The sleep can be interrupted only if the
 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
 *
 * If usec is greater than zero, regardless of the value of the
 * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return
 * until either timeout, interruption or wakeup comes.
 *
 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in
 * flags, the call will not return until wakeup or interruption comes.
 *
 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags,
 * the call will immediately return, reporting either success or failure.
 *
 * @return Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT,
 * ESYNCH_INTERRUPTED, ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED.
 *
 * @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time
 * of the call there was no pending wakeup.
 *
 * @li ESYNCH_TIMEOUT means that the sleep timed out.
 *
 * @li ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread.
 *
 * @li ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was
 * a pending wakeup at the time of the call. The caller was not put
 * asleep at all.
 *
 * @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was
 * attempted.
 */
int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, int flags)
{
	ipl_t ipl;
	int rc;

	ipl = waitq_sleep_prepare(wq);
	rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
	waitq_sleep_finish(wq, rc, ipl);
	return rc;
}
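
/*
 * Example (a minimal sketch; wq and the 100 ms timeout are illustrative):
 * sleep interruptibly with a timeout and dispatch on the outcome.
 *
 *	int rc;
 *
 *	rc = waitq_sleep_timeout(&wq, 100000, SYNCH_FLAGS_INTERRUPTIBLE);
 *	switch (rc) {
 *	case ESYNCH_OK_ATOMIC:		// a wakeup was already pending
 *	case ESYNCH_OK_BLOCKED:		// woken up by waitq_wakeup()
 *		break;
 *	case ESYNCH_TIMEOUT:		// no wakeup within 100 ms
 *		break;
 *	case ESYNCH_INTERRUPTED:	// see waitq_interrupt_sleep()
 *		break;
 *	}
 */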

/** Prepare to sleep in a waitq.
 *
 * This function will return holding the lock of the wait queue
 * and interrupts disabled.
 *
 * @param wq Wait queue.
 *
 * @return Interrupt level as it existed on entry to this function.
 */
ipl_t waitq_sleep_prepare(waitq_t *wq)
{
	ipl_t ipl;

restart:
	ipl = interrupts_disable();

	if (THREAD) {	/* needed during system initialization */
		/*
		 * Busy waiting for a delayed timeout.
		 * This is an important fix for the race condition between
		 * a delayed timeout and a next call to waitq_sleep_timeout().
		 * Simply put, the thread is not allowed to go to sleep if
		 * there are timeouts in progress.
		 */
		spinlock_lock(&THREAD->lock);
		if (THREAD->timeout_pending) {
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(ipl);
			goto restart;
		}
		spinlock_unlock(&THREAD->lock);
	}

	spinlock_lock(&wq->lock);
	return ipl;
}

/** Finish waiting in a wait queue.
 *
 * This function restores interrupts to the state that existed prior
 * to the call to waitq_sleep_prepare(). If necessary, the wait queue
 * lock is released.
 *
 * @param wq Wait queue.
 * @param rc Return code of waitq_sleep_timeout_unsafe().
 * @param ipl Interrupt level returned by waitq_sleep_prepare().
 */
void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
{
	switch (rc) {
	case ESYNCH_WOULD_BLOCK:
	case ESYNCH_OK_ATOMIC:
		spinlock_unlock(&wq->lock);
		break;
	default:
		break;
	}
	interrupts_restore(ipl);
}
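
/*
 * The prepare/unsafe/finish split exists so that a caller can drop a lock
 * of its own after it is already protected by wq->lock, closing the window
 * in which a wakeup could be lost. A sketch of such a composed sleep
 * (callers_lock, usec and flags are hypothetical):
 *
 *	ipl_t ipl;
 *	int rc;
 *
 *	ipl = waitq_sleep_prepare(&wq);	// wq->lock held, interrupts off
 *	spinlock_unlock(&callers_lock);	// no wakeup can be missed now
 *	rc = waitq_sleep_timeout_unsafe(&wq, usec, flags);
 *	waitq_sleep_finish(&wq, rc, ipl);
 */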

/** Internal implementation of waitq_sleep_timeout().
 *
 * This function implements the logic of sleeping in a wait queue.
 * This call must be preceded by a call to waitq_sleep_prepare()
 * and followed by a call to waitq_sleep_finish().
 *
 * @param wq See waitq_sleep_timeout().
 * @param usec See waitq_sleep_timeout().
 * @param flags See waitq_sleep_timeout().
 *
 * @return See waitq_sleep_timeout().
 */
int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, int flags)
{
	/* Check whether to go to sleep at all. */
	if (wq->missed_wakeups) {
		wq->missed_wakeups--;
		return ESYNCH_OK_ATOMIC;
	} else if ((flags & SYNCH_FLAGS_NON_BLOCKING) && (usec == 0)) {
		/* Return immediately instead of going to sleep. */
		return ESYNCH_WOULD_BLOCK;
	}

	/*
	 * Now we are firmly decided to go to sleep.
	 */
	spinlock_lock(&THREAD->lock);

	if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {
		/*
		 * If the thread was already interrupted,
		 * don't go to sleep at all.
		 */
		if (THREAD->interrupted) {
			spinlock_unlock(&THREAD->lock);
			spinlock_unlock(&wq->lock);
			return ESYNCH_INTERRUPTED;
		}

		/*
		 * Set context that will be restored if the sleep
		 * of this thread is ever interrupted.
		 */
		THREAD->sleep_interruptible = true;
		if (!context_save(&THREAD->sleep_interruption_context)) {
			/* Short emulation of scheduler() return code. */
			spinlock_unlock(&THREAD->lock);
			return ESYNCH_INTERRUPTED;
		}
	} else {
		THREAD->sleep_interruptible = false;
	}

	if (usec) {
		/* We use the timeout variant. */
		if (!context_save(&THREAD->sleep_timeout_context)) {
			/* Short emulation of scheduler() return code. */
			spinlock_unlock(&THREAD->lock);
			return ESYNCH_TIMEOUT;
		}
		THREAD->timeout_pending = true;
		timeout_register(&THREAD->sleep_timeout, (uint64_t) usec,
		    waitq_timeouted_sleep, THREAD);
	}

	list_append(&THREAD->wq_link, &wq->head);

	/*
	 * Suspend execution.
	 */
	THREAD->state = Sleeping;
	THREAD->sleep_queue = wq;

	spinlock_unlock(&THREAD->lock);

	scheduler();	/* wq->lock is released in scheduler_separated_stack() */

	return ESYNCH_OK_BLOCKED;
}

/** Wake up first thread sleeping in a wait queue
 *
 * Wake up the first thread sleeping in a wait queue.
 * This is the SMP- and IRQ-safe wrapper meant for
 * general use.
 *
 * Besides its 'normal' wakeup operation, it attempts
 * to unregister a possible timeout.
 *
 * @param wq Pointer to wait queue.
 * @param all If true, all sleeping threads will be woken up
 * and the missed wakeups count will be zeroed.
 */
void waitq_wakeup(waitq_t *wq, bool all)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&wq->lock);

	_waitq_wakeup_unsafe(wq, all);

	spinlock_unlock(&wq->lock);
	interrupts_restore(ipl);
}
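
/*
 * Wakeups are not lost when nobody is sleeping; a sketch of the
 * wakeup-before-sleep ordering (illustrative only):
 *
 *	waitq_wakeup(&wq, false);	// queue empty: missed_wakeups becomes 1
 *
 *	rc = waitq_sleep_timeout(&wq, 0, SYNCH_FLAGS_NONE);
 *	// rc == ESYNCH_OK_ATOMIC: the pending wakeup was consumed and the
 *	// caller never actually slept
 */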

/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
 *
 * This is the internal SMP- and IRQ-unsafe version
 * of waitq_wakeup(). It assumes wq->lock is already
 * locked and interrupts are already disabled.
 *
 * @param wq Pointer to wait queue.
 * @param all If true, all sleeping threads will be woken up
 * and the missed wakeups count will be zeroed.
 */
void _waitq_wakeup_unsafe(waitq_t *wq, bool all)
{
	thread_t *t;

loop:
	if (list_empty(&wq->head)) {
		wq->missed_wakeups++;
		if (all)
			wq->missed_wakeups = 0;
		return;
	}

	t = list_get_instance(wq->head.next, thread_t, wq_link);

	/*
	 * Lock the thread prior to removing it from the wq.
	 * This is not necessary for mutual exclusion
	 * (the link belongs to the wait queue), but for
	 * synchronization with waitq_timeouted_sleep()
	 * and waitq_interrupt_sleep().
	 *
	 * In order for these two functions to work, the following
	 * invariant must hold:
	 *
	 * t->sleep_queue != NULL <=> t sleeps in a wait queue
	 *
	 * For an observer who locks the thread, the invariant
	 * holds only when the lock is taken prior to removing
	 * the thread from the wait queue.
	 */
	spinlock_lock(&t->lock);
	list_remove(&t->wq_link);

	if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
		t->timeout_pending = false;
	t->sleep_queue = NULL;
	spinlock_unlock(&t->lock);

	thread_ready(t);

	if (all)
		goto loop;
}

/** @}
 */