source: mainline/kernel/generic/src/synch/waitq.c@ 7ed8530

Last change on this file since 7ed8530 was 6ec34bb, checked in by Stanislav Kozina <stanislav.kozina@…>, 15 years ago

bugfix: synch/waitq.c saves THREAD->last_cycle after context restored.

/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sync
 * @{
 */

/**
 * @file
 * @brief Wait queue.
 *
 * Wait queue is the basic synchronization primitive upon which all
 * other synchronization primitives build.
 *
 * It allows threads to wait for an event in first-come, first-served
 * fashion. Conditional operation as well as timeouts and interruptions
 * are supported.
 */

#include <synch/waitq.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <proc/thread.h>
#include <proc/scheduler.h>
#include <arch/asm.h>
#include <typedefs.h>
#include <time/timeout.h>
#include <arch.h>
#include <context.h>
#include <adt/list.h>
#include <arch/cycle.h>

static void waitq_sleep_timed_out(void *data);

/** Initialize wait queue
 *
 * Initialize the wait queue so that threads can sleep on it.
 *
 * @param wq Pointer to wait queue to be initialized.
 */
void waitq_initialize(waitq_t *wq)
{
    spinlock_initialize(&wq->lock, "waitq_lock");
    list_initialize(&wq->head);
    wq->missed_wakeups = 0;
}
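
/*
 * Illustrative sketch (not part of the original file): a minimal event
 * object built directly on a wait queue. The waiting side blocks with
 * waitq_sleep_timeout() and the signalling side calls waitq_wakeup().
 * SYNCH_FLAGS_NONE is assumed to be the "no special behavior" flag value
 * from <synch/synch.h>; event_t, event_init(), event_wait() and
 * event_signal() are hypothetical names used only for this example.
 */
typedef struct {
    waitq_t wq;
} event_t;

static void event_init(event_t *e)
{
    waitq_initialize(&e->wq);
}

static void event_wait(event_t *e)
{
    /* Sleep unconditionally until somebody signals the event. */
    (void) waitq_sleep_timeout(&e->wq, 0, SYNCH_FLAGS_NONE);
}

static void event_signal(event_t *e)
{
    /* Wake the longest-sleeping waiter, or record a missed wakeup. */
    waitq_wakeup(&e->wq, WAKEUP_FIRST);
}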

/** Handle timeout during waitq_sleep_timeout() call
 *
 * This routine is called when waitq_sleep_timeout() times out.
 * Interrupts are disabled.
 *
 * It attempts to remove 'its' thread from the wait queue; it may
 * fail to do so when the timeout overlaps with a regular wakeup.
 * In that case it behaves just as though there was no timeout at all.
 *
 * @param data Pointer to the thread that called waitq_sleep_timeout().
 */
void waitq_sleep_timed_out(void *data)
{
    thread_t *t = (thread_t *) data;
    waitq_t *wq;
    bool do_wakeup = false;
    DEADLOCK_PROBE_INIT(p_wqlock);

    spinlock_lock(&threads_lock);
    if (!thread_exists(t))
        goto out;

grab_locks:
    spinlock_lock(&t->lock);
    if ((wq = t->sleep_queue)) {  /* assignment */
        if (!spinlock_trylock(&wq->lock)) {
            spinlock_unlock(&t->lock);
            DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
            goto grab_locks;  /* avoid deadlock */
        }

        list_remove(&t->wq_link);
        t->saved_context = t->sleep_timeout_context;
        do_wakeup = true;
        t->sleep_queue = NULL;
        spinlock_unlock(&wq->lock);
    }

    t->timeout_pending = false;
    spinlock_unlock(&t->lock);

    if (do_wakeup)
        thread_ready(t);

out:
    spinlock_unlock(&threads_lock);
}

/** Interrupt sleeping thread.
 *
 * This routine attempts to interrupt a thread from its sleep in a wait
 * queue. If the thread is not found sleeping, no action is taken.
 *
 * @param t Thread to be interrupted.
 */
void waitq_interrupt_sleep(thread_t *t)
{
    waitq_t *wq;
    bool do_wakeup = false;
    ipl_t ipl;
    DEADLOCK_PROBE_INIT(p_wqlock);

    ipl = interrupts_disable();
    spinlock_lock(&threads_lock);
    if (!thread_exists(t))
        goto out;

grab_locks:
    spinlock_lock(&t->lock);
    if ((wq = t->sleep_queue)) {  /* assignment */
        if (!(t->sleep_interruptible)) {
            /*
             * The sleep cannot be interrupted.
             */
            spinlock_unlock(&t->lock);
            goto out;
        }

        if (!spinlock_trylock(&wq->lock)) {
            spinlock_unlock(&t->lock);
            DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
            goto grab_locks;  /* avoid deadlock */
        }

        if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
            t->timeout_pending = false;

        list_remove(&t->wq_link);
        t->saved_context = t->sleep_interruption_context;
        do_wakeup = true;
        t->sleep_queue = NULL;
        spinlock_unlock(&wq->lock);
    }
    spinlock_unlock(&t->lock);

    if (do_wakeup)
        thread_ready(t);

out:
    spinlock_unlock(&threads_lock);
    interrupts_restore(ipl);
}
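
/*
 * Illustrative sketch (not part of the original file): how an
 * interruptible sleep pairs with waitq_interrupt_sleep(). The sleeper
 * must pass SYNCH_FLAGS_INTERRUPTIBLE, otherwise the interruption
 * attempt is silently ignored. wait_for_work() and cancel_worker()
 * are hypothetical names used only for this example.
 */
static int wait_for_work(waitq_t *work_wq)
{
    int rc = waitq_sleep_timeout(work_wq, 0, SYNCH_FLAGS_INTERRUPTIBLE);
    if (rc == ESYNCH_INTERRUPTED) {
        /* Somebody called waitq_interrupt_sleep() on this thread. */
        return -1;
    }
    /* Normal wakeup: rc is ESYNCH_OK_BLOCKED or ESYNCH_OK_ATOMIC. */
    return 0;
}

static void cancel_worker(thread_t *worker)
{
    /* Force the worker out of its (interruptible) sleep. */
    waitq_interrupt_sleep(worker);
}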

/** Interrupt the first thread sleeping in the wait queue.
 *
 * Note that the caller must ensure, by other means, that the thread
 * to be interrupted is sleeping interruptibly.
 *
 * @param wq Pointer to wait queue.
 */
void waitq_unsleep(waitq_t *wq)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&wq->lock);

    if (!list_empty(&wq->head)) {
        thread_t *t;

        t = list_get_instance(wq->head.next, thread_t, wq_link);
        spinlock_lock(&t->lock);
        ASSERT(t->sleep_interruptible);
        if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
            t->timeout_pending = false;
        list_remove(&t->wq_link);
        t->saved_context = t->sleep_interruption_context;
        t->sleep_queue = NULL;
        spinlock_unlock(&t->lock);
        thread_ready(t);
    }

    spinlock_unlock(&wq->lock);
    interrupts_restore(ipl);
}

/** Sleep until either wakeup, timeout or interruption occurs
 *
 * This is a sleep implementation which allows itself to time out or to be
 * interrupted from the sleep, restoring a failover context.
 *
 * Sleepers are organised in a FIFO fashion in a structure called wait queue.
 *
 * This function is the basic building block on which waitq_sleep()
 * and all the *_timeout() functions are implemented.
 *
 * @param wq Pointer to wait queue.
 * @param usec Timeout in microseconds.
 * @param flags Specify mode of the sleep.
 *
 * The sleep can be interrupted only if the
 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
 *
 * If usec is greater than zero, regardless of the value of the
 * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either
 * timeout, interruption or wakeup comes.
 *
 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags,
 * the call will not return until wakeup or interruption comes.
 *
 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the
 * call will immediately return, reporting either success or failure.
 *
 * @return One of ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT,
 * ESYNCH_INTERRUPTED, ESYNCH_OK_ATOMIC and ESYNCH_OK_BLOCKED.
 *
 * @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time of
 * the call there was no pending wakeup.
 *
 * @li ESYNCH_TIMEOUT means that the sleep timed out.
 *
 * @li ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread.
 *
 * @li ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was
 * a pending wakeup at the time of the call. The caller was not put
 * asleep at all.
 *
 * @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was
 * attempted.
 */
int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, int flags)
{
    ipl_t ipl;
    int rc;

    ipl = waitq_sleep_prepare(wq);
    rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
    waitq_sleep_finish(wq, rc, ipl);
    return rc;
}
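
/*
 * Illustrative sketch (not part of the original file): the three common
 * calling patterns of waitq_sleep_timeout() and the return codes they
 * can produce. SYNCH_FLAGS_NONE is assumed from <synch/synch.h>; the
 * one-second timeout is arbitrary and sleep_patterns() is hypothetical.
 */
static void sleep_patterns(waitq_t *wq)
{
    int rc;

    /* 1. Block for at most one second. */
    rc = waitq_sleep_timeout(wq, 1000000, SYNCH_FLAGS_NONE);
    if (rc == ESYNCH_TIMEOUT) {
        /* No wakeup arrived within the timeout. */
    }

    /* 2. Conditional (non-blocking) attempt: never sleeps. */
    rc = waitq_sleep_timeout(wq, 0, SYNCH_FLAGS_NON_BLOCKING);
    if (rc == ESYNCH_WOULD_BLOCK) {
        /* There was no pending (missed) wakeup to consume. */
    } else {
        /* rc == ESYNCH_OK_ATOMIC: a pending wakeup was consumed. */
    }

    /* 3. Block indefinitely until a wakeup comes. */
    rc = waitq_sleep_timeout(wq, 0, SYNCH_FLAGS_NONE);
    /* rc is ESYNCH_OK_BLOCKED, or ESYNCH_OK_ATOMIC if a wakeup was pending. */
}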

/** Prepare to sleep in a waitq.
 *
 * This function will return holding the lock of the wait queue
 * and interrupts disabled.
 *
 * @param wq Wait queue.
 *
 * @return Interrupt level as it existed on entry to this function.
 */
ipl_t waitq_sleep_prepare(waitq_t *wq)
{
    ipl_t ipl;

restart:
    ipl = interrupts_disable();

    if (THREAD) {  /* needed during system initialization */
        /*
         * Busy waiting for a delayed timeout.
         * This is an important fix for the race condition between
         * a delayed timeout and a next call to waitq_sleep_timeout().
         * Simply put, the thread is not allowed to go to sleep if
         * there are timeouts in progress.
         */
        spinlock_lock(&THREAD->lock);
        if (THREAD->timeout_pending) {
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(ipl);
            goto restart;
        }
        spinlock_unlock(&THREAD->lock);
    }

    spinlock_lock(&wq->lock);
    return ipl;
}

/** Finish waiting in a wait queue.
 *
 * This function restores interrupts to the state that existed prior
 * to the call to waitq_sleep_prepare(). If necessary, the wait queue
 * lock is released.
 *
 * @param wq Wait queue.
 * @param rc Return code of waitq_sleep_timeout_unsafe().
 * @param ipl Interrupt level returned by waitq_sleep_prepare().
 */
void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
{
    switch (rc) {
    case ESYNCH_WOULD_BLOCK:
    case ESYNCH_OK_ATOMIC:
        spinlock_unlock(&wq->lock);
        break;
    default:
        break;
    }
    interrupts_restore(ipl);
}
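
/*
 * Illustrative sketch (not part of the original file): the split
 * prepare/unsafe/finish interface lets a higher-level primitive test
 * its own condition while already holding wq->lock, closing the window
 * between "check" and "sleep". This mirrors how primitives built on
 * wait queues can avoid lost wakeups. wait_unless_ready() is a
 * hypothetical helper; it assumes *ready is only modified by code that
 * holds wq->lock.
 */
static int wait_unless_ready(waitq_t *wq, bool *ready, uint32_t usec)
{
    ipl_t ipl = waitq_sleep_prepare(wq);  /* wq->lock held, IRQs off */

    if (*ready) {
        /* Condition already satisfied: do not sleep at all.
         * waitq_sleep_finish() unlocks wq->lock for this return code. */
        waitq_sleep_finish(wq, ESYNCH_OK_ATOMIC, ipl);
        return ESYNCH_OK_ATOMIC;
    }

    int rc = waitq_sleep_timeout_unsafe(wq, usec, SYNCH_FLAGS_NONE);
    waitq_sleep_finish(wq, rc, ipl);
    return rc;
}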

/** Internal implementation of waitq_sleep_timeout().
 *
 * This function implements the logic of sleeping in a wait queue.
 * This call must be preceded by a call to waitq_sleep_prepare()
 * and followed by a call to waitq_sleep_finish().
 *
 * @param wq See waitq_sleep_timeout().
 * @param usec See waitq_sleep_timeout().
 * @param flags See waitq_sleep_timeout().
 *
 * @return See waitq_sleep_timeout().
 */
int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, int flags)
{
    /* Check whether to go to sleep at all. */
    if (wq->missed_wakeups) {
        wq->missed_wakeups--;
        return ESYNCH_OK_ATOMIC;
    } else {
        if ((flags & SYNCH_FLAGS_NON_BLOCKING) && (usec == 0)) {
            /* Return immediately instead of going to sleep. */
            return ESYNCH_WOULD_BLOCK;
        }
    }

    /*
     * Now we are firmly decided to go to sleep.
     */
    spinlock_lock(&THREAD->lock);

    if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {

        /*
         * If the thread was already interrupted,
         * don't go to sleep at all.
         */
        if (THREAD->interrupted) {
            spinlock_unlock(&THREAD->lock);
            spinlock_unlock(&wq->lock);
            return ESYNCH_INTERRUPTED;
        }

        /*
         * Set context that will be restored if the sleep
         * of this thread is ever interrupted.
         */
        THREAD->sleep_interruptible = true;
        if (!context_save(&THREAD->sleep_interruption_context)) {
            /* Short emulation of scheduler() return code. */
            THREAD->last_cycle = get_cycle();
            spinlock_unlock(&THREAD->lock);
            return ESYNCH_INTERRUPTED;
        }

    } else {
        THREAD->sleep_interruptible = false;
    }

    if (usec) {
        /* We use the timeout variant. */
        if (!context_save(&THREAD->sleep_timeout_context)) {
            /* Short emulation of scheduler() return code. */
            THREAD->last_cycle = get_cycle();
            spinlock_unlock(&THREAD->lock);
            return ESYNCH_TIMEOUT;
        }
        THREAD->timeout_pending = true;
        timeout_register(&THREAD->sleep_timeout, (uint64_t) usec,
            waitq_sleep_timed_out, THREAD);
    }

    list_append(&THREAD->wq_link, &wq->head);

    /*
     * Suspend execution.
     */
    THREAD->state = Sleeping;
    THREAD->sleep_queue = wq;

    spinlock_unlock(&THREAD->lock);

    /* wq->lock is released in scheduler_separated_stack() */
    scheduler();

    return ESYNCH_OK_BLOCKED;
}
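
/*
 * A note on the context_save() idiom above (illustrative, not part of
 * the original file): context_save()/context_restore() behave much like
 * setjmp()/longjmp(), but with the return convention inverted --
 * context_save() returns true when the context is first saved and false
 * when control later re-enters it through context_restore(). That is
 * why the "return ESYNCH_TIMEOUT;" and "return ESYNCH_INTERRUPTED;"
 * branches above run only after waitq_sleep_timed_out() or
 * waitq_interrupt_sleep() redirects THREAD->saved_context to the saved
 * failover context and the scheduler restores it. A rough user-space
 * analogy:
 *
 *     if (setjmp(env) != 0) {
 *         // re-entered via longjmp(): the timeout/interruption path
 *     } else {
 *         // initial pass: go to sleep
 *     }
 *
 * (with the zero/non-zero cases swapped relative to context_save()).
 */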

/** Wake up first thread sleeping in a wait queue
 *
 * Wake up the first thread sleeping in a wait queue. This is the SMP- and
 * IRQ-safe wrapper meant for general use.
 *
 * Besides its 'normal' wakeup operation, it attempts to unregister a possible
 * timeout.
 *
 * @param wq Pointer to wait queue.
 * @param mode Wakeup mode.
 */
void waitq_wakeup(waitq_t *wq, wakeup_mode_t mode)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&wq->lock);

    _waitq_wakeup_unsafe(wq, mode);

    spinlock_unlock(&wq->lock);
    interrupts_restore(ipl);
}
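
/*
 * Illustrative sketch (not part of the original file): the two wakeup
 * modes. WAKEUP_FIRST releases at most one thread -- the one waiting
 * longest, thanks to FIFO ordering -- while WAKEUP_ALL drains the whole
 * queue, which suits shutdown-style broadcasts. notify_one() and
 * notify_all() are hypothetical names.
 */
static void notify_one(waitq_t *wq)
{
    /* Wake the longest-waiting thread, or record a missed wakeup. */
    waitq_wakeup(wq, WAKEUP_FIRST);
}

static void notify_all(waitq_t *wq)
{
    /* Wake every thread currently sleeping in the queue. */
    waitq_wakeup(wq, WAKEUP_ALL);
}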

/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
 *
 * This is the internal SMP- and IRQ-unsafe version of waitq_wakeup(). It
 * assumes wq->lock is already locked and interrupts are already disabled.
 *
 * @param wq Pointer to wait queue.
 * @param mode If mode is WAKEUP_FIRST, then the longest waiting
 * thread, if any, is woken up. If mode is WAKEUP_ALL, then
 * all waiting threads, if any, are woken up. If there are
 * no waiting threads to be woken up, the missed wakeup is
 * recorded in the wait queue.
 */
void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode)
{
    thread_t *t;
    size_t count = 0;

loop:
    if (list_empty(&wq->head)) {
        wq->missed_wakeups++;
        if (count && mode == WAKEUP_ALL)
            wq->missed_wakeups--;
        return;
    }

    count++;
    t = list_get_instance(wq->head.next, thread_t, wq_link);

    /*
     * Lock the thread prior to removing it from the wq.
     * This is not strictly necessary for mutual exclusion
     * (the link belongs to the wait queue), but it is needed
     * for synchronization with waitq_sleep_timed_out()
     * and waitq_interrupt_sleep().
     *
     * In order for these two functions to work, the following
     * invariant must hold:
     *
     * t->sleep_queue != NULL <=> t sleeps in a wait queue
     *
     * For an observer who locks the thread, the invariant
     * holds only when the lock is held prior to removing
     * it from the wait queue.
     */
    spinlock_lock(&t->lock);
    list_remove(&t->wq_link);

    if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
        t->timeout_pending = false;
    t->sleep_queue = NULL;
    spinlock_unlock(&t->lock);

    thread_ready(t);

    if (mode == WAKEUP_ALL)
        goto loop;
}
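
/*
 * Illustrative sketch (not part of the original file): because an
 * unconsumed wakeup is recorded in wq->missed_wakeups, the two calls
 * below may run in either order without the wakeup being lost. The
 * sleeper returns ESYNCH_OK_ATOMIC if the wakeup came first and
 * ESYNCH_OK_BLOCKED if it actually slept. SYNCH_FLAGS_NONE is assumed
 * from <synch/synch.h>; signaller() and sleeper() are hypothetical.
 */
static void signaller(waitq_t *wq)
{
    waitq_wakeup(wq, WAKEUP_FIRST);  /* may run before the sleeper */
}

static int sleeper(waitq_t *wq)
{
    return waitq_sleep_timeout(wq, 0, SYNCH_FLAGS_NONE);
}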

/** @}
 */