source: mainline/generic/src/synch/waitq.c @ 5c9a08b

Last change on this file since 5c9a08b was 5c9a08b, checked in by Jakub Jermar <jakub@…>, 19 years ago

Move list and fifo data types to adt/.

/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <synch/waitq.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <proc/thread.h>
#include <proc/scheduler.h>
#include <arch/asm.h>
#include <arch/types.h>
#include <typedefs.h>
#include <time/timeout.h>
#include <arch.h>
#include <context.h>
#include <adt/list.h>

/** Initialize wait queue
 *
 * Initialize wait queue: set up its spinlock, start with an empty
 * list of sleeping threads and no missed wakeups.
 *
 * @param wq Pointer to wait queue to be initialized.
 */
void waitq_initialize(waitq_t *wq)
{
	spinlock_initialize(&wq->lock, "waitq_lock");
	list_initialize(&wq->head);
	wq->missed_wakeups = 0;
}
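
/*
 * Illustrative sketch (not part of the original file): a subsystem
 * typically embeds a waitq_t in its own structure and initializes it
 * once, before any sleeper or waker can reach it. The serial_port_t
 * type and its fields are hypothetical.
 */
#if 0
typedef struct {
	waitq_t rx_wq;		/* threads waiting for received data */
	int rx_avail;		/* guarded by the driver's own spinlock */
} serial_port_t;

static void serial_port_init(serial_port_t *port)
{
	waitq_initialize(&port->rx_wq);
	port->rx_avail = 0;
}
#endif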

/** Handle timeout during waitq_sleep_timeout() call
 *
 * This routine is called when waitq_sleep_timeout() times out.
 * Interrupts are disabled.
 *
 * It tries to remove 'its' thread from the wait queue; it may fail
 * to do so when the timeout and a regular wakeup overlap. In that
 * case it behaves just as though there was no timeout at all.
 *
 * @param data Pointer to the thread that called waitq_sleep_timeout().
 */
void waitq_interrupted_sleep(void *data)
{
	thread_t *t = (thread_t *) data;
	waitq_t *wq;
	bool do_wakeup = false;

	spinlock_lock(&threads_lock);
	if (!list_member(&t->threads_link, &threads_head))
		goto out;	/* the thread no longer exists */

grab_locks:
	spinlock_lock(&t->lock);
	if ((wq = t->sleep_queue)) {		/* assignment */
		/*
		 * The wakeup path takes wq->lock before t->lock; taking
		 * both in the opposite order here could deadlock, so only
		 * try to lock wq->lock and retry from scratch on failure.
		 */
		if (!spinlock_trylock(&wq->lock)) {
			spinlock_unlock(&t->lock);
			goto grab_locks;	/* avoid deadlock */
		}

		list_remove(&t->wq_link);
		t->saved_context = t->sleep_timeout_context;
		do_wakeup = true;

		spinlock_unlock(&wq->lock);
		t->sleep_queue = NULL;
	}

	t->timeout_pending = false;
	spinlock_unlock(&t->lock);

	if (do_wakeup)
		thread_ready(t);

out:
	spinlock_unlock(&threads_lock);
}

/** Sleep until either wakeup or timeout occurs
 *
 * This is a sleep implementation which allows itself to be
 * interrupted from the sleep, restoring a failover context.
 *
 * Sleepers are organised in FIFO fashion in a structure called wait queue.
 *
 * This function is the basic building block on which waitq_sleep()
 * and all the *_timeout() functions are implemented.
 *
 * @param wq Pointer to wait queue.
 * @param usec Timeout in microseconds.
 * @param nonblocking Blocking vs. non-blocking operation mode switch.
 *
 * If usec is greater than zero, regardless of the value of nonblocking,
 * the call will not return until either timeout or wakeup comes.
 *
 * If usec is zero and nonblocking is zero (false), the call
 * will not return until wakeup comes.
 *
 * If usec is zero and nonblocking is non-zero (true), the call will
 * immediately return, reporting either success or failure.
 *
 * @return One of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT,
 *         ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED.
 *
 * ESYNCH_WOULD_BLOCK means that the sleep failed because at the time
 * of the call there was no pending wakeup.
 *
 * ESYNCH_TIMEOUT means that the sleep timed out.
 *
 * ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was
 * a pending wakeup at the time of the call. The caller was not put
 * asleep at all.
 *
 * ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was
 * attempted.
 */
int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking)
{
	volatile ipl_t ipl; /* must be live after context_restore() */

restart:
	ipl = interrupts_disable();

	/*
	 * Busy waiting for a delayed timeout.
	 * This is an important fix for the race condition between
	 * a delayed timeout and a next call to waitq_sleep_timeout().
	 * Simply put, the thread is not allowed to go to sleep if
	 * there are timeouts in progress.
	 */
	spinlock_lock(&THREAD->lock);
	if (THREAD->timeout_pending) {
		spinlock_unlock(&THREAD->lock);
		interrupts_restore(ipl);
		goto restart;
	}
	spinlock_unlock(&THREAD->lock);

	spinlock_lock(&wq->lock);

	/* Check whether to go to sleep at all. */
	if (wq->missed_wakeups) {
		wq->missed_wakeups--;
		spinlock_unlock(&wq->lock);
		interrupts_restore(ipl);
		return ESYNCH_OK_ATOMIC;
	} else {
		if (nonblocking && (usec == 0)) {
			/* Return immediately instead of going to sleep. */
			spinlock_unlock(&wq->lock);
			interrupts_restore(ipl);
			return ESYNCH_WOULD_BLOCK;
		}
	}

	/*
	 * Now we are firmly decided to go to sleep.
	 */
	spinlock_lock(&THREAD->lock);
	if (usec) {
		/* We use the timeout variant. */
		if (!context_save(&THREAD->sleep_timeout_context)) {
			/*
			 * Short emulation of scheduler() return code.
			 */
			before_thread_runs();
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(ipl);
			return ESYNCH_TIMEOUT;
		}
		THREAD->timeout_pending = true;
		timeout_register(&THREAD->sleep_timeout, (__u64) usec, waitq_interrupted_sleep, THREAD);
	}

	list_append(&THREAD->wq_link, &wq->head);

	/*
	 * Suspend execution.
	 */
	THREAD->state = Sleeping;
	THREAD->sleep_queue = wq;

	spinlock_unlock(&THREAD->lock);

	scheduler(); /* wq->lock is released in scheduler_separated_stack() */
	interrupts_restore(ipl);

	return ESYNCH_OK_BLOCKED;
}
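
/*
 * Illustrative sketch (not part of the original file): the three calling
 * modes of waitq_sleep_timeout() on a hypothetical wait queue 'wq'. The
 * ESYNCH_* codes are those documented above.
 */
#if 0
int rc;

/* Block indefinitely until a wakeup arrives. */
rc = waitq_sleep_timeout(&wq, 0, 0);

/* Block, but give up after 1000 microseconds. */
rc = waitq_sleep_timeout(&wq, 1000, 0);
if (rc == ESYNCH_TIMEOUT) {
	/* no wakeup came in time */
}

/* Never block: succeed only if a wakeup is already pending. */
rc = waitq_sleep_timeout(&wq, 0, 1);
if (rc == ESYNCH_WOULD_BLOCK) {
	/* there was no pending wakeup at the time of the call */
}
#endif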

/** Wake up first thread sleeping in a wait queue
 *
 * Wake up the first thread sleeping in a wait queue.
 * This is the SMP- and IRQ-safe wrapper meant for
 * general use.
 *
 * Besides its 'normal' wakeup operation, it attempts
 * to unregister a possible timeout.
 *
 * @param wq Pointer to wait queue.
 * @param all If this is true, all sleeping threads will be woken up
 *        and the missed wakeups count will be zeroed.
 */
void waitq_wakeup(waitq_t *wq, bool all)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&wq->lock);

	_waitq_wakeup_unsafe(wq, all);

	spinlock_unlock(&wq->lock);
	interrupts_restore(ipl);
}
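
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * producer/consumer pair built on waitq_wakeup() and
 * waitq_sleep_timeout(). The wait queue data_wq and both functions
 * are made up for this example.
 */
#if 0
static waitq_t data_wq;		/* initialized elsewhere with waitq_initialize() */

void producer(void)
{
	/* ... publish the data ... */
	waitq_wakeup(&data_wq, false);	/* wake up one consumer */
}

void consumer(void)
{
	waitq_sleep_timeout(&data_wq, 0, 0);	/* sleep until the producer wakes us */
	/* ... consume the data ... */
}
#endif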

/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
 *
 * This is the internal SMP- and IRQ-unsafe version
 * of waitq_wakeup(). It assumes wq->lock is already
 * locked and interrupts are already disabled.
 *
 * @param wq Pointer to wait queue.
 * @param all If this is true, all sleeping threads will be woken up
 *        and the missed wakeups count will be zeroed.
 */
void _waitq_wakeup_unsafe(waitq_t *wq, bool all)
{
	thread_t *t;

loop:
	if (list_empty(&wq->head)) {
		/* Remember the wakeup so that the next sleeper need not block. */
		wq->missed_wakeups++;
		if (all)
			wq->missed_wakeups = 0;
		return;
	}

	t = list_get_instance(wq->head.next, thread_t, wq_link);

	list_remove(&t->wq_link);
	spinlock_lock(&t->lock);
	if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
		t->timeout_pending = false;
	t->sleep_queue = NULL;
	spinlock_unlock(&t->lock);

	thread_ready(t);

	if (all)
		goto loop;
}
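
/*
 * Illustrative sketch (not part of the original file): a caller that
 * already holds wq->lock with interrupts disabled uses the unsafe
 * variant directly instead of the waitq_wakeup() wrapper. The wait
 * queue 'wq' and the guarded update are hypothetical.
 */
#if 0
ipl_t ipl = interrupts_disable();
spinlock_lock(&wq.lock);

/* ... update state protected by wq.lock ... */
_waitq_wakeup_unsafe(&wq, false);	/* wake a single sleeper */

spinlock_unlock(&wq.lock);
interrupts_restore(ipl);
#endif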