waitq.c

/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** Wait queue.
 *
 * The wait queue is a basic synchronization primitive: threads enqueue
 * themselves to sleep in first-come, first-served order and are later
 * woken up, one at a time or all at once, with optional timeout and
 * interruptible variants of the sleep.
 */

#include <synch/waitq.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <proc/thread.h>
#include <proc/scheduler.h>
#include <arch/asm.h>
#include <arch/types.h>
#include <typedefs.h>
#include <time/timeout.h>
#include <arch.h>
#include <context.h>
#include <adt/list.h>

static void waitq_timeouted_sleep(void *data);

/** Initialize wait queue.
 *
 * @param wq Pointer to the wait queue to be initialized.
 */
void waitq_initialize(waitq_t *wq)
{
        spinlock_initialize(&wq->lock, "waitq_lock");
        list_initialize(&wq->head);
        wq->missed_wakeups = 0;
}
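
/*
 * Illustrative sketch, not part of the original file: a minimal
 * producer/consumer pair built on a wait queue. SYNCH_FLAGS_NONE and
 * the ESYNCH_* codes come from <synch/synch.h>; all example_* names
 * are hypothetical. Guarded by #if 0 so it is never compiled in.
 */
#if 0
static waitq_t example_wq;      /* assume waitq_initialize(&example_wq) ran at init */

static void example_consumer(void)
{
        int rc;

        /* Block until a wakeup arrives; usec == 0 means no timeout here. */
        rc = waitq_sleep_timeout(&example_wq, 0, SYNCH_FLAGS_NONE);
        if (rc == ESYNCH_OK_BLOCKED || rc == ESYNCH_OK_ATOMIC) {
                /* Woken up, possibly by consuming a missed wakeup. */
        }
}

static void example_producer(void)
{
        /* Wake one sleeper, or record a missed wakeup if nobody sleeps. */
        waitq_wakeup(&example_wq, false);
}
#endif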

/** Handle a timed-out sleep.
 *
 * This routine is invoked by the timeout subsystem when a sleeping
 * thread's timeout expires. It removes the thread from its wait queue,
 * switches its saved context to sleep_timeout_context and makes it
 * ready. Unlike waitq_interrupt_sleep(), it does not disable interrupts
 * itself, as timeout handlers already run with interrupts disabled.
 *
 * @param data Pointer to the thread that timed out (thread_t *).
 */
void waitq_timeouted_sleep(void *data)
{
        thread_t *t = (thread_t *) data;
        waitq_t *wq;
        bool do_wakeup = false;

        spinlock_lock(&threads_lock);
        if (!thread_exists(t))
                goto out;

grab_locks:
        spinlock_lock(&t->lock);
        if ((wq = t->sleep_queue)) {            /* assignment */
                if (!spinlock_trylock(&wq->lock)) {
                        spinlock_unlock(&t->lock);
                        goto grab_locks;        /* avoid deadlock */
                }

                list_remove(&t->wq_link);
                t->saved_context = t->sleep_timeout_context;
                do_wakeup = true;
                t->sleep_queue = NULL;
                spinlock_unlock(&wq->lock);
        }

        t->timeout_pending = false;
        spinlock_unlock(&t->lock);

        if (do_wakeup)
                thread_ready(t);

out:
        spinlock_unlock(&threads_lock);
}

/** Interrupt a sleeping thread.
 *
 * This routine attempts to interrupt a thread's sleep in a wait queue.
 * If the sleep is not interruptible, nothing happens. Otherwise the
 * thread is removed from the wait queue, switched to its
 * sleep_interruption_context and made ready.
 *
 * @param t Thread to be interrupted.
 */
void waitq_interrupt_sleep(thread_t *t)
{
        waitq_t *wq;
        bool do_wakeup = false;
        ipl_t ipl;

        ipl = interrupts_disable();
        spinlock_lock(&threads_lock);
        if (!thread_exists(t))
                goto out;

grab_locks:
        spinlock_lock(&t->lock);
        if ((wq = t->sleep_queue)) {            /* assignment */
                if (!(t->sleep_interruptible)) {
                        /*
                         * The sleep cannot be interrupted.
                         */
                        spinlock_unlock(&t->lock);
                        goto out;
                }

                if (!spinlock_trylock(&wq->lock)) {
                        spinlock_unlock(&t->lock);
                        goto grab_locks;        /* avoid deadlock */
                }

                if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
                        t->timeout_pending = false;

                list_remove(&t->wq_link);
                t->saved_context = t->sleep_interruption_context;
                do_wakeup = true;
                t->sleep_queue = NULL;
                spinlock_unlock(&wq->lock);
        }
        spinlock_unlock(&t->lock);

        if (do_wakeup)
                thread_ready(t);

out:
        spinlock_unlock(&threads_lock);
        interrupts_restore(ipl);
}
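
/*
 * Hedged sketch, not from the original file: how a kill/cleanup path
 * might cancel an interruptible sleep. Setting t->interrupted makes a
 * subsequent interruptible sleep fail fast with ESYNCH_INTERRUPTED,
 * while waitq_interrupt_sleep() unblocks a sleep already in progress.
 * The call site and locking discipline shown here are assumptions.
 */
#if 0
static void example_interrupt_thread(thread_t *t)
{
        ipl_t ipl;

        ipl = interrupts_disable();
        spinlock_lock(&t->lock);
        t->interrupted = true;          /* future interruptible sleeps fail fast */
        spinlock_unlock(&t->lock);
        interrupts_restore(ipl);

        /* Takes threads_lock, t->lock and wq->lock internally. */
        waitq_interrupt_sleep(t);
}
#endif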

/** Sleep until either a wakeup, a timeout or an interruption occurs.
 *
 * This is a convenience wrapper around waitq_sleep_prepare(),
 * waitq_sleep_timeout_unsafe() and waitq_sleep_finish().
 *
 * @param wq    Wait queue to sleep in.
 * @param usec  Timeout in microseconds; zero means no timeout unless
 *              SYNCH_FLAGS_NON_BLOCKING is also set in flags.
 * @param flags Bitwise OR of SYNCH_FLAGS_* options.
 *
 * @return See waitq_sleep_timeout_unsafe().
 */
int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int flags)
{
        ipl_t ipl;
        int rc;

        ipl = waitq_sleep_prepare(wq);
        rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
        waitq_sleep_finish(wq, rc, ipl);
        return rc;
}
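
/*
 * Why the sleep is split into prepare/unsafe/finish: a caller can do
 * extra work atomically with respect to wakeups while wq->lock is held.
 * The sketch below shows a condition-variable-style wait; it is an
 * assumption-laden illustration (mutex_t, mutex_lock() and
 * mutex_unlock() are taken from <synch/mutex.h>), not code from this
 * file.
 */
#if 0
static int example_condvar_style_wait(waitq_t *wq, mutex_t *mtx, __u32 usec)
{
        int rc;
        ipl_t ipl;

        ipl = waitq_sleep_prepare(wq);  /* interrupts off, wq->lock held */
        mutex_unlock(mtx);              /* no wakeup can be lost from here on */
        rc = waitq_sleep_timeout_unsafe(wq, usec, SYNCH_FLAGS_NONE);
        waitq_sleep_finish(wq, rc, ipl);
        mutex_lock(mtx);
        return rc;
}
#endif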

/** Prepare to sleep in a wait queue.
 *
 * Disables interrupts, waits out any pending timeout of the current
 * thread and locks the wait queue.
 *
 * @param wq Wait queue to sleep in.
 *
 * @return Interrupt level as it existed on entry to this function.
 */
ipl_t waitq_sleep_prepare(waitq_t *wq)
{
        ipl_t ipl;

restart:
        ipl = interrupts_disable();

        if (THREAD) {   /* needed during system initialization */
                /*
                 * Busy wait for a delayed timeout to complete.
                 * This is an important fix for the race condition between
                 * a delayed timeout and the next call to
                 * waitq_sleep_timeout(): the thread simply is not allowed
                 * to go to sleep while it has a timeout in progress.
                 */
                spinlock_lock(&THREAD->lock);
                if (THREAD->timeout_pending) {
                        spinlock_unlock(&THREAD->lock);
                        interrupts_restore(ipl);
                        goto restart;
                }
                spinlock_unlock(&THREAD->lock);
        }

        spinlock_lock(&wq->lock);
        return ipl;
}

/** Finish waiting in a wait queue.
 *
 * Releases the wait queue lock in those cases where
 * waitq_sleep_timeout_unsafe() left it held (i.e. when the thread never
 * actually went to sleep) and restores the interrupt level.
 *
 * @param wq  Wait queue the sleep took place in.
 * @param rc  Return code of waitq_sleep_timeout_unsafe().
 * @param ipl Interrupt level returned by waitq_sleep_prepare().
 */
void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
{
        switch (rc) {
        case ESYNCH_WOULD_BLOCK:
        case ESYNCH_OK_ATOMIC:
                spinlock_unlock(&wq->lock);
                break;
        default:
                break;
        }
        interrupts_restore(ipl);
}

/** Internal implementation of waitq_sleep_timeout().
 *
 * Expects interrupts to be disabled and wq->lock to be held, as arranged
 * by waitq_sleep_prepare(). On return, the state of wq->lock depends on
 * the return code; waitq_sleep_finish() knows how to clean up.
 *
 * @param wq    Wait queue to sleep in.
 * @param usec  Timeout in microseconds; zero means no timeout unless
 *              SYNCH_FLAGS_NON_BLOCKING is also set in flags.
 * @param flags Bitwise OR of SYNCH_FLAGS_* options.
 *
 * @return ESYNCH_OK_ATOMIC, ESYNCH_WOULD_BLOCK, ESYNCH_INTERRUPTED,
 *         ESYNCH_TIMEOUT or ESYNCH_OK_BLOCKED.
 */
int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int flags)
{
        /* Check whether to go to sleep at all. */
        if (wq->missed_wakeups) {
                wq->missed_wakeups--;
                return ESYNCH_OK_ATOMIC;
        } else if ((flags & SYNCH_FLAGS_NON_BLOCKING) && (usec == 0)) {
                /* Return immediately instead of going to sleep. */
                return ESYNCH_WOULD_BLOCK;
        }

        /*
         * Now we are firmly decided to go to sleep.
         */
        spinlock_lock(&THREAD->lock);

        if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {

                /*
                 * If the thread was already interrupted,
                 * don't go to sleep at all.
                 */
                if (THREAD->interrupted) {
                        spinlock_unlock(&THREAD->lock);
                        spinlock_unlock(&wq->lock);
                        return ESYNCH_INTERRUPTED;
                }

                /*
                 * Set context that will be restored if the sleep
                 * of this thread is ever interrupted.
                 */
                THREAD->sleep_interruptible = true;
                if (!context_save(&THREAD->sleep_interruption_context)) {
                        /* Short emulation of scheduler() return code. */
                        spinlock_unlock(&THREAD->lock);
                        return ESYNCH_INTERRUPTED;
                }

        } else {
                THREAD->sleep_interruptible = false;
        }

        if (usec) {
                /* We use the timeout variant. */
                if (!context_save(&THREAD->sleep_timeout_context)) {
                        /* Short emulation of scheduler() return code. */
                        spinlock_unlock(&THREAD->lock);
                        return ESYNCH_TIMEOUT;
                }
                THREAD->timeout_pending = true;
                timeout_register(&THREAD->sleep_timeout, (__u64) usec,
                    waitq_timeouted_sleep, THREAD);
        }

        list_append(&THREAD->wq_link, &wq->head);

        /*
         * Suspend execution.
         */
        THREAD->state = Sleeping;
        THREAD->sleep_queue = wq;

        spinlock_unlock(&THREAD->lock);

        scheduler();    /* wq->lock is released in scheduler_separated_stack() */

        return ESYNCH_OK_BLOCKED;
}
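
/*
 * Hedged example of interpreting the ESYNCH_* return codes defined in
 * <synch/synch.h>; the wrapper call, timeout value and example_* name
 * are illustrative only.
 */
#if 0
static bool example_wait_one_second(waitq_t *wq)
{
        switch (waitq_sleep_timeout(wq, 1000000, SYNCH_FLAGS_INTERRUPTIBLE)) {
        case ESYNCH_OK_BLOCKED:         /* woken up via waitq_wakeup() */
        case ESYNCH_OK_ATOMIC:          /* consumed a missed wakeup */
                return true;
        case ESYNCH_TIMEOUT:            /* one second elapsed */
        case ESYNCH_INTERRUPTED:        /* see waitq_interrupt_sleep() */
        case ESYNCH_WOULD_BLOCK:        /* only with SYNCH_FLAGS_NON_BLOCKING */
        default:
                return false;
        }
}
#endif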

/** Wake up thread(s) sleeping in a wait queue.
 *
 * @param wq  Wait queue to perform the wakeup on.
 * @param all If true, wake up all sleeping threads and reset the count
 *            of missed wakeups; if false, wake up at most one thread or
 *            record a missed wakeup when the queue is empty.
 */
void waitq_wakeup(waitq_t *wq, bool all)
{
        ipl_t ipl;

        ipl = interrupts_disable();
        spinlock_lock(&wq->lock);

        _waitq_wakeup_unsafe(wq, all);

        spinlock_unlock(&wq->lock);
        interrupts_restore(ipl);
}

/** Internal implementation of waitq_wakeup().
 *
 * Expects interrupts to be disabled and wq->lock to be held.
 *
 * @param wq  Wait queue to perform the wakeup on.
 * @param all If true, wake up all sleeping threads.
 */
void _waitq_wakeup_unsafe(waitq_t *wq, bool all)
{
        thread_t *t;

loop:
        if (list_empty(&wq->head)) {
                wq->missed_wakeups++;
                if (all)
                        wq->missed_wakeups = 0;
                return;
        }

        t = list_get_instance(wq->head.next, thread_t, wq_link);

        /*
         * Lock the thread prior to removing it from the wq.
         * This is not necessary for mutual exclusion (the link belongs
         * to the wait queue), but for synchronization with
         * waitq_timeouted_sleep() and waitq_interrupt_sleep().
         *
         * In order for these two functions to work, the following
         * invariant must hold:
         *
         * t->sleep_queue != NULL <=> t sleeps in a wait queue
         *
         * For an observer who locks the thread, the invariant holds
         * only if the thread is locked before it is removed from the
         * wait queue.
         */
        spinlock_lock(&t->lock);
        list_remove(&t->wq_link);

        if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
                t->timeout_pending = false;
        t->sleep_queue = NULL;
        spinlock_unlock(&t->lock);

        thread_ready(t);

        if (all)
                goto loop;
}
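
/*
 * Hedged illustration of the missed-wakeup semantics implemented above:
 * a wakeup posted to an empty queue is remembered, so a later sleeper
 * returns ESYNCH_OK_ATOMIC without ever blocking. The example_* name is
 * hypothetical.
 */
#if 0
static void example_missed_wakeup(waitq_t *wq)
{
        waitq_wakeup(wq, false);        /* nobody sleeps: missed_wakeups becomes 1 */

        /* Consumes the missed wakeup and returns ESYNCH_OK_ATOMIC. */
        (void) waitq_sleep_timeout(wq, 0, SYNCH_FLAGS_NONE);
}
#endif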