rwlock.c

Go to the documentation of this file.
00001 /*
00002  * Copyright (C) 2001-2004 Jakub Jermar
00003  * All rights reserved.
00004  *
00005  * Redistribution and use in source and binary forms, with or without
00006  * modification, are permitted provided that the following conditions
00007  * are met:
00008  *
00009  * - Redistributions of source code must retain the above copyright
00010  *   notice, this list of conditions and the following disclaimer.
00011  * - Redistributions in binary form must reproduce the above copyright
00012  *   notice, this list of conditions and the following disclaimer in the
00013  *   documentation and/or other materials provided with the distribution.
00014  * - The name of the author may not be used to endorse or promote products
00015  *   derived from this software without specific prior written permission.
00016  *
00017  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
00018  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00019  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
00020  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
00021  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
00022  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
00023  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
00024  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
00025  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
00026  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00027  */
00028 
/*
 * NOTE ON rwlock_holder_type
 * This field is set on an attempt to acquire the exclusive mutex
 * to the respective value depending on whether the caller is a reader
 * or a writer. The field is examined only if the thread had been
 * previously blocked on the exclusive mutex. Thus it is safe
 * to store the rwlock type in the thread structure, because
 * each thread can block on only one rwlock at a time.
 */
00058  
00059 #include <synch/rwlock.h>
00060 #include <synch/spinlock.h>
00061 #include <synch/mutex.h>
00062 #include <synch/waitq.h>
00063 #include <synch/synch.h>
00064 #include <adt/list.h>
00065 #include <typedefs.h>
00066 #include <arch/asm.h>
00067 #include <arch.h>
00068 #include <proc/thread.h>
00069 #include <panic.h>
00070 
00071 #define ALLOW_ALL               0
00072 #define ALLOW_READERS_ONLY      1
00073 
00074 static void let_others_in(rwlock_t *rwl, int readers_only);
00075 static void release_spinlock(void *arg);
00076 
00083 void rwlock_initialize(rwlock_t *rwl) {
00084         spinlock_initialize(&rwl->lock, "rwlock_t");
00085         mutex_initialize(&rwl->exclusive);
00086         rwl->readers_in = 0;
00087 }
00088 
/** Acquire reader/writer lock for writing.
 *
 * Records RWLOCK_WRITER in the current thread structure (consulted by
 * let_others_in() should this thread block on the exclusive mutex) and
 * then tries to take the exclusive mutex, possibly with a timeout.
 *
 * @param rwl   Reader/writer lock.
 * @param usec  Timeout in microseconds (semantics of _mutex_lock_timeout()).
 * @param flags Flags forwarded to _mutex_lock_timeout().
 *
 * @return ESYNCH_* code as returned by _mutex_lock_timeout().
 */
int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int flags)
{
        ipl_t ipl;
        int rc;
        
        /* Mark this thread as a prospective writer before possibly blocking. */
        ipl = interrupts_disable();
        spinlock_lock(&THREAD->lock);
        THREAD->rwlock_holder_type = RWLOCK_WRITER;
        spinlock_unlock(&THREAD->lock); 
        interrupts_restore(ipl);

        /*
         * Writers take the easy part.
         * They just need to acquire the exclusive mutex.
         */
        rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
        if (SYNCH_FAILED(rc)) {

                /*
                 * Lock operation timed out or was interrupted.
                 * The state of rwl is UNKNOWN at this point.
                 * No claims about its holder can be made.
                 */
                 
                ipl = interrupts_disable();
                spinlock_lock(&rwl->lock);
                /*
                 * Now when rwl is locked, we can inspect it again.
                 * If it is held by some readers already, we can let
                 * readers from the head of the wait queue in.
                 */
                if (rwl->readers_in)
                        let_others_in(rwl, ALLOW_READERS_ONLY);
                spinlock_unlock(&rwl->lock);
                interrupts_restore(ipl);
        }
        
        return rc;
}
00142 
/** Acquire reader/writer lock for reading.
 *
 * Records RWLOCK_READER in the current thread structure, then either
 * enters the critical section immediately (exclusive mutex free, or
 * already held by readers with an empty wait queue) or blocks on the
 * exclusive mutex. A callback is registered so that rwl->lock is
 * released only after the thread has been put to sleep, closing a race
 * window with other readers.
 *
 * @param rwl   Reader/writer lock.
 * @param usec  Timeout in microseconds (semantics of _mutex_lock_timeout()).
 * @param flags Flags forwarded to _mutex_lock_timeout().
 *
 * @return ESYNCH_OK_ATOMIC on the non-blocking path, otherwise the
 *         ESYNCH_* code returned by _mutex_lock_timeout().
 */
int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int flags)
{
        int rc;
        ipl_t ipl;
        
        /* Mark this thread as a prospective reader before possibly blocking. */
        ipl = interrupts_disable();
        spinlock_lock(&THREAD->lock);
        THREAD->rwlock_holder_type = RWLOCK_READER;
        spinlock_unlock(&THREAD->lock); 

        spinlock_lock(&rwl->lock);

        /*
         * Find out whether we can get what we want without blocking.
         */
        rc = mutex_trylock(&rwl->exclusive);
        if (SYNCH_FAILED(rc)) {

                /*
                 * 'exclusive' mutex is being held by someone else.
                 * If the holder is a reader and there is no one
                 * else waiting for it, we can enter the critical
                 * section.
                 */

                if (rwl->readers_in) {
                        spinlock_lock(&rwl->exclusive.sem.wq.lock);
                        if (list_empty(&rwl->exclusive.sem.wq.head)) {
                                /*
                                 * We can enter.
                                 */
                                spinlock_unlock(&rwl->exclusive.sem.wq.lock);
                                goto shortcut;
                        }
                        spinlock_unlock(&rwl->exclusive.sem.wq.lock);
                }

                /*
                 * In order to prevent a race condition when a reader
                 * could block another reader at the head of the waitq,
                 * we register a function to unlock rwl->lock
                 * after this thread is put asleep.
                 */
                #ifdef CONFIG_SMP
                thread_register_call_me(release_spinlock, &rwl->lock);
                #else
                /* On UP there is no spinning; unlocking a spinlock is a no-op. */
                thread_register_call_me(release_spinlock, NULL);
                #endif
                                 
                rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
                switch (rc) {
                        case ESYNCH_WOULD_BLOCK:
                                /*
                                 * release_spinlock() wasn't called
                                 */
                                thread_register_call_me(NULL, NULL);
                                spinlock_unlock(&rwl->lock);
                                /* fallthrough */
                        case ESYNCH_TIMEOUT:
                        case ESYNCH_INTERRUPTED:
                                /*
                                 * The sleep timed out.
                                 * We just restore interrupt priority level.
                                 */
                                /* fallthrough */
                        case ESYNCH_OK_BLOCKED:         
                                /*
                                 * We were woken with rwl->readers_in already incremented.
                                 * Note that this arrangement avoids race condition between
                                 * two concurrent readers. (Race is avoided if 'exclusive' is
                                 * locked at the same time as 'readers_in' is incremented.
                                 * Same time means both events happen atomically when
                                 * rwl->lock is held.)
                                 */
                                interrupts_restore(ipl);
                                break;
                        case ESYNCH_OK_ATOMIC:
                                /* _mutex_lock_timeout() blocked, so this cannot happen. */
                                panic("_mutex_lock_timeout()==ESYNCH_OK_ATOMIC\n");
                                break;
                        default:
                                panic("invalid ESYNCH\n");
                                break;
                }
                return rc;
        }

shortcut:

        /*
         * We can increment readers_in only if we didn't go to sleep.
         * For sleepers, rwlock_let_others_in() will do the job.
         */
        rwl->readers_in++;
        
        spinlock_unlock(&rwl->lock);
        interrupts_restore(ipl);

        return ESYNCH_OK_ATOMIC;
}
00254 
/** Release reader/writer lock held by a writer.
 *
 * With rwl->lock held, wakes up as many waiters from the head of the
 * wait queue as possible (ALLOW_ALL: leading readers, or one writer).
 *
 * @param rwl Reader/writer lock previously acquired for writing.
 */
void rwlock_write_unlock(rwlock_t *rwl)
{
        ipl_t ipl;
        
        ipl = interrupts_disable();
        spinlock_lock(&rwl->lock);
        let_others_in(rwl, ALLOW_ALL);
        spinlock_unlock(&rwl->lock);
        interrupts_restore(ipl);
        
}
00274 
/** Release reader/writer lock held by a reader.
 *
 * Decrements the reader count; only the last reader leaving the
 * critical section wakes up waiters (ALLOW_ALL).
 *
 * @param rwl Reader/writer lock previously acquired for reading.
 */
void rwlock_read_unlock(rwlock_t *rwl)
{
        ipl_t ipl;

        ipl = interrupts_disable();
        spinlock_lock(&rwl->lock);
        if (!--rwl->readers_in)
                let_others_in(rwl, ALLOW_ALL);
        spinlock_unlock(&rwl->lock);
        interrupts_restore(ipl);
}
00295 
00296 
/** Wake up eligible threads waiting on the exclusive mutex.
 *
 * Walks the head of the exclusive mutex's wait queue and wakes up either
 * one writer, or a run of consecutive readers (incrementing
 * rwl->readers_in on their behalf — see _rwlock_read_lock_timeout()).
 *
 * Must be called with rwl->lock held and interrupts disabled.
 *
 * @param rwl          Reader/writer lock.
 * @param readers_only ALLOW_READERS_ONLY to wake leading readers only
 *                     (assumes rwl->readers_in != 0), ALLOW_ALL to also
 *                     allow a single writer through.
 */
void let_others_in(rwlock_t *rwl, int readers_only)
{
        rwlock_type_t type = RWLOCK_NONE;
        thread_t *t = NULL;
        bool one_more = true;
        
        spinlock_lock(&rwl->exclusive.sem.wq.lock);

        /* Peek at the first waiter, if any. */
        if (!list_empty(&rwl->exclusive.sem.wq.head))
                t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link);
        do {
                if (t) {
                        spinlock_lock(&t->lock);
                        type = t->rwlock_holder_type;
                        spinlock_unlock(&t->lock);                      
                }
        
                /*
                 * If readers_only is true, we wake all leading readers
                 * if and only if rwl is locked by another reader.
                 * Assumption: readers_only ==> rwl->readers_in
                 */
                if (readers_only && (type != RWLOCK_READER))
                        break;


                if (type == RWLOCK_READER) {
                        /*
                         * Waking up a reader.
                         * We are responsible for incrementing rwl->readers_in for it.
                         */
                         rwl->readers_in++;
                }

                /*
                 * Only the last iteration through this loop can increment
                 * rwl->exclusive.sem.wq.missed_wakeup's. All preceding
                 * iterations will wake up a thread.
                 */
                /* We call the internal version of waitq_wakeup, which
                 * relies on the fact that the waitq is already locked.
                 */
                _waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);
                
                /* Look at the next waiter: continue only if it is another reader. */
                t = NULL;
                if (!list_empty(&rwl->exclusive.sem.wq.head)) {
                        t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link);
                        if (t) {
                                spinlock_lock(&t->lock);
                                if (t->rwlock_holder_type != RWLOCK_READER)
                                        one_more = false;
                                spinlock_unlock(&t->lock);      
                        }
                }
        } while ((type == RWLOCK_READER) && t && one_more);

        spinlock_unlock(&rwl->exclusive.sem.wq.lock);
}
00374 
00382 void release_spinlock(void *arg)
00383 {
00384         spinlock_unlock((spinlock_t *) arg);
00385 }
00386 

Generated on Sun Jun 18 17:28:04 2006 for HelenOS Kernel (ppc64) by  doxygen 1.4.6