Changeset da1bafb in mainline for kernel/generic/src/synch/rwlock.c


Timestamp:
2010-05-24T18:57:31Z (14 years ago)
Author:
Martin Decky <martin@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
0095368
Parents:
666f492
Message:

major code revision

  • replace spinlocks taken with interrupts disabled with irq_spinlocks (the before/after pattern is sketched after this list)
  • change spacing (not indentation) to be tab-size independent
  • use unsigned integer types where appropriate (especially bit flags)
  • visual separation
  • remove argument names in function prototypes
  • string changes
  • correct some formatting directives
  • replace various cryptic single-character variables (t, a, m, c, b, etc.) with proper identifiers (thread, task, timeout, as, itm, itc, etc.)
  • unify some assembler constructs
  • unused page table levels are now optimized out at compile time
  • replace several ints (with boolean semantics) with bools
  • use specifically sized types instead of generic types where appropriate (size_t, uint32_t, btree_key_t)
  • improve comments
  • split asserts with conjunction into multiple independent asserts
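
The irq_spinlock conversion mentioned in the first bullet accounts for most of the diff below. A minimal before/after sketch, using only calls that appear in the diff (spinlock_initialize(), spinlock_lock()/spinlock_unlock(), interrupts_disable()/interrupts_restore(), irq_spinlock_initialize(), irq_spinlock_lock()/irq_spinlock_unlock()); the lock variables themselves are hypothetical placeholders:

    /* Before: a plain spinlock_t, taken with interrupts disabled by hand. */
    spinlock_t lock;
    spinlock_initialize(&lock, "lock");

    ipl_t ipl = interrupts_disable();
    spinlock_lock(&lock);
    /* ... critical section ... */
    spinlock_unlock(&lock);
    interrupts_restore(ipl);

    /* After: an irq_spinlock_t; the added bool argument tells the lock
     * whether to disable and restore interrupts itself. */
    irq_spinlock_t irq_lock;
    irq_spinlock_initialize(&irq_lock, "irq_lock");

    irq_spinlock_lock(&irq_lock, true);
    /* ... critical section ... */
    irq_spinlock_unlock(&irq_lock, true);

Passing false instead leaves interrupt control to the caller, which the revised _rwlock_read_lock_timeout() below relies on when it holds THREAD->lock and rwl->lock under a single manual interrupts_disable().
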
File:
1 edited

  • kernel/generic/src/synch/rwlock.c

    r666f492 rda1bafb  
    3333/**
    3434 * @file
    35  * @brief       Reader/Writer locks.
     35 * @brief Reader/Writer locks.
    3636 *
    3737 * A reader/writer lock can be held by multiple readers at a time.
     
    5757 * each thread can block on only one rwlock at a time.
    5858 */
    59  
     59
    6060#include <synch/rwlock.h>
    6161#include <synch/spinlock.h>
     
    6969#include <panic.h>
    7070
    71 #define ALLOW_ALL               0
    72 #define ALLOW_READERS_ONLY      1
    73 
    74 static void let_others_in(rwlock_t *rwl, int readers_only);
    75 static void release_spinlock(void *arg);
     71#define ALLOW_ALL           0
     72#define ALLOW_READERS_ONLY  1
    7673
    7774/** Initialize reader/writer lock
     
    8077 *
    8178 * @param rwl Reader/Writer lock.
     79 *
    8280 */
    8381void rwlock_initialize(rwlock_t *rwl) {
    84         spinlock_initialize(&rwl->lock, "rwlock_t");
     82        irq_spinlock_initialize(&rwl->lock, "rwl.lock");
    8583        mutex_initialize(&rwl->exclusive, MUTEX_PASSIVE);
    8684        rwl->readers_in = 0;
    8785}
    8886
     87/** Direct handoff of reader/writer lock ownership.
     88 *
     89 * Direct handoff of reader/writer lock ownership
     90 * to waiting readers or a writer.
     91 *
     92 * Must be called with rwl->lock locked.
     93 * Must be called with interrupts_disable()'d.
     94 *
     95 * @param rwl          Reader/Writer lock.
     96 * @param readers_only See the description below.
     97 *
     98 * If readers_only is false: (unlock scenario)
     99 * Let the first sleeper on 'exclusive' mutex in, no matter
     100 * whether it is a reader or a writer. If there are more leading
     101 * readers in line, let each of them in.
     102 *
     103 * Otherwise: (timeout scenario)
     104 * Let all leading readers in.
     105 *
     106 */
     107static void let_others_in(rwlock_t *rwl, int readers_only)
     108{
     109        rwlock_type_t type = RWLOCK_NONE;
     110        thread_t *thread = NULL;
     111        bool one_more = true;
     112       
     113        irq_spinlock_lock(&rwl->exclusive.sem.wq.lock, false);
     114       
     115        if (!list_empty(&rwl->exclusive.sem.wq.head))
     116                thread = list_get_instance(rwl->exclusive.sem.wq.head.next,
     117                    thread_t, wq_link);
     118       
     119        do {
     120                if (thread) {
     121                        irq_spinlock_lock(&thread->lock, false);
     122                        type = thread->rwlock_holder_type;
     123                        irq_spinlock_unlock(&thread->lock, false);
     124                }
     125               
     126                /*
     127                 * If readers_only is true, we wake all leading readers
     128                 * if and only if rwl is locked by another reader.
     129                 * Assumption: readers_only ==> rwl->readers_in
     130                 *
     131                 */
     132                if ((readers_only) && (type != RWLOCK_READER))
     133                        break;
     134               
     135                if (type == RWLOCK_READER) {
     136                        /*
     137                         * Waking up a reader.
     138                         * We are responsible for incrementing rwl->readers_in
     139                         * for it.
     140                         *
     141                         */
     142                         rwl->readers_in++;
     143                }
     144               
     145                /*
     146                 * Only the last iteration through this loop can increment
     147                 * rwl->exclusive.sem.wq.missed_wakeup's. All preceeding
     148                 * iterations will wake up a thread.
     149                 *
     150                 */
     151               
     152                /*
     153                 * We call the internal version of waitq_wakeup, which
     154                 * relies on the fact that the waitq is already locked.
     155                 *
     156                 */
     157                _waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);
     158               
     159                thread = NULL;
     160                if (!list_empty(&rwl->exclusive.sem.wq.head)) {
     161                        thread = list_get_instance(rwl->exclusive.sem.wq.head.next,
     162                            thread_t, wq_link);
     163                       
     164                        if (thread) {
     165                                irq_spinlock_lock(&thread->lock, false);
     166                                if (thread->rwlock_holder_type != RWLOCK_READER)
     167                                        one_more = false;
     168                                irq_spinlock_unlock(&thread->lock, false);
     169                        }
     170                }
     171        } while ((type == RWLOCK_READER) && (thread) && (one_more));
     172       
     173        irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
     174}
     175
    89176/** Acquire reader/writer lock for reading
    90177 *
     
    92179 * Timeout and willingness to block may be specified.
    93180 *
    94  * @param rwl Reader/Writer lock.
    95  * @param usec Timeout in microseconds.
     181 * @param rwl   Reader/Writer lock.
     182 * @param usec  Timeout in microseconds.
    96183 * @param flags Specify mode of operation.
    97184 *
     
    100187 *
    101188 * @return See comment for waitq_sleep_timeout().
    102  */
    103 int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags)
    104 {
    105         ipl_t ipl;
    106         int rc;
    107        
    108         ipl = interrupts_disable();
    109         spinlock_lock(&THREAD->lock);
     189 *
     190 */
     191int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, unsigned int flags)
     192{
     193        irq_spinlock_lock(&THREAD->lock, true);
    110194        THREAD->rwlock_holder_type = RWLOCK_WRITER;
    111         spinlock_unlock(&THREAD->lock);
    112         interrupts_restore(ipl);
    113 
     195        irq_spinlock_unlock(&THREAD->lock, true);
     196       
    114197        /*
    115198         * Writers take the easy part.
    116199         * They just need to acquire the exclusive mutex.
     200         *
    117201         */
    118         rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
     202        int rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
    119203        if (SYNCH_FAILED(rc)) {
    120 
    121204                /*
    122205                 * Lock operation timed out or was interrupted.
    123206                 * The state of rwl is UNKNOWN at this point.
    124207                 * No claims about its holder can be made.
    125                  */
    126                  
    127                 ipl = interrupts_disable();
    128                 spinlock_lock(&rwl->lock);
     208                 *
     209                 */
     210                irq_spinlock_lock(&rwl->lock, true);
     211               
    129212                /*
    130213                 * Now when rwl is locked, we can inspect it again.
    131214                 * If it is held by some readers already, we can let
    132215                 * readers from the head of the wait queue in.
     216                 *
    133217                 */
    134218                if (rwl->readers_in)
    135219                        let_others_in(rwl, ALLOW_READERS_ONLY);
    136                 spinlock_unlock(&rwl->lock);
    137                 interrupts_restore(ipl);
     220               
     221                irq_spinlock_unlock(&rwl->lock, true);
    138222        }
    139223       
    140224        return rc;
     225}
     226
     227/** Release spinlock callback
     228 *
     229 * This is a callback function invoked from the scheduler.
     230 * The callback is registered in _rwlock_read_lock_timeout().
     231 *
     232 * @param arg Spinlock.
     233 *
     234 */
     235static void release_spinlock(void *arg)
     236{
     237        if (arg != NULL)
     238                irq_spinlock_unlock((irq_spinlock_t *) arg, false);
    141239}
    142240
     
    146244 * Timeout and willingness to block may be specified.
    147245 *
    148  * @param rwl Reader/Writer lock.
    149  * @param usec Timeout in microseconds.
     246 * @param rwl   Reader/Writer lock.
     247 * @param usec  Timeout in microseconds.
    150248 * @param flags Select mode of operation.
    151249 *
     
    154252 *
    155253 * @return See comment for waitq_sleep_timeout().
    156  */
    157 int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags)
    158 {
    159         int rc;
    160         ipl_t ipl;
    161        
    162         ipl = interrupts_disable();
    163         spinlock_lock(&THREAD->lock);
     254 *
     255 */
     256int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, unsigned int flags)
     257{
     258        /*
     259         * Since the locking scenarios get a little bit too
     260         * complicated, we do not rely on internal irq_spinlock_t
     261         * interrupt disabling logic here and control interrupts
     262         * manually.
     263         *
     264         */
     265        ipl_t ipl = interrupts_disable();
     266       
     267        irq_spinlock_lock(&THREAD->lock, false);
    164268        THREAD->rwlock_holder_type = RWLOCK_READER;
    165         spinlock_unlock(&THREAD->lock);
    166 
    167         spinlock_lock(&rwl->lock);
    168 
     269        irq_spinlock_pass(&THREAD->lock, &rwl->lock);
     270       
    169271        /*
    170272         * Find out whether we can get what we want without blocking.
     273         *
    171274         */
    172         rc = mutex_trylock(&rwl->exclusive);
     275        int rc = mutex_trylock(&rwl->exclusive);
    173276        if (SYNCH_FAILED(rc)) {
    174 
    175277                /*
    176278                 * 'exclusive' mutex is being held by someone else.
     
    178280                 * else waiting for it, we can enter the critical
    179281                 * section.
    180                  */
    181 
     282                 *
     283                 */
     284               
    182285                if (rwl->readers_in) {
    183                         spinlock_lock(&rwl->exclusive.sem.wq.lock);
     286                        irq_spinlock_lock(&rwl->exclusive.sem.wq.lock, false);
    184287                        if (list_empty(&rwl->exclusive.sem.wq.head)) {
    185288                                /*
    186289                                 * We can enter.
    187290                                 */
    188                                 spinlock_unlock(&rwl->exclusive.sem.wq.lock);
     291                                irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
    189292                                goto shortcut;
    190293                        }
    191                         spinlock_unlock(&rwl->exclusive.sem.wq.lock);
     294                        irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
    192295                }
    193 
     296               
    194297                /*
    195298                 * In order to prevent a race condition when a reader
     
    197300                 * we register a function to unlock rwl->lock
    198301                 * after this thread is put asleep.
    199                  */
    200                 #ifdef CONFIG_SMP
     302                 *
     303                 */
     304#ifdef CONFIG_SMP
    201305                thread_register_call_me(release_spinlock, &rwl->lock);
    202                 #else
     306#else
    203307                thread_register_call_me(release_spinlock, NULL);
    204                 #endif
    205                                  
     308#endif
     309               
    206310                rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
    207311                switch (rc) {
     
    209313                        /*
    210314                         * release_spinlock() wasn't called
     315                         *
    211316                         */
    212317                        thread_register_call_me(NULL, NULL);
    213                         spinlock_unlock(&rwl->lock);
     318                        irq_spinlock_unlock(&rwl->lock, false);
    214319                case ESYNCH_TIMEOUT:
    215320                case ESYNCH_INTERRUPTED:
     
    217322                         * The sleep timed out.
    218323                         * We just restore interrupt priority level.
     324                         *
    219325                         */
    220                 case ESYNCH_OK_BLOCKED:         
     326                case ESYNCH_OK_BLOCKED:
    221327                        /*
    222328                         * We were woken with rwl->readers_in already
     
    228334                         * 'readers_in' is incremented. Same time means both
    229335                         * events happen atomically when rwl->lock is held.)
     336                         *
    230337                         */
    231338                        interrupts_restore(ipl);
     
    240347                return rc;
    241348        }
    242 
     349       
    243350shortcut:
    244 
    245351        /*
    246352         * We can increment readers_in only if we didn't go to sleep.
    247353         * For sleepers, rwlock_let_others_in() will do the job.
     354         *
    248355         */
    249356        rwl->readers_in++;
    250        
    251         spinlock_unlock(&rwl->lock);
     357        irq_spinlock_unlock(&rwl->lock, false);
    252358        interrupts_restore(ipl);
    253 
     359       
    254360        return ESYNCH_OK_ATOMIC;
    255361}
     
    262368 *
    263369 * @param rwl Reader/Writer lock.
     370 *
    264371 */
    265372void rwlock_write_unlock(rwlock_t *rwl)
    266373{
    267         ipl_t ipl;
    268        
    269         ipl = interrupts_disable();
    270         spinlock_lock(&rwl->lock);
     374        irq_spinlock_lock(&rwl->lock, true);
    271375        let_others_in(rwl, ALLOW_ALL);
    272         spinlock_unlock(&rwl->lock);
    273         interrupts_restore(ipl);
    274        
     376        irq_spinlock_unlock(&rwl->lock, true);
    275377}
    276378
     
    283385 *
    284386 * @param rwl Reader/Writer lock.
     387 *
    285388 */
    286389void rwlock_read_unlock(rwlock_t *rwl)
    287390{
    288         ipl_t ipl;
    289 
    290         ipl = interrupts_disable();
    291         spinlock_lock(&rwl->lock);
     391        irq_spinlock_lock(&rwl->lock, true);
     392       
    292393        if (!--rwl->readers_in)
    293394                let_others_in(rwl, ALLOW_ALL);
    294         spinlock_unlock(&rwl->lock);
    295         interrupts_restore(ipl);
    296 }
    297 
    298 
    299 /** Direct handoff of reader/writer lock ownership.
    300  *
    301  * Direct handoff of reader/writer lock ownership
    302  * to waiting readers or a writer.
    303  *
    304  * Must be called with rwl->lock locked.
    305  * Must be called with interrupts_disable()'d.
    306  *
    307  * @param rwl Reader/Writer lock.
    308  * @param readers_only See the description below.
    309  *
    310  * If readers_only is false: (unlock scenario)
    311  * Let the first sleeper on 'exclusive' mutex in, no matter
    312  * whether it is a reader or a writer. If there are more leading
    313  * readers in line, let each of them in.
    314  *
    315  * Otherwise: (timeout scenario)
    316  * Let all leading readers in.
    317  */
    318 void let_others_in(rwlock_t *rwl, int readers_only)
    319 {
    320         rwlock_type_t type = RWLOCK_NONE;
    321         thread_t *t = NULL;
    322         bool one_more = true;
    323        
    324         spinlock_lock(&rwl->exclusive.sem.wq.lock);
    325 
    326         if (!list_empty(&rwl->exclusive.sem.wq.head))
    327                 t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t,
    328                     wq_link);
    329         do {
    330                 if (t) {
    331                         spinlock_lock(&t->lock);
    332                         type = t->rwlock_holder_type;
    333                         spinlock_unlock(&t->lock);                     
    334                 }
    335        
    336                 /*
    337                  * If readers_only is true, we wake all leading readers
    338                  * if and only if rwl is locked by another reader.
    339                  * Assumption: readers_only ==> rwl->readers_in
    340                  */
    341                 if (readers_only && (type != RWLOCK_READER))
    342                         break;
    343 
    344 
    345                 if (type == RWLOCK_READER) {
    346                         /*
    347                          * Waking up a reader.
    348                          * We are responsible for incrementing rwl->readers_in
    349                          * for it.
    350                          */
    351                          rwl->readers_in++;
    352                 }
    353 
    354                 /*
    355                  * Only the last iteration through this loop can increment
    356                  * rwl->exclusive.sem.wq.missed_wakeup's. All preceeding
    357                  * iterations will wake up a thread.
    358                  */
    359                 /* We call the internal version of waitq_wakeup, which
    360                  * relies on the fact that the waitq is already locked.
    361                  */
    362                 _waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);
    363                
    364                 t = NULL;
    365                 if (!list_empty(&rwl->exclusive.sem.wq.head)) {
    366                         t = list_get_instance(rwl->exclusive.sem.wq.head.next,
    367                             thread_t, wq_link);
    368                         if (t) {
    369                                 spinlock_lock(&t->lock);
    370                                 if (t->rwlock_holder_type != RWLOCK_READER)
    371                                         one_more = false;
    372                                 spinlock_unlock(&t->lock);     
    373                         }
    374                 }
    375         } while ((type == RWLOCK_READER) && t && one_more);
    376 
    377         spinlock_unlock(&rwl->exclusive.sem.wq.lock);
    378 }
    379 
    380 /** Release spinlock callback
    381  *
    382  * This is a callback function invoked from the scheduler.
    383  * The callback is registered in _rwlock_read_lock_timeout().
    384  *
    385  * @param arg Spinlock.
    386  */
    387 void release_spinlock(void *arg)
    388 {
    389         spinlock_unlock((spinlock_t *) arg);
     395       
     396        irq_spinlock_unlock(&rwl->lock, true);
    390397}
    391398
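
For context, a caller-side sketch of the functions touched by this diff. Only rwlock_initialize(), the two *_lock_timeout() variants, the unlock functions and the SYNCH_FAILED() check are taken from the changeset itself; SYNCH_FLAGS_NONE and the 1000 µs timeout are illustrative assumptions, not part of this diff:

    rwlock_t rwl;
    rwlock_initialize(&rwl);

    /* Writer side: acquire the lock exclusively, with a timeout. */
    int rc = _rwlock_write_lock_timeout(&rwl, 1000, SYNCH_FLAGS_NONE);
    if (!SYNCH_FAILED(rc)) {
            /* ... exclusive access ... */
            rwlock_write_unlock(&rwl);
    }

    /* Reader side: multiple readers may hold the lock concurrently. */
    rc = _rwlock_read_lock_timeout(&rwl, 1000, SYNCH_FLAGS_NONE);
    if (!SYNCH_FAILED(rc)) {
            /* ... shared, read-only access ... */
            rwlock_read_unlock(&rwl);
    }
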