Changeset da1bafb in mainline for kernel/generic/src/synch


Timestamp:
2010-05-24T18:57:31Z (15 years ago)
Author:
Martin Decky <martin@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
0095368
Parents:
666f492
Message:

major code revision

  • replace spinlocks taken with interrupts disabled with irq_spinlocks (see the sketch after this list)
  • change spacing (not indentation) to be tab-size independent
  • use unsigned integer types where appropriate (especially for bit flags)
  • visual separation
  • remove argument names in function prototypes
  • string changes
  • correct some formatting directives
  • replace various cryptic single-character variables (t, a, m, c, b, etc.) with proper identifiers (thread, task, timeout, as, itm, itc, etc.)
  • unify some assembler constructs
  • unused page table levels are now optimized out at compile time
  • replace several ints (with boolean semantics) with bools
  • use specifically sized types instead of generic types where appropriate (size_t, uint32_t, btree_key_t)
  • improve comments
  • split asserts with conjunctions into multiple independent asserts
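
The first item above is the core mechanical change of this revision: code that used to take a plain spinlock wrapped in explicit interrupts_disable()/interrupts_restore() calls now uses an irq_spinlock, whose lock and unlock operations can handle the interrupt state themselves. The following is a minimal sketch of the two idioms, built only from calls visible in the diffs below; the lock, counter, and function names are hypothetical and are not part of the changeset.

    #include <synch/spinlock.h>

    /* Hypothetical lock and data, for illustration only. */
    static spinlock_t demo_lock;
    static irq_spinlock_t demo_irq_lock;
    static size_t demo_counter;

    /* Old idiom: a plain spinlock that every caller guards by disabling
     * and restoring interrupts manually. */
    static void demo_old_style(void)
    {
        ipl_t ipl = interrupts_disable();
        spinlock_lock(&demo_lock);
        demo_counter++;
        spinlock_unlock(&demo_lock);
        interrupts_restore(ipl);
    }

    /* New idiom: an irq_spinlock; passing true as the second argument makes
     * irq_spinlock_lock() disable interrupts and irq_spinlock_unlock()
     * restore them, so the explicit ipl_t bookkeeping disappears.
     * (Both locks would be set up with spinlock_initialize() and
     * irq_spinlock_initialize() respectively, as in rwlock_initialize().) */
    static void demo_new_style(void)
    {
        irq_spinlock_lock(&demo_irq_lock, true);
        demo_counter++;
        irq_spinlock_unlock(&demo_irq_lock, true);
    }
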
Location:
kernel/generic/src/synch
Files:
4 edited

  • kernel/generic/src/synch/mutex.c

    r666f492 rda1bafb  
    6767 *
    6868 */
    69 int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, int flags)
     69int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, unsigned int flags)
    7070{
    7171        int rc;
  • kernel/generic/src/synch/rwlock.c

    r666f492 rda1bafb  
    3333/**
    3434 * @file
    35  * @brief       Reader/Writer locks.
     35 * @brief Reader/Writer locks.
    3636 *
    3737 * A reader/writer lock can be held by multiple readers at a time.
     
    5757 * each thread can block on only one rwlock at a time.
    5858 */
    59  
     59
    6060#include <synch/rwlock.h>
    6161#include <synch/spinlock.h>
     
    6969#include <panic.h>
    7070
    71 #define ALLOW_ALL               0
    72 #define ALLOW_READERS_ONLY      1
    73 
    74 static void let_others_in(rwlock_t *rwl, int readers_only);
    75 static void release_spinlock(void *arg);
     71#define ALLOW_ALL           0
     72#define ALLOW_READERS_ONLY  1
    7673
    7774/** Initialize reader/writer lock
     
    8077 *
    8178 * @param rwl Reader/Writer lock.
     79 *
    8280 */
    8381void rwlock_initialize(rwlock_t *rwl) {
    84         spinlock_initialize(&rwl->lock, "rwlock_t");
     82        irq_spinlock_initialize(&rwl->lock, "rwl.lock");
    8583        mutex_initialize(&rwl->exclusive, MUTEX_PASSIVE);
    8684        rwl->readers_in = 0;
    8785}
    8886
     87/** Direct handoff of reader/writer lock ownership.
     88 *
     89 * Direct handoff of reader/writer lock ownership
     90 * to waiting readers or a writer.
     91 *
     92 * Must be called with rwl->lock locked.
     93 * Must be called with interrupts_disable()'d.
     94 *
     95 * @param rwl          Reader/Writer lock.
     96 * @param readers_only See the description below.
     97 *
     98 * If readers_only is false: (unlock scenario)
     99 * Let the first sleeper on 'exclusive' mutex in, no matter
     100 * whether it is a reader or a writer. If there are more leading
     101 * readers in line, let each of them in.
     102 *
     103 * Otherwise: (timeout scenario)
     104 * Let all leading readers in.
     105 *
     106 */
     107static void let_others_in(rwlock_t *rwl, int readers_only)
     108{
     109        rwlock_type_t type = RWLOCK_NONE;
     110        thread_t *thread = NULL;
     111        bool one_more = true;
     112       
     113        irq_spinlock_lock(&rwl->exclusive.sem.wq.lock, false);
     114       
     115        if (!list_empty(&rwl->exclusive.sem.wq.head))
     116                thread = list_get_instance(rwl->exclusive.sem.wq.head.next,
     117                    thread_t, wq_link);
     118       
     119        do {
     120                if (thread) {
     121                        irq_spinlock_lock(&thread->lock, false);
     122                        type = thread->rwlock_holder_type;
     123                        irq_spinlock_unlock(&thread->lock, false);
     124                }
     125               
     126                /*
     127                 * If readers_only is true, we wake all leading readers
     128                 * if and only if rwl is locked by another reader.
     129                 * Assumption: readers_only ==> rwl->readers_in
     130                 *
     131                 */
     132                if ((readers_only) && (type != RWLOCK_READER))
     133                        break;
     134               
     135                if (type == RWLOCK_READER) {
     136                        /*
     137                         * Waking up a reader.
     138                         * We are responsible for incrementing rwl->readers_in
     139                         * for it.
     140                         *
     141                         */
     142                         rwl->readers_in++;
     143                }
     144               
     145                /*
     146                 * Only the last iteration through this loop can increment
     147                 * rwl->exclusive.sem.wq.missed_wakeup's. All preceeding
     148                 * iterations will wake up a thread.
     149                 *
     150                 */
     151               
     152                /*
     153                 * We call the internal version of waitq_wakeup, which
     154                 * relies on the fact that the waitq is already locked.
     155                 *
     156                 */
     157                _waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);
     158               
     159                thread = NULL;
     160                if (!list_empty(&rwl->exclusive.sem.wq.head)) {
     161                        thread = list_get_instance(rwl->exclusive.sem.wq.head.next,
     162                            thread_t, wq_link);
     163                       
     164                        if (thread) {
     165                                irq_spinlock_lock(&thread->lock, false);
     166                                if (thread->rwlock_holder_type != RWLOCK_READER)
     167                                        one_more = false;
     168                                irq_spinlock_unlock(&thread->lock, false);
     169                        }
     170                }
     171        } while ((type == RWLOCK_READER) && (thread) && (one_more));
     172       
     173        irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
     174}
     175
    89176/** Acquire reader/writer lock for reading
    90177 *
     
    92179 * Timeout and willingness to block may be specified.
    93180 *
    94  * @param rwl Reader/Writer lock.
    95  * @param usec Timeout in microseconds.
     181 * @param rwl   Reader/Writer lock.
     182 * @param usec  Timeout in microseconds.
    96183 * @param flags Specify mode of operation.
    97184 *
     
    100187 *
    101188 * @return See comment for waitq_sleep_timeout().
    102  */
    103 int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags)
    104 {
    105         ipl_t ipl;
    106         int rc;
    107        
    108         ipl = interrupts_disable();
    109         spinlock_lock(&THREAD->lock);
     189 *
     190 */
     191int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, unsigned int flags)
     192{
     193        irq_spinlock_lock(&THREAD->lock, true);
    110194        THREAD->rwlock_holder_type = RWLOCK_WRITER;
    111         spinlock_unlock(&THREAD->lock);
    112         interrupts_restore(ipl);
    113 
     195        irq_spinlock_unlock(&THREAD->lock, true);
     196       
    114197        /*
    115198         * Writers take the easy part.
    116199         * They just need to acquire the exclusive mutex.
     200         *
    117201         */
    118         rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
     202        int rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
    119203        if (SYNCH_FAILED(rc)) {
    120 
    121204                /*
    122205                 * Lock operation timed out or was interrupted.
    123206                 * The state of rwl is UNKNOWN at this point.
    124207                 * No claims about its holder can be made.
    125                  */
    126                  
    127                 ipl = interrupts_disable();
    128                 spinlock_lock(&rwl->lock);
     208                 *
     209                 */
     210                irq_spinlock_lock(&rwl->lock, true);
     211               
    129212                /*
    130213                 * Now when rwl is locked, we can inspect it again.
    131214                 * If it is held by some readers already, we can let
    132215                 * readers from the head of the wait queue in.
     216                 *
    133217                 */
    134218                if (rwl->readers_in)
    135219                        let_others_in(rwl, ALLOW_READERS_ONLY);
    136                 spinlock_unlock(&rwl->lock);
    137                 interrupts_restore(ipl);
     220               
     221                irq_spinlock_unlock(&rwl->lock, true);
    138222        }
    139223       
    140224        return rc;
     225}
     226
     227/** Release spinlock callback
     228 *
     229 * This is a callback function invoked from the scheduler.
     230 * The callback is registered in _rwlock_read_lock_timeout().
     231 *
     232 * @param arg Spinlock.
     233 *
     234 */
     235static void release_spinlock(void *arg)
     236{
     237        if (arg != NULL)
     238                irq_spinlock_unlock((irq_spinlock_t *) arg, false);
    141239}
    142240
     
    146244 * Timeout and willingness to block may be specified.
    147245 *
    148  * @param rwl Reader/Writer lock.
    149  * @param usec Timeout in microseconds.
     246 * @param rwl   Reader/Writer lock.
     247 * @param usec  Timeout in microseconds.
    150248 * @param flags Select mode of operation.
    151249 *
     
    154252 *
    155253 * @return See comment for waitq_sleep_timeout().
    156  */
    157 int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags)
    158 {
    159         int rc;
    160         ipl_t ipl;
    161        
    162         ipl = interrupts_disable();
    163         spinlock_lock(&THREAD->lock);
     254 *
     255 */
     256int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, unsigned int flags)
     257{
     258        /*
     259         * Since the locking scenarios get a little bit too
     260         * complicated, we do not rely on internal irq_spinlock_t
     261         * interrupt disabling logic here and control interrupts
     262         * manually.
     263         *
     264         */
     265        ipl_t ipl = interrupts_disable();
     266       
     267        irq_spinlock_lock(&THREAD->lock, false);
    164268        THREAD->rwlock_holder_type = RWLOCK_READER;
    165         spinlock_unlock(&THREAD->lock);
    166 
    167         spinlock_lock(&rwl->lock);
    168 
     269        irq_spinlock_pass(&THREAD->lock, &rwl->lock);
     270       
    169271        /*
    170272         * Find out whether we can get what we want without blocking.
     273         *
    171274         */
    172         rc = mutex_trylock(&rwl->exclusive);
     275        int rc = mutex_trylock(&rwl->exclusive);
    173276        if (SYNCH_FAILED(rc)) {
    174 
    175277                /*
    176278                 * 'exclusive' mutex is being held by someone else.
     
    178280                 * else waiting for it, we can enter the critical
    179281                 * section.
    180                  */
    181 
     282                 *
     283                 */
     284               
    182285                if (rwl->readers_in) {
    183                         spinlock_lock(&rwl->exclusive.sem.wq.lock);
     286                        irq_spinlock_lock(&rwl->exclusive.sem.wq.lock, false);
    184287                        if (list_empty(&rwl->exclusive.sem.wq.head)) {
    185288                                /*
    186289                                 * We can enter.
    187290                                 */
    188                                 spinlock_unlock(&rwl->exclusive.sem.wq.lock);
     291                                irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
    189292                                goto shortcut;
    190293                        }
    191                         spinlock_unlock(&rwl->exclusive.sem.wq.lock);
     294                        irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
    192295                }
    193 
     296               
    194297                /*
    195298                 * In order to prevent a race condition when a reader
     
    197300                 * we register a function to unlock rwl->lock
    198301                 * after this thread is put asleep.
    199                  */
    200                 #ifdef CONFIG_SMP
     302                 *
     303                 */
     304#ifdef CONFIG_SMP
    201305                thread_register_call_me(release_spinlock, &rwl->lock);
    202                 #else
     306#else
    203307                thread_register_call_me(release_spinlock, NULL);
    204                 #endif
    205                                  
     308#endif
     309               
    206310                rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
    207311                switch (rc) {
     
    209313                        /*
    210314                         * release_spinlock() wasn't called
     315                         *
    211316                         */
    212317                        thread_register_call_me(NULL, NULL);
    213                         spinlock_unlock(&rwl->lock);
     318                        irq_spinlock_unlock(&rwl->lock, false);
    214319                case ESYNCH_TIMEOUT:
    215320                case ESYNCH_INTERRUPTED:
     
    217322                         * The sleep timed out.
    218323                         * We just restore interrupt priority level.
     324                         *
    219325                         */
    220                 case ESYNCH_OK_BLOCKED:         
     326                case ESYNCH_OK_BLOCKED:
    221327                        /*
    222328                         * We were woken with rwl->readers_in already
     
    228334                         * 'readers_in' is incremented. Same time means both
    229335                         * events happen atomically when rwl->lock is held.)
     336                         *
    230337                         */
    231338                        interrupts_restore(ipl);
     
    240347                return rc;
    241348        }
    242 
     349       
    243350shortcut:
    244 
    245351        /*
    246352         * We can increment readers_in only if we didn't go to sleep.
    247353         * For sleepers, rwlock_let_others_in() will do the job.
     354         *
    248355         */
    249356        rwl->readers_in++;
    250        
    251         spinlock_unlock(&rwl->lock);
     357        irq_spinlock_unlock(&rwl->lock, false);
    252358        interrupts_restore(ipl);
    253 
     359       
    254360        return ESYNCH_OK_ATOMIC;
    255361}
     
    262368 *
    263369 * @param rwl Reader/Writer lock.
     370 *
    264371 */
    265372void rwlock_write_unlock(rwlock_t *rwl)
    266373{
    267         ipl_t ipl;
    268        
    269         ipl = interrupts_disable();
    270         spinlock_lock(&rwl->lock);
     374        irq_spinlock_lock(&rwl->lock, true);
    271375        let_others_in(rwl, ALLOW_ALL);
    272         spinlock_unlock(&rwl->lock);
    273         interrupts_restore(ipl);
    274        
     376        irq_spinlock_unlock(&rwl->lock, true);
    275377}
    276378
     
    283385 *
    284386 * @param rwl Reader/Writer lock.
     387 *
    285388 */
    286389void rwlock_read_unlock(rwlock_t *rwl)
    287390{
    288         ipl_t ipl;
    289 
    290         ipl = interrupts_disable();
    291         spinlock_lock(&rwl->lock);
     391        irq_spinlock_lock(&rwl->lock, true);
     392       
    292393        if (!--rwl->readers_in)
    293394                let_others_in(rwl, ALLOW_ALL);
    294         spinlock_unlock(&rwl->lock);
    295         interrupts_restore(ipl);
    296 }
    297 
    298 
    299 /** Direct handoff of reader/writer lock ownership.
    300  *
    301  * Direct handoff of reader/writer lock ownership
    302  * to waiting readers or a writer.
    303  *
    304  * Must be called with rwl->lock locked.
    305  * Must be called with interrupts_disable()'d.
    306  *
    307  * @param rwl Reader/Writer lock.
    308  * @param readers_only See the description below.
    309  *
    310  * If readers_only is false: (unlock scenario)
    311  * Let the first sleeper on 'exclusive' mutex in, no matter
    312  * whether it is a reader or a writer. If there are more leading
    313  * readers in line, let each of them in.
    314  *
    315  * Otherwise: (timeout scenario)
    316  * Let all leading readers in.
    317  */
    318 void let_others_in(rwlock_t *rwl, int readers_only)
    319 {
    320         rwlock_type_t type = RWLOCK_NONE;
    321         thread_t *t = NULL;
    322         bool one_more = true;
    323        
    324         spinlock_lock(&rwl->exclusive.sem.wq.lock);
    325 
    326         if (!list_empty(&rwl->exclusive.sem.wq.head))
    327                 t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t,
    328                     wq_link);
    329         do {
    330                 if (t) {
    331                         spinlock_lock(&t->lock);
    332                         type = t->rwlock_holder_type;
    333                         spinlock_unlock(&t->lock);                     
    334                 }
    335        
    336                 /*
    337                  * If readers_only is true, we wake all leading readers
    338                  * if and only if rwl is locked by another reader.
    339                  * Assumption: readers_only ==> rwl->readers_in
    340                  */
    341                 if (readers_only && (type != RWLOCK_READER))
    342                         break;
    343 
    344 
    345                 if (type == RWLOCK_READER) {
    346                         /*
    347                          * Waking up a reader.
    348                          * We are responsible for incrementing rwl->readers_in
    349                          * for it.
    350                          */
    351                          rwl->readers_in++;
    352                 }
    353 
    354                 /*
    355                  * Only the last iteration through this loop can increment
    356                  * rwl->exclusive.sem.wq.missed_wakeup's. All preceeding
    357                  * iterations will wake up a thread.
    358                  */
    359                 /* We call the internal version of waitq_wakeup, which
    360                  * relies on the fact that the waitq is already locked.
    361                  */
    362                 _waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);
    363                
    364                 t = NULL;
    365                 if (!list_empty(&rwl->exclusive.sem.wq.head)) {
    366                         t = list_get_instance(rwl->exclusive.sem.wq.head.next,
    367                             thread_t, wq_link);
    368                         if (t) {
    369                                 spinlock_lock(&t->lock);
    370                                 if (t->rwlock_holder_type != RWLOCK_READER)
    371                                         one_more = false;
    372                                 spinlock_unlock(&t->lock);     
    373                         }
    374                 }
    375         } while ((type == RWLOCK_READER) && t && one_more);
    376 
    377         spinlock_unlock(&rwl->exclusive.sem.wq.lock);
    378 }
    379 
    380 /** Release spinlock callback
    381  *
    382  * This is a callback function invoked from the scheduler.
    383  * The callback is registered in _rwlock_read_lock_timeout().
    384  *
    385  * @param arg Spinlock.
    386  */
    387 void release_spinlock(void *arg)
    388 {
    389         spinlock_unlock((spinlock_t *) arg);
     395       
     396        irq_spinlock_unlock(&rwl->lock, true);
    390397}
    391398
  • kernel/generic/src/synch/semaphore.c

    r666f492 rda1bafb  
    3333/**
    3434 * @file
    35  * @brief       Semaphores.
     35 * @brief Semaphores.
    3636 */
    3737
     
    4747 * Initialize semaphore.
    4848 *
    49  * @param s Semaphore.
     49 * @param sem Semaphore.
    5050 * @param val Maximal number of threads allowed to enter critical section.
     51 *
    5152 */
    52 void semaphore_initialize(semaphore_t *s, int val)
     53void semaphore_initialize(semaphore_t *sem, int val)
    5354{
    54         ipl_t ipl;
     55        waitq_initialize(&sem->wq);
    5556       
    56         waitq_initialize(&s->wq);
    57        
    58         ipl = interrupts_disable();
    59 
    60         spinlock_lock(&s->wq.lock);
    61         s->wq.missed_wakeups = val;
    62         spinlock_unlock(&s->wq.lock);
    63 
    64         interrupts_restore(ipl);
     57        irq_spinlock_lock(&sem->wq.lock, true);
     58        sem->wq.missed_wakeups = val;
     59        irq_spinlock_unlock(&sem->wq.lock, true);
    6560}
    6661
     
    7065 * Conditional mode and mode with timeout can be requested.
    7166 *
    72  * @param s Semaphore.
    73  * @param usec Timeout in microseconds.
     67 * @param sem  Semaphore.
     68 * @param usec  Timeout in microseconds.
    7469 * @param flags Select mode of operation.
    7570 *
     
    7873 *
    7974 * @return See comment for waitq_sleep_timeout().
     75 *
    8076 */
    81 int _semaphore_down_timeout(semaphore_t *s, uint32_t usec, int flags)
     77int _semaphore_down_timeout(semaphore_t *sem, uint32_t usec, unsigned int flags)
    8278{
    83         return waitq_sleep_timeout(&s->wq, usec, flags);
     79        return waitq_sleep_timeout(&sem->wq, usec, flags);
    8480}
    8581
     
    8985 *
    9086 * @param s Semaphore.
     87 *
    9188 */
    92 void semaphore_up(semaphore_t *s)
     89void semaphore_up(semaphore_t *sem)
    9390{
    94         waitq_wakeup(&s->wq, WAKEUP_FIRST);
     91        waitq_wakeup(&sem->wq, WAKEUP_FIRST);
    9592}
    9693
  • kernel/generic/src/synch/waitq.c

    r666f492 rda1bafb  
    3333/**
    3434 * @file
    35  * @brief       Wait queue.
     35 * @brief Wait queue.
    3636 *
    3737 * Wait queue is the basic synchronization primitive upon which all
     
    4141 * fashion. Conditional operation as well as timeouts and interruptions
    4242 * are supported.
     43 *
    4344 */
    4445
     
    5657#include <arch/cycle.h>
    5758
    58 static void waitq_sleep_timed_out(void *data);
     59static void waitq_sleep_timed_out(void *);
    5960
    6061/** Initialize wait queue
     
    6263 * Initialize wait queue.
    6364 *
    64  * @param wq            Pointer to wait queue to be initialized.
     65 * @param wq Pointer to wait queue to be initialized.
     66 *
    6567 */
    6668void waitq_initialize(waitq_t *wq)
    6769{
    68         spinlock_initialize(&wq->lock, "waitq_lock");
     70        irq_spinlock_initialize(&wq->lock, "wq.lock");
    6971        list_initialize(&wq->head);
    7072        wq->missed_wakeups = 0;
     
    8183 * timeout at all.
    8284 *
    83  * @param data          Pointer to the thread that called waitq_sleep_timeout().
     85 * @param data Pointer to the thread that called waitq_sleep_timeout().
     86 *
    8487 */
    8588void waitq_sleep_timed_out(void *data)
    8689{
    87         thread_t *t = (thread_t *) data;
    88         waitq_t *wq;
     90        thread_t *thread = (thread_t *) data;
    8991        bool do_wakeup = false;
    9092        DEADLOCK_PROBE_INIT(p_wqlock);
    91 
    92         spinlock_lock(&threads_lock);
    93         if (!thread_exists(t))
     93       
     94        irq_spinlock_lock(&threads_lock, false);
     95        if (!thread_exists(thread))
    9496                goto out;
    95 
     97       
    9698grab_locks:
    97         spinlock_lock(&t->lock);
    98         if ((wq = t->sleep_queue)) {            /* assignment */
    99                 if (!spinlock_trylock(&wq->lock)) {
    100                         spinlock_unlock(&t->lock);
     99        irq_spinlock_lock(&thread->lock, false);
     100       
     101        waitq_t *wq;
     102        if ((wq = thread->sleep_queue)) {  /* Assignment */
     103                if (!irq_spinlock_trylock(&wq->lock)) {
     104                        irq_spinlock_unlock(&thread->lock, false);
    101105                        DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
    102                         goto grab_locks;        /* avoid deadlock */
    103                 }
    104 
    105                 list_remove(&t->wq_link);
    106                 t->saved_context = t->sleep_timeout_context;
     106                        /* Avoid deadlock */
     107                        goto grab_locks;
     108                }
     109               
     110                list_remove(&thread->wq_link);
     111                thread->saved_context = thread->sleep_timeout_context;
    107112                do_wakeup = true;
    108                 t->sleep_queue = NULL;
    109                 spinlock_unlock(&wq->lock);
    110         }
    111        
    112         t->timeout_pending = false;
    113         spinlock_unlock(&t->lock);
     113                thread->sleep_queue = NULL;
     114                irq_spinlock_unlock(&wq->lock, false);
     115        }
     116       
     117        thread->timeout_pending = false;
     118        irq_spinlock_unlock(&thread->lock, false);
    114119       
    115120        if (do_wakeup)
    116                 thread_ready(t);
    117 
     121                thread_ready(thread);
     122       
    118123out:
    119         spinlock_unlock(&threads_lock);
     124        irq_spinlock_unlock(&threads_lock, false);
    120125}
    121126
     
    125130 * If the thread is not found sleeping, no action is taken.
    126131 *
    127  * @param t             Thread to be interrupted.
    128  */
    129 void waitq_interrupt_sleep(thread_t *t)
    130 {
     132 * @param thread Thread to be interrupted.
     133 *
     134 */
     135void waitq_interrupt_sleep(thread_t *thread)
     136{
     137        bool do_wakeup = false;
     138        DEADLOCK_PROBE_INIT(p_wqlock);
     139       
     140        irq_spinlock_lock(&threads_lock, true);
     141        if (!thread_exists(thread))
     142                goto out;
     143       
     144grab_locks:
     145        irq_spinlock_lock(&thread->lock, false);
     146       
    131147        waitq_t *wq;
    132         bool do_wakeup = false;
    133         ipl_t ipl;
    134         DEADLOCK_PROBE_INIT(p_wqlock);
    135 
    136         ipl = interrupts_disable();
    137         spinlock_lock(&threads_lock);
    138         if (!thread_exists(t))
    139                 goto out;
    140 
    141 grab_locks:
    142         spinlock_lock(&t->lock);
    143         if ((wq = t->sleep_queue)) {            /* assignment */
    144                 if (!(t->sleep_interruptible)) {
     148        if ((wq = thread->sleep_queue)) {  /* Assignment */
     149                if (!(thread->sleep_interruptible)) {
    145150                        /*
    146151                         * The sleep cannot be interrupted.
     152                         *
    147153                         */
    148                         spinlock_unlock(&t->lock);
     154                        irq_spinlock_unlock(&thread->lock, false);
    149155                        goto out;
    150156                }
    151                        
    152                 if (!spinlock_trylock(&wq->lock)) {
    153                         spinlock_unlock(&t->lock);
     157               
     158                if (!irq_spinlock_trylock(&wq->lock)) {
     159                        irq_spinlock_unlock(&thread->lock, false);
    154160                        DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
    155                         goto grab_locks;        /* avoid deadlock */
    156                 }
    157 
    158                 if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
    159                         t->timeout_pending = false;
    160 
    161                 list_remove(&t->wq_link);
    162                 t->saved_context = t->sleep_interruption_context;
     161                        /* Avoid deadlock */
     162                        goto grab_locks;
     163                }
     164               
     165                if ((thread->timeout_pending) &&
     166                    (timeout_unregister(&thread->sleep_timeout)))
     167                        thread->timeout_pending = false;
     168               
     169                list_remove(&thread->wq_link);
     170                thread->saved_context = thread->sleep_interruption_context;
    163171                do_wakeup = true;
    164                 t->sleep_queue = NULL;
    165                 spinlock_unlock(&wq->lock);
    166         }
    167         spinlock_unlock(&t->lock);
    168 
     172                thread->sleep_queue = NULL;
     173                irq_spinlock_unlock(&wq->lock, false);
     174        }
     175        irq_spinlock_unlock(&thread->lock, false);
     176       
    169177        if (do_wakeup)
    170                 thread_ready(t);
    171 
     178                thread_ready(thread);
     179       
    172180out:
    173         spinlock_unlock(&threads_lock);
    174         interrupts_restore(ipl);
     181        irq_spinlock_unlock(&threads_lock, true);
    175182}
    176183
     
    180187 * is sleeping interruptibly.
    181188 *
    182  * @param wq            Pointer to wait queue.
     189 * @param wq Pointer to wait queue.
     190 *
    183191 */
    184192void waitq_unsleep(waitq_t *wq)
    185193{
    186         ipl_t ipl;
    187 
    188         ipl = interrupts_disable();
    189         spinlock_lock(&wq->lock);
    190 
     194        irq_spinlock_lock(&wq->lock, true);
     195       
    191196        if (!list_empty(&wq->head)) {
    192                 thread_t *t;
    193                
    194                 t = list_get_instance(wq->head.next, thread_t, wq_link);
    195                 spinlock_lock(&t->lock);
    196                 ASSERT(t->sleep_interruptible);
    197                 if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
    198                         t->timeout_pending = false;
    199                 list_remove(&t->wq_link);
    200                 t->saved_context = t->sleep_interruption_context;
    201                 t->sleep_queue = NULL;
    202                 spinlock_unlock(&t->lock);
    203                 thread_ready(t);
    204         }
    205 
    206         spinlock_unlock(&wq->lock);
    207         interrupts_restore(ipl);
     197                thread_t *thread = list_get_instance(wq->head.next, thread_t, wq_link);
     198               
     199                irq_spinlock_lock(&thread->lock, false);
     200               
     201                ASSERT(thread->sleep_interruptible);
     202               
     203                if ((thread->timeout_pending) &&
     204                    (timeout_unregister(&thread->sleep_timeout)))
     205                        thread->timeout_pending = false;
     206               
     207                list_remove(&thread->wq_link);
     208                thread->saved_context = thread->sleep_interruption_context;
     209                thread->sleep_queue = NULL;
     210               
     211                irq_spinlock_unlock(&thread->lock, false);
     212                thread_ready(thread);
     213        }
     214       
     215        irq_spinlock_unlock(&wq->lock, true);
    208216}
    209217
     
    221229 * and all the *_timeout() functions use it.
    222230 *
    223  * @param wq            Pointer to wait queue.
    224  * @param usec          Timeout in microseconds.
    225  * @param flags         Specify mode of the sleep.
     231 * @param wq    Pointer to wait queue.
     232 * @param usec  Timeout in microseconds.
     233 * @param flags Specify mode of the sleep.
    226234 *
    227235 * The sleep can be interrupted only if the
    228236 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
    229  * 
     237 *
    230238 * If usec is greater than zero, regardless of the value of the
    231239 * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either
    232  * timeout, interruption or wakeup comes. 
     240 * timeout, interruption or wakeup comes.
    233241 *
    234242 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags,
     
    238246 * call will immediately return, reporting either success or failure.
    239247 *
    240  * @return              Returns one of ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT,
    241  *                      ESYNCH_INTERRUPTED, ESYNCH_OK_ATOMIC and
    242  *                      ESYNCH_OK_BLOCKED.
    243  *
    244  * @li  ESYNCH_WOULD_BLOCK means that the sleep failed because at the time of
    245  *      the call there was no pending wakeup.
    246  *
    247  * @li  ESYNCH_TIMEOUT means that the sleep timed out.
    248  *
    249  * @li  ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread.
    250  *
    251  * @li  ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was
    252  *      a pending wakeup at the time of the call. The caller was not put
    253  *      asleep at all.
    254  *
    255  * @li  ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was
    256  *      attempted.
    257  */
    258 int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, int flags)
    259 {
    260         ipl_t ipl;
    261         int rc;
    262 
     248 * @return ESYNCH_WOULD_BLOCK, meaning that the sleep failed because at the
     249 *         time of the call there was no pending wakeup
     250 * @return ESYNCH_TIMEOUT, meaning that the sleep timed out.
     251 * @return ESYNCH_INTERRUPTED, meaning that somebody interrupted the sleeping
     252 *         thread.
     253 * @return ESYNCH_OK_ATOMIC, meaning that the sleep succeeded and that there
     254 *         was a pending wakeup at the time of the call. The caller was not put
     255 *         asleep at all.
     256 * @return ESYNCH_OK_BLOCKED, meaning that the sleep succeeded; the full sleep
     257 *         was attempted.
     258 *
     259 */
     260int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags)
     261{
    263262        ASSERT((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));
    264263       
    265         ipl = waitq_sleep_prepare(wq);
    266         rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
     264        ipl_t ipl = waitq_sleep_prepare(wq);
     265        int rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
    267266        waitq_sleep_finish(wq, rc, ipl);
    268267        return rc;
     
    274273 * and interrupts disabled.
    275274 *
    276  * @param wq            Wait queue.
    277  *
    278  * @return              Interrupt level as it existed on entry to this function.
     275 * @param wq Wait queue.
     276 *
     277 * @return Interrupt level as it existed on entry to this function.
     278 *
    279279 */
    280280ipl_t waitq_sleep_prepare(waitq_t *wq)
     
    284284restart:
    285285        ipl = interrupts_disable();
    286 
    287         if (THREAD) {   /* needed during system initiailzation */
     286       
     287        if (THREAD) {  /* Needed during system initiailzation */
    288288                /*
    289289                 * Busy waiting for a delayed timeout.
     
    292292                 * Simply, the thread is not allowed to go to sleep if
    293293                 * there are timeouts in progress.
     294                 *
    294295                 */
    295                 spinlock_lock(&THREAD->lock);
     296                irq_spinlock_lock(&THREAD->lock, false);
     297               
    296298                if (THREAD->timeout_pending) {
    297                         spinlock_unlock(&THREAD->lock);
     299                        irq_spinlock_unlock(&THREAD->lock, false);
    298300                        interrupts_restore(ipl);
    299301                        goto restart;
    300302                }
    301                 spinlock_unlock(&THREAD->lock);
    302         }
    303                                                                                                        
    304         spinlock_lock(&wq->lock);
     303               
     304                irq_spinlock_unlock(&THREAD->lock, false);
     305        }
     306       
     307        irq_spinlock_lock(&wq->lock, false);
    305308        return ipl;
    306309}
     
    312315 * lock is released.
    313316 *
    314  * @param wq            Wait queue.
    315  * @param rc            Return code of waitq_sleep_timeout_unsafe().
    316  * @param ipl           Interrupt level returned by waitq_sleep_prepare().
     317 * @param wq  Wait queue.
     318 * @param rc  Return code of waitq_sleep_timeout_unsafe().
     319 * @param ipl Interrupt level returned by waitq_sleep_prepare().
     320 *
    317321 */
    318322void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
     
    321325        case ESYNCH_WOULD_BLOCK:
    322326        case ESYNCH_OK_ATOMIC:
    323                 spinlock_unlock(&wq->lock);
     327                irq_spinlock_unlock(&wq->lock, false);
    324328                break;
    325329        default:
    326330                break;
    327331        }
     332       
    328333        interrupts_restore(ipl);
    329334}
     
    335340 * and followed by a call to waitq_sleep_finish().
    336341 *
    337  * @param wq            See waitq_sleep_timeout().
    338  * @param usec          See waitq_sleep_timeout().
    339  * @param flags         See waitq_sleep_timeout().
    340  *
    341  * @return              See waitq_sleep_timeout().
    342  */
    343 int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, int flags)
    344 {
    345         /* checks whether to go to sleep at all */
     342 * @param wq    See waitq_sleep_timeout().
     343 * @param usec  See waitq_sleep_timeout().
     344 * @param flags See waitq_sleep_timeout().
     345 *
     346 * @return See waitq_sleep_timeout().
     347 *
     348 */
     349int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags)
     350{
     351        /* Checks whether to go to sleep at all */
    346352        if (wq->missed_wakeups) {
    347353                wq->missed_wakeups--;
    348354                return ESYNCH_OK_ATOMIC;
    349         }
    350         else {
     355        } else {
    351356                if (PARAM_NON_BLOCKING(flags, usec)) {
    352                         /* return immediatelly instead of going to sleep */
     357                        /* Return immediatelly instead of going to sleep */
    353358                        return ESYNCH_WOULD_BLOCK;
    354359                }
     
    357362        /*
    358363         * Now we are firmly decided to go to sleep.
     364         *
    359365         */
    360         spinlock_lock(&THREAD->lock);
    361 
     366        irq_spinlock_lock(&THREAD->lock, false);
     367       
    362368        if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {
    363 
    364369                /*
    365370                 * If the thread was already interrupted,
    366371                 * don't go to sleep at all.
     372                 *
    367373                 */
    368374                if (THREAD->interrupted) {
    369                         spinlock_unlock(&THREAD->lock);
    370                         spinlock_unlock(&wq->lock);
     375                        irq_spinlock_unlock(&THREAD->lock, false);
     376                        irq_spinlock_unlock(&wq->lock, false);
    371377                        return ESYNCH_INTERRUPTED;
    372378                }
    373 
     379               
    374380                /*
    375381                 * Set context that will be restored if the sleep
    376382                 * of this thread is ever interrupted.
     383                 *
    377384                 */
    378385                THREAD->sleep_interruptible = true;
     
    380387                        /* Short emulation of scheduler() return code. */
    381388                        THREAD->last_cycle = get_cycle();
    382                         spinlock_unlock(&THREAD->lock);
     389                        irq_spinlock_unlock(&THREAD->lock, false);
    383390                        return ESYNCH_INTERRUPTED;
    384391                }
    385 
    386         } else {
     392        } else
    387393                THREAD->sleep_interruptible = false;
    388         }
    389 
     394       
    390395        if (usec) {
    391396                /* We use the timeout variant. */
     
    393398                        /* Short emulation of scheduler() return code. */
    394399                        THREAD->last_cycle = get_cycle();
    395                         spinlock_unlock(&THREAD->lock);
     400                        irq_spinlock_unlock(&THREAD->lock, false);
    396401                        return ESYNCH_TIMEOUT;
    397402                }
     403               
    398404                THREAD->timeout_pending = true;
    399405                timeout_register(&THREAD->sleep_timeout, (uint64_t) usec,
    400406                    waitq_sleep_timed_out, THREAD);
    401407        }
    402 
     408       
    403409        list_append(&THREAD->wq_link, &wq->head);
    404 
     410       
    405411        /*
    406412         * Suspend execution.
     413         *
    407414         */
    408415        THREAD->state = Sleeping;
    409416        THREAD->sleep_queue = wq;
    410 
    411         spinlock_unlock(&THREAD->lock);
    412 
     417       
     418        irq_spinlock_unlock(&THREAD->lock, false);
     419       
    413420        /* wq->lock is released in scheduler_separated_stack() */
    414         scheduler(); 
     421        scheduler();
    415422       
    416423        return ESYNCH_OK_BLOCKED;
    417424}
    418 
    419425
    420426/** Wake up first thread sleeping in a wait queue
     
    426432 * timeout.
    427433 *
    428  * @param wq            Pointer to wait queue.
    429  * @param mode          Wakeup mode.
     434 * @param wq   Pointer to wait queue.
     435 * @param mode Wakeup mode.
     436 *
    430437 */
    431438void waitq_wakeup(waitq_t *wq, wakeup_mode_t mode)
    432439{
    433         ipl_t ipl;
    434 
    435         ipl = interrupts_disable();
    436         spinlock_lock(&wq->lock);
    437 
     440        irq_spinlock_lock(&wq->lock, true);
    438441        _waitq_wakeup_unsafe(wq, mode);
    439 
    440         spinlock_unlock(&wq->lock);
    441         interrupts_restore(ipl);
     442        irq_spinlock_unlock(&wq->lock, true);
    442443}
    443444
     
    447448 * assumes wq->lock is already locked and interrupts are already disabled.
    448449 *
    449  * @param wq            Pointer to wait queue.
    450  * @param mode          If mode is WAKEUP_FIRST, then the longest waiting
    451  *                      thread, if any, is woken up. If mode is WAKEUP_ALL, then
    452  *                      all waiting threads, if any, are woken up. If there are
    453  *                      no waiting threads to be woken up, the missed wakeup is
    454  *                      recorded in the wait queue.
     450 * @param wq   Pointer to wait queue.
     451 * @param mode If mode is WAKEUP_FIRST, then the longest waiting
     452 *             thread, if any, is woken up. If mode is WAKEUP_ALL, then
     453 *             all waiting threads, if any, are woken up. If there are
     454 *             no waiting threads to be woken up, the missed wakeup is
     455 *             recorded in the wait queue.
     456 *
    455457 */
    456458void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode)
    457459{
    458         thread_t *t;
    459460        size_t count = 0;
    460 
    461 loop:   
     461       
     462loop:
    462463        if (list_empty(&wq->head)) {
    463464                wq->missed_wakeups++;
    464                 if (count && mode == WAKEUP_ALL)
     465                if ((count) && (mode == WAKEUP_ALL))
    465466                        wq->missed_wakeups--;
     467               
    466468                return;
    467469        }
    468 
     470       
    469471        count++;
    470         t = list_get_instance(wq->head.next, thread_t, wq_link);
     472        thread_t *thread = list_get_instance(wq->head.next, thread_t, wq_link);
    471473       
    472474        /*
     
    480482         * invariant must hold:
    481483         *
    482          * t->sleep_queue != NULL <=> t sleeps in a wait queue
     484         * thread->sleep_queue != NULL <=> thread sleeps in a wait queue
    483485         *
    484486         * For an observer who locks the thread, the invariant
    485487         * holds only when the lock is held prior to removing
    486488         * it from the wait queue.
     489         *
    487490         */
    488         spinlock_lock(&t->lock);
    489         list_remove(&t->wq_link);
    490        
    491         if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
    492                 t->timeout_pending = false;
    493         t->sleep_queue = NULL;
    494         spinlock_unlock(&t->lock);
    495 
    496         thread_ready(t);
    497 
     491        irq_spinlock_lock(&thread->lock, false);
     492        list_remove(&thread->wq_link);
     493       
     494        if ((thread->timeout_pending) &&
     495            (timeout_unregister(&thread->sleep_timeout)))
     496                thread->timeout_pending = false;
     497       
     498        thread->sleep_queue = NULL;
     499        irq_spinlock_unlock(&thread->lock, false);
     500       
     501        thread_ready(thread);
     502       
    498503        if (mode == WAKEUP_ALL)
    499504                goto loop;