Changeset 46c20c8 in mainline for kernel/generic/src/synch


Timestamp: 2010-11-26T20:08:10Z
Author: Jiri Svoboda <jiri@…>
Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children: 45df59a
Parents: fb150d78 (diff), ffdd2b9 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
Message: Merge mainline changes.

Location: kernel/generic/src/synch
Files: 1 deleted, 6 edited

Legend: unchanged context lines are prefixed with a space, removed lines with -, added lines with +.
  • kernel/generic/src/synch/futex.c (rfb150d78 → r46c20c8)

     
     #include <synch/futex.h>
    -#include <synch/rwlock.h>
    +#include <synch/mutex.h>
     #include <synch/spinlock.h>
     #include <synch/synch.h>
    …
     
     /**
    - * Read-write lock protecting global futex hash table.
    + * Mutex protecting global futex hash table.
      * It is also used to serialize access to all futex_t structures.
      * Must be acquired before the task futex B+tree lock.
      */
    -static rwlock_t futex_ht_lock;
    +static mutex_t futex_ht_lock;
     
     /** Futex hash table. */
    …
     void futex_init(void)
     {
    -        rwlock_initialize(&futex_ht_lock);
    +        mutex_initialize(&futex_ht_lock, MUTEX_PASSIVE);
             hash_table_create(&futex_ht, FUTEX_HT_SIZE, 1, &futex_ht_ops);
     }
    …
             uintptr_t paddr;
             pte_t *t;
    -        ipl_t ipl;
             int rc;
     
    -        ipl = interrupts_disable();
    -
             /*
              * Find physical address of futex counter.
    …
             if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) {
                     page_table_unlock(AS, true);
    -                interrupts_restore(ipl);
                     return (unative_t) ENOENT;
             }
    …
             page_table_unlock(AS, true);
     
    -        interrupts_restore(ipl);
    -
             futex = futex_find(paddr);
     
    …
             uintptr_t paddr;
             pte_t *t;
    -        ipl_t ipl;
    -
    -        ipl = interrupts_disable();
     
             /*
    …
             if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) {
                     page_table_unlock(AS, true);
    -                interrupts_restore(ipl);
                     return (unative_t) ENOENT;
             }
    …
             page_table_unlock(AS, true);
     
    -        interrupts_restore(ipl);
    -
             futex = futex_find(paddr);
     
    …
              * or allocate new one if it does not exist already.
              */
    -        rwlock_read_lock(&futex_ht_lock);
    +        mutex_lock(&futex_ht_lock);
             item = hash_table_find(&futex_ht, &paddr);
             if (item) {
    …
                             /*
                              * The futex is new to the current task.
    -                         * However, we only have read access.
    -                         * Gain write access and try again.
    +                         * Upgrade its reference count and put it to the
    +                         * current task's B+tree of known futexes.
                              */
    -                        mutex_unlock(&TASK->futexes_lock);
    -                        goto gain_write_access;
    +                        futex->refcount++;
    +                        btree_insert(&TASK->futexes, paddr, futex, leaf);
                     }
                     mutex_unlock(&TASK->futexes_lock);
    -
    -                rwlock_read_unlock(&futex_ht_lock);
             } else {
    -gain_write_access:
    +                futex = (futex_t *) malloc(sizeof(futex_t), 0);
    +                futex_initialize(futex);
    +                futex->paddr = paddr;
    +                hash_table_insert(&futex_ht, &paddr, &futex->ht_link);
    +
                     /*
    -                 * Upgrade to writer is not currently supported,
    -                 * therefore, it is necessary to release the read lock
    -                 * and reacquire it as a writer.
    +                 * This is the first task referencing the futex.
    +                 * It can be directly inserted into its
    +                 * B+tree of known futexes.
                      */
    -                rwlock_read_unlock(&futex_ht_lock);
    -
    -                rwlock_write_lock(&futex_ht_lock);
    -                /*
    -                 * Avoid possible race condition by searching
    -                 * the hash table once again with write access.
    -                 */
    -                item = hash_table_find(&futex_ht, &paddr);
    -                if (item) {
    -                        futex = hash_table_get_instance(item, futex_t, ht_link);
    -
    -                        /*
    -                         * See if this futex is known to the current task.
    -                         */
    -                        mutex_lock(&TASK->futexes_lock);
    -                        if (!btree_search(&TASK->futexes, paddr, &leaf)) {
    -                                /*
    -                                 * The futex is new to the current task.
    -                                 * Upgrade its reference count and put it to the
    -                                 * current task's B+tree of known futexes.
    -                                 */
    -                                futex->refcount++;
    -                                btree_insert(&TASK->futexes, paddr, futex,
    -                                    leaf);
    -                        }
    -                        mutex_unlock(&TASK->futexes_lock);
    -
    -                        rwlock_write_unlock(&futex_ht_lock);
    -                } else {
    -                        futex = (futex_t *) malloc(sizeof(futex_t), 0);
    -                        futex_initialize(futex);
    -                        futex->paddr = paddr;
    -                        hash_table_insert(&futex_ht, &paddr, &futex->ht_link);
    -
    -                        /*
    -                         * This is the first task referencing the futex.
    -                         * It can be directly inserted into its
    -                         * B+tree of known futexes.
    -                         */
    -                        mutex_lock(&TASK->futexes_lock);
    -                        btree_insert(&TASK->futexes, paddr, futex, NULL);
    -                        mutex_unlock(&TASK->futexes_lock);
    -
    -                        rwlock_write_unlock(&futex_ht_lock);
    -                }
    +                mutex_lock(&TASK->futexes_lock);
    +                btree_insert(&TASK->futexes, paddr, futex, NULL);
    +                mutex_unlock(&TASK->futexes_lock);
    +
             }
    +        mutex_unlock(&futex_ht_lock);
     
             return futex;
    …
             link_t *cur;
     
    -        rwlock_write_lock(&futex_ht_lock);
    +        mutex_lock(&futex_ht_lock);
             mutex_lock(&TASK->futexes_lock);
     
    …
     
             mutex_unlock(&TASK->futexes_lock);
    -        rwlock_write_unlock(&futex_ht_lock);
    +        mutex_unlock(&futex_ht_lock);
     }
     
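With the global rwlock replaced by a single passive mutex, futex_find() no longer needs the read-to-write upgrade path visible on the removed side above: it does one lookup-or-insert while holding futex_ht_lock, taking the task's futexes_lock nested inside it. A minimal sketch of that lock ordering (not part of the changeset; identifiers follow the post-merge futex.c and a kernel task context is assumed):

    /* Sketch only: the lock ordering the new futex_find() follows. */
    mutex_lock(&futex_ht_lock);              /* global futex hash table lock first ... */
    mutex_lock(&TASK->futexes_lock);         /* ... then the task's B+tree lock, nested inside */
    /* ... look up the futex_t for paddr, or allocate and insert a new one ... */
    mutex_unlock(&TASK->futexes_lock);
    mutex_unlock(&futex_ht_lock);
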
  • kernel/generic/src/synch/mutex.c (rfb150d78 → r46c20c8)

     /**
      * @file
    - * @brief       Mutexes.
    + * @brief Mutexes.
      */
    -
    +
     #include <synch/mutex.h>
     #include <synch/semaphore.h>
     #include <synch/synch.h>
     #include <debug.h>
    +#include <arch.h>
     
     /** Initialize mutex.
      *
    - * @param mtx           Mutex.
    - * @param type          Type of the mutex.
    + * @param mtx  Mutex.
    + * @param type Type of the mutex.
      */
     void mutex_initialize(mutex_t *mtx, mutex_type_t type)
    …
     }
     
    +/** Find out whether the mutex is currently locked.
    + *
    + * @param mtx           Mutex.
    + * @return              True if the mutex is locked, false otherwise.
    + */
    +bool mutex_locked(mutex_t *mtx)
    +{
    +        return semaphore_count_get(&mtx->sem) <= 0;
    +}
    +
     /** Acquire mutex.
      *
      * Timeout mode and non-blocking mode can be requested.
      *
    - * @param mtx           Mutex.
    - * @param usec          Timeout in microseconds.
    - * @param flags         Specify mode of operation.
    + * @param mtx   Mutex.
    + * @param usec  Timeout in microseconds.
    + * @param flags Specify mode of operation.
      *
      * For exact description of possible combinations of
      * usec and flags, see comment for waitq_sleep_timeout().
      *
    - * @return              See comment for waitq_sleep_timeout().
    + * @return See comment for waitq_sleep_timeout().
    + *
      */
    -int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, int flags)
    +int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, unsigned int flags)
     {
             int rc;
     
    -        if (mtx->type == MUTEX_PASSIVE) {
    +        if ((mtx->type == MUTEX_PASSIVE) && (THREAD)) {
                     rc = _semaphore_down_timeout(&mtx->sem, usec, flags);
             } else {
    -                ASSERT(mtx->type == MUTEX_ACTIVE);
    +                ASSERT((mtx->type == MUTEX_ACTIVE) || (!THREAD));
                     ASSERT(usec == SYNCH_NO_TIMEOUT);
                     ASSERT(!(flags & SYNCH_FLAGS_INTERRUPTIBLE));
    +
                     do {
                             rc = semaphore_trydown(&mtx->sem);
    …
     /** Release mutex.
      *
    - * @param mtx           Mutex.
    + * @param mtx Mutex.
      */
     void mutex_unlock(mutex_t *mtx)
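The merge adds a mutex_locked() predicate and makes a passive mutex fall back to active spinning when there is no current thread (THREAD is NULL, i.e. during early initialization). A small usage sketch (not part of the changeset; it assumes the post-merge <synch/mutex.h> and <debug.h>, and the lock name is illustrative):

    static mutex_t example_lock;            /* hypothetical lock, for illustration */

    static void example_init(void)
    {
            mutex_initialize(&example_lock, MUTEX_PASSIVE);
    }

    static void example_critical(void)
    {
            mutex_lock(&example_lock);
            ASSERT(mutex_locked(&example_lock));    /* new predicate from this merge */
            /* ... critical section ... */
            mutex_unlock(&example_lock);
    }
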
  • kernel/generic/src/synch/semaphore.c (rfb150d78 → r46c20c8)

     /**
      * @file
    - * @brief       Semaphores.
    + * @brief Semaphores.
      */
     
    …
      * Initialize semaphore.
      *
    - * @param s Semaphore.
    + * @param sem Semaphore.
      * @param val Maximal number of threads allowed to enter critical section.
    + *
      */
    -void semaphore_initialize(semaphore_t *s, int val)
    +void semaphore_initialize(semaphore_t *sem, int val)
     {
    -        ipl_t ipl;
    -
    -        waitq_initialize(&s->wq);
    -
    -        ipl = interrupts_disable();
    -
    -        spinlock_lock(&s->wq.lock);
    -        s->wq.missed_wakeups = val;
    -        spinlock_unlock(&s->wq.lock);
    -
    -        interrupts_restore(ipl);
    +        waitq_initialize(&sem->wq);
    +        waitq_count_set(&sem->wq, val);
     }
     
    …
      * Conditional mode and mode with timeout can be requested.
      *
    - * @param s Semaphore.
    - * @param usec Timeout in microseconds.
    + * @param sem  Semaphore.
    + * @param usec  Timeout in microseconds.
      * @param flags Select mode of operation.
      *
    …
      *
      * @return See comment for waitq_sleep_timeout().
    + *
      */
    -int _semaphore_down_timeout(semaphore_t *s, uint32_t usec, int flags)
    +int _semaphore_down_timeout(semaphore_t *sem, uint32_t usec, unsigned int flags)
     {
    -        return waitq_sleep_timeout(&s->wq, usec, flags);
    +        return waitq_sleep_timeout(&sem->wq, usec, flags);
     }
     
    …
      *
      * @param s Semaphore.
    + *
      */
    -void semaphore_up(semaphore_t *s)
    +void semaphore_up(semaphore_t *sem)
     {
    -        waitq_wakeup(&s->wq, WAKEUP_FIRST);
    +        waitq_wakeup(&sem->wq, WAKEUP_FIRST);
    +}
    +
    +/** Get the semaphore counter value.
    + *
    + * @param sem           Semaphore.
    + * @return              The number of threads that can down the semaphore
    + *                      without blocking.
    + */
    +int semaphore_count_get(semaphore_t *sem)
    +{
    +        return waitq_count_get(&sem->wq);
     }
     
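semaphore_initialize() now seeds the wait queue through waitq_count_set() instead of poking wq.missed_wakeups under the spinlock, and the new semaphore_count_get() exposes the counter (mutex_locked() above is built directly on it). A usage sketch (not part of the changeset; it assumes the post-merge <synch/semaphore.h>, and semaphore_down() is assumed to be the usual blocking wrapper around _semaphore_down_timeout()):

    static semaphore_t slots;

    static void slots_init(void)
    {
            /* Up to four threads may hold a slot at once. */
            semaphore_initialize(&slots, 4);
    }

    static void slot_use(void)
    {
            semaphore_down(&slots);          /* may block until a slot is free */
            /* ... use the resource ... */
            semaphore_up(&slots);
    }

    static int slots_free(void)
    {
            /* Number of downs that would currently succeed without blocking. */
            return semaphore_count_get(&slots);
    }
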
  • kernel/generic/src/synch/smc.c (rfb150d78 → r46c20c8)

     unative_t sys_smc_coherence(uintptr_t va, size_t size)
     {
    -        if (overlaps(va, size, NULL, PAGE_SIZE))
    +        if (overlaps(va, size, (uintptr_t) NULL, PAGE_SIZE))
                     return EINVAL;
     
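The added cast only removes the pointer-vs-integer mismatch; the check itself still rejects any user buffer that touches the first, unmapped page. For reference, the interval test behind it, assuming overlaps() has the usual half-open-interval definition from the kernel's macro header:

    /* Sketch only: what overlaps(s1, sz1, s2, sz2) is assumed to compute. */
    static inline bool ranges_overlap(uintptr_t s1, size_t sz1, uintptr_t s2, size_t sz2)
    {
            return (s1 < s2 + sz2) && (s2 < s1 + sz1);
    }

    /* sys_smc_coherence() therefore returns EINVAL whenever
     * [va, va + size) intersects [0, PAGE_SIZE). */
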
  • kernel/generic/src/synch/spinlock.c (rfb150d78 → r46c20c8)

      *
      */
    -void spinlock_initialize(spinlock_t *lock, char *name)
    +void spinlock_initialize(spinlock_t *lock, const char *name)
     {
             atomic_set(&lock->val, 0);
    …
     
                     if (i++ > DEADLOCK_THRESHOLD) {
    -                        printf("cpu%u: looping on spinlock %" PRIp ":%s, "
    -                            "caller=%" PRIp "(%s)\n", CPU->id, lock, lock->name,
    -                            CALLER, symtab_fmt_name_lookup(CALLER));
    +                        printf("cpu%u: looping on spinlock %p:%s, "
    +                            "caller=%p (%s)\n", CPU->id, lock, lock->name,
    +                            (void *) CALLER, symtab_fmt_name_lookup(CALLER));
     
                             i = 0;
    …
     }
     
    +/** Unlock spinlock
    + *
    + * Unlock spinlock.
    + *
    + * @param sl Pointer to spinlock_t structure.
    + */
    +void spinlock_unlock_debug(spinlock_t *lock)
    +{
    +        ASSERT_SPINLOCK(spinlock_locked(lock), lock);
    +
    +        /*
    +         * Prevent critical section code from bleeding out this way down.
    +         */
    +        CS_LEAVE_BARRIER();
    +
    +        atomic_set(&lock->val, 0);
    +        preemption_enable();
    +}
    +
     #endif
     
     /** Lock spinlock conditionally
      *
    - * Lock spinlock conditionally.
    - * If the spinlock is not available at the moment,
    - * signal failure.
    + * Lock spinlock conditionally. If the spinlock is not available
    + * at the moment, signal failure.
      *
      * @param lock Pointer to spinlock_t structure.
    …
     }
     
    +/** Find out whether the spinlock is currently locked.
    + *
    + * @param lock          Spinlock.
    + * @return              True if the spinlock is locked, false otherwise.
    + */
    +bool spinlock_locked(spinlock_t *lock)
    +{
    +        return atomic_get(&lock->val) != 0;
    +}
    +
     #endif
     
    +/** Initialize interrupts-disabled spinlock
    + *
    + * @param lock IRQ spinlock to be initialized.
    + * @param name IRQ spinlock name.
    + *
    + */
    +void irq_spinlock_initialize(irq_spinlock_t *lock, const char *name)
    +{
    +        spinlock_initialize(&(lock->lock), name);
    +        lock->guard = false;
    +        lock->ipl = 0;
    +}
    +
    +/** Lock interrupts-disabled spinlock
    + *
    + * Lock a spinlock which requires disabled interrupts.
    + *
    + * @param lock    IRQ spinlock to be locked.
    + * @param irq_dis If true, interrupts are actually disabled
    + *                prior locking the spinlock. If false, interrupts
    + *                are expected to be already disabled.
    + *
    + */
    +void irq_spinlock_lock(irq_spinlock_t *lock, bool irq_dis)
    +{
    +        if (irq_dis) {
    +                ipl_t ipl = interrupts_disable();
    +                spinlock_lock(&(lock->lock));
    +
    +                lock->guard = true;
    +                lock->ipl = ipl;
    +        } else {
    +                ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
    +
    +                spinlock_lock(&(lock->lock));
    +                ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
    +        }
    +}
    +
    +/** Unlock interrupts-disabled spinlock
    + *
    + * Unlock a spinlock which requires disabled interrupts.
    + *
    + * @param lock    IRQ spinlock to be unlocked.
    + * @param irq_res If true, interrupts are restored to previously
    + *                saved interrupt level.
    + *
    + */
    +void irq_spinlock_unlock(irq_spinlock_t *lock, bool irq_res)
    +{
    +        ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
    +
    +        if (irq_res) {
    +                ASSERT_IRQ_SPINLOCK(lock->guard, lock);
    +
    +                lock->guard = false;
    +                ipl_t ipl = lock->ipl;
    +
    +                spinlock_unlock(&(lock->lock));
    +                interrupts_restore(ipl);
    +        } else {
    +                ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
    +                spinlock_unlock(&(lock->lock));
    +        }
    +}
    +
    +/** Lock interrupts-disabled spinlock
    + *
    + * Lock an interrupts-disabled spinlock conditionally. If the
    + * spinlock is not available at the moment, signal failure.
    + * Interrupts are expected to be already disabled.
    + *
    + * @param lock IRQ spinlock to be locked conditionally.
    + *
    + * @return Zero on failure, non-zero otherwise.
    + *
    + */
    +int irq_spinlock_trylock(irq_spinlock_t *lock)
    +{
    +        ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
    +        int rc = spinlock_trylock(&(lock->lock));
    +
    +        ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
    +        return rc;
    +}
    +
    +/** Pass lock from one interrupts-disabled spinlock to another
    + *
    + * Pass lock from one IRQ spinlock to another IRQ spinlock
    + * without enabling interrupts during the process.
    + *
    + * The first IRQ spinlock is supposed to be locked.
    + *
    + * @param unlock IRQ spinlock to be unlocked.
    + * @param lock   IRQ spinlock to be locked.
    + *
    + */
    +void irq_spinlock_pass(irq_spinlock_t *unlock, irq_spinlock_t *lock)
    +{
    +        ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);
    +
    +        /* Pass guard from unlock to lock */
    +        bool guard = unlock->guard;
    +        ipl_t ipl = unlock->ipl;
    +        unlock->guard = false;
    +
    +        spinlock_unlock(&(unlock->lock));
    +        spinlock_lock(&(lock->lock));
    +
    +        ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
    +
    +        if (guard) {
    +                lock->guard = true;
    +                lock->ipl = ipl;
    +        }
    +}
    +
    +/** Hand-over-hand locking of interrupts-disabled spinlocks
    + *
    + * Implement hand-over-hand locking between two interrupts-disabled
    + * spinlocks without enabling interrupts during the process.
    + *
    + * The first IRQ spinlock is supposed to be locked.
    + *
    + * @param unlock IRQ spinlock to be unlocked.
    + * @param lock   IRQ spinlock to be locked.
    + *
    + */
    +void irq_spinlock_exchange(irq_spinlock_t *unlock, irq_spinlock_t *lock)
    +{
    +        ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);
    +
    +        spinlock_lock(&(lock->lock));
    +        ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
    +
    +        /* Pass guard from unlock to lock */
    +        if (unlock->guard) {
    +                lock->guard = true;
    +                lock->ipl = unlock->ipl;
    +                unlock->guard = false;
    +        }
    +
    +        spinlock_unlock(&(unlock->lock));
    +}
    +
    +/** Find out whether the IRQ spinlock is currently locked.
    + *
    + * @param lock          IRQ spinlock.
    + * @return              True if the IRQ spinlock is locked, false otherwise.
    + */
    +bool irq_spinlock_locked(irq_spinlock_t *ilock)
    +{
    +        return spinlock_locked(&ilock->lock);
    +}
    +
     /** @}
      */
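This file gains the interrupts-disabled spinlock (irq_spinlock_t) API: initialize, lock/unlock with an explicit irq_dis/irq_res flag, trylock, hand-over-hand pass/exchange, and a locked() predicate. A usage sketch of the lock/unlock pairing (not part of the changeset; it assumes the post-merge <synch/spinlock.h>, and the lock name is illustrative):

    static irq_spinlock_t queue_lock;

    static void queue_init(void)
    {
            irq_spinlock_initialize(&queue_lock, "queue_lock");
    }

    static void queue_touch_from_thread(void)
    {
            /* irq_dis = true: disable interrupts and remember the previous level. */
            irq_spinlock_lock(&queue_lock, true);
            /* ... manipulate data shared with an interrupt handler ... */
            /* irq_res = true: restore the interrupt level saved by the lock call. */
            irq_spinlock_unlock(&queue_lock, true);
    }

    static void queue_touch_from_irq_handler(void)
    {
            /* Interrupts are already disabled here, so pass false both ways. */
            irq_spinlock_lock(&queue_lock, false);
            /* ... */
            irq_spinlock_unlock(&queue_lock, false);
    }
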
  • kernel/generic/src/synch/waitq.c (rfb150d78 → r46c20c8)

     /**
      * @file
    - * @brief       Wait queue.
    + * @brief Wait queue.
      *
      * Wait queue is the basic synchronization primitive upon which all
    …
      * fashion. Conditional operation as well as timeouts and interruptions
      * are supported.
    + *
      */
     
    …
     #include <proc/scheduler.h>
     #include <arch/asm.h>
    -#include <arch/types.h>
    +#include <typedefs.h>
     #include <time/timeout.h>
     #include <arch.h>
     #include <context.h>
     #include <adt/list.h>
    -
    -static void waitq_sleep_timed_out(void *data);
    +#include <arch/cycle.h>
    +
    +static void waitq_sleep_timed_out(void *);
     
     /** Initialize wait queue
    …
      * Initialize wait queue.
      *
    - * @param wq            Pointer to wait queue to be initialized.
    + * @param wq Pointer to wait queue to be initialized.
    + *
      */
     void waitq_initialize(waitq_t *wq)
     {
    -        spinlock_initialize(&wq->lock, "waitq_lock");
    +        irq_spinlock_initialize(&wq->lock, "wq.lock");
             list_initialize(&wq->head);
             wq->missed_wakeups = 0;
    …
      * timeout at all.
      *
    - * @param data          Pointer to the thread that called waitq_sleep_timeout().
    + * @param data Pointer to the thread that called waitq_sleep_timeout().
    + *
      */
     void waitq_sleep_timed_out(void *data)
     {
    -        thread_t *t = (thread_t *) data;
    -        waitq_t *wq;
    +        thread_t *thread = (thread_t *) data;
             bool do_wakeup = false;
             DEADLOCK_PROBE_INIT(p_wqlock);
    -
    -        spinlock_lock(&threads_lock);
    -        if (!thread_exists(t))
    +
    +        irq_spinlock_lock(&threads_lock, false);
    +        if (!thread_exists(thread))
                     goto out;
    -
    +
     grab_locks:
    -        spinlock_lock(&t->lock);
    -        if ((wq = t->sleep_queue)) {            /* assignment */
    -                if (!spinlock_trylock(&wq->lock)) {
    -                        spinlock_unlock(&t->lock);
    +        irq_spinlock_lock(&thread->lock, false);
    +
    +        waitq_t *wq;
    +        if ((wq = thread->sleep_queue)) {  /* Assignment */
    +                if (!irq_spinlock_trylock(&wq->lock)) {
    +                        irq_spinlock_unlock(&thread->lock, false);
                             DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
    -                        goto grab_locks;        /* avoid deadlock */
    -                }
    -
    -                list_remove(&t->wq_link);
    -                t->saved_context = t->sleep_timeout_context;
    +                        /* Avoid deadlock */
    +                        goto grab_locks;
    +                }
    +
    +                list_remove(&thread->wq_link);
    +                thread->saved_context = thread->sleep_timeout_context;
                     do_wakeup = true;
    -                t->sleep_queue = NULL;
    -                spinlock_unlock(&wq->lock);
    -        }
    -
    -        t->timeout_pending = false;
    -        spinlock_unlock(&t->lock);
    +                thread->sleep_queue = NULL;
    +                irq_spinlock_unlock(&wq->lock, false);
    +        }
    +
    +        thread->timeout_pending = false;
    +        irq_spinlock_unlock(&thread->lock, false);
     
             if (do_wakeup)
    -                thread_ready(t);
    -
    +                thread_ready(thread);
    +
     out:
    -        spinlock_unlock(&threads_lock);
    +        irq_spinlock_unlock(&threads_lock, false);
     }
     
    …
      * If the thread is not found sleeping, no action is taken.
      *
    - * @param t             Thread to be interrupted.
    - */
    -void waitq_interrupt_sleep(thread_t *t)
    -{
    + * @param thread Thread to be interrupted.
    + *
    + */
    +void waitq_interrupt_sleep(thread_t *thread)
    +{
    +        bool do_wakeup = false;
    +        DEADLOCK_PROBE_INIT(p_wqlock);
    +
    +        irq_spinlock_lock(&threads_lock, true);
    +        if (!thread_exists(thread))
    +                goto out;
    +
    +grab_locks:
    +        irq_spinlock_lock(&thread->lock, false);
    +
             waitq_t *wq;
    -        bool do_wakeup = false;
    -        ipl_t ipl;
    -        DEADLOCK_PROBE_INIT(p_wqlock);
    -
    -        ipl = interrupts_disable();
    -        spinlock_lock(&threads_lock);
    -        if (!thread_exists(t))
    -                goto out;
    -
    -grab_locks:
    -        spinlock_lock(&t->lock);
    -        if ((wq = t->sleep_queue)) {            /* assignment */
    -                if (!(t->sleep_interruptible)) {
    +        if ((wq = thread->sleep_queue)) {  /* Assignment */
    +                if (!(thread->sleep_interruptible)) {
                             /*
                              * The sleep cannot be interrupted.
    +                         *
                              */
    -                        spinlock_unlock(&t->lock);
    +                        irq_spinlock_unlock(&thread->lock, false);
                             goto out;
                     }
    -
    -                if (!spinlock_trylock(&wq->lock)) {
    -                        spinlock_unlock(&t->lock);
    +
    +                if (!irq_spinlock_trylock(&wq->lock)) {
    +                        irq_spinlock_unlock(&thread->lock, false);
                             DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
    -                        goto grab_locks;        /* avoid deadlock */
    -                }
    -
    -                if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
    -                        t->timeout_pending = false;
    -
    -                list_remove(&t->wq_link);
    -                t->saved_context = t->sleep_interruption_context;
    +                        /* Avoid deadlock */
    +                        goto grab_locks;
    +                }
    +
    +                if ((thread->timeout_pending) &&
    +                    (timeout_unregister(&thread->sleep_timeout)))
    +                        thread->timeout_pending = false;
    +
    +                list_remove(&thread->wq_link);
    +                thread->saved_context = thread->sleep_interruption_context;
                     do_wakeup = true;
    -                t->sleep_queue = NULL;
    -                spinlock_unlock(&wq->lock);
    -        }
    -        spinlock_unlock(&t->lock);
    -
    +                thread->sleep_queue = NULL;
    +                irq_spinlock_unlock(&wq->lock, false);
    +        }
    +        irq_spinlock_unlock(&thread->lock, false);
    +
             if (do_wakeup)
    -                thread_ready(t);
    -
    +                thread_ready(thread);
    +
     out:
    -        spinlock_unlock(&threads_lock);
    -        interrupts_restore(ipl);
    +        irq_spinlock_unlock(&threads_lock, true);
     }
     
    …
      * is sleeping interruptibly.
      *
    - * @param wq            Pointer to wait queue.
    + * @param wq Pointer to wait queue.
    + *
      */
     void waitq_unsleep(waitq_t *wq)
     {
    -        ipl_t ipl;
    -
    -        ipl = interrupts_disable();
    -        spinlock_lock(&wq->lock);
    -
    +        irq_spinlock_lock(&wq->lock, true);
    +
             if (!list_empty(&wq->head)) {
    -                thread_t *t;
    -
    -                t = list_get_instance(wq->head.next, thread_t, wq_link);
    -                spinlock_lock(&t->lock);
    -                ASSERT(t->sleep_interruptible);
    -                if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
    -                        t->timeout_pending = false;
    -                list_remove(&t->wq_link);
    -                t->saved_context = t->sleep_interruption_context;
    -                t->sleep_queue = NULL;
    -                spinlock_unlock(&t->lock);
    -                thread_ready(t);
    -        }
    -
    -        spinlock_unlock(&wq->lock);
    -        interrupts_restore(ipl);
    -}
    +                thread_t *thread = list_get_instance(wq->head.next, thread_t, wq_link);
    +
    +                irq_spinlock_lock(&thread->lock, false);
    +
    +                ASSERT(thread->sleep_interruptible);
    +
    +                if ((thread->timeout_pending) &&
    +                    (timeout_unregister(&thread->sleep_timeout)))
    +                        thread->timeout_pending = false;
    +
    +                list_remove(&thread->wq_link);
    +                thread->saved_context = thread->sleep_interruption_context;
    +                thread->sleep_queue = NULL;
    +
    +                irq_spinlock_unlock(&thread->lock, false);
    +                thread_ready(thread);
    +        }
    +
    +        irq_spinlock_unlock(&wq->lock, true);
    +}
    +
    +#define PARAM_NON_BLOCKING(flags, usec) \
    +        (((flags) & SYNCH_FLAGS_NON_BLOCKING) && ((usec) == 0))
     
     /** Sleep until either wakeup, timeout or interruption occurs
    …
      * and all the *_timeout() functions use it.
      *
    - * @param wq            Pointer to wait queue.
    - * @param usec          Timeout in microseconds.
    - * @param flags         Specify mode of the sleep.
    + * @param wq    Pointer to wait queue.
    + * @param usec  Timeout in microseconds.
    + * @param flags Specify mode of the sleep.
      *
      * The sleep can be interrupted only if the
      * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
    - *
    + *
      * If usec is greater than zero, regardless of the value of the
      * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either
    - * timeout, interruption or wakeup comes.
    + * timeout, interruption or wakeup comes.
      *
      * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags,
    …
      * call will immediately return, reporting either success or failure.
      *
    - * @return              Returns one of ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT,
    - *                      ESYNCH_INTERRUPTED, ESYNCH_OK_ATOMIC and
    - *                      ESYNCH_OK_BLOCKED.
    - *
    - * @li  ESYNCH_WOULD_BLOCK means that the sleep failed because at the time of
    - *      the call there was no pending wakeup.
    - *
    - * @li  ESYNCH_TIMEOUT means that the sleep timed out.
    - *
    - * @li  ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread.
    - *
    - * @li  ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was
    - *      a pending wakeup at the time of the call. The caller was not put
    - *      asleep at all.
    - *
    - * @li  ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was
    - *      attempted.
    - */
    -int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, int flags)
    -{
    -        ipl_t ipl;
    -        int rc;
    -
    -        ipl = waitq_sleep_prepare(wq);
    -        rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
    + * @return ESYNCH_WOULD_BLOCK, meaning that the sleep failed because at the
    + *         time of the call there was no pending wakeup
    + * @return ESYNCH_TIMEOUT, meaning that the sleep timed out.
    + * @return ESYNCH_INTERRUPTED, meaning that somebody interrupted the sleeping
    + *         thread.
    + * @return ESYNCH_OK_ATOMIC, meaning that the sleep succeeded and that there
    + *         was a pending wakeup at the time of the call. The caller was not put
    + *         asleep at all.
    + * @return ESYNCH_OK_BLOCKED, meaning that the sleep succeeded; the full sleep
    + *         was attempted.
    + *
    + */
    +int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags)
    +{
    +        ASSERT((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));
    +
    +        ipl_t ipl = waitq_sleep_prepare(wq);
    +        int rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
             waitq_sleep_finish(wq, rc, ipl);
             return rc;
    …
      * and interrupts disabled.
      *
    - * @param wq            Wait queue.
    - *
    - * @return              Interrupt level as it existed on entry to this function.
    + * @param wq Wait queue.
    + *
    + * @return Interrupt level as it existed on entry to this function.
    + *
      */
     ipl_t waitq_sleep_prepare(waitq_t *wq)
    …
     restart:
             ipl = interrupts_disable();
    -
    -        if (THREAD) {   /* needed during system initiailzation */
    +
    +        if (THREAD) {  /* Needed during system initiailzation */
                     /*
                      * Busy waiting for a delayed timeout.
    …
                      * Simply, the thread is not allowed to go to sleep if
                      * there are timeouts in progress.
    +                 *
                      */
    -                spinlock_lock(&THREAD->lock);
    +                irq_spinlock_lock(&THREAD->lock, false);
    +
                     if (THREAD->timeout_pending) {
    -                        spinlock_unlock(&THREAD->lock);
    +                        irq_spinlock_unlock(&THREAD->lock, false);
                             interrupts_restore(ipl);
                             goto restart;
                     }
    -                spinlock_unlock(&THREAD->lock);
    -        }
    -
    -        spinlock_lock(&wq->lock);
    +
    +                irq_spinlock_unlock(&THREAD->lock, false);
    +        }
    +
    +        irq_spinlock_lock(&wq->lock, false);
             return ipl;
     }
    …
      * lock is released.
      *
    - * @param wq            Wait queue.
    - * @param rc            Return code of waitq_sleep_timeout_unsafe().
    - * @param ipl           Interrupt level returned by waitq_sleep_prepare().
    + * @param wq  Wait queue.
    + * @param rc  Return code of waitq_sleep_timeout_unsafe().
    + * @param ipl Interrupt level returned by waitq_sleep_prepare().
    + *
      */
     void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
    …
             case ESYNCH_WOULD_BLOCK:
             case ESYNCH_OK_ATOMIC:
    -                spinlock_unlock(&wq->lock);
    +                irq_spinlock_unlock(&wq->lock, false);
                     break;
             default:
                     break;
             }
    +
             interrupts_restore(ipl);
     }
    …
      * and followed by a call to waitq_sleep_finish().
      *
    - * @param wq            See waitq_sleep_timeout().
    - * @param usec          See waitq_sleep_timeout().
    - * @param flags         See waitq_sleep_timeout().
    - *
    - * @return              See waitq_sleep_timeout().
    - */
    -int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, int flags)
    -{
    -        /* checks whether to go to sleep at all */
    + * @param wq    See waitq_sleep_timeout().
    + * @param usec  See waitq_sleep_timeout().
    + * @param flags See waitq_sleep_timeout().
    + *
    + * @return See waitq_sleep_timeout().
    + *
    + */
    +int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags)
    +{
    +        /* Checks whether to go to sleep at all */
             if (wq->missed_wakeups) {
                     wq->missed_wakeups--;
                     return ESYNCH_OK_ATOMIC;
    -        }
    -        else {
    -                if ((flags & SYNCH_FLAGS_NON_BLOCKING) && (usec == 0)) {
    -                        /* return immediatelly instead of going to sleep */
    +        } else {
    +                if (PARAM_NON_BLOCKING(flags, usec)) {
    +                        /* Return immediatelly instead of going to sleep */
                             return ESYNCH_WOULD_BLOCK;
                     }
    …
             /*
              * Now we are firmly decided to go to sleep.
    +         *
              */
    -        spinlock_lock(&THREAD->lock);
    -
    +        irq_spinlock_lock(&THREAD->lock, false);
    +
             if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {
    -
                     /*
                      * If the thread was already interrupted,
                      * don't go to sleep at all.
    +                 *
                      */
                     if (THREAD->interrupted) {
    -                        spinlock_unlock(&THREAD->lock);
    -                        spinlock_unlock(&wq->lock);
    +                        irq_spinlock_unlock(&THREAD->lock, false);
    +                        irq_spinlock_unlock(&wq->lock, false);
                             return ESYNCH_INTERRUPTED;
                     }
    -
    +
                     /*
                      * Set context that will be restored if the sleep
                      * of this thread is ever interrupted.
    +                 *
                      */
                     THREAD->sleep_interruptible = true;
                     if (!context_save(&THREAD->sleep_interruption_context)) {
                             /* Short emulation of scheduler() return code. */
    -                        spinlock_unlock(&THREAD->lock);
    +                        THREAD->last_cycle = get_cycle();
    +                        irq_spinlock_unlock(&THREAD->lock, false);
                             return ESYNCH_INTERRUPTED;
                     }
    -
    -        } else {
    +        } else
                     THREAD->sleep_interruptible = false;
    -        }
    -
    +
             if (usec) {
                     /* We use the timeout variant. */
                     if (!context_save(&THREAD->sleep_timeout_context)) {
                             /* Short emulation of scheduler() return code. */
    -                        spinlock_unlock(&THREAD->lock);
    +                        THREAD->last_cycle = get_cycle();
    +                        irq_spinlock_unlock(&THREAD->lock, false);
                             return ESYNCH_TIMEOUT;
                     }
    +
                     THREAD->timeout_pending = true;
                     timeout_register(&THREAD->sleep_timeout, (uint64_t) usec,
                         waitq_sleep_timed_out, THREAD);
             }
    -
    +
             list_append(&THREAD->wq_link, &wq->head);
    -
    +
             /*
              * Suspend execution.
    +         *
              */
             THREAD->state = Sleeping;
             THREAD->sleep_queue = wq;
    -
    -        spinlock_unlock(&THREAD->lock);
    -
    +
    +        irq_spinlock_unlock(&THREAD->lock, false);
    +
             /* wq->lock is released in scheduler_separated_stack() */
    -        scheduler();
    +        scheduler();
     
             return ESYNCH_OK_BLOCKED;
     }
    -
     
     /** Wake up first thread sleeping in a wait queue
    …
      * timeout.
      *
    - * @param wq            Pointer to wait queue.
    - * @param mode          Wakeup mode.
    + * @param wq   Pointer to wait queue.
    + * @param mode Wakeup mode.
    + *
      */
     void waitq_wakeup(waitq_t *wq, wakeup_mode_t mode)
     {
    -        ipl_t ipl;
    -
    -        ipl = interrupts_disable();
    -        spinlock_lock(&wq->lock);
    -
    +        irq_spinlock_lock(&wq->lock, true);
             _waitq_wakeup_unsafe(wq, mode);
    -
    -        spinlock_unlock(&wq->lock);
    -        interrupts_restore(ipl);
    +        irq_spinlock_unlock(&wq->lock, true);
     }
     
    …
      * assumes wq->lock is already locked and interrupts are already disabled.
      *
    - * @param wq            Pointer to wait queue.
    - * @param mode          If mode is WAKEUP_FIRST, then the longest waiting
    - *                      thread, if any, is woken up. If mode is WAKEUP_ALL, then
    - *                      all waiting threads, if any, are woken up. If there are
    - *                      no waiting threads to be woken up, the missed wakeup is
    - *                      recorded in the wait queue.
    + * @param wq   Pointer to wait queue.
    + * @param mode If mode is WAKEUP_FIRST, then the longest waiting
    + *             thread, if any, is woken up. If mode is WAKEUP_ALL, then
    + *             all waiting threads, if any, are woken up. If there are
    + *             no waiting threads to be woken up, the missed wakeup is
    + *             recorded in the wait queue.
    + *
      */
     void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode)
     {
    -        thread_t *t;
             size_t count = 0;
     
    -loop:
    +        ASSERT(interrupts_disabled());
    +        ASSERT(irq_spinlock_locked(&wq->lock));
    +
    +loop:
             if (list_empty(&wq->head)) {
                     wq->missed_wakeups++;
    -                if (count && mode == WAKEUP_ALL)
    +                if ((count) && (mode == WAKEUP_ALL))
                             wq->missed_wakeups--;
    +
                     return;
             }
    -
    +
             count++;
    -        t = list_get_instance(wq->head.next, thread_t, wq_link);
    +        thread_t *thread = list_get_instance(wq->head.next, thread_t, wq_link);
     
             /*
    …
              * invariant must hold:
              *
    -         * t->sleep_queue != NULL <=> t sleeps in a wait queue
    +         * thread->sleep_queue != NULL <=> thread sleeps in a wait queue
              *
              * For an observer who locks the thread, the invariant
              * holds only when the lock is held prior to removing
              * it from the wait queue.
    +         *
              */
    -        spinlock_lock(&t->lock);
    -        list_remove(&t->wq_link);
    -
    -        if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
    -                t->timeout_pending = false;
    -        t->sleep_queue = NULL;
    -        spinlock_unlock(&t->lock);
    -
    -        thread_ready(t);
    -
    +        irq_spinlock_lock(&thread->lock, false);
    +        list_remove(&thread->wq_link);
    +
    +        if ((thread->timeout_pending) &&
    +            (timeout_unregister(&thread->sleep_timeout)))
    +                thread->timeout_pending = false;
    +
    +        thread->sleep_queue = NULL;
    +        irq_spinlock_unlock(&thread->lock, false);
    +
    +        thread_ready(thread);
    +
             if (mode == WAKEUP_ALL)
                     goto loop;
     }
     
    +/** Get the missed wakeups count.
    + *
    + * @param wq    Pointer to wait queue.
    + * @return      The wait queue's missed_wakeups count.
    + */
    +int waitq_count_get(waitq_t *wq)
    +{
    +        int cnt;
    +
    +        irq_spinlock_lock(&wq->lock, true);
    +        cnt = wq->missed_wakeups;
    +        irq_spinlock_unlock(&wq->lock, true);
    +
    +        return cnt;
    +}
    +
    +/** Set the missed wakeups count.
    + *
    + * @param wq    Pointer to wait queue.
    + * @param val   New value of the missed_wakeups count.
    + */
    +void waitq_count_set(waitq_t *wq, int val)
    +{
    +        irq_spinlock_lock(&wq->lock, true);
    +        wq->missed_wakeups = val;
    +        irq_spinlock_unlock(&wq->lock, true);
    +}
    +
     /** @}
      */
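The wait queue now uses an irq_spinlock internally, asserts that blocking sleeps are not attempted with preemption disabled, and exports waitq_count_get()/waitq_count_set(), which the reworked semaphore code above relies on. A sketch of the basic sleep/wakeup handshake (not part of the changeset; it assumes the post-merge <synch/waitq.h> and kernel thread context, SYNCH_FLAGS_NONE is assumed to be the plain blocking mode from <synch/synch.h>, and the names are illustrative):

    static waitq_t event_wq;

    static void event_init(void)
    {
            waitq_initialize(&event_wq);
    }

    /* Consumer: wait up to 10 ms for the event, non-interruptibly. */
    static int event_wait(void)
    {
            int rc = waitq_sleep_timeout(&event_wq, 10000, SYNCH_FLAGS_NONE);
            /* rc is ESYNCH_OK_BLOCKED or ESYNCH_OK_ATOMIC on success,
             * ESYNCH_TIMEOUT if the 10 ms elapsed first. */
            return rc;
    }

    /* Producer: wake the longest-waiting consumer, or record a missed wakeup. */
    static void event_signal(void)
    {
            waitq_wakeup(&event_wq, WAKEUP_FIRST);
    }
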