  • kernel/generic/src/synch/condvar.c

    --- kernel/generic/src/synch/condvar.c (r497bd656)
    +++ kernel/generic/src/synch/condvar.c (r9d58539)
    @@ -38,5 +38,4 @@
     #include <synch/condvar.h>
     #include <synch/mutex.h>
    -#include <synch/spinlock.h>
     #include <synch/waitq.h>
     #include <arch.h>
    @@ -91,5 +90,4 @@
     
     	ipl = waitq_sleep_prepare(&cv->wq);
    -	/* Unlock only after the waitq is locked so we don't miss a wakeup. */
     	mutex_unlock(mtx);
     
    @@ -97,93 +95,10 @@
     	rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
     
    +	mutex_lock(mtx);
     	waitq_sleep_finish(&cv->wq, rc, ipl);
    -	/* Lock only after releasing the waitq to avoid a possible deadlock. */
    -	mutex_lock(mtx);
     
     	return rc;
     }
     
    -/** Wait for the condition to become true with a locked spinlock.
    - *
    - * The function is not aware of irq_spinlock. Therefore do not even
    - * try passing irq_spinlock_t to it. Use _condvar_wait_timeout_irq_spinlock()
    - * instead.
    - *
    - * @param cv            Condition variable.
    - * @param lock          Locked spinlock.
    - * @param usec          Timeout value in microseconds.
    - * @param flags         Select mode of operation.
    - *
    - * For exact description of meaning of possible combinations of usec and flags,
    - * see comment for waitq_sleep_timeout().  Note that when
    - * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
    - * returned.
    - *
    - * @return See comment for waitq_sleep_timeout().
    - */
    -int _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
    -	uint32_t usec, int flags)
    -{
    -	int rc;
    -	ipl_t ipl;
    -
    -	ipl = waitq_sleep_prepare(&cv->wq);
    -
    -	/* Unlock only after the waitq is locked so we don't miss a wakeup. */
    -	spinlock_unlock(lock);
    -
    -	cv->wq.missed_wakeups = 0;	/* Enforce blocking. */
    -	rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
    -
    -	waitq_sleep_finish(&cv->wq, rc, ipl);
    -	/* Lock only after releasing the waitq to avoid a possible deadlock. */
    -	spinlock_lock(lock);
    -
    -	return rc;
    -}
    -
    -/** Wait for the condition to become true with a locked irq spinlock.
    - *
    - * @param cv            Condition variable.
    - * @param lock          Locked irq spinlock.
    - * @param usec          Timeout value in microseconds.
    - * @param flags         Select mode of operation.
    - *
    - * For exact description of meaning of possible combinations of usec and flags,
    - * see comment for waitq_sleep_timeout().  Note that when
    - * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
    - * returned.
    - *
    - * @return See comment for waitq_sleep_timeout().
    - */
    -int _condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock,
    -	uint32_t usec, int flags)
    -{
    -	int rc;
    -	/* Save spinlock's state so we can restore it correctly later on. */
    -	ipl_t ipl = irq_lock->ipl;
    -	bool guard = irq_lock->guard;
    -
    -	irq_lock->guard = false;
    -
    -	/*
    -	 * waitq_prepare() restores interrupts to the current state,
    -	 * ie disabled. Therefore, interrupts will remain disabled while
    -	 * it spins waiting for a pending timeout handler to complete.
    -	 * Although it spins with interrupts disabled there can only
    -	 * be a pending timeout if we failed to cancel an imminent
    -	 * timeout (on another cpu) during a wakeup. As a result the
    -	 * timeout handler is guaranteed to run (it is most likely already
    -	 * running) and there is no danger of a deadlock.
    -	 */
    -	rc = _condvar_wait_timeout_spinlock(cv, &irq_lock->lock, usec, flags);
    -
    -	irq_lock->guard = guard;
    -	irq_lock->ipl = ipl;
    -
    -	return rc;
    -}
    -
    -
     /** @}
      */
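
In the new revision, _condvar_wait_timeout() re-acquires the caller's mutex before waitq_sleep_finish() rather than after it, and the spinlock-backed wait variants (_condvar_wait_timeout_spinlock_impl() and _condvar_wait_timeout_irq_spinlock()) are removed together with the now-unused <synch/spinlock.h> include. The protocol visible to callers is unchanged: the mutex is released only after waitq_sleep_prepare() has locked the wait queue, so no wakeup can slip through the gap, and the mutex is held again by the time the call returns, so the caller can recheck its predicate under the lock. Below is a minimal sketch of that same protocol in portable POSIX C, not HelenOS kernel code; wait_ready_timeout() and the ready flag are hypothetical names standing in for a caller of _condvar_wait_timeout().

    /*
     * Sketch only: the condvar waiter protocol in POSIX C. Like the
     * kernel's _condvar_wait_timeout(), pthread_cond_timedwait()
     * atomically releases the mutex while the thread sleeps and
     * re-acquires it before returning, so the predicate is always
     * tested with the lock held.
     */
    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    static bool ready = false;   /* The predicate, protected by mtx. */

    /* Wait up to usec microseconds for ready; 0 on success, ETIMEDOUT otherwise. */
    static int wait_ready_timeout(uint32_t usec)
    {
    	struct timespec ts;
    	clock_gettime(CLOCK_REALTIME, &ts);
    	ts.tv_sec += usec / 1000000;
    	ts.tv_nsec += (long) (usec % 1000000) * 1000;
    	if (ts.tv_nsec >= 1000000000L) {
    		ts.tv_sec++;
    		ts.tv_nsec -= 1000000000L;
    	}

    	pthread_mutex_lock(&mtx);
    	int rc = 0;
    	/* Recheck the predicate in a loop: wakeups may be spurious. */
    	while (!ready && rc != ETIMEDOUT)
    		rc = pthread_cond_timedwait(&cv, &mtx, &ts);
    	bool ok = ready;
    	pthread_mutex_unlock(&mtx);

    	return ok ? 0 : ETIMEDOUT;
    }

    static void *signaler(void *arg)
    {
    	(void) arg;
    	pthread_mutex_lock(&mtx);
    	ready = true;              /* Change the predicate under the lock... */
    	pthread_cond_signal(&cv);  /* ...then wake one waiter. */
    	pthread_mutex_unlock(&mtx);
    	return NULL;
    }

    int main(void)
    {
    	pthread_t t;
    	pthread_create(&t, NULL, signaler, NULL);
    	printf("%s\n", wait_ready_timeout(500000) == 0 ? "ok" : "timed out");
    	pthread_join(t, NULL);
    	return 0;
    }

The property mirrored here is the one the removed comment described: the predicate is only ever tested and changed while the mutex is held, and the wait primitive's atomic unlock-and-sleep is what rules out the missed-wakeup window.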