  • kernel/generic/src/synch/condvar.c

    re88eb48 → r0b47781

@@ -48,5 +48,5 @@
 void condvar_initialize(condvar_t *cv)
 {
-        waitq_initialize(&cv->wq);
+        *cv = CONDVAR_INITIALIZER(*cv);
 }
 
     
@@ -58,5 +58,5 @@
 void condvar_signal(condvar_t *cv)
 {
-        waitq_wakeup(&cv->wq, WAKEUP_FIRST);
+        waitq_signal(&cv->wq);
 }
 
     
@@ -68,5 +68,5 @@
 void condvar_broadcast(condvar_t *cv)
 {
-        waitq_wakeup(&cv->wq, WAKEUP_ALL);
+        waitq_wake_all(&cv->wq);
 }
 
     
@@ -76,91 +76,65 @@
  * @param mtx           Mutex.
  * @param usec          Timeout value in microseconds.
- * @param flags         Select mode of operation.
- *
- * For exact description of meaning of possible combinations of usec and flags,
- * see comment for waitq_sleep_timeout().  Note that when
- * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
- * returned.
 *
 * @return              See comment for waitq_sleep_timeout().
 */
-errno_t _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec, int flags)
+errno_t __condvar_wait_timeout_mutex(condvar_t *cv, mutex_t *mtx, uint32_t usec)
 {
-        errno_t rc;
-        ipl_t ipl;
-        bool blocked;
+        wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
 
-        ipl = waitq_sleep_prepare(&cv->wq);
         /* Unlock only after the waitq is locked so we don't miss a wakeup. */
         mutex_unlock(mtx);
 
-        cv->wq.missed_wakeups = 0;      /* Enforce blocking. */
-        rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, &blocked);
-        assert(blocked || rc != EOK);
+        errno_t rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, SYNCH_FLAGS_NON_BLOCKING, guard);
 
-        waitq_sleep_finish(&cv->wq, blocked, ipl);
-        /* Lock only after releasing the waitq to avoid a possible deadlock. */
         mutex_lock(mtx);
-
         return rc;
 }
 
-/** Wait for the condition to become true with a locked spinlock.
- *
- * The function is not aware of irq_spinlock. Therefore do not even
- * try passing irq_spinlock_t to it. Use _condvar_wait_timeout_irq_spinlock()
- * instead.
- *
- * @param cv            Condition variable.
- * @param lock          Locked spinlock.
- * @param usec          Timeout value in microseconds.
- * @param flags         Select mode of operation.
- *
- * For exact description of meaning of possible combinations of usec and flags,
- * see comment for waitq_sleep_timeout().  Note that when
- * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
- * returned.
- *
- * @return See comment for waitq_sleep_timeout().
- */
-errno_t _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
-    uint32_t usec, int flags)
+errno_t __condvar_wait_mutex(condvar_t *cv, mutex_t *mtx)
 {
-        errno_t rc;
-        ipl_t ipl;
-        bool blocked;
+        wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
 
-        ipl = waitq_sleep_prepare(&cv->wq);
+        /* Unlock only after the waitq is locked so we don't miss a wakeup. */
+        mutex_unlock(mtx);
+
+        errno_t rc = waitq_sleep_unsafe(&cv->wq, guard);
+
+        mutex_lock(mtx);
+        return rc;
+}
+
+/** Same as __condvar_wait_timeout_mutex(), except for spinlock_t. */
+errno_t __condvar_wait_timeout_spinlock(condvar_t *cv, spinlock_t *lock,
+    uint32_t usec)
+{
+        wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
 
         /* Unlock only after the waitq is locked so we don't miss a wakeup. */
         spinlock_unlock(lock);
 
-        cv->wq.missed_wakeups = 0;      /* Enforce blocking. */
-        rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, &blocked);
-        assert(blocked || rc != EOK);
+        errno_t rc = waitq_sleep_timeout_unsafe(&cv->wq, usec,
+            SYNCH_FLAGS_NON_BLOCKING, guard);
 
-        waitq_sleep_finish(&cv->wq, blocked, ipl);
-        /* Lock only after releasing the waitq to avoid a possible deadlock. */
         spinlock_lock(lock);
-
         return rc;
 }
 
-/** Wait for the condition to become true with a locked irq spinlock.
- *
- * @param cv            Condition variable.
- * @param lock          Locked irq spinlock.
- * @param usec          Timeout value in microseconds.
- * @param flags         Select mode of operation.
- *
- * For exact description of meaning of possible combinations of usec and flags,
- * see comment for waitq_sleep_timeout().  Note that when
- * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
- * returned.
- *
- * @return See comment for waitq_sleep_timeout().
- */
-errno_t _condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock,
-    uint32_t usec, int flags)
+errno_t __condvar_wait_spinlock(condvar_t *cv, spinlock_t *mtx)
+{
+        wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
+
+        /* Unlock only after the waitq is locked so we don't miss a wakeup. */
+        spinlock_unlock(mtx);
+
+        errno_t rc = waitq_sleep_unsafe(&cv->wq, guard);
+
+        spinlock_lock(mtx);
+        return rc;
+}
+
+/** Same as __condvar_wait_timeout_mutex(), except for irq_spinlock_t. */
+errno_t __condvar_wait_timeout_irq_spinlock(condvar_t *cv,
+    irq_spinlock_t *irq_lock, uint32_t usec)
 {
         errno_t rc;
     
@@ -181,5 +155,23 @@
          * running) and there is no danger of a deadlock.
          */
-        rc = _condvar_wait_timeout_spinlock(cv, &irq_lock->lock, usec, flags);
+        rc = __condvar_wait_timeout_spinlock(cv, &irq_lock->lock, usec);
+
+        irq_lock->guard = guard;
+        irq_lock->ipl = ipl;
+
+        return rc;
+}
+
+/** Same as __condvar_wait_mutex(), except for irq_spinlock_t. */
+errno_t __condvar_wait_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock)
+{
+        errno_t rc;
+        /* Save spinlock's state so we can restore it correctly later on. */
+        ipl_t ipl = irq_lock->ipl;
+        bool guard = irq_lock->guard;
+
+        irq_lock->guard = false;
+
+        rc = __condvar_wait_spinlock(cv, &irq_lock->lock);
 
         irq_lock->guard = guard;
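
For orientation, the reworked functions above still back the usual condition-variable pattern: take the mutex, re-check the predicate in a loop, wait, and have the other side change the predicate and signal while holding the same mutex. The sketch below is illustrative only and is not part of the changeset: the names ready, ready_mtx, ready_cv, wait_for_ready and announce_ready are made up, and it calls the internal __condvar_wait_mutex() from the diff directly, whereas real kernel code would presumably go through the public condvar_wait() wrappers. Both synchronization objects are assumed to have been set up earlier, e.g. the condvar with condvar_initialize() as shown in the first hunk.

static mutex_t ready_mtx;
static condvar_t ready_cv;
static bool ready = false;

/* Waiter: blocks until the producer sets the flag. */
static void wait_for_ready(void)
{
        mutex_lock(&ready_mtx);
        /* Re-check the predicate after every wakeup. */
        while (!ready)
                __condvar_wait_mutex(&ready_cv, &ready_mtx);
        mutex_unlock(&ready_mtx);
}

/* Producer: sets the flag and wakes one waiter. */
static void announce_ready(void)
{
        mutex_lock(&ready_mtx);
        ready = true;
        condvar_signal(&ready_cv);  /* condvar_broadcast() would wake all waiters. */
        mutex_unlock(&ready_mtx);
}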