File: 1 edited

Legend: unchanged lines have no prefix; removed lines are prefixed with '-', added lines with '+'.
  • kernel/generic/src/synch/condvar.c

    rb7fd2a0 → r497bd656
    (old line 80 / new line 80)
      * For exact description of meaning of possible combinations of usec and flags,
      * see comment for waitq_sleep_timeout().  Note that when
    - * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
    + * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
      * returned.
      *
      * @return              See comment for waitq_sleep_timeout().
      */
    -errno_t _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec, int flags)
    +int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec, int flags)
     {
    -        errno_t rc;
    +        int rc;
             ipl_t ipl;
    -        bool blocked;
     
             ipl = waitq_sleep_prepare(&cv->wq);
     …
     
             cv->wq.missed_wakeups = 0;      /* Enforce blocking. */
    -        rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, &blocked);
    -        assert(blocked || rc != EOK);
    +        rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
     
    -        waitq_sleep_finish(&cv->wq, blocked, ipl);
    +        waitq_sleep_finish(&cv->wq, rc, ipl);
             /* Lock only after releasing the waitq to avoid a possible deadlock. */
             mutex_lock(mtx);
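
The hunk above shows the mutex variant: the timed sleep is built on the wait queue primitives, and the caller's mutex is reacquired only after the wait queue has been released. For readers who want to see the caller's side of such an interface, the sketch below is a rough user-level analogue using POSIX threads, not HelenOS kernel code: it illustrates the usual discipline of holding the lock, waiting with a deadline, and re-checking the predicate afterwards. The predicate `ready` and the 500 ms timeout are invented for the example.

    /* Rough user-level analogue (POSIX threads), not HelenOS kernel code.
     * Illustrates the caller-side discipline around a timed condvar wait:
     * hold the mutex, wait with a deadline, and re-check the predicate,
     * because a timed wait can return on timeout or spuriously. */
    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool ready = false;          /* example predicate */

    /* Wait until `ready` becomes true or roughly 500 ms elapse. */
    static bool wait_ready_timeout(void)
    {
            struct timespec deadline;
            clock_gettime(CLOCK_REALTIME, &deadline);
            deadline.tv_nsec += 500 * 1000 * 1000;
            if (deadline.tv_nsec >= 1000000000L) {
                    deadline.tv_sec += 1;
                    deadline.tv_nsec -= 1000000000L;
            }

            pthread_mutex_lock(&lock);
            while (!ready) {
                    int rc = pthread_cond_timedwait(&cond, &lock, &deadline);
                    if (rc == ETIMEDOUT)
                            break;      /* give up; predicate read below */
            }
            bool ok = ready;
            pthread_mutex_unlock(&lock);
            return ok;
    }

    int main(void)
    {
            printf("ready: %d\n", wait_ready_timeout());
            return 0;
    }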
     
    (old line 119 / new line 117)
      * For exact description of meaning of possible combinations of usec and flags,
      * see comment for waitq_sleep_timeout().  Note that when
    - * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
    + * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
      * returned.
      *
      * @return See comment for waitq_sleep_timeout().
      */
    -errno_t _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
    +int _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
             uint32_t usec, int flags)
     {
    -        errno_t rc;
    +        int rc;
             ipl_t ipl;
    -        bool blocked;
    -
    +
             ipl = waitq_sleep_prepare(&cv->wq);
     
     …
     
             cv->wq.missed_wakeups = 0;      /* Enforce blocking. */
    -        rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, &blocked);
    -        assert(blocked || rc != EOK);
    +        rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
     
    -        waitq_sleep_finish(&cv->wq, blocked, ipl);
    +        waitq_sleep_finish(&cv->wq, rc, ipl);
             /* Lock only after releasing the waitq to avoid a possible deadlock. */
             spinlock_lock(lock);
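
Both variants so far follow the same ordering: prepare the wait queue, clear missed_wakeups to force an actual sleep, sleep with the timeout, finish the wait queue operation, and only then reacquire the caller's lock, as the in-code comment about deadlock avoidance spells out. The skeleton below restates that ordering with stand-in types and no-op stubs; the names mirror the kernel's but every definition here is a placeholder, and the release of the caller's lock (which sits in lines elided from the hunks) is placed where such implementations conventionally put it, after the wait queue has been prepared.

    /* Stand-in skeleton of the ordering used above; all types and helpers
     * are placeholders, not the HelenOS implementations. */
    #include <stdint.h>

    typedef int ipl_t;
    typedef struct { int missed_wakeups; } waitq_stub_t;
    typedef struct { waitq_stub_t wq; } condvar_stub_t;
    typedef struct { int locked; } lock_stub_t;

    static ipl_t waitq_prepare_stub(waitq_stub_t *wq) { (void) wq; return 0; }
    static int waitq_sleep_stub(waitq_stub_t *wq, uint32_t usec, int flags)
    {
            (void) wq; (void) usec; (void) flags;
            return 0;
    }
    static void waitq_finish_stub(waitq_stub_t *wq, int rc, ipl_t ipl)
    {
            (void) wq; (void) rc; (void) ipl;
    }
    static void lock_acquire_stub(lock_stub_t *l) { l->locked = 1; }
    static void lock_release_stub(lock_stub_t *l) { l->locked = 0; }

    /* Caller holds `lock` on entry and on return, but not while asleep. */
    static int condvar_wait_pattern(condvar_stub_t *cv, lock_stub_t *lock,
        uint32_t usec, int flags)
    {
            /* 1. Prepare (and conceptually lock) the wait queue. */
            ipl_t ipl = waitq_prepare_stub(&cv->wq);
            /* 2. Drop the caller's lock; in the real code this happens in the
             *    lines elided from the hunks (assumed placement). */
            lock_release_stub(lock);
            /* 3. Enforce blocking, then sleep with the timeout. */
            cv->wq.missed_wakeups = 0;
            int rc = waitq_sleep_stub(&cv->wq, usec, flags);
            /* 4. Release the wait queue. */
            waitq_finish_stub(&cv->wq, rc, ipl);
            /* 5. Reacquire the caller's lock only after the waitq is released,
             *    per the deadlock-avoidance comment in the diff above. */
            lock_acquire_stub(lock);
            return rc;
    }

    int main(void)
    {
            condvar_stub_t cv = { { 0 } };
            lock_stub_t lock = { 1 };
            return condvar_wait_pattern(&cv, &lock, 1000, 0) != 0;
    }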
     
    (old line 156 / new line 152)
      * For exact description of meaning of possible combinations of usec and flags,
      * see comment for waitq_sleep_timeout().  Note that when
    - * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
    + * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
      * returned.
      *
      * @return See comment for waitq_sleep_timeout().
      */
    -errno_t _condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock,
    +int _condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock,
             uint32_t usec, int flags)
     {
    -        errno_t rc;
    +        int rc;
             /* Save spinlock's state so we can restore it correctly later on. */
             ipl_t ipl = irq_lock->ipl;
     …
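
All three doc comments make the same point about SYNCH_FLAGS_NON_BLOCKING: because missed_wakeups is cleared to enforce blocking, a non-blocking request can never succeed immediately, so the call always reports a would-block result (EAGAIN on one side of the diff, ESYNCH_WOULD_BLOCK on the other). The fragment below is a minimal sketch of a caller treating that result as the expected outcome rather than an error; the flag and return-code values are placeholders defined locally, not the kernel's definitions.

    /* Hedged sketch of a caller interpreting the non-blocking result.
     * The constants are local placeholders standing in for the kernel's
     * SYNCH_FLAGS_NON_BLOCKING flag and its would-block return code. */
    #include <stdio.h>

    #define SKETCH_FLAGS_NON_BLOCKING  0x1   /* placeholder flag value  */
    #define SKETCH_WOULD_BLOCK         (-1)  /* placeholder return code */
    #define SKETCH_OK                  0

    /* Placeholder for a condvar timed wait; with the non-blocking flag it
     * always reports "would block", mirroring the documented behaviour. */
    static int sketch_condvar_wait_timeout(unsigned usec, int flags)
    {
            (void) usec;
            if (flags & SKETCH_FLAGS_NON_BLOCKING)
                    return SKETCH_WOULD_BLOCK;
            return SKETCH_OK;
    }

    int main(void)
    {
            int rc = sketch_condvar_wait_timeout(0, SKETCH_FLAGS_NON_BLOCKING);
            if (rc == SKETCH_WOULD_BLOCK) {
                    /* Expected: a condvar wait cannot complete without blocking,
                     * so a non-blocking attempt is not an error, just a no-op. */
                    puts("would block, as documented");
            }
            return 0;
    }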