Changes in kernel/generic/src/synch/condvar.c [497bd656:b7fd2a0] in mainline
kernel/generic/src/synch/condvar.c

--- kernel/generic/src/synch/condvar.c  (r497bd656)
+++ kernel/generic/src/synch/condvar.c  (rb7fd2a0)
@@ -80,13 +80,14 @@
  * For exact description of meaning of possible combinations of usec and flags,
  * see comment for waitq_sleep_timeout(). Note that when
- * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
+ * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
  * returned.
  *
  * @return See comment for waitq_sleep_timeout().
  */
-int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec, int flags)
+errno_t _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec, int flags)
 {
-    int rc;
+    errno_t rc;
     ipl_t ipl;
+    bool blocked;
 
     ipl = waitq_sleep_prepare(&cv->wq);
@@ -95,7 +96,8 @@
 
     cv->wq.missed_wakeups = 0;  /* Enforce blocking. */
-    rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
+    rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, &blocked);
+    assert(blocked || rc != EOK);
 
-    waitq_sleep_finish(&cv->wq, rc, ipl);
+    waitq_sleep_finish(&cv->wq, blocked, ipl);
     /* Lock only after releasing the waitq to avoid a possible deadlock. */
     mutex_lock(mtx);
@@ -117,14 +119,15 @@
  * For exact description of meaning of possible combinations of usec and flags,
  * see comment for waitq_sleep_timeout(). Note that when
- * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
+ * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
  * returned.
  *
  * @return See comment for waitq_sleep_timeout().
  */
-int _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
+errno_t _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
     uint32_t usec, int flags)
 {
-    int rc;
+    errno_t rc;
     ipl_t ipl;
+    bool blocked;
 
     ipl = waitq_sleep_prepare(&cv->wq);
@@ -134,7 +137,8 @@
 
     cv->wq.missed_wakeups = 0;  /* Enforce blocking. */
-    rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
+    rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, &blocked);
+    assert(blocked || rc != EOK);
 
-    waitq_sleep_finish(&cv->wq, rc, ipl);
+    waitq_sleep_finish(&cv->wq, blocked, ipl);
     /* Lock only after releasing the waitq to avoid a possible deadlock. */
     spinlock_lock(lock);
@@ -152,13 +156,13 @@
  * For exact description of meaning of possible combinations of usec and flags,
  * see comment for waitq_sleep_timeout(). Note that when
- * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
+ * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
  * returned.
  *
  * @return See comment for waitq_sleep_timeout().
  */
-int _condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock,
+errno_t _condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock,
     uint32_t usec, int flags)
 {
-    int rc;
+    errno_t rc;
     /* Save spinlock's state so we can restore it correctly later on. */
     ipl_t ipl = irq_lock->ipl;
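For readers following the API change, the sketch below shows what the new contract looks like from a caller's point of view: _condvar_wait_timeout() now returns errno_t, so the result can be compared directly against codes such as EOK, ETIMEOUT and (with SYNCH_FLAGS_NON_BLOCKING) EAGAIN, as documented for waitq_sleep_timeout(). This is only an illustrative sketch: wait_for_data(), data_cv, data_mtx and data_ready are invented names and the include paths are approximate; only _condvar_wait_timeout(), SYNCH_FLAGS_NONE and the errno_t codes come from the changeset itself.

/* Illustrative caller only -- not part of the changeset. */
#include <errno.h>            /* errno_t, EOK, ETIMEOUT (path approximate) */
#include <synch/condvar.h>
#include <synch/mutex.h>

static condvar_t data_cv;     /* hypothetical condition variable */
static mutex_t data_mtx;      /* hypothetical mutex guarding data_ready */
static bool data_ready;       /* hypothetical predicate */

/* Wait up to usec microseconds for data_ready to become true. */
static errno_t wait_for_data(uint32_t usec)
{
    mutex_lock(&data_mtx);
    while (!data_ready) {
        /* As of rb7fd2a0 the wait returns errno_t instead of int. */
        errno_t rc = _condvar_wait_timeout(&data_cv, &data_mtx, usec,
            SYNCH_FLAGS_NONE);
        if (rc != EOK) {
            /* Typically ETIMEOUT here; see waitq_sleep_timeout()
             * for the full set of possible codes. */
            mutex_unlock(&data_mtx);
            return rc;
        }
    }
    mutex_unlock(&data_mtx);
    return EOK;
}

On the kernel side, the cleanup in waitq_sleep_finish() now keys off the new blocked output parameter (whether the thread actually slept) rather than off the error code, and the added assert(blocked || rc != EOK) records the invariant that an EOK result implies the caller really blocked and was woken up.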