Changeset 116d1ef4 in mainline for generic/src
- Timestamp:
- 2006-06-02T12:26:50Z (19 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- d0c5901
- Parents:
- 01ebbdf
- Location:
- generic/src
- Files:
-
- 9 edited
Legend:
- Unmodified
- Added
- Removed
-
generic/src/ipc/ipc.c
r01ebbdf r116d1ef4 143 143 144 144 ipc_call(phone, request); 145 ipc_wait_for_call(&sync_box, SYNCH_NO_TIMEOUT, SYNCH_BLOCKING);145 ipc_wait_for_call(&sync_box, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE); 146 146 } 147 147 … … 306 306 * @param usec Timeout in microseconds. See documentation for waitq_sleep_timeout() for 307 307 * description of its special meaning. 308 * @param nonblocking Blocking vs. non-blocking operation mode switch. See documentation309 * for waitq_sleep_timeout() for description of its special meaning.308 * @param flags Select mode of sleep operation. See documentation for waitq_sleep_timeout() 309 * for description of its special meaning. 310 310 * @return Received message address 311 311 * - to distinguish between call and answer, look at call->flags 312 312 */ 313 call_t * ipc_wait_for_call(answerbox_t *box, __u32 usec, int nonblocking)313 call_t * ipc_wait_for_call(answerbox_t *box, __u32 usec, int flags) 314 314 { 315 315 call_t *request; … … 318 318 319 319 restart: 320 rc = waitq_sleep_timeout(&box->wq, usec, nonblocking);320 rc = waitq_sleep_timeout(&box->wq, usec, flags); 321 321 if (SYNCH_FAILED(rc)) 322 322 return NULL; … … 413 413 /* Wait for all async answers to arrive */ 414 414 while (atomic_get(&task->active_calls)) { 415 call = ipc_wait_for_call(&task->answerbox, SYNCH_NO_TIMEOUT, SYNCH_BLOCKING);415 call = ipc_wait_for_call(&task->answerbox, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE); 416 416 ASSERT((call->flags & IPC_CALL_ANSWERED) || (call->flags & IPC_CALL_NOTIF)); 417 417 ASSERT(! (call->flags & IPC_CALL_STATIC_ALLOC)); -
generic/src/ipc/sysipc.c
r01ebbdf r116d1ef4 503 503 * @param calldata Pointer to buffer where the call/answer data is stored 504 504 * @param usec Timeout. See waitq_sleep_timeout() for explanation. 505 * @param nonblockingSee waitq_sleep_timeout() for explanation.505 * @param flags Select mode of sleep operation. See waitq_sleep_timeout() for explanation. 506 506 * 507 507 * @return Callid, if callid & 1, then the call is answer 508 508 */ 509 __native sys_ipc_wait_for_call(ipc_data_t *calldata, __u32 usec, int nonblocking)509 __native sys_ipc_wait_for_call(ipc_data_t *calldata, __u32 usec, int flags) 510 510 { 511 511 call_t *call; 512 512 513 513 restart: 514 call = ipc_wait_for_call(&TASK->answerbox, usec, nonblocking);514 call = ipc_wait_for_call(&TASK->answerbox, usec, flags | SYNCH_FLAGS_INTERRUPTIBLE); 515 515 if (!call) 516 516 return 0; -
generic/src/proc/thread.c
r01ebbdf r116d1ef4 304 304 305 305 timeout_initialize(&t->sleep_timeout); 306 t->sleep_interruptible = false; 306 307 t->sleep_queue = NULL; 307 308 t->timeout_pending = 0; … … 386 387 waitq_initialize(&wq); 387 388 388 (void) waitq_sleep_timeout(&wq, usec, SYNCH_ NON_BLOCKING);389 (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING); 389 390 } 390 391 -
generic/src/synch/condvar.c
r01ebbdf r116d1ef4 75 75 * @param mtx Mutex. 76 76 * @param usec Timeout value in microseconds. 77 * @param flags Select mode of operation. 77 78 * 78 * For exact description of meaning of possible values of usec, 79 * see comment for waitq_sleep_timeout(). 79 * For exact description of meaning of possible combinations 80 * of usec and flags, see comment for waitq_sleep_timeout(). 81 * Note that when SYNCH_FLAGS_NON_BLOCKING is specified here, 82 * ESYNCH_WOULD_BLOCK is always returned. 80 83 * 81 84 * @return See comment for waitq_sleep_timeout(). 82 85 */ 83 int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, __u32 usec )86 int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, __u32 usec, int flags) 84 87 { 85 88 int rc; … … 90 93 91 94 cv->wq.missed_wakeups = 0; /* Enforce blocking. */ 92 rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, SYNCH_BLOCKING);95 rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags); 93 96 94 97 mutex_lock(mtx); -
generic/src/synch/futex.c
r01ebbdf r116d1ef4 100 100 * @param uaddr Userspace address of the futex counter. 101 101 * @param usec If non-zero, number of microseconds this thread is willing to sleep. 102 * @param trydown If usec is zero and trydown is non-zero, conditional operation will be attempted.102 * @param flags Select mode of operation. 103 103 * 104 104 * @return One of ESYNCH_TIMEOUT, ESYNCH_OK_ATOMIC and ESYNCH_OK_BLOCKED. See synch.h. 105 105 * If there is no physical mapping for uaddr ENOENT is returned. 106 106 */ 107 __native sys_futex_sleep_timeout(__address uaddr, __u32 usec, int trydown)107 __native sys_futex_sleep_timeout(__address uaddr, __u32 usec, int flags) 108 108 { 109 109 futex_t *futex; … … 131 131 futex = futex_find(paddr); 132 132 133 return (__native) waitq_sleep_timeout(&futex->wq, usec, trydown);133 return (__native) waitq_sleep_timeout(&futex->wq, usec, flags | SYNCH_FLAGS_INTERRUPTIBLE); 134 134 } 135 135 -
generic/src/synch/mutex.c
r01ebbdf r116d1ef4 54 54 * @param mtx Mutex. 55 55 * @param usec Timeout in microseconds. 56 * @param trylock Switches between blocking and non-blocking mode.56 * @param flags Specify mode of operation. 57 57 * 58 58 * For exact description of possible combinations of 59 * usec and trylock, see comment for waitq_sleep_timeout().59 * usec and flags, see comment for waitq_sleep_timeout(). 60 60 * 61 61 * @return See comment for waitq_sleep_timeout(). 62 62 */ 63 int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int trylock)63 int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int flags) 64 64 { 65 return _semaphore_down_timeout(&mtx->sem, usec, trylock);65 return _semaphore_down_timeout(&mtx->sem, usec, flags); 66 66 } 67 67 … … 76 76 semaphore_up(&mtx->sem); 77 77 } 78 -
generic/src/synch/rwlock.c
r01ebbdf r116d1ef4 90 90 * @param rwl Reader/Writer lock. 91 91 * @param usec Timeout in microseconds. 92 * @param trylock Switches between blocking and non-blocking mode.92 * @param flags Specify mode of operation. 93 93 * 94 94 * For exact description of possible combinations of 95 * @usec and @trylock, see comment for waitq_sleep_timeout().95 * usec and flags, see comment for waitq_sleep_timeout(). 96 96 * 97 97 * @return See comment for waitq_sleep_timeout(). 98 98 */ 99 int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)99 int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int flags) 100 100 { 101 101 ipl_t ipl; … … 112 112 * They just need to acquire the exclusive mutex. 113 113 */ 114 rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock);114 rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags); 115 115 if (SYNCH_FAILED(rc)) { 116 116 117 117 /* 118 * Lock operation timed out .118 * Lock operation timed out or was interrupted. 119 119 * The state of rwl is UNKNOWN at this point. 120 120 * No claims about its holder can be made. … … 144 144 * @param rwl Reader/Writer lock. 145 145 * @param usec Timeout in microseconds. 146 * @param trylock Switches between blocking and non-blocking mode.146 * @param flags Select mode of operation. 147 147 * 148 148 * For exact description of possible combinations of 149 * usec and trylock, see comment for waitq_sleep_timeout().149 * usec and flags, see comment for waitq_sleep_timeout(). 150 150 * 151 151 * @return See comment for waitq_sleep_timeout(). 
152 152 */ 153 int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)153 int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int flags) 154 154 { 155 155 int rc; … … 200 200 #endif 201 201 202 rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock);202 rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags); 203 203 switch (rc) { 204 204 case ESYNCH_WOULD_BLOCK: … … 209 209 spinlock_unlock(&rwl->lock); 210 210 case ESYNCH_TIMEOUT: 211 case ESYNCH_INTERRUPTED: 211 212 /* 212 * The sleep time outed.213 * The sleep timed out. 213 214 * We just restore interrupt priority level. 214 215 */ -
generic/src/synch/semaphore.c
r01ebbdf r116d1ef4 68 68 * @param s Semaphore. 69 69 * @param usec Timeout in microseconds. 70 * @param trydown Switches between blocking and non-blocking mode.70 * @param flags Select mode of operation. 71 71 * 72 72 * For exact description of possible combinations of 73 * usec and trydown, see comment for waitq_sleep_timeout().73 * usec and flags, see comment for waitq_sleep_timeout(). 74 74 * 75 75 * @return See comment for waitq_sleep_timeout(). 76 76 */ 77 int _semaphore_down_timeout(semaphore_t *s, __u32 usec, int trydown)77 int _semaphore_down_timeout(semaphore_t *s, __u32 usec, int flags) 78 78 { 79 return waitq_sleep_timeout(&s->wq, usec, trydown);79 return waitq_sleep_timeout(&s->wq, usec, flags); 80 80 } 81 81 -
generic/src/synch/waitq.c
r01ebbdf r116d1ef4 136 136 spinlock_lock(&t->lock); 137 137 if ((wq = t->sleep_queue)) { /* assignment */ 138 if (!(t->sleep_interruptible)) { 139 /* 140 * The sleep cannot be interrupted. 141 */ 142 spinlock_unlock(&t->lock); 143 goto out; 144 } 145 138 146 if (!spinlock_trylock(&wq->lock)) { 139 147 spinlock_unlock(&t->lock); … … 160 168 /** Sleep until either wakeup, timeout or interruption occurs 161 169 * 162 * This is a sleep implementation which allows itself to be170 * This is a sleep implementation which allows itself to time out or to be 163 171 * interrupted from the sleep, restoring a failover context. 164 172 * … … 170 178 * @param wq Pointer to wait queue. 171 179 * @param usec Timeout in microseconds. 172 * @param nonblocking Blocking vs. non-blocking operation mode switch. 173 * 174 * If usec is greater than zero, regardless of the value of nonblocking, 175 * the call will not return until either timeout or wakeup comes. 176 * 177 * If usec is zero and @nonblocking is zero (false), the call 178 * will not return until wakeup comes. 179 * 180 * If usec is zero and nonblocking is non-zero (true), the call will 180 * @param flags Specify mode of the sleep. 181 * 182 * The sleep can be interrupted only if the 183 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags. 184 185 * If usec is greater than zero, regardless of the value of the 186 * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either timeout, 187 * interruption or wakeup comes. 188 * 189 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags, the call 190 * will not return until wakeup or interruption comes. 191 * 192 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the call will 181 193 * immediately return, reporting either success or failure. 
182 194 * 183 * @return Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, 195 * @return Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, ESYNCH_INTERRUPTED, 184 196 * ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED. 185 197 * … … 198 210 * attempted. 199 211 */ 200 int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking)212 int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int flags) 201 213 { 202 214 ipl_t ipl; … … 204 216 205 217 ipl = waitq_sleep_prepare(wq); 206 rc = waitq_sleep_timeout_unsafe(wq, usec, nonblocking);218 rc = waitq_sleep_timeout_unsafe(wq, usec, flags); 207 219 waitq_sleep_finish(wq, rc, ipl); 208 220 return rc; … … 277 289 * @param wq See waitq_sleep_timeout(). 278 290 * @param usec See waitq_sleep_timeout(). 279 * @param nonblockingSee waitq_sleep_timeout().291 * @param flags See waitq_sleep_timeout(). 280 292 * 281 293 * @return See waitq_sleep_timeout(). 282 294 */ 283 int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int nonblocking)295 int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int flags) 284 296 { 285 297 /* checks whether to go to sleep at all */ … … 289 301 } 290 302 else { 291 if ( nonblocking&& (usec == 0)) {303 if ((flags & SYNCH_FLAGS_NON_BLOCKING) && (usec == 0)) { 292 304 /* return immediatelly instead of going to sleep */ 293 305 return ESYNCH_WOULD_BLOCK; … … 300 312 spinlock_lock(&THREAD->lock); 301 313 302 /* 303 * Set context that will be restored if the sleep 304 * of this thread is ever interrupted. 305 */ 306 if (!context_save(&THREAD->sleep_interruption_context)) { 307 /* Short emulation of scheduler() return code. */ 308 spinlock_unlock(&THREAD->lock); 309 return ESYNCH_INTERRUPTED; 314 if (flags & SYNCH_FLAGS_INTERRUPTIBLE) { 315 /* 316 * Set context that will be restored if the sleep 317 * of this thread is ever interrupted. 318 */ 319 THREAD->sleep_interruptible = true; 320 if (!context_save(&THREAD->sleep_interruption_context)) { 321 /* Short emulation of scheduler() return code. 
*/ 322 spinlock_unlock(&THREAD->lock); 323 return ESYNCH_INTERRUPTED; 324 } 325 } else { 326 THREAD->sleep_interruptible = false; 310 327 } 311 328
Note:
See TracChangeset
for help on using the changeset viewer.