Changeset 116d1ef4 in mainline for generic/src


Timestamp: 2006-06-02T12:26:50Z (19 years ago)
Author: Jakub Jermar <jakub@…>
Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children: d0c5901
Parents: 01ebbdf
Message:

Replace the nonblocking argument of waitq_sleep_timeout() with flags that specify the mode of operation.
Now a flag can be used to specify interruptible sleep.
Modify waitq_interrupt_sleep() to only interrupt threads that used this flag.

Location: generic/src
Files: 9 edited
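
The new flag constants themselves are declared in the synch headers, which live outside generic/src and are therefore not shown in this changeset. Below is a minimal sketch of the interface the hunks rely on; the identifier names are taken from the diff, but the values and comments are assumptions:

    /* Sketch only: the real definitions live in the synch headers, which are
     * not part of this changeset. The numeric values are assumed. */
    #define SYNCH_NO_TIMEOUT           0          /* usec = 0 means no timeout */

    #define SYNCH_FLAGS_NONE           0          /* blocking, non-interruptible sleep */
    #define SYNCH_FLAGS_NON_BLOCKING   (1 << 0)   /* fail with ESYNCH_WOULD_BLOCK instead of sleeping */
    #define SYNCH_FLAGS_INTERRUPTIBLE  (1 << 1)   /* sleep may be broken by waitq_interrupt_sleep() */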

  • generic/src/ipc/ipc.c

    r01ebbdf r116d1ef4  
    143143
    144144        ipc_call(phone, request);
    145         ipc_wait_for_call(&sync_box, SYNCH_NO_TIMEOUT, SYNCH_BLOCKING);
     145        ipc_wait_for_call(&sync_box, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
    146146}
    147147
     
    306306 * @param usec Timeout in microseconds. See documentation for waitq_sleep_timeout() for
    307307 *             decription of its special meaning.
    308  * @param nonblocking Blocking vs. non-blocking operation mode switch. See documentation
    309  *                    for waitq_sleep_timeout() for description of its special meaning.
     308 * @param flags Select mode of sleep operation. See documentation for waitq_sleep_timeout()i
     309 *              for description of its special meaning.
    310310 * @return Recived message address
    311311 * - to distinguish between call and answer, look at call->flags
    312312 */
    313 call_t * ipc_wait_for_call(answerbox_t *box, __u32 usec, int nonblocking)
     313call_t * ipc_wait_for_call(answerbox_t *box, __u32 usec, int flags)
    314314{
    315315        call_t *request;
     
    318318
    319319restart:
    320         rc = waitq_sleep_timeout(&box->wq, usec, nonblocking);
     320        rc = waitq_sleep_timeout(&box->wq, usec, flags);
    321321        if (SYNCH_FAILED(rc))
    322322                return NULL;
     
    413413        /* Wait for all async answers to arrive */
    414414        while (atomic_get(&task->active_calls)) {
    415                 call = ipc_wait_for_call(&task->answerbox, SYNCH_NO_TIMEOUT, SYNCH_BLOCKING);
     415                call = ipc_wait_for_call(&task->answerbox, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
    416416                ASSERT((call->flags & IPC_CALL_ANSWERED) || (call->flags & IPC_CALL_NOTIF));
    417417                ASSERT(! (call->flags & IPC_CALL_STATIC_ALLOC));
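
For illustration, a caller-side sketch of the updated ipc_wait_for_call() interface; only the signature and the flag names come from the hunks above, the surrounding code is assumed:

    /* Sketch: block without a timeout and without allowing interruption. */
    call_t *call;

    call = ipc_wait_for_call(&TASK->answerbox, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
    if (call) {
            /* To distinguish between call and answer, look at call->flags. */
            if (call->flags & IPC_CALL_ANSWERED) {
                    /* ... handle the answer ... */
            }
    }
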
  • generic/src/ipc/sysipc.c

    r01ebbdf r116d1ef4  
    503503 * @param calldata Pointer to buffer where the call/answer data is stored
    504504 * @param usec Timeout. See waitq_sleep_timeout() for explanation.
    505  * @param nonblocking See waitq_sleep_timeout() for explanation.
     505 * @param flags Select mode of sleep operation. See waitq_sleep_timeout() for explanation.
    506506 *
    507507 * @return Callid, if callid & 1, then the call is answer
    508508 */
    509 __native sys_ipc_wait_for_call(ipc_data_t *calldata, __u32 usec, int nonblocking)
     509__native sys_ipc_wait_for_call(ipc_data_t *calldata, __u32 usec, int flags)
    510510{
    511511        call_t *call;
    512512
    513513restart:       
    514         call = ipc_wait_for_call(&TASK->answerbox, usec, nonblocking);
     514        call = ipc_wait_for_call(&TASK->answerbox, usec, flags | SYNCH_FLAGS_INTERRUPTIBLE);
    515515        if (!call)
    516516                return 0;
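
Note that sys_ipc_wait_for_call() now ORs SYNCH_FLAGS_INTERRUPTIBLE into whatever flags it received, so a wait entered on behalf of userspace can always be interrupted. A hedged sketch of the effective call inside the syscall handler:

    /* Sketch: userspace may request a non-blocking wait, but the kernel
     * always makes the sleep itself interruptible on the syscall path. */
    int effective = flags | SYNCH_FLAGS_INTERRUPTIBLE;

    call = ipc_wait_for_call(&TASK->answerbox, usec, effective);
    if (!call)
            return 0;   /* timed out, would have blocked, or was interrupted */
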
  • generic/src/proc/thread.c

    r01ebbdf r116d1ef4  
    304304       
    305305        timeout_initialize(&t->sleep_timeout);
     306        t->sleep_interruptible = false;
    306307        t->sleep_queue = NULL;
    307308        t->timeout_pending = 0;
     
    386387        waitq_initialize(&wq);
    387388
    388         (void) waitq_sleep_timeout(&wq, usec, SYNCH_NON_BLOCKING);
     389        (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
    389390}
    390391
  • generic/src/synch/condvar.c

    r01ebbdf r116d1ef4  
    7575 * @param mtx Mutex.
    7676 * @param usec Timeout value in microseconds.
     77 * @param flags Select mode of operation.
    7778 *
    78  * For exact description of meaning of possible values of usec,
    79  * see comment for waitq_sleep_timeout().
     79 * For exact description of meaning of possible combinations
     80 * of usec and flags, see comment for waitq_sleep_timeout().
     81 * Note that when SYNCH_FLAGS_NON_BLOCKING is specified here,
     82 * ESYNCH_WOULD_BLOCK is always returned.
    8083 *
    8184 * @return See comment for waitq_sleep_timeout().
    8285 */
    83 int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, __u32 usec)
     86int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, __u32 usec, int flags)
    8487{
    8588        int rc;
     
    9093
    9194        cv->wq.missed_wakeups = 0;      /* Enforce blocking. */
    92         rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, SYNCH_BLOCKING);
     95        rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
    9396
    9497        mutex_lock(mtx);
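
A caller-side sketch of the extended condition-variable wait; the predicate and the locals are hypothetical, only _condvar_wait_timeout() and the flag and return-value names come from this changeset:

    /* Sketch: wait for a hypothetical 'ready' condition for at most 10 ms,
     * opting in to interruptible sleep. cv, mtx and ready are assumed. */
    int rc;

    mutex_lock(&mtx);
    while (!ready) {
            rc = _condvar_wait_timeout(&cv, &mtx, 10000, SYNCH_FLAGS_INTERRUPTIBLE);
            if (rc == ESYNCH_TIMEOUT || rc == ESYNCH_INTERRUPTED)
                    break;  /* gave up waiting */
    }
    mutex_unlock(&mtx);
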
  • generic/src/synch/futex.c

    r01ebbdf r116d1ef4  
    100100 * @param uaddr Userspace address of the futex counter.
    101101 * @param usec If non-zero, number of microseconds this thread is willing to sleep.
    102  * @param trydown If usec is zero and trydown is non-zero, conditional operation will be attempted.
     102 * @param flags Select mode of operation.
    103103 *
    104104 * @return One of ESYNCH_TIMEOUT, ESYNCH_OK_ATOMIC and ESYNCH_OK_BLOCKED. See synch.h.
    105105 *         If there is no physical mapping for uaddr ENOENT is returned.
    106106 */
    107 __native sys_futex_sleep_timeout(__address uaddr, __u32 usec, int trydown)
     107__native sys_futex_sleep_timeout(__address uaddr, __u32 usec, int flags)
    108108{
    109109        futex_t *futex;
     
    131131        futex = futex_find(paddr);
    132132       
    133         return (__native) waitq_sleep_timeout(&futex->wq, usec, trydown);
     133        return (__native) waitq_sleep_timeout(&futex->wq, usec, flags | SYNCH_FLAGS_INTERRUPTIBLE);
    134134}
    135135
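
Like the IPC syscall, sys_futex_sleep_timeout() forces SYNCH_FLAGS_INTERRUPTIBLE. The old trydown behaviour is now expressed through the flags word; a hedged sketch of the equivalent conditional operation:

    /* Sketch: the former "usec == 0 && trydown" conditional down now
     * corresponds to a zero timeout plus the non-blocking flag. */
    __native rc;

    rc = (__native) waitq_sleep_timeout(&futex->wq, 0,
        SYNCH_FLAGS_NON_BLOCKING | SYNCH_FLAGS_INTERRUPTIBLE);
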
  • generic/src/synch/mutex.c

    r01ebbdf r116d1ef4  
    5454 * @param mtx Mutex.
    5555 * @param usec Timeout in microseconds.
    56  * @param trylock Switches between blocking and non-blocking mode.
     56 * @param flags Specify mode of operation.
    5757 *
    5858 * For exact description of possible combinations of
    59  * usec and trylock, see comment for waitq_sleep_timeout().
     59 * usec and flags, see comment for waitq_sleep_timeout().
    6060 *
    6161 * @return See comment for waitq_sleep_timeout().
    6262 */
    63 int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int trylock)
     63int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int flags)
    6464{
    65         return _semaphore_down_timeout(&mtx->sem, usec, trylock);
     65        return _semaphore_down_timeout(&mtx->sem, usec, flags);
    6666}
    6767
     
    7676        semaphore_up(&mtx->sem);
    7777}
     78
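
The convenience wrappers built on top of _mutex_lock_timeout() live in the mutex header, outside this diff. A sketch of how they might map onto the new flags (an assumption, not part of the changeset):

    /* Sketch (assumed): blocking lock, try-lock and timed lock expressed
     * through the single flags-based primitive. */
    #define mutex_lock(mtx) \
            _mutex_lock_timeout((mtx), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
    #define mutex_trylock(mtx) \
            _mutex_lock_timeout((mtx), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING)
    #define mutex_lock_timeout(mtx, usec) \
            _mutex_lock_timeout((mtx), (usec), SYNCH_FLAGS_NON_BLOCKING)
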
  • generic/src/synch/rwlock.c

    r01ebbdf r116d1ef4  
    9090 * @param rwl Reader/Writer lock.
    9191 * @param usec Timeout in microseconds.
    92  * @param trylock Switches between blocking and non-blocking mode.
     92 * @param flags Specify mode of operation.
    9393 *
    9494 * For exact description of possible combinations of
    95  * @usec and @trylock, see comment for waitq_sleep_timeout().
     95 * usec and flags, see comment for waitq_sleep_timeout().
    9696 *
    9797 * @return See comment for waitq_sleep_timeout().
    9898 */
    99 int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)
     99int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int flags)
    100100{
    101101        ipl_t ipl;
     
    112112         * They just need to acquire the exclusive mutex.
    113113         */
    114         rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock);
     114        rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
    115115        if (SYNCH_FAILED(rc)) {
    116116
    117117                /*
    118                  * Lock operation timed out.
     118                 * Lock operation timed out or was interrupted.
    119119                 * The state of rwl is UNKNOWN at this point.
    120120                 * No claims about its holder can be made.
     
    144144 * @param rwl Reader/Writer lock.
    145145 * @param usec Timeout in microseconds.
    146  * @param trylock Switches between blocking and non-blocking mode.
     146 * @param flags Select mode of operation.
    147147 *
    148148 * For exact description of possible combinations of
    149  * usec and trylock, see comment for waitq_sleep_timeout().
     149 * usec and flags, see comment for waitq_sleep_timeout().
    150150 *
    151151 * @return See comment for waitq_sleep_timeout().
    152152 */
    153 int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)
     153int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int flags)
    154154{
    155155        int rc;
     
    200200                #endif
    201201                                 
    202                 rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock);
     202                rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
    203203                switch (rc) {
    204204                        case ESYNCH_WOULD_BLOCK:
     
    209209                                spinlock_unlock(&rwl->lock);
    210210                        case ESYNCH_TIMEOUT:
     211                        case ESYNCH_INTERRUPTED:
    211212                                /*
    212                                  * The sleep timeouted.
     213                                 * The sleep timed out.
    213214                                 * We just restore interrupt priority level.
    214215                                 */
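
Because the inner mutex sleep may now end with ESYNCH_INTERRUPTED, callers of the timed lock operations have one more failure case to handle. A caller-side sketch (identifiers outside the hunks are assumed):

    /* Sketch: try to take the write lock for at most 1 s, interruptibly. */
    int rc;

    rc = _rwlock_write_lock_timeout(&rwl, 1000000, SYNCH_FLAGS_INTERRUPTIBLE);
    if (SYNCH_FAILED(rc)) {
            /* ESYNCH_TIMEOUT or ESYNCH_INTERRUPTED: the lock was not taken. */
    } else {
            /* ... critical section ... */
            rwlock_write_unlock(&rwl);      /* unlock name assumed */
    }
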
  • generic/src/synch/semaphore.c

    r01ebbdf r116d1ef4  
    6868 * @param s Semaphore.
    6969 * @param usec Timeout in microseconds.
    70  * @param trydown Switches between blocking and non-blocking mode.
     70 * @param flags Select mode of operation.
    7171 *
    7272 * For exact description of possible combinations of
    73  * usec and trydown, see comment for waitq_sleep_timeout().
     73 * usec and flags, see comment for waitq_sleep_timeout().
    7474 *
    7575 * @return See comment for waitq_sleep_timeout().
    7676 */
    77 int _semaphore_down_timeout(semaphore_t *s, __u32 usec, int trydown)
     77int _semaphore_down_timeout(semaphore_t *s, __u32 usec, int flags)
    7878{
    79         return waitq_sleep_timeout(&s->wq, usec, trydown);
     79        return waitq_sleep_timeout(&s->wq, usec, flags);
    8080}
    8181
  • generic/src/synch/waitq.c

    r01ebbdf r116d1ef4  
    136136        spinlock_lock(&t->lock);
    137137        if ((wq = t->sleep_queue)) {            /* assignment */
     138                if (!(t->sleep_interruptible)) {
     139                        /*
     140                         * The sleep cannot be interrupted.
     141                         */
     142                        spinlock_unlock(&t->lock);
     143                        goto out;
     144                }
     145                       
    138146                if (!spinlock_trylock(&wq->lock)) {
    139147                        spinlock_unlock(&t->lock);
     
    160168/** Sleep until either wakeup, timeout or interruption occurs
    161169 *
    162  * This is a sleep implementation which allows itself to be
     170 * This is a sleep implementation which allows itself to time out or to be
    163171 * interrupted from the sleep, restoring a failover context.
    164172 *
     
    170178 * @param wq Pointer to wait queue.
    171179 * @param usec Timeout in microseconds.
    172  * @param nonblocking Blocking vs. non-blocking operation mode switch.
    173  *
    174  * If usec is greater than zero, regardless of the value of nonblocking,
    175  * the call will not return until either timeout or wakeup comes.
    176  *
    177  * If usec is zero and @nonblocking is zero (false), the call
    178  * will not return until wakeup comes.
    179  *
    180  * If usec is zero and nonblocking is non-zero (true), the call will
     180 * @param flags Specify mode of the sleep.
     181 *
     182 * The sleep can be interrupted only if the
     183 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
     184 
     185 * If usec is greater than zero, regardless of the value of the
     186 * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either timeout,
     187 * interruption or wakeup comes.
     188 *
     189 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags, the call
     190 * will not return until wakeup or interruption comes.
     191 *
     192 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the call will
    181193 * immediately return, reporting either success or failure.
    182194 *
    183  * @return      Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT,
     195 * @return      Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, ESYNCH_INTERRUPTED,
    184196 *              ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED.
    185197 *
     
    198210 * attempted.
    199211 */
    200 int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking)
     212int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int flags)
    201213{
    202214        ipl_t ipl;
     
    204216       
    205217        ipl = waitq_sleep_prepare(wq);
    206         rc = waitq_sleep_timeout_unsafe(wq, usec, nonblocking);
     218        rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
    207219        waitq_sleep_finish(wq, rc, ipl);
    208220        return rc;
     
    277289 * @param wq See waitq_sleep_timeout().
    278290 * @param usec See waitq_sleep_timeout().
    279  * @param nonblocking See waitq_sleep_timeout().
     291 * @param flags See waitq_sleep_timeout().
    280292 *
    281293 * @return See waitq_sleep_timeout().
    282294 */
    283 int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int nonblocking)
     295int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int flags)
    284296{
    285297        /* checks whether to go to sleep at all */
     
    289301        }
    290302        else {
    291                 if (nonblocking && (usec == 0)) {
     303                if ((flags & SYNCH_FLAGS_NON_BLOCKING) && (usec == 0)) {
    292304                        /* return immediatelly instead of going to sleep */
    293305                        return ESYNCH_WOULD_BLOCK;
     
    300312        spinlock_lock(&THREAD->lock);
    301313
    302         /*
    303          * Set context that will be restored if the sleep
    304          * of this thread is ever interrupted.
    305          */
    306         if (!context_save(&THREAD->sleep_interruption_context)) {
    307                 /* Short emulation of scheduler() return code. */
    308                 spinlock_unlock(&THREAD->lock);
    309                 return ESYNCH_INTERRUPTED;
     314        if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {
     315                /*
     316                 * Set context that will be restored if the sleep
     317                 * of this thread is ever interrupted.
     318                 */
     319                THREAD->sleep_interruptible = true;
     320                if (!context_save(&THREAD->sleep_interruption_context)) {
     321                        /* Short emulation of scheduler() return code. */
     322                        spinlock_unlock(&THREAD->lock);
     323                        return ESYNCH_INTERRUPTED;
     324                }
     325        } else {
     326                THREAD->sleep_interruptible = false;
    310327        }
    311328
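
Putting the pieces together: a sleeper opts in with SYNCH_FLAGS_INTERRUPTIBLE, and only then will waitq_interrupt_sleep() (first hunk above) break its sleep and make it return ESYNCH_INTERRUPTED. A caller-side sketch, with wq assumed to be an initialized wait queue:

    /* Sketch: an interruptible, untimed sleep on a wait queue. */
    int rc;

    rc = waitq_sleep_timeout(&wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);
    switch (rc) {
    case ESYNCH_INTERRUPTED:
            /* Broken out of the sleep by waitq_interrupt_sleep() called
             * from another context; the failover context was restored. */
            break;
    case ESYNCH_OK_BLOCKED:
    case ESYNCH_OK_ATOMIC:
            /* A genuine wakeup arrived. */
            break;
    }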