Changeset 116d1ef4 in mainline


Timestamp:
2006-06-02T12:26:50Z (18 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
d0c5901
Parents:
01ebbdf
Message:

Replace the nonblocking argument of waitq_sleep_timeout() with flags that specify the mode of operation.
A flag can now be used to request interruptible sleep.
Modify waitq_interrupt_sleep() to interrupt only those threads that used this flag.
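
For illustration only (not part of the changeset), a minimal kernel-side sketch of the new calling convention; the include paths, function name and surrounding context are assumed, while the flag and return-value names come from the diffs below:

    /* Illustrative sketch; assumes a HelenOS kernel context as of this changeset. */
    #include <synch/waitq.h>
    #include <synch/synch.h>

    static void example_sleep(waitq_t *wq, __u32 usec)
    {
        int rc;

        /* Old form: waitq_sleep_timeout(wq, usec, SYNCH_NON_BLOCKING); */
        rc = waitq_sleep_timeout(wq, usec,
            SYNCH_FLAGS_NON_BLOCKING | SYNCH_FLAGS_INTERRUPTIBLE);
        if (rc == ESYNCH_INTERRUPTED) {
            /* Woken prematurely by waitq_interrupt_sleep(). */
        }
    }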

Files:
19 edited

  • arch/ia32/src/smp/smp.c

    r01ebbdf r116d1ef4  
    168168                         * supposed to wake us up.
    169169                         */
    170                         if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_BLOCKING) == ESYNCH_TIMEOUT)
     170                        if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT)
    171171                                printf("%s: waiting for cpu%d (APIC ID = %d) timed out\n", __FUNCTION__, config.cpu_active > i ? config.cpu_active : i, ops->cpu_apic_id(i));
    172172                } else
  • generic/include/ipc/ipc.h

    r01ebbdf r116d1ef4  
    210210
    211211extern void ipc_init(void);
    212 extern call_t * ipc_wait_for_call(answerbox_t *box, __u32 usec, int nonblocking);
     212extern call_t * ipc_wait_for_call(answerbox_t *box, __u32 usec, int flags);
    213213extern void ipc_answer(answerbox_t *box, call_t *request);
    214214extern int ipc_call(phone_t *phone, call_t *call);
  • generic/include/proc/thread.h

    r01ebbdf r116d1ef4  
    8888        context_t sleep_interruption_context;
    8989
     90        bool sleep_interruptible;               /**< If true, the thread can be interrupted from sleep. */
    9091        waitq_t *sleep_queue;                   /**< Wait queue in which this thread sleeps. */
    9192        timeout_t sleep_timeout;                /**< Timeout used for timeoutable sleeping.  */
  • generic/include/synch/condvar.h

    r01ebbdf r116d1ef4  
    4040
    4141#define condvar_wait(cv,mtx) \
    42         _condvar_wait_timeout((cv),(mtx),SYNCH_NO_TIMEOUT)
     42        _condvar_wait_timeout((cv),(mtx),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE)
    4343#define condvar_wait_timeout(cv,mtx,usec) \
    44         _condvar_wait_timeout((cv),(mtx),(usec))
     44        _condvar_wait_timeout((cv),(mtx),(usec),SYNCH_FLAGS_NONE)
    4545
    4646extern void condvar_initialize(condvar_t *cv);
    4747extern void condvar_signal(condvar_t *cv);
    4848extern void condvar_broadcast(condvar_t *cv);
    49 extern int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, __u32 usec);
     49extern int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, __u32 usec, int flags);
    5050
    5151#endif
  • generic/include/synch/futex.h

    r01ebbdf r116d1ef4  
    4545
    4646extern void futex_init(void);
    47 extern __native sys_futex_sleep_timeout(__address uaddr, __u32 usec, int trydown);
     47extern __native sys_futex_sleep_timeout(__address uaddr, __u32 usec, int flags);
    4848extern __native sys_futex_wakeup(__address uaddr);
    4949
  • generic/include/synch/mutex.h

    r01ebbdf r116d1ef4  
    4040
    4141#define mutex_lock(mtx) \
    42         _mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_BLOCKING)
     42        _mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE)
    4343#define mutex_trylock(mtx) \
    44         _mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_NON_BLOCKING)
     44        _mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NON_BLOCKING)
    4545#define mutex_lock_timeout(mtx,usec) \
    46         _mutex_lock_timeout((mtx),(usec),SYNCH_NON_BLOCKING)
     46        _mutex_lock_timeout((mtx),(usec),SYNCH_FLAGS_NON_BLOCKING)
    4747#define mutex_lock_active(mtx) \
    4848        while (mutex_trylock((mtx)) != ESYNCH_OK_ATOMIC)
    4949
    5050extern void mutex_initialize(mutex_t *mtx);
    51 extern int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int trylock);
     51extern int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int flags);
    5252extern void mutex_unlock(mutex_t *mtx);
    5353
  • generic/include/synch/rwlock.h

    r01ebbdf r116d1ef4  
    4949
    5050#define rwlock_write_lock(rwl) \
    51         _rwlock_write_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_BLOCKING)
     51        _rwlock_write_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE)
    5252#define rwlock_read_lock(rwl) \
    53         _rwlock_read_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_BLOCKING)
     53        _rwlock_read_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE)
    5454#define rwlock_write_trylock(rwl) \
    55         _rwlock_write_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_NON_BLOCKING)
     55        _rwlock_write_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NON_BLOCKING)
    5656#define rwlock_read_trylock(rwl) \
    57         _rwlock_read_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_NON_BLOCKING)
     57        _rwlock_read_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NON_BLOCKING)
    5858#define rwlock_write_lock_timeout(rwl,usec) \
    59         _rwlock_write_lock_timeout((rwl),(usec),SYNCH_NON_BLOCKING)
     59        _rwlock_write_lock_timeout((rwl),(usec),SYNCH_FLAGS_NONE)
    6060#define rwlock_read_lock_timeout(rwl,usec) \
    61         _rwlock_read_lock_timeout((rwl),(usec),SYNCH_NON_BLOCKING)
     61        _rwlock_read_lock_timeout((rwl),(usec),SYNCH_FLAGS_NONE)
    6262
    6363extern void rwlock_initialize(rwlock_t *rwl);
    6464extern void rwlock_read_unlock(rwlock_t *rwl);
    6565extern void rwlock_write_unlock(rwlock_t *rwl);
    66 extern int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock);
    67 extern int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock);
     66extern int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int flags);
     67extern int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int flags);
    6868
    6969#endif
     70
  • generic/include/synch/semaphore.h

    r01ebbdf r116d1ef4  
    4141
    4242#define semaphore_down(s) \
    43         _semaphore_down_timeout((s),SYNCH_NO_TIMEOUT,SYNCH_BLOCKING)
     43        _semaphore_down_timeout((s),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE)
    4444#define semaphore_trydown(s) \
    45         _semaphore_down_timeout((s),SYNCH_NO_TIMEOUT,SYNCH_NON_BLOCKING)       
     45        _semaphore_down_timeout((s),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NON_BLOCKING) 
    4646#define semaphore_down_timeout(s,usec) \
    47         _semaphore_down_timeout((s),(usec),SYNCH_NON_BLOCKING)
     47        _semaphore_down_timeout((s),(usec),SYNCH_FLAGS_NONE)
    4848
    4949extern void semaphore_initialize(semaphore_t *s, int val);
    50 extern int _semaphore_down_timeout(semaphore_t *s, __u32 usec, int trydown);
     50extern int _semaphore_down_timeout(semaphore_t *s, __u32 usec, int flags);
    5151extern void semaphore_up(semaphore_t *s);
    5252
    5353#endif
     54
  • generic/include/synch/synch.h

    r01ebbdf r116d1ef4  
    3131
    3232#define SYNCH_NO_TIMEOUT        0       /**< Request with no timeout. */
    33 #define SYNCH_BLOCKING          0       /**< Blocking operation request. */
    34 #define SYNCH_NON_BLOCKING      1       /**< Non-blocking operation request. */
     33
     34#define SYNCH_FLAGS_NONE                0       /**< No flags specified. */
     35#define SYNCH_FLAGS_NON_BLOCKING        (1<<0)  /**< Non-blocking operation request. */
     36#define SYNCH_FLAGS_INTERRUPTIBLE       (1<<1)  /**< Interruptible operation. */
    3537
    3638#define ESYNCH_WOULD_BLOCK      1       /**< Could not satisfy the request without going to sleep. */
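
The two old, mutually exclusive constants are replaced by independent bits that can be combined with bitwise OR. A sketch of how a callee might test them (the helper name is illustrative; the checks mirror those added to waitq_sleep_timeout_unsafe() further down):

    /* Illustrative helper, not part of the changeset: the flags are
     * independent bits and may be combined with bitwise OR. */
    static void example_inspect(int flags)
    {
        if (flags & SYNCH_FLAGS_NON_BLOCKING) {
            /* conditional attempt; may yield ESYNCH_WOULD_BLOCK when usec == 0 */
        }
        if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {
            /* waitq_interrupt_sleep() is allowed to break the sleep */
        }
        /* flags == SYNCH_FLAGS_NONE requests a plain blocking sleep */
    }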
  • generic/include/synch/waitq.h

    r01ebbdf r116d1ef4  
    5353
    5454#define waitq_sleep(wq) \
    55         waitq_sleep_timeout((wq),SYNCH_NO_TIMEOUT,SYNCH_BLOCKING)
     55        waitq_sleep_timeout((wq),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE)
    5656
    5757extern void waitq_initialize(waitq_t *wq);
    58 extern int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking);
     58extern int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int flags);
    5959extern ipl_t waitq_sleep_prepare(waitq_t *wq);
    60 extern int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int nonblocking);
     60extern int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int flags);
    6161extern void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl);
    6262extern void waitq_wakeup(waitq_t *wq, bool all);
  • generic/src/ipc/ipc.c

    r01ebbdf r116d1ef4  
    143143
    144144        ipc_call(phone, request);
    145         ipc_wait_for_call(&sync_box, SYNCH_NO_TIMEOUT, SYNCH_BLOCKING);
     145        ipc_wait_for_call(&sync_box, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
    146146}
    147147
     
    306306 * @param usec Timeout in microseconds. See documentation for waitq_sleep_timeout() for
    307307 *             decription of its special meaning.
    308  * @param nonblocking Blocking vs. non-blocking operation mode switch. See documentation
    309  *                    for waitq_sleep_timeout() for description of its special meaning.
     308 * @param flags Select mode of sleep operation. See documentation for waitq_sleep_timeout()i
     309 *              for description of its special meaning.
    310310 * @return Recived message address
    311311 * - to distinguish between call and answer, look at call->flags
    312312 */
    313 call_t * ipc_wait_for_call(answerbox_t *box, __u32 usec, int nonblocking)
     313call_t * ipc_wait_for_call(answerbox_t *box, __u32 usec, int flags)
    314314{
    315315        call_t *request;
     
    318318
    319319restart:
    320         rc = waitq_sleep_timeout(&box->wq, usec, nonblocking);
     320        rc = waitq_sleep_timeout(&box->wq, usec, flags);
    321321        if (SYNCH_FAILED(rc))
    322322                return NULL;
     
    413413        /* Wait for all async answers to arrive */
    414414        while (atomic_get(&task->active_calls)) {
    415                 call = ipc_wait_for_call(&task->answerbox, SYNCH_NO_TIMEOUT, SYNCH_BLOCKING);
     415                call = ipc_wait_for_call(&task->answerbox, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
    416416                ASSERT((call->flags & IPC_CALL_ANSWERED) || (call->flags & IPC_CALL_NOTIF));
    417417                ASSERT(! (call->flags & IPC_CALL_STATIC_ALLOC));
  • generic/src/ipc/sysipc.c

    r01ebbdf r116d1ef4  
    503503 * @param calldata Pointer to buffer where the call/answer data is stored
    504504 * @param usec Timeout. See waitq_sleep_timeout() for explanation.
    505  * @param nonblocking See waitq_sleep_timeout() for explanation.
     505 * @param flags Select mode of sleep operation. See waitq_sleep_timeout() for explanation.
    506506 *
    507507 * @return Callid, if callid & 1, then the call is answer
    508508 */
    509 __native sys_ipc_wait_for_call(ipc_data_t *calldata, __u32 usec, int nonblocking)
     509__native sys_ipc_wait_for_call(ipc_data_t *calldata, __u32 usec, int flags)
    510510{
    511511        call_t *call;
    512512
    513513restart:       
    514         call = ipc_wait_for_call(&TASK->answerbox, usec, nonblocking);
     514        call = ipc_wait_for_call(&TASK->answerbox, usec, flags | SYNCH_FLAGS_INTERRUPTIBLE);
    515515        if (!call)
    516516                return 0;
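
Note the pattern above: the userspace-facing entry point ORs SYNCH_FLAGS_INTERRUPTIBLE into whatever the caller supplied, so syscall sleeps always stay interruptible (sys_futex_sleep_timeout() below does the same). A hedged sketch of the pattern, with an illustrative wrapper name:

    /* Illustrative pattern only: any wait a userspace task can block in
     * should remain interruptible regardless of the caller-supplied flags. */
    static int example_user_wait(waitq_t *wq, __u32 usec, int flags)
    {
        return waitq_sleep_timeout(wq, usec, flags | SYNCH_FLAGS_INTERRUPTIBLE);
    }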
  • generic/src/proc/thread.c

    r01ebbdf r116d1ef4  
    304304       
    305305        timeout_initialize(&t->sleep_timeout);
     306        t->sleep_interruptible = false;
    306307        t->sleep_queue = NULL;
    307308        t->timeout_pending = 0;
     
    386387        waitq_initialize(&wq);
    387388
    388         (void) waitq_sleep_timeout(&wq, usec, SYNCH_NON_BLOCKING);
     389        (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
    389390}
    390391
  • generic/src/synch/condvar.c

    r01ebbdf r116d1ef4  
    7575 * @param mtx Mutex.
    7676 * @param usec Timeout value in microseconds.
     77 * @param flags Select mode of operation.
    7778 *
    78  * For exact description of meaning of possible values of usec,
    79  * see comment for waitq_sleep_timeout().
     79 * For exact description of meaning of possible combinations
     80 * of usec and flags, see comment for waitq_sleep_timeout().
     81 * Note that when SYNCH_FLAGS_NON_BLOCKING is specified here,
     82 * ESYNCH_WOULD_BLOCK is always returned.
    8083 *
    8184 * @return See comment for waitq_sleep_timeout().
    8285 */
    83 int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, __u32 usec)
     86int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, __u32 usec, int flags)
    8487{
    8588        int rc;
     
    9093
    9194        cv->wq.missed_wakeups = 0;      /* Enforce blocking. */
    92         rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, SYNCH_BLOCKING);
     95        rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
    9396
    9497        mutex_lock(mtx);
  • generic/src/synch/futex.c

    r01ebbdf r116d1ef4  
    100100 * @param uaddr Userspace address of the futex counter.
    101101 * @param usec If non-zero, number of microseconds this thread is willing to sleep.
    102  * @param trydown If usec is zero and trydown is non-zero, conditional operation will be attempted.
     102 * @param flags Select mode of operation.
    103103 *
    104104 * @return One of ESYNCH_TIMEOUT, ESYNCH_OK_ATOMIC and ESYNCH_OK_BLOCKED. See synch.h.
    105105 *         If there is no physical mapping for uaddr ENOENT is returned.
    106106 */
    107 __native sys_futex_sleep_timeout(__address uaddr, __u32 usec, int trydown)
     107__native sys_futex_sleep_timeout(__address uaddr, __u32 usec, int flags)
    108108{
    109109        futex_t *futex;
     
    131131        futex = futex_find(paddr);
    132132       
    133         return (__native) waitq_sleep_timeout(&futex->wq, usec, trydown);
     133        return (__native) waitq_sleep_timeout(&futex->wq, usec, flags | SYNCH_FLAGS_INTERRUPTIBLE);
    134134}
    135135
  • generic/src/synch/mutex.c

    r01ebbdf r116d1ef4  
    5454 * @param mtx Mutex.
    5555 * @param usec Timeout in microseconds.
    56  * @param trylock Switches between blocking and non-blocking mode.
     56 * @param flags Specify mode of operation.
    5757 *
    5858 * For exact description of possible combinations of
    59  * usec and trylock, see comment for waitq_sleep_timeout().
     59 * usec and flags, see comment for waitq_sleep_timeout().
    6060 *
    6161 * @return See comment for waitq_sleep_timeout().
    6262 */
    63 int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int trylock)
     63int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int flags)
    6464{
    65         return _semaphore_down_timeout(&mtx->sem, usec, trylock);
     65        return _semaphore_down_timeout(&mtx->sem, usec, flags);
    6666}
    6767
     
    7676        semaphore_up(&mtx->sem);
    7777}
     78
  • generic/src/synch/rwlock.c

    r01ebbdf r116d1ef4  
    9090 * @param rwl Reader/Writer lock.
    9191 * @param usec Timeout in microseconds.
    92  * @param trylock Switches between blocking and non-blocking mode.
     92 * @param flags Specify mode of operation.
    9393 *
    9494 * For exact description of possible combinations of
    95  * @usec and @trylock, see comment for waitq_sleep_timeout().
     95 * usec and flags, see comment for waitq_sleep_timeout().
    9696 *
    9797 * @return See comment for waitq_sleep_timeout().
    9898 */
    99 int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)
     99int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int flags)
    100100{
    101101        ipl_t ipl;
     
    112112         * They just need to acquire the exclusive mutex.
    113113         */
    114         rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock);
     114        rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
    115115        if (SYNCH_FAILED(rc)) {
    116116
    117117                /*
    118                  * Lock operation timed out.
     118                 * Lock operation timed out or was interrupted.
    119119                 * The state of rwl is UNKNOWN at this point.
    120120                 * No claims about its holder can be made.
     
    144144 * @param rwl Reader/Writer lock.
    145145 * @param usec Timeout in microseconds.
    146  * @param trylock Switches between blocking and non-blocking mode.
     146 * @param flags Select mode of operation.
    147147 *
    148148 * For exact description of possible combinations of
    149  * usec and trylock, see comment for waitq_sleep_timeout().
     149 * usec and flags, see comment for waitq_sleep_timeout().
    150150 *
    151151 * @return See comment for waitq_sleep_timeout().
    152152 */
    153 int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)
     153int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int flags)
    154154{
    155155        int rc;
     
    200200                #endif
    201201                                 
    202                 rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock);
     202                rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
    203203                switch (rc) {
    204204                        case ESYNCH_WOULD_BLOCK:
     
    209209                                spinlock_unlock(&rwl->lock);
    210210                        case ESYNCH_TIMEOUT:
     211                        case ESYNCH_INTERRUPTED:
    211212                                /*
    212                                  * The sleep timeouted.
     213                                 * The sleep timed out.
    213214                                 * We just restore interrupt priority level.
    214215                                 */
  • generic/src/synch/semaphore.c

    r01ebbdf r116d1ef4  
    6868 * @param s Semaphore.
    6969 * @param usec Timeout in microseconds.
    70  * @param trydown Switches between blocking and non-blocking mode.
     70 * @param flags Select mode of operation.
    7171 *
    7272 * For exact description of possible combinations of
    73  * usec and trydown, see comment for waitq_sleep_timeout().
     73 * usec and flags, see comment for waitq_sleep_timeout().
    7474 *
    7575 * @return See comment for waitq_sleep_timeout().
    7676 */
    77 int _semaphore_down_timeout(semaphore_t *s, __u32 usec, int trydown)
     77int _semaphore_down_timeout(semaphore_t *s, __u32 usec, int flags)
    7878{
    79         return waitq_sleep_timeout(&s->wq, usec, trydown);
     79        return waitq_sleep_timeout(&s->wq, usec, flags);
    8080}
    8181
  • generic/src/synch/waitq.c

    r01ebbdf r116d1ef4  
    136136        spinlock_lock(&t->lock);
    137137        if ((wq = t->sleep_queue)) {            /* assignment */
     138                if (!(t->sleep_interruptible)) {
     139                        /*
     140                         * The sleep cannot be interrupted.
     141                         */
     142                        spinlock_unlock(&t->lock);
     143                        goto out;
     144                }
     145                       
    138146                if (!spinlock_trylock(&wq->lock)) {
    139147                        spinlock_unlock(&t->lock);
     
    160168/** Sleep until either wakeup, timeout or interruption occurs
    161169 *
    162  * This is a sleep implementation which allows itself to be
     170 * This is a sleep implementation which allows itself to time out or to be
    163171 * interrupted from the sleep, restoring a failover context.
    164172 *
     
    170178 * @param wq Pointer to wait queue.
    171179 * @param usec Timeout in microseconds.
    172  * @param nonblocking Blocking vs. non-blocking operation mode switch.
    173  *
    174  * If usec is greater than zero, regardless of the value of nonblocking,
    175  * the call will not return until either timeout or wakeup comes.
    176  *
    177  * If usec is zero and @nonblocking is zero (false), the call
    178  * will not return until wakeup comes.
    179  *
    180  * If usec is zero and nonblocking is non-zero (true), the call will
     180 * @param flags Specify mode of the sleep.
     181 *
     182 * The sleep can be interrupted only if the
     183 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
     184 
     185 * If usec is greater than zero, regardless of the value of the
     186 * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either timeout,
     187 * interruption or wakeup comes.
     188 *
     189 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags, the call
     190 * will not return until wakeup or interruption comes.
     191 *
     192 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the call will
    181193 * immediately return, reporting either success or failure.
    182194 *
    183  * @return      Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT,
     195 * @return      Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, ESYNCH_INTERRUPTED,
    184196 *              ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED.
    185197 *
     
    198210 * attempted.
    199211 */
    200 int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking)
     212int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int flags)
    201213{
    202214        ipl_t ipl;
     
    204216       
    205217        ipl = waitq_sleep_prepare(wq);
    206         rc = waitq_sleep_timeout_unsafe(wq, usec, nonblocking);
     218        rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
    207219        waitq_sleep_finish(wq, rc, ipl);
    208220        return rc;
     
    277289 * @param wq See waitq_sleep_timeout().
    278290 * @param usec See waitq_sleep_timeout().
    279  * @param nonblocking See waitq_sleep_timeout().
     291 * @param flags See waitq_sleep_timeout().
    280292 *
    281293 * @return See waitq_sleep_timeout().
    282294 */
    283 int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int nonblocking)
     295int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int flags)
    284296{
    285297        /* checks whether to go to sleep at all */
     
    289301        }
    290302        else {
    291                 if (nonblocking && (usec == 0)) {
     303                if ((flags & SYNCH_FLAGS_NON_BLOCKING) && (usec == 0)) {
    292304                        /* return immediatelly instead of going to sleep */
    293305                        return ESYNCH_WOULD_BLOCK;
     
    300312        spinlock_lock(&THREAD->lock);
    301313
    302         /*
    303          * Set context that will be restored if the sleep
    304          * of this thread is ever interrupted.
    305          */
    306         if (!context_save(&THREAD->sleep_interruption_context)) {
    307                 /* Short emulation of scheduler() return code. */
    308                 spinlock_unlock(&THREAD->lock);
    309                 return ESYNCH_INTERRUPTED;
     314        if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {
     315                /*
     316                 * Set context that will be restored if the sleep
     317                 * of this thread is ever interrupted.
     318                 */
     319                THREAD->sleep_interruptible = true;
     320                if (!context_save(&THREAD->sleep_interruption_context)) {
     321                        /* Short emulation of scheduler() return code. */
     322                        spinlock_unlock(&THREAD->lock);
     323                        return ESYNCH_INTERRUPTED;
     324                }
     325        } else {
     326                THREAD->sleep_interruptible = false;
    310327        }
    311328
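
For reference, a minimal sketch (not part of the changeset) of handling every return value documented above when sleeping with the new flags; the function name and context are assumed:

    /* Illustrative sketch; assumes a kernel context with an initialized wait queue. */
    static void example_handle(waitq_t *wq, __u32 usec)
    {
        switch (waitq_sleep_timeout(wq, usec,
            SYNCH_FLAGS_NON_BLOCKING | SYNCH_FLAGS_INTERRUPTIBLE)) {
        case ESYNCH_WOULD_BLOCK:
            /* usec == 0 and the request could not be satisfied atomically */
            break;
        case ESYNCH_TIMEOUT:
            /* usec expired before a wakeup arrived */
            break;
        case ESYNCH_INTERRUPTED:
            /* the sleep was broken by waitq_interrupt_sleep() */
            break;
        case ESYNCH_OK_ATOMIC:
            /* satisfied without sleeping */
            break;
        case ESYNCH_OK_BLOCKED:
            /* slept and was woken up */
            break;
        }
    }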