Fork us on GitHub Follow us on Facebook Follow us on Twitter

Changeset 897fd8f1 in mainline


Ignore:
Timestamp:
2017-12-19T18:18:15Z (4 years ago)
Author:
Jiří Zárevúcky <zarevucky.jiri@…>
Branches:
lfn, master
Children:
55b56f4
Parents:
7f11dc6
Message:

Use <errno.h> instead of special ESYNCH_xx error codes.

Files:
20 edited

Legend:

Unmodified
Added
Removed
  • abi/include/abi/synch.h

    r7f11dc6 r897fd8f1  
    4646#define SYNCH_FLAGS_INTERRUPTIBLE  (1 << 1)
    4747
    48 /** Could not satisfy the request without going to sleep. */
    49 #define ESYNCH_WOULD_BLOCK  1
    50 /** Timeout occurred. */
    51 #define ESYNCH_TIMEOUT      2
    52 /** Sleep was interrupted. */
    53 #define ESYNCH_INTERRUPTED  4
    54 /** Operation succeeded without sleeping. */
    55 #define ESYNCH_OK_ATOMIC    8
    56 /** Operation succeeded and did sleep. */
    57 #define ESYNCH_OK_BLOCKED   16
    58 
    59 #define SYNCH_FAILED(rc) \
    60         ((rc) & (ESYNCH_WOULD_BLOCK | ESYNCH_TIMEOUT | ESYNCH_INTERRUPTED))
    61 
    62 #define SYNCH_OK(rc) \
    63         ((rc) & (ESYNCH_OK_ATOMIC | ESYNCH_OK_BLOCKED))
    64 
    6548#endif
    6649
  • kernel/arch/ia32/src/smp/smp.c

    r7f11dc6 r897fd8f1  
    3939#include <arch/boot/boot.h>
    4040#include <assert.h>
     41#include <errno.h>
    4142#include <genarch/acpi/acpi.h>
    4243#include <genarch/acpi/madt.h>
     
    178179                         */
    179180                        if (waitq_sleep_timeout(&ap_completion_wq, 1000000,
    180                             SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT) {
     181                            SYNCH_FLAGS_NONE, NULL) == ETIMEOUT) {
    181182                                log(LF_ARCH, LVL_NOTE, "%s: waiting for cpu%u "
    182183                                    "(APIC ID = %d) timed out", __FUNCTION__,
  • kernel/arch/sparc64/src/smp/sun4u/smp.c

    r7f11dc6 r897fd8f1  
    106106        waking_up_mid = mid;
    107107               
    108         if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_FLAGS_NONE) ==
    109             ESYNCH_TIMEOUT)
     108        if (waitq_sleep_timeout(&ap_completion_wq, 1000000,
     109            SYNCH_FLAGS_NONE, NULL) == ETIMEOUT)
    110110                log(LF_ARCH, LVL_NOTE, "%s: waiting for processor (mid = %" PRIu32
    111111                    ") timed out", __func__, mid);
  • kernel/arch/sparc64/src/smp/sun4v/smp.c

    r7f11dc6 r897fd8f1  
    373373#endif
    374374       
    375         if (waitq_sleep_timeout(&ap_completion_wq, 10000000, SYNCH_FLAGS_NONE) ==
    376             ESYNCH_TIMEOUT)
     375        if (waitq_sleep_timeout(&ap_completion_wq, 10000000,
     376            SYNCH_FLAGS_NONE, NULL) == ETIMEOUT)
    377377                printf("%s: waiting for processor (cpuid = %" PRIu64 ") timed out\n",
    378378                    __func__, cpuid);
  • kernel/generic/include/synch/semaphore.h

    r7f11dc6 r897fd8f1  
    3636#define KERN_SEMAPHORE_H_
    3737
     38#include <errno.h>
    3839#include <stdint.h>
    3940#include <synch/waitq.h>
     
    5455
    5556#define semaphore_down_interruptable(s) \
    56         (ESYNCH_INTERRUPTED != _semaphore_down_timeout((s), SYNCH_NO_TIMEOUT, \
    57                 SYNCH_FLAGS_INTERRUPTIBLE))
     57        (_semaphore_down_timeout((s), SYNCH_NO_TIMEOUT, \
     58                SYNCH_FLAGS_INTERRUPTIBLE) != EINTR)
    5859
    5960extern void semaphore_initialize(semaphore_t *, int);
  • kernel/generic/include/synch/waitq.h

    r7f11dc6 r897fd8f1  
    6767
    6868#define waitq_sleep(wq) \
    69         waitq_sleep_timeout((wq), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
     69        waitq_sleep_timeout((wq), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE, NULL)
    7070
    7171struct thread;
    7272
    7373extern void waitq_initialize(waitq_t *);
    74 extern int waitq_sleep_timeout(waitq_t *, uint32_t, unsigned int);
     74extern int waitq_sleep_timeout(waitq_t *, uint32_t, unsigned int, bool *);
    7575extern ipl_t waitq_sleep_prepare(waitq_t *);
    76 extern int waitq_sleep_timeout_unsafe(waitq_t *, uint32_t, unsigned int);
    77 extern void waitq_sleep_finish(waitq_t *, int, ipl_t);
     76extern int waitq_sleep_timeout_unsafe(waitq_t *, uint32_t, unsigned int, bool *);
     77extern void waitq_sleep_finish(waitq_t *, bool, ipl_t);
    7878extern void waitq_wakeup(waitq_t *, wakeup_mode_t);
    7979extern void _waitq_wakeup_unsafe(waitq_t *, wakeup_mode_t);
  • kernel/generic/src/ipc/ipc.c

    r7f11dc6 r897fd8f1  
    538538       
    539539restart:
    540         rc = waitq_sleep_timeout(&box->wq, usec, flags);
    541         if (SYNCH_FAILED(rc))
     540        rc = waitq_sleep_timeout(&box->wq, usec, flags, NULL);
     541        if (rc != EOK)
    542542                return NULL;
    543543       
     
    638638                phone = list_get_instance(list_first(&box->connected_phones),
    639639                    phone_t, link);
    640                 if (SYNCH_FAILED(mutex_trylock(&phone->lock))) {
     640                if (mutex_trylock(&phone->lock) != EOK) {
    641641                        irq_spinlock_unlock(&box->lock, true);
    642642                        DEADLOCK_PROBE(p_phonelck, DEADLOCK_THRESHOLD);
  • kernel/generic/src/proc/thread.c

    r7f11dc6 r897fd8f1  
    548548 *
    549549 * Threads that are blocked waiting for a synchronization primitive
    550  * are woken up with a return code of ESYNCH_INTERRUPTED if the
     550 * are woken up with a return code of EINTR if the
    551551 * blocking call was interruptable. See waitq_sleep_timeout().
    552552 *
     
    653653        irq_spinlock_unlock(&thread->lock, true);
    654654       
    655         return waitq_sleep_timeout(&thread->join_wq, usec, flags);
     655        return waitq_sleep_timeout(&thread->join_wq, usec, flags, NULL);
    656656}
    657657
     
    700700        waitq_initialize(&wq);
    701701       
    702         (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
     702        (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
    703703}
    704704
  • kernel/generic/src/synch/condvar.c

    r7f11dc6 r897fd8f1  
    8080 * For exact description of meaning of possible combinations of usec and flags,
    8181 * see comment for waitq_sleep_timeout().  Note that when
    82  * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
     82 * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
    8383 * returned.
    8484 *
     
    8989        int rc;
    9090        ipl_t ipl;
     91        bool blocked;
    9192
    9293        ipl = waitq_sleep_prepare(&cv->wq);
     
    9596
    9697        cv->wq.missed_wakeups = 0;      /* Enforce blocking. */
    97         rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
     98        rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, &blocked);
     99        assert(blocked || rc != EOK);
    98100
    99         waitq_sleep_finish(&cv->wq, rc, ipl);
     101        waitq_sleep_finish(&cv->wq, blocked, ipl);
    100102        /* Lock only after releasing the waitq to avoid a possible deadlock. */
    101103        mutex_lock(mtx);
     
    117119 * For exact description of meaning of possible combinations of usec and flags,
    118120 * see comment for waitq_sleep_timeout().  Note that when
    119  * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
     121 * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
    120122 * returned.
    121123 *
     
    127129        int rc;
    128130        ipl_t ipl;
    129        
     131        bool blocked;
     132
    130133        ipl = waitq_sleep_prepare(&cv->wq);
    131134
     
    134137
    135138        cv->wq.missed_wakeups = 0;      /* Enforce blocking. */
    136         rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
     139        rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, &blocked);
     140        assert(blocked || rc != EOK);
    137141
    138         waitq_sleep_finish(&cv->wq, rc, ipl);
     142        waitq_sleep_finish(&cv->wq, blocked, ipl);
    139143        /* Lock only after releasing the waitq to avoid a possible deadlock. */
    140144        spinlock_lock(lock);
     
    152156 * For exact description of meaning of possible combinations of usec and flags,
    153157 * see comment for waitq_sleep_timeout().  Note that when
    154  * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
     158 * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
    155159 * returned.
    156160 *
  • kernel/generic/src/synch/futex.c

    r7f11dc6 r897fd8f1  
    395395 *
    396396 * @return              If there is no physical mapping for uaddr ENOENT is
    397  *                      returned. Otherwise returns a wait result as defined in
    398  *                      synch.h.
     397 *                      returned. Otherwise returns the return value of
     398 *                      waitq_sleep_timeout().
    399399 */
    400400sysarg_t sys_futex_sleep(uintptr_t uaddr)
     
    409409#endif
    410410
    411         int rc = waitq_sleep_timeout(&futex->wq, 0, SYNCH_FLAGS_INTERRUPTIBLE);
     411        int rc = waitq_sleep_timeout(
     412            &futex->wq, 0, SYNCH_FLAGS_INTERRUPTIBLE, NULL);
    412413
    413414#ifdef CONFIG_UDEBUG
     
    430431        if (futex) {
    431432                waitq_wakeup(&futex->wq, WAKEUP_FIRST);
    432                 return 0;
     433                return EOK;
    433434        } else {
    434435                return (sysarg_t) ENOENT;
  • kernel/generic/src/synch/mutex.c

    r7f11dc6 r897fd8f1  
    3737
    3838#include <assert.h>
     39#include <errno.h>
    3940#include <synch/mutex.h>
    4041#include <synch/semaphore.h>
     
    9596                if (mtx->owner == THREAD) {
    9697                        mtx->nesting++;
    97                         return ESYNCH_OK_ATOMIC;
     98                        return EOK;
    9899                } else {
    99100                        rc = _semaphore_down_timeout(&mtx->sem, usec, flags);
    100                         if (SYNCH_OK(rc)) {
     101                        if (rc == EOK) {
    101102                                mtx->owner = THREAD;
    102103                                mtx->nesting = 1;
     
    119120                        }
    120121                        rc = semaphore_trydown(&mtx->sem);
    121                 } while (SYNCH_FAILED(rc) &&
    122                     !(flags & SYNCH_FLAGS_NON_BLOCKING));
     122                } while (rc != EOK && !(flags & SYNCH_FLAGS_NON_BLOCKING));
    123123                if (deadlock_reported)
    124124                        printf("cpu%u: not deadlocked\n", CPU->id);
  • kernel/generic/src/synch/rcu.c

    r7f11dc6 r897fd8f1  
    960960                                SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);
    961961                       
    962                         if (ret == ESYNCH_INTERRUPTED) {
     962                        if (ret == EINTR) {
    963963                                spinlock_unlock(&rcu.gp_lock);
    964964                                return false;                   
     
    10181018
    10191019                /* rcu.expedite_now was signaled. */
    1020                 if (ret == ESYNCH_OK_BLOCKED) {
     1020                if (ret == EOK) {
    10211021                        *expedite = true;
    10221022                }
     
    10241024                spinlock_unlock(&rcu.gp_lock);
    10251025
    1026                 return (ret != ESYNCH_INTERRUPTED);
     1026                return (ret != EINTR);
    10271027        }
    10281028}
     
    12711271                int ret = _condvar_wait_timeout_spinlock(&rcu.gp_ended, &rcu.gp_lock,
    12721272                        SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);
    1273                 interrupted = (ret == ESYNCH_INTERRUPTED);
     1273                interrupted = (ret == EINTR);
    12741274        }
    12751275       
     
    13321332                        &rcu.gp_lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);
    13331333               
    1334                 interrupted = (ret == ESYNCH_INTERRUPTED);
     1334                interrupted = (ret == EINTR);
    13351335        }
    13361336       
     
    14061406        spinlock_unlock(&rcu.gp_lock);
    14071407       
    1408         return (ret != ESYNCH_INTERRUPTED);
     1408        return (ret != EINTR);
    14091409}
    14101410
  • kernel/generic/src/synch/semaphore.c

    r7f11dc6 r897fd8f1  
    7373int _semaphore_down_timeout(semaphore_t *sem, uint32_t usec, unsigned int flags)
    7474{
    75         return waitq_sleep_timeout(&sem->wq, usec, flags);
     75        return waitq_sleep_timeout(&sem->wq, usec, flags, NULL);
    7676}
    7777
  • kernel/generic/src/synch/waitq.c

    r7f11dc6 r897fd8f1  
    4545
    4646#include <assert.h>
     47#include <errno.h>
    4748#include <synch/waitq.h>
    4849#include <synch/spinlock.h>
     
    238239 * @param flags Specify mode of the sleep.
    239240 *
     241 * @param[out] blocked  On return, regardless of the return code,
     242 *                      `*blocked` is set to `true` iff the thread went to
     243 *                      sleep.
     244 *
    240245 * The sleep can be interrupted only if the
    241246 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
     
    251256 * call will immediately return, reporting either success or failure.
    252257 *
    253  * @return ESYNCH_WOULD_BLOCK, meaning that the sleep failed because at the
    254  *         time of the call there was no pending wakeup
    255  * @return ESYNCH_TIMEOUT, meaning that the sleep timed out.
    256  * @return ESYNCH_INTERRUPTED, meaning that somebody interrupted the sleeping
    257  *         thread.
    258  * @return ESYNCH_OK_ATOMIC, meaning that the sleep succeeded and that there
    259  *         was a pending wakeup at the time of the call. The caller was not put
    260  *         asleep at all.
    261  * @return ESYNCH_OK_BLOCKED, meaning that the sleep succeeded; the full sleep
    262  *         was attempted.
    263  *
    264  */
    265 int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags)
     258 * @return EAGAIN, meaning that the sleep failed because it was requested
     259 *                 as SYNCH_FLAGS_NON_BLOCKING, but there was no pending wakeup.
     260 * @return ETIMEOUT, meaning that the sleep timed out.
     261 * @return EINTR, meaning that somebody interrupted the sleeping
     262 *         thread. Check the value of `*blocked` to see if the thread slept,
     263 *         or if a pending interrupt forced it to return immediately.
      264 * @return EOK, meaning that none of the above conditions occurred, and the
      265 *              thread was woken up successfully by `waitq_wakeup()`. Check
     266 *              the value of `*blocked` to see if the thread slept or if
     267 *              the wakeup was already pending.
     268 *
     269 */
     270int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked)
    266271{
    267272        assert((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));
    268273       
    269274        ipl_t ipl = waitq_sleep_prepare(wq);
    270         int rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
    271         waitq_sleep_finish(wq, rc, ipl);
     275        bool nblocked;
     276        int rc = waitq_sleep_timeout_unsafe(wq, usec, flags, &nblocked);
     277        waitq_sleep_finish(wq, nblocked, ipl);
     278
     279        if (blocked != NULL) {
     280                *blocked = nblocked;
     281        }
    272282        return rc;
    273283}
     
    320330 * lock is released.
    321331 *
    322  * @param wq  Wait queue.
    323  * @param rc  Return code of waitq_sleep_timeout_unsafe().
    324  * @param ipl Interrupt level returned by waitq_sleep_prepare().
    325  *
    326  */
    327 void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
    328 {
    329         switch (rc) {
    330         case ESYNCH_WOULD_BLOCK:
    331         case ESYNCH_OK_ATOMIC:
    332                 irq_spinlock_unlock(&wq->lock, false);
    333                 break;
    334         default:
    335                 /*
     332 * @param wq       Wait queue.
     333 * @param blocked  Out parameter of waitq_sleep_timeout_unsafe().
     334 * @param ipl      Interrupt level returned by waitq_sleep_prepare().
     335 *
     336 */
     337void waitq_sleep_finish(waitq_t *wq, bool blocked, ipl_t ipl)
     338{
     339        if (blocked) {
     340                /*
    336341                 * Wait for a waitq_wakeup() or waitq_unsleep() to complete
    337342                 * before returning from waitq_sleep() to the caller. Otherwise
    338343                 * the caller might expect that the wait queue is no longer used
    339344                 * and deallocate it (although the wakeup on a another cpu has
    340                  * not yet completed and is using the wait queue).
    341                  *
    342                  * Note that we have to do this for ESYNCH_OK_BLOCKED and
    343                  * ESYNCH_INTERRUPTED, but not necessarily for ESYNCH_TIMEOUT
    344                  * where the timeout handler stops using the waitq before waking
    345                  * us up. To be on the safe side, ensure the waitq is not in use
    346                  * anymore in this case as well.
     345                 * not yet completed and is using the wait queue).
     346                 *
     347                 * Note that we have to do this for EOK and EINTR, but not
     348                 * necessarily for ETIMEOUT where the timeout handler stops
     349                 * using the waitq before waking us up. To be on the safe side,
     350                 * ensure the waitq is not in use anymore in this case as well.
    347351                 */
    348352                waitq_complete_wakeup(wq);
    349                 break;
     353        } else {
     354                irq_spinlock_unlock(&wq->lock, false);
    350355        }
    351356       
     
    363368 * @param flags See waitq_sleep_timeout().
    364369 *
     370 * @param[out] blocked  See waitq_sleep_timeout().
     371 *
    365372 * @return See waitq_sleep_timeout().
    366373 *
    367374 */
    368 int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags)
    369 {
     375int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked)
     376{
     377        *blocked = false;
     378
    370379        /* Checks whether to go to sleep at all */
    371380        if (wq->missed_wakeups) {
    372381                wq->missed_wakeups--;
    373                 return ESYNCH_OK_ATOMIC;
     382                return EOK;
    374383        } else {
    375384                if (PARAM_NON_BLOCKING(flags, usec)) {
    376385                        /* Return immediately instead of going to sleep */
    377                         return ESYNCH_WOULD_BLOCK;
     386                        return EAGAIN;
    378387                }
    379388        }
     
    392401                if (THREAD->interrupted) {
    393402                        irq_spinlock_unlock(&THREAD->lock, false);
    394                         irq_spinlock_unlock(&wq->lock, false);
    395                         return ESYNCH_INTERRUPTED;
     403                        return EINTR;
    396404                }
    397405               
     
    405413                        THREAD->last_cycle = get_cycle();
    406414                        irq_spinlock_unlock(&THREAD->lock, false);
    407                         return ESYNCH_INTERRUPTED;
     415                        return EINTR;
    408416                }
    409417        } else
     
    416424                        THREAD->last_cycle = get_cycle();
    417425                        irq_spinlock_unlock(&THREAD->lock, false);
    418                         return ESYNCH_TIMEOUT;
     426                        return ETIMEOUT;
    419427                }
    420428               
     
    433441        THREAD->sleep_queue = wq;
    434442       
     443        /* Must be before entry to scheduler, because there are multiple
     444         * return vectors.
     445         */
     446        *blocked = true;
     447       
    435448        irq_spinlock_unlock(&THREAD->lock, false);
    436449       
     
    438451        scheduler();
    439452       
    440         return ESYNCH_OK_BLOCKED;
     453        return EOK;
    441454}
    442455
  • kernel/generic/src/synch/workqueue.c

    r7f11dc6 r897fd8f1  
    3838
    3939#include <assert.h>
     40#include <errno.h>
    4041#include <synch/workqueue.h>
    4142#include <synch/spinlock.h>
     
    897898                        &info->lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);
    898899               
    899                 stop = (ret == ESYNCH_INTERRUPTED);
     900                stop = (ret == EINTR);
    900901        }
    901902       
  • kernel/generic/src/sysinfo/stats.c

    r7f11dc6 r897fd8f1  
    157157         */
    158158       
    159         if (SYNCH_FAILED(mutex_trylock(&as->lock)))
     159        if (mutex_trylock(&as->lock) != EOK)
    160160                return 0;
    161161       
     
    169169                        as_area_t *area = node->value[i];
    170170                       
    171                         if (SYNCH_FAILED(mutex_trylock(&area->lock)))
     171                        if (mutex_trylock(&area->lock) != EOK)
    172172                                continue;
    173173                       
     
    198198         */
    199199       
    200         if (SYNCH_FAILED(mutex_trylock(&as->lock)))
     200        if (mutex_trylock(&as->lock) != EOK)
    201201                return 0;
    202202       
     
    209209                        as_area_t *area = node->value[i];
    210210                       
    211                         if (SYNCH_FAILED(mutex_trylock(&area->lock)))
     211                        if (mutex_trylock(&area->lock) != EOK)
    212212                                continue;
    213213                       
  • kernel/generic/src/udebug/udebug.c

    r7f11dc6 r897fd8f1  
    9898       
    9999        wq->missed_wakeups = 0;  /* Enforce blocking. */
    100         int rc = waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
    101        
    102         waitq_sleep_finish(wq, rc, ipl);
     100        bool blocked;
     101        (void) waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE, &blocked);
     102        waitq_sleep_finish(wq, blocked, ipl);
    103103}
    104104
  • kernel/test/synch/rcu1.c

    r7f11dc6 r897fd8f1  
    114114                        do {
    115115                                int ret = thread_join_timeout(thread[i], 5 * 1000 * 1000, 0);
    116                                 joined = (ret != ESYNCH_TIMEOUT);
     116                                joined = (ret != ETIMEOUT);
    117117                               
    118                                 if (ret == ESYNCH_OK_BLOCKED) {
     118                                if (ret == EOK) {
    119119                                        TPRINTF("%zu threads remain\n", thread_cnt - i - 1);
    120120                                }
  • kernel/test/synch/semaphore2.c

    r7f11dc6 r897fd8f1  
    7070        TPRINTF("cpu%u, tid %" PRIu64 " down+ (%d)\n", CPU->id, THREAD->tid, to);
    7171        rc = semaphore_down_timeout(&sem, to);
    72         if (SYNCH_FAILED(rc)) {
     72        if (rc != EOK) {
    7373                TPRINTF("cpu%u, tid %" PRIu64 " down!\n", CPU->id, THREAD->tid);
    7474                return;
  • uspace/lib/c/include/futex.h

    r7f11dc6 r897fd8f1  
    3737
    3838#include <atomic.h>
     39#include <errno.h>
    3940#include <libc.h>
    4041
     
    121122 *
    122123 * @return ENOENT if there is no such virtual address.
    123  * @return Zero in the uncontended case.
    124  * @return Otherwise one of ESYNCH_OK_ATOMIC or ESYNCH_OK_BLOCKED.
     124 * @return EOK on success.
     125 * @return Error code from <errno.h> otherwise.
    125126 *
    126127 */
     
    130131                return __SYSCALL1(SYS_FUTEX_SLEEP, (sysarg_t) &futex->val.count);
    131132       
    132         return 0;
     133        return EOK;
    133134}
    134135
     
    138139 *
    139140 * @return ENOENT if there is no such virtual address.
    140  * @return Zero in the uncontended case.
     141 * @return EOK on success.
     142 * @return Error code from <errno.h> otherwise.
    141143 *
    142144 */
     
    146148                return __SYSCALL1(SYS_FUTEX_WAKEUP, (sysarg_t) &futex->val.count);
    147149       
    148         return 0;
     150        return EOK;
    149151}
    150152
Note: See TracChangeset for help on using the changeset viewer.