Ignore:
File:
1 edited

Legend:

Unmodified
Added
Removed
  • kernel/generic/src/synch/waitq.c

    rb7fd2a0 r63e27ef  
    4545
    4646#include <assert.h>
    47 #include <errno.h>
    4847#include <synch/waitq.h>
    4948#include <synch/spinlock.h>
     
    239238 * @param flags Specify mode of the sleep.
    240239 *
    241  * @param[out] blocked  On return, regardless of the return code,
    242  *                      `*blocked` is set to `true` iff the thread went to
    243  *                      sleep.
    244  *
    245240 * The sleep can be interrupted only if the
    246241 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
     
    256251 * call will immediately return, reporting either success or failure.
    257252 *
    258  * @return EAGAIN, meaning that the sleep failed because it was requested
    259  *                 as SYNCH_FLAGS_NON_BLOCKING, but there was no pending wakeup.
    260  * @return ETIMEOUT, meaning that the sleep timed out.
    261  * @return EINTR, meaning that somebody interrupted the sleeping
    262  *         thread. Check the value of `*blocked` to see if the thread slept,
    263  *         or if a pending interrupt forced it to return immediately.
    264  * @return EOK, meaning that none of the above conditions occurred, and the
    265  *              thread was woken up successfully by `waitq_wakeup()`. Check
    266  *              the value of `*blocked` to see if the thread slept or if
    267  *              the wakeup was already pending.
    268  *
    269  */
    270 errno_t waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked)
     253 * @return ESYNCH_WOULD_BLOCK, meaning that the sleep failed because at the
     254 *         time of the call there was no pending wakeup
     255 * @return ESYNCH_TIMEOUT, meaning that the sleep timed out.
     256 * @return ESYNCH_INTERRUPTED, meaning that somebody interrupted the sleeping
     257 *         thread.
     258 * @return ESYNCH_OK_ATOMIC, meaning that the sleep succeeded and that there
     259 *         was a pending wakeup at the time of the call. The caller was not put
     260 *         asleep at all.
     261 * @return ESYNCH_OK_BLOCKED, meaning that the sleep succeeded; the full sleep
     262 *         was attempted.
     263 *
     264 */
     265int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags)
    271266{
    272267        assert((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));
    273268       
    274269        ipl_t ipl = waitq_sleep_prepare(wq);
    275         bool nblocked;
    276         errno_t rc = waitq_sleep_timeout_unsafe(wq, usec, flags, &nblocked);
    277         waitq_sleep_finish(wq, nblocked, ipl);
    278 
    279         if (blocked != NULL) {
    280                 *blocked = nblocked;
    281         }
     270        int rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
     271        waitq_sleep_finish(wq, rc, ipl);
    282272        return rc;
    283273}
     
    330320 * lock is released.
    331321 *
    332  * @param wq       Wait queue.
    333  * @param blocked  Out parameter of waitq_sleep_timeout_unsafe().
    334  * @param ipl      Interrupt level returned by waitq_sleep_prepare().
    335  *
    336  */
    337 void waitq_sleep_finish(waitq_t *wq, bool blocked, ipl_t ipl)
    338 {
    339         if (blocked) {
    340                 /*
     322 * @param wq  Wait queue.
     323 * @param rc  Return code of waitq_sleep_timeout_unsafe().
     324 * @param ipl Interrupt level returned by waitq_sleep_prepare().
     325 *
     326 */
     327void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
     328{
     329        switch (rc) {
     330        case ESYNCH_WOULD_BLOCK:
     331        case ESYNCH_OK_ATOMIC:
     332                irq_spinlock_unlock(&wq->lock, false);
     333                break;
     334        default:
     335                /*
    341336                 * Wait for a waitq_wakeup() or waitq_unsleep() to complete
    342337                 * before returning from waitq_sleep() to the caller. Otherwise
    343338                 * the caller might expect that the wait queue is no longer used
    344339                 * and deallocate it (although the wakeup on another cpu has
    345                  * not yet completed and is using the wait queue).
    346                  *
    347                  * Note that we have to do this for EOK and EINTR, but not
    348                  * necessarily for ETIMEOUT where the timeout handler stops
    349                  * using the waitq before waking us up. To be on the safe side,
    350                  * ensure the waitq is not in use anymore in this case as well.
     340                 * not yet completed and is using the wait queue).
     341                 *
     342                 * Note that we have to do this for ESYNCH_OK_BLOCKED and
     343                 * ESYNCH_INTERRUPTED, but not necessarily for ESYNCH_TIMEOUT
     344                 * where the timeout handler stops using the waitq before waking
     345                 * us up. To be on the safe side, ensure the waitq is not in use
     346                 * anymore in this case as well.
    351347                 */
    352348                waitq_complete_wakeup(wq);
    353         } else {
    354                 irq_spinlock_unlock(&wq->lock, false);
     349                break;
    355350        }
    356351       
     
    368363 * @param flags See waitq_sleep_timeout().
    369364 *
    370  * @param[out] blocked  See waitq_sleep_timeout().
    371  *
    372365 * @return See waitq_sleep_timeout().
    373366 *
    374367 */
    375 errno_t waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked)
    376 {
    377         *blocked = false;
    378 
     368int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags)
     369{
    379370        /* Checks whether to go to sleep at all */
    380371        if (wq->missed_wakeups) {
    381372                wq->missed_wakeups--;
    382                 return EOK;
     373                return ESYNCH_OK_ATOMIC;
    383374        } else {
    384375                if (PARAM_NON_BLOCKING(flags, usec)) {
    385376                        /* Return immediately instead of going to sleep */
    386                         return EAGAIN;
     377                        return ESYNCH_WOULD_BLOCK;
    387378                }
    388379        }
     
    401392                if (THREAD->interrupted) {
    402393                        irq_spinlock_unlock(&THREAD->lock, false);
    403                         return EINTR;
     394                        irq_spinlock_unlock(&wq->lock, false);
     395                        return ESYNCH_INTERRUPTED;
    404396                }
    405397               
     
    413405                        THREAD->last_cycle = get_cycle();
    414406                        irq_spinlock_unlock(&THREAD->lock, false);
    415                         return EINTR;
     407                        return ESYNCH_INTERRUPTED;
    416408                }
    417409        } else
     
    424416                        THREAD->last_cycle = get_cycle();
    425417                        irq_spinlock_unlock(&THREAD->lock, false);
    426                         return ETIMEOUT;
     418                        return ESYNCH_TIMEOUT;
    427419                }
    428420               
     
    441433        THREAD->sleep_queue = wq;
    442434       
    443         /* Must be before entry to scheduler, because there are multiple
    444          * return vectors.
    445          */
    446         *blocked = true;
    447        
    448435        irq_spinlock_unlock(&THREAD->lock, false);
    449436       
     
    451438        scheduler();
    452439       
    453         return EOK;
     440        return ESYNCH_OK_BLOCKED;
    454441}
    455442
Note: See TracChangeset for help on using the changeset viewer.