Ignore:
File:
1 edited

Legend:

Unmodified
Added
Removed
  • kernel/generic/src/synch/waitq.c

    r63e27ef rb7fd2a0  
    4545
    4646#include <assert.h>
     47#include <errno.h>
    4748#include <synch/waitq.h>
    4849#include <synch/spinlock.h>
     
    238239 * @param flags Specify mode of the sleep.
    239240 *
     241 * @param[out] blocked  On return, regardless of the return code,
     242 *                      `*blocked` is set to `true` iff the thread went to
     243 *                      sleep.
     244 *
    240245 * The sleep can be interrupted only if the
    241246 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
     
    251256 * call will immediately return, reporting either success or failure.
    252257 *
    253  * @return ESYNCH_WOULD_BLOCK, meaning that the sleep failed because at the
    254  *         time of the call there was no pending wakeup
    255  * @return ESYNCH_TIMEOUT, meaning that the sleep timed out.
    256  * @return ESYNCH_INTERRUPTED, meaning that somebody interrupted the sleeping
    257  *         thread.
    258  * @return ESYNCH_OK_ATOMIC, meaning that the sleep succeeded and that there
    259  *         was a pending wakeup at the time of the call. The caller was not put
    260  *         asleep at all.
    261  * @return ESYNCH_OK_BLOCKED, meaning that the sleep succeeded; the full sleep
    262  *         was attempted.
    263  *
    264  */
    265 int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags)
     258 * @return EAGAIN, meaning that the sleep failed because it was requested
     259 *                 as SYNCH_FLAGS_NON_BLOCKING, but there was no pending wakeup.
     260 * @return ETIMEOUT, meaning that the sleep timed out.
     261 * @return EINTR, meaning that somebody interrupted the sleeping
     262 *         thread. Check the value of `*blocked` to see if the thread slept,
     263 *         or if a pending interrupt forced it to return immediately.
     264 * @return EOK, meaning that none of the above conditions occurred, and the
     265 *              thread was woken up successfully by `waitq_wakeup()`. Check
     266 *              the value of `*blocked` to see if the thread slept or if
     267 *              the wakeup was already pending.
     268 *
     269 */
     270errno_t waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked)
    266271{
    267272        assert((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));
    268273       
    269274        ipl_t ipl = waitq_sleep_prepare(wq);
    270         int rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
    271         waitq_sleep_finish(wq, rc, ipl);
     275        bool nblocked;
     276        errno_t rc = waitq_sleep_timeout_unsafe(wq, usec, flags, &nblocked);
     277        waitq_sleep_finish(wq, nblocked, ipl);
     278
     279        if (blocked != NULL) {
     280                *blocked = nblocked;
     281        }
    272282        return rc;
    273283}
     
    320330 * lock is released.
    321331 *
    322  * @param wq  Wait queue.
    323  * @param rc  Return code of waitq_sleep_timeout_unsafe().
    324  * @param ipl Interrupt level returned by waitq_sleep_prepare().
    325  *
    326  */
    327 void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
    328 {
    329         switch (rc) {
    330         case ESYNCH_WOULD_BLOCK:
    331         case ESYNCH_OK_ATOMIC:
    332                 irq_spinlock_unlock(&wq->lock, false);
    333                 break;
    334         default:
    335                 /*
     332 * @param wq       Wait queue.
     333 * @param blocked  Out parameter of waitq_sleep_timeout_unsafe().
     334 * @param ipl      Interrupt level returned by waitq_sleep_prepare().
     335 *
     336 */
     337void waitq_sleep_finish(waitq_t *wq, bool blocked, ipl_t ipl)
     338{
     339        if (blocked) {
     340                /*
    336341                 * Wait for a waitq_wakeup() or waitq_unsleep() to complete
    337342                 * before returning from waitq_sleep() to the caller. Otherwise
    338343                 * the caller might expect that the wait queue is no longer used
    339344                 * and deallocate it (although the wakeup on another cpu has
    340                  * not yet completed and is using the wait queue).
    341                  *
    342                  * Note that we have to do this for ESYNCH_OK_BLOCKED and
    343                  * ESYNCH_INTERRUPTED, but not necessarily for ESYNCH_TIMEOUT
    344                  * where the timeout handler stops using the waitq before waking
    345                  * us up. To be on the safe side, ensure the waitq is not in use
    346                  * anymore in this case as well.
     345                 * not yet completed and is using the wait queue).
     346                 *
     347                 * Note that we have to do this for EOK and EINTR, but not
     348                 * necessarily for ETIMEOUT where the timeout handler stops
     349                 * using the waitq before waking us up. To be on the safe side,
     350                 * ensure the waitq is not in use anymore in this case as well.
    347351                 */
    348352                waitq_complete_wakeup(wq);
    349                 break;
     353        } else {
     354                irq_spinlock_unlock(&wq->lock, false);
    350355        }
    351356       
     
    363368 * @param flags See waitq_sleep_timeout().
    364369 *
     370 * @param[out] blocked  See waitq_sleep_timeout().
     371 *
    365372 * @return See waitq_sleep_timeout().
    366373 *
    367374 */
    368 int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags)
    369 {
     375errno_t waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked)
     376{
     377        *blocked = false;
     378
    370379        /* Checks whether to go to sleep at all */
    371380        if (wq->missed_wakeups) {
    372381                wq->missed_wakeups--;
    373                 return ESYNCH_OK_ATOMIC;
     382                return EOK;
    374383        } else {
    375384                if (PARAM_NON_BLOCKING(flags, usec)) {
    376385                        /* Return immediately instead of going to sleep */
    377                         return ESYNCH_WOULD_BLOCK;
     386                        return EAGAIN;
    378387                }
    379388        }
     
    392401                if (THREAD->interrupted) {
    393402                        irq_spinlock_unlock(&THREAD->lock, false);
    394                         irq_spinlock_unlock(&wq->lock, false);
    395                         return ESYNCH_INTERRUPTED;
     403                        return EINTR;
    396404                }
    397405               
     
    405413                        THREAD->last_cycle = get_cycle();
    406414                        irq_spinlock_unlock(&THREAD->lock, false);
    407                         return ESYNCH_INTERRUPTED;
     415                        return EINTR;
    408416                }
    409417        } else
     
    416424                        THREAD->last_cycle = get_cycle();
    417425                        irq_spinlock_unlock(&THREAD->lock, false);
    418                         return ESYNCH_TIMEOUT;
     426                        return ETIMEOUT;
    419427                }
    420428               
     
    433441        THREAD->sleep_queue = wq;
    434442       
     443        /* Must be before entry to scheduler, because there are multiple
     444         * return vectors.
     445         */
     446        *blocked = true;
     447       
    435448        irq_spinlock_unlock(&THREAD->lock, false);
    436449       
     
    438451        scheduler();
    439452       
    440         return ESYNCH_OK_BLOCKED;
     453        return EOK;
    441454}
    442455
Note: See TracChangeset for help on using the changeset viewer.