Changeset 111b9b9 in mainline


Timestamp:
2023-02-11T19:13:44Z (14 months ago)
Author:
Jiří Zárevúcky <zarevucky.jiri@…>
Branches:
master, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
4777e02
Parents:
76e17d7c
git-author:
Jiří Zárevúcky <zarevucky.jiri@…> (2022-08-15 17:46:39)
git-committer:
Jiří Zárevúcky <zarevucky.jiri@…> (2023-02-11 19:13:44)
Message:

Reimplement waitq using thread_wait/wakeup

This adds a few functions to the thread API which can be
summarized as "stop running until woken up by others".
The ordering and context-switching concerns are thus delegated
to this abstraction, and waitq only deals with maintaining
the queues. Overall, this makes the control flow in waitq
much easier to navigate.
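
For orientation, the protocol the new API enables looks roughly like the
following sketch, pieced together from the thread_wait_start() /
thread_wait_finish() / thread_wakeup() documentation added in this changeset.
my_queue_t, my_lock()/my_unlock(), my_sleep() and my_wake_one() are
illustrative placeholders, not kernel API; a real caller (see waitq below)
also loops to handle spurious wakeups.

    errno_t my_sleep(my_queue_t *q, deadline_t deadline)
    {
            if (thread_wait_start() == THREAD_TERMINATING)
                    return EINTR;  /* termination signaled before we slept */

            /* Publish ourselves so wakers can find us. */
            my_lock(q);
            list_append(&THREAD->wq_link, &q->sleepers);
            my_unlock(q);

            /* Stop running until thread_wakeup() or the deadline. */
            thread_wait_result_t r = thread_wait_finish(deadline);

            my_lock(q);
            /* A waker removes our link; if it is still in use, nobody woke us. */
            bool woken = !link_in_use(&THREAD->wq_link);
            if (!woken)
                    list_remove(&THREAD->wq_link);
            my_unlock(q);

            if (woken)
                    return EOK;
            return (r == THREAD_WAIT_TIMEOUT) ? ETIMEOUT : EINTR;
    }

    void my_wake_one(my_queue_t *q)
    {
            my_lock(q);
            thread_t *thread = list_empty(&q->sleepers) ? NULL :
                list_get_instance(list_first(&q->sleepers), thread_t, wq_link);
            if (thread)
                    list_remove(&thread->wq_link);
            my_unlock(q);

            if (thread)
                    thread_wakeup(thread);  /* defined at any time, even concurrently */
    }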

Location:
kernel
Files:
20 edited

Legend:

  +  added line
  -  removed line
     unmodified line
  …  elided context
  • kernel/generic/include/proc/scheduler.h

    r76e17d7c → r111b9b9
     extern void scheduler_fpu_lazy_request(void);
     extern void scheduler(void);
    +extern void scheduler_locked(ipl_t);
     extern void kcpulb(void *arg);
    
  • kernel/generic/include/proc/thread.h

    r76e17d7c → r111b9b9
             odlink_t lthreads;
    
    +        /** Tracking variable for thread_wait/thread_wakeup */
    +        atomic_int sleep_state;
    +
             /**
              * If true, the thread is terminating.
    …
              */
             volatile bool interrupted;
    +
    +        /** Wait queue in which this thread sleeps. Used for debug printouts. */
    +        _Atomic(waitq_t *) sleep_queue;
    
             /** Waitq for thread_join_timeout(). */
    …
             context_t saved_context;
             ipl_t saved_ipl;
    -
    -        /**
    -         * From here, the stored timeout context
    -         * is restored when sleep times out.
    -         */
    -        context_t sleep_timeout_context;
    -
    -        /**
    -         * From here, the stored interruption context
    -         * is restored when sleep is interrupted.
    -         */
    -        context_t sleep_interruption_context;
    -
    -        /** If true, the thread can be interrupted from sleep. */
    -        bool sleep_interruptible;
    -
    -        /**
    -         * If true, and this thread's sleep returns without a wakeup
    -         * (timed out or interrupted), waitq ignores the next wakeup.
    -         * This is necessary for futex to be able to handle those conditions.
    -         */
    -        bool sleep_composable;
    -
    -        /** Wait queue in which this thread sleeps. */
    -        waitq_t *sleep_queue;
    
             /**
    …
     extern void thread_ready(thread_t *);
     extern void thread_exit(void) __attribute__((noreturn));
    -extern void thread_interrupt(thread_t *, bool);
    +extern void thread_interrupt(thread_t *);
    +
    +typedef enum {
    +        THREAD_OK,
    +        THREAD_TERMINATING,
    +} thread_termination_state_t;
    +
    +typedef enum {
    +        THREAD_WAIT_SUCCESS,
    +        THREAD_WAIT_TIMEOUT,
    +} thread_wait_result_t;
    +
    +extern thread_termination_state_t thread_wait_start(void);
    +extern thread_wait_result_t thread_wait_finish(deadline_t);
    +extern void thread_wakeup(thread_t *);
    
     static inline thread_t *thread_ref(thread_t *thread)
  • kernel/generic/include/synch/waitq.h

    r76e17d7c → r111b9b9
     #include <adt/list.h>
    
    -typedef enum {
    -        WAKEUP_FIRST = 0,
    -        WAKEUP_ALL,
    -        WAKEUP_CLOSE,
    -} wakeup_mode_t;
    -
     /** Wait queue structure.
      *
    …
    
             /**
    -         * Number of waitq_wakeup() calls that didn't find a thread to wake up.
    -         *
    +         * If negative, number of wakeups that are to be ignored (necessary for futex operation).
    +         * If positive, number of wakeups that weren't able to wake a thread.
              */
    -        int missed_wakeups;
    -
    -        /** Number of wakeups that need to be ignored due to futex timeout. */
    -        int ignore_wakeups;
    +        int wakeup_balance;
    
             /** List of sleeping threads for which there was no missed_wakeup. */
             list_t sleepers;
    +
    +        bool closed;
     } waitq_t;
    +
    +typedef struct wait_guard {
    +        ipl_t ipl;
    +} wait_guard_t;
    
     struct thread;
    …
     extern void waitq_initialize_with_count(waitq_t *, int);
     extern errno_t waitq_sleep(waitq_t *);
    -extern errno_t waitq_sleep_timeout(waitq_t *, uint32_t, unsigned int, bool *);
    -extern ipl_t waitq_sleep_prepare(waitq_t *);
    -extern errno_t waitq_sleep_unsafe(waitq_t *, bool *);
    -extern errno_t waitq_sleep_timeout_unsafe(waitq_t *, uint32_t, unsigned int, bool *);
    -extern void waitq_sleep_finish(waitq_t *, bool, ipl_t);
    -extern void waitq_wakeup(waitq_t *, wakeup_mode_t);
    -extern void _waitq_wakeup_unsafe(waitq_t *, wakeup_mode_t);
    -extern void waitq_interrupt_sleep(struct thread *);
    +extern errno_t _waitq_sleep_timeout(waitq_t *, uint32_t, unsigned int);
    +extern errno_t waitq_sleep_timeout(waitq_t *, uint32_t);
    +extern wait_guard_t waitq_sleep_prepare(waitq_t *);
    +extern errno_t waitq_sleep_unsafe(waitq_t *, wait_guard_t);
    +extern errno_t waitq_sleep_timeout_unsafe(waitq_t *, uint32_t, unsigned int, wait_guard_t);
    +
    +extern void waitq_wake_one(waitq_t *);
    +extern void waitq_wake_all(waitq_t *);
    +extern void waitq_signal(waitq_t *);
    +extern void waitq_close(waitq_t *);
    
     #endif
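
The guard-based sleep API above replaces the old prepare/finish pair. A
minimal sketch of how a caller interleaves its own lock with the wait queue,
mirroring the condvar changes later in this changeset (wq and my_lock are
illustrative placeholders):

    wait_guard_t guard = waitq_sleep_prepare(&wq);  /* disables interrupts, locks wq */

    /* Unlock only after the waitq is locked so we don't miss a wakeup. */
    spinlock_unlock(&my_lock);

    /* Consumes the guard: unlocks wq and restores the interrupt level. */
    errno_t rc = waitq_sleep_unsafe(&wq, guard);

    spinlock_lock(&my_lock);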
  • kernel/generic/include/time/timeout.h

    r76e17d7c → r111b9b9
     typedef void (*timeout_handler_t)(void *arg);
    
    +typedef uint64_t deadline_t;
    +#define DEADLINE_NEVER ((deadline_t) UINT64_MAX)
    +
     typedef struct {
             /** Link to the list of active timeouts on timeout->cpu */
             link_t link;
             /** Timeout will be activated when current clock tick reaches this value. */
    -        uint64_t deadline;
    +        deadline_t deadline;
             /** Function that will be called on timeout activation. */
             timeout_handler_t handler;
    …
     #define us2ticks(us)  ((uint64_t) (((uint32_t) (us) / (1000000 / HZ))))
    
    +extern deadline_t timeout_deadline_in_usec(uint32_t us);
    +
     extern void timeout_init(void);
     extern void timeout_initialize(timeout_t *);
     extern void timeout_register(timeout_t *, uint64_t, timeout_handler_t, void *);
    +extern void timeout_register_deadline(timeout_t *, deadline_t, timeout_handler_t, void *);
     extern bool timeout_unregister(timeout_t *);
    
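
A deadline_t is an absolute clock-tick value, so it can be computed once and
registered or compared without accumulating drift. A sketch of the intended
use, following thread_wait_finish() in this changeset (handler and arg are
illustrative placeholders; timeout_deadline_in_usec() must run with
interrupts disabled):

    deadline_t deadline = (usec > 0) ?
        timeout_deadline_in_usec(usec) : DEADLINE_NEVER;

    timeout_t timeout;
    if (deadline != DEADLINE_NEVER) {
            timeout_initialize(&timeout);
            timeout_register_deadline(&timeout, deadline, handler, arg);
    }

    /* ... go to sleep ... */

    if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
            /* Unregistration failed, so the handler has already fired. */
    }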
  • kernel/generic/src/ipc/event.c

    r76e17d7c → r111b9b9
                                         true);
    
    -                                waitq_wakeup(&event->answerbox->wq,
    -                                    WAKEUP_FIRST);
    +                                waitq_wake_one(&event->answerbox->wq);
    
                                     if (mask)
  • kernel/generic/src/ipc/ipc.c

    r76e17d7c → r111b9b9
                     irq_spinlock_unlock(&callerbox->lock, true);
    
    -        waitq_wakeup(&callerbox->wq, WAKEUP_FIRST);
    +        waitq_wake_one(&callerbox->wq);
     }
    
    …
             irq_spinlock_unlock(&box->lock, true);
    
    -        waitq_wakeup(&box->wq, WAKEUP_FIRST);
    +        waitq_wake_one(&box->wq);
     }
    
    …
             errno_t rc;
    
    -        rc = waitq_sleep_timeout(&box->wq, usec, flags, NULL);
    +        rc = _waitq_sleep_timeout(&box->wq, usec, flags);
             if (rc != EOK)
                     return rc;
  • kernel/generic/src/ipc/irq.c

    r76e17d7c → r111b9b9
             irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);
    
    -        waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
    +        waitq_wake_one(&irq->notif_cfg.answerbox->wq);
     }
    
  • kernel/generic/src/ipc/sysipc.c

    r76e17d7c → r111b9b9
     sys_errno_t sys_ipc_poke(void)
     {
    -        waitq_wakeup(&TASK->answerbox.wq, WAKEUP_FIRST);
    +        waitq_wake_one(&TASK->answerbox.wq);
             return EOK;
     }
  • kernel/generic/src/proc/scheduler.c

    r76e17d7c → r111b9b9
     }
    
    +void scheduler(void)
    +{
    +        ipl_t ipl = interrupts_disable();
    +
    +        if (atomic_load(&haltstate))
    +                halt();
    +
    +        if (THREAD) {
    +                irq_spinlock_lock(&THREAD->lock, false);
    +        }
    +
    +        scheduler_locked(ipl);
    +}
    +
     /** The scheduler
      *
    …
      *
      */
    -void scheduler(void)
    -{
    -        volatile ipl_t ipl;
    -
    +void scheduler_locked(ipl_t ipl)
    +{
             assert(CPU != NULL);
    
    -        ipl = interrupts_disable();
    -
    -        if (atomic_load(&haltstate))
    -                halt();
    -
             if (THREAD) {
    -                irq_spinlock_lock(&THREAD->lock, false);
    -
                     /* Update thread kernel accounting */
                     THREAD->kcycles += get_cycle() - THREAD->last_cycle;
    …
                     case Exiting:
                             irq_spinlock_unlock(&THREAD->lock, false);
    -                        waitq_wakeup(&THREAD->join_wq, WAKEUP_CLOSE);
    +                        waitq_close(&THREAD->join_wq);
    
                             /*
    …
                              */
                             THREAD->priority = -1;
    -
    -                        /*
    -                         * We need to release wq->lock which we locked in
    -                         * waitq_sleep(). Address of wq->lock is kept in
    -                         * THREAD->sleep_queue.
    -                         */
    -                        irq_spinlock_unlock(&THREAD->sleep_queue->lock, false);
    -
                             irq_spinlock_unlock(&THREAD->lock, false);
                             break;
  • kernel/generic/src/proc/task.c

    r76e17d7c → r111b9b9
    
             list_foreach(task->threads, th_link, thread_t, thread) {
    -                thread_t *thr = thread_try_ref(thread);
    -                if (thr)
    -                        thread_interrupt(thr, false);
    -
    -                // If NULL, the thread is already getting destroyed concurrently with this.
    +                thread_interrupt(thread);
             }
    
  • kernel/generic/src/proc/thread.c

    r76e17d7c → r111b9b9
     #include <errno.h>
     #include <debug.h>
    +#include <halt.h>
    
     /** Thread states */
    …
             "Exiting",
             "Lingering"
    +};
    +
    +enum sleep_state {
    +        SLEEP_INITIAL,
    +        SLEEP_ASLEEP,
    +        SLEEP_WOKE,
     };
    
    …
             thread->state = Entering;
    
    -        thread->sleep_interruptible = false;
    -        thread->sleep_composable = false;
    -        thread->sleep_queue = NULL;
    +        atomic_init(&thread->sleep_queue, NULL);
    
             thread->in_copy_from_uspace = false;
    …
    
             thread->interrupted = false;
    +        atomic_init(&thread->sleep_state, SLEEP_INITIAL);
    +
             waitq_initialize(&thread->join_wq);
    
    …
      * @param thread A valid thread object.
      */
    -void thread_interrupt(thread_t *thread, bool irq_dis)
    +void thread_interrupt(thread_t *thread)
     {
             assert(thread != NULL);
    -
    -        irq_spinlock_lock(&thread->lock, irq_dis);
    -
             thread->interrupted = true;
    -        bool sleeping = (thread->state == Sleeping);
    -
    -        irq_spinlock_unlock(&thread->lock, irq_dis);
    -
    -        if (sleeping)
    -                waitq_interrupt_sleep(thread);
    -
    -        thread_put(thread);
    +        thread_wakeup(thread);
    +}
    +
    +/** Prepare for putting the thread to sleep.
    + *
    + * @returns whether the thread is currently terminating. If THREAD_OK
    + * is returned, the thread is guaranteed to be woken up instantly if the thread
    + * is terminated at any time between this function's return and
    + * thread_wait_finish(). If THREAD_TERMINATING is returned, the thread can still
    + * go to sleep, but doing so will delay termination.
    + */
    +thread_termination_state_t thread_wait_start(void)
    +{
    +        assert(THREAD != NULL);
    +
    +        /*
    +         * This is an exchange rather than a store so that we can use the acquire
    +         * semantics, which is needed to ensure that code after this operation sees
    +         * memory ops made before thread_wakeup() in other thread, if that wakeup
    +         * was reset by this operation.
    +         *
    +         * In particular, we need this to ensure we can't miss the thread being
    +         * terminated concurrently with a synchronization primitive preparing to
    +         * sleep.
    +         */
    +        (void) atomic_exchange_explicit(&THREAD->sleep_state, SLEEP_INITIAL,
    +            memory_order_acquire);
    +
    +        return THREAD->interrupted ? THREAD_TERMINATING : THREAD_OK;
    +}
    +
    +static void thread_wait_internal(void)
    +{
    +        assert(THREAD != NULL);
    +
    +        ipl_t ipl = interrupts_disable();
    +
    +        if (atomic_load(&haltstate))
    +                halt();
    +
    +        /*
    +         * Lock here to prevent a race between entering the scheduler and another
    +         * thread rescheduling this thread.
    +         */
    +        irq_spinlock_lock(&THREAD->lock, false);
    +
    +        int expected = SLEEP_INITIAL;
    +
    +        /* Only set SLEEP_ASLEEP in sleep pad if it's still in initial state */
    +        if (atomic_compare_exchange_strong_explicit(&THREAD->sleep_state, &expected,
    +            SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire)) {
    +                THREAD->state = Sleeping;
    +                scheduler_locked(ipl);
    +        } else {
    +                assert(expected == SLEEP_WOKE);
    +                /* Return immediately. */
    +                irq_spinlock_unlock(&THREAD->lock, false);
    +                interrupts_restore(ipl);
    +        }
    +}
    +
    +static void thread_wait_timeout_callback(void *arg)
    +{
    +        thread_wakeup(arg);
    +}
    +
    +/**
    + * Suspends this thread's execution until thread_wakeup() is called on it,
    + * or deadline is reached.
    + *
    + * The way this would normally be used is that the current thread call
    + * thread_wait_start(), and if interruption has not been signaled, stores
    + * a reference to itself in a synchronized structure (such as waitq).
    + * After that, it releases any spinlocks it might hold and calls this function.
    + *
    + * The thread doing the wakeup will acquire the thread's reference from said
    + * synchronized structure and calls thread_wakeup() on it.
    + *
    + * Notably, there can be more than one thread performing wakeup.
    + * The number of performed calls to thread_wakeup(), or their relative
    + * ordering with thread_wait_finish(), does not matter. However, calls to
    + * thread_wakeup() are expected to be synchronized with thread_wait_start()
    + * with which they are associated, otherwise wakeups may be missed.
    + * However, the operation of thread_wakeup() is defined at any time,
    + * synchronization notwithstanding (in the sense of C un/defined behavior),
    + * and is in fact used to interrupt waiting threads by external events.
    + * The waiting thread must operate correctly in face of spurious wakeups,
    + * and clean up its reference in the synchronization structure if necessary.
    + *
    + * Returns THREAD_WAIT_TIMEOUT if timeout fired, which is a necessary condition
    + * for it to have been waken up by the timeout, but the caller must assume
    + * that proper wakeups, timeouts and interrupts may occur concurrently, so
    + * the fact timeout has been registered does not necessarily mean the thread
    + * has not been woken up or interrupted.
    + */
    +thread_wait_result_t thread_wait_finish(deadline_t deadline)
    +{
    +        assert(THREAD != NULL);
    +
    +        timeout_t timeout;
    +
    +        if (deadline != DEADLINE_NEVER) {
    +                /* Extra check to avoid setting up a deadline if we don't need to. */
    +                if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
    +                    SLEEP_INITIAL)
    +                        return THREAD_WAIT_SUCCESS;
    +
    +                timeout_initialize(&timeout);
    +                timeout_register_deadline(&timeout, deadline,
    +                    thread_wait_timeout_callback, THREAD);
    +        }
    +
    +        thread_wait_internal();
    +
    +        if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
    +                return THREAD_WAIT_TIMEOUT;
    +        } else {
    +                return THREAD_WAIT_SUCCESS;
    +        }
    +}
    +
    +void thread_wakeup(thread_t *thread)
    +{
    +        assert(thread != NULL);
    +
    +        int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
    +            memory_order_release);
    +
    +        if (state == SLEEP_ASLEEP) {
    +                /*
    +                 * Only one thread gets to do this.
    +                 * The reference consumed here is the reference implicitly passed to
    +                 * the waking thread by the sleeper in thread_wait_finish().
    +                 */
    +                thread_ready(thread);
    +        }
     }
    
    …
                     return EOK;
             } else {
    -                return waitq_sleep_timeout(&thread->join_wq, usec,
    -                    SYNCH_FLAGS_NON_BLOCKING, NULL);
    +                return _waitq_sleep_timeout(&thread->join_wq, usec, flags);
             }
     }
    …
             waitq_initialize(&wq);
    
    -        (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
    +        (void) waitq_sleep_timeout(&wq, usec);
     }
    
    …
    
             if (sleeping)
    -                waitq_interrupt_sleep(thread);
    +                thread_wakeup(thread);
    
             thread_put(thread);
  • kernel/generic/src/synch/condvar.c

    r76e17d7c → r111b9b9
     void condvar_signal(condvar_t *cv)
     {
    -        waitq_wakeup(&cv->wq, WAKEUP_FIRST);
    +        waitq_signal(&cv->wq);
     }
    
    …
     void condvar_broadcast(condvar_t *cv)
     {
    -        waitq_wakeup(&cv->wq, WAKEUP_ALL);
    +        waitq_wake_all(&cv->wq);
     }
    
    …
     errno_t condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec)
     {
    -        errno_t rc;
    -        ipl_t ipl;
    -        bool blocked;
    +        wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
    
    -        ipl = waitq_sleep_prepare(&cv->wq);
             /* Unlock only after the waitq is locked so we don't miss a wakeup. */
             mutex_unlock(mtx);
    
    -        cv->wq.missed_wakeups = 0;      /* Enforce blocking. */
    -        rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, SYNCH_FLAGS_NON_BLOCKING, &blocked);
    -        assert(blocked || rc != EOK);
    +        errno_t rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, SYNCH_FLAGS_NON_BLOCKING, guard);
    
    -        waitq_sleep_finish(&cv->wq, blocked, ipl);
    -        /* Lock only after releasing the waitq to avoid a possible deadlock. */
             mutex_lock(mtx);
    -
             return rc;
     }
    …
     errno_t condvar_wait(condvar_t *cv, mutex_t *mtx)
     {
    -        errno_t rc;
    -        ipl_t ipl;
    -        bool blocked;
    +        wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
    
    -        ipl = waitq_sleep_prepare(&cv->wq);
             /* Unlock only after the waitq is locked so we don't miss a wakeup. */
             mutex_unlock(mtx);
    
    -        cv->wq.missed_wakeups = 0;      /* Enforce blocking. */
    -        rc = waitq_sleep_unsafe(&cv->wq, &blocked);
    -        assert(blocked || rc != EOK);
    +        errno_t rc = waitq_sleep_unsafe(&cv->wq, guard);
    
    -        waitq_sleep_finish(&cv->wq, blocked, ipl);
    -        /* Lock only after releasing the waitq to avoid a possible deadlock. */
             mutex_lock(mtx);
    -
             return rc;
     }
    …
         uint32_t usec, int flags)
     {
    -        errno_t rc;
    -        ipl_t ipl;
    -        bool blocked;
    -
    -        ipl = waitq_sleep_prepare(&cv->wq);
    +        wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
    
             /* Unlock only after the waitq is locked so we don't miss a wakeup. */
             spinlock_unlock(lock);
    
    -        cv->wq.missed_wakeups = 0;      /* Enforce blocking. */
    -        rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, &blocked);
    -        assert(blocked || rc != EOK);
    +        errno_t rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, guard);
    
    -        waitq_sleep_finish(&cv->wq, blocked, ipl);
    -        /* Lock only after releasing the waitq to avoid a possible deadlock. */
             spinlock_lock(lock);
    -
             return rc;
     }
  • kernel/generic/src/synch/semaphore.c

    r76e17d7c → r111b9b9
     errno_t semaphore_down_timeout(semaphore_t *sem, uint32_t usec)
     {
    -        errno_t rc = waitq_sleep_timeout(&sem->wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
    +        errno_t rc = waitq_sleep_timeout(&sem->wq, usec);
             assert(rc == EOK || rc == ETIMEOUT || rc == EAGAIN);
             return rc;
    …
     void semaphore_up(semaphore_t *sem)
     {
    -        waitq_wakeup(&sem->wq, WAKEUP_FIRST);
    +        waitq_wake_one(&sem->wq);
     }
    
  • kernel/generic/src/synch/syswaitq.c

    r76e17d7c → r111b9b9
     #endif
    
    -        errno_t rc = waitq_sleep_timeout(kobj->waitq, timeout,
    -            SYNCH_FLAGS_INTERRUPTIBLE | flags, NULL);
    +        errno_t rc = _waitq_sleep_timeout(kobj->waitq, timeout,
    +            SYNCH_FLAGS_INTERRUPTIBLE | flags);
    
     #ifdef CONFIG_UDEBUG
    …
                     return (sys_errno_t) ENOENT;
    
    -        waitq_wakeup(kobj->waitq, WAKEUP_FIRST);
    +        waitq_wake_one(kobj->waitq);
    
             kobject_put(kobj);
  • kernel/generic/src/synch/waitq.c

    r76e17d7c → r111b9b9
     /*
      * Copyright (c) 2001-2004 Jakub Jermar
    + * Copyright (c) 2022 Jiří Zárevúcky
      * All rights reserved.
      *
    …
     #include <mem.h>
    
    -static void waitq_sleep_timed_out(void *);
    -static void waitq_complete_wakeup(waitq_t *);
    -
     /** Initialize wait queue
      *
    …
     }
    
    +/**
    + * Initialize wait queue with an initial number of queued wakeups
    + * (or a wakeup debt if negative).
    + */
     void waitq_initialize_with_count(waitq_t *wq, int count)
     {
    -        memsetb(wq, sizeof(*wq), 0);
    -        irq_spinlock_initialize(&wq->lock, "wq.lock");
    -        list_initialize(&wq->sleepers);
    -        wq->missed_wakeups = count;
    -}
    -
    -/** Handle timeout during waitq_sleep_timeout() call
    - *
    - * This routine is called when waitq_sleep_timeout() times out.
    - * Interrupts are disabled.
    - *
    - * It is supposed to try to remove 'its' thread from the wait queue;
    - * it can eventually fail to achieve this goal when these two events
    - * overlap. In that case it behaves just as though there was no
    - * timeout at all.
    - *
    - * @param data Pointer to the thread that called waitq_sleep_timeout().
    - *
    - */
    -void waitq_sleep_timed_out(void *data)
    -{
    -        thread_t *thread = (thread_t *) data;
    -        bool do_wakeup = false;
    -        DEADLOCK_PROBE_INIT(p_wqlock);
    -
    -        irq_spinlock_lock(&threads_lock, false);
    -
    -grab_locks:
    -        irq_spinlock_lock(&thread->lock, false);
    -
    -        waitq_t *wq;
    -        if ((wq = thread->sleep_queue)) {  /* Assignment */
    -                if (!irq_spinlock_trylock(&wq->lock)) {
    -                        irq_spinlock_unlock(&thread->lock, false);
    -                        DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
    -                        /* Avoid deadlock */
    -                        goto grab_locks;
    -                }
    -
    -                list_remove(&thread->wq_link);
    -                thread->saved_context = thread->sleep_timeout_context;
    -                do_wakeup = true;
    -                if (thread->sleep_composable)
    -                        wq->ignore_wakeups++;
    -                thread->sleep_queue = NULL;
    -                irq_spinlock_unlock(&wq->lock, false);
    -        }
    -
    -        irq_spinlock_unlock(&thread->lock, false);
    -
    -        if (do_wakeup)
    -                thread_ready(thread);
    -
    -        irq_spinlock_unlock(&threads_lock, false);
    -}
    -
    -/** Interrupt sleeping thread.
    - *
    - * This routine attempts to interrupt a thread from its sleep in
    - * a waitqueue. If the thread is not found sleeping, no action
    - * is taken.
    - *
    - * The threads_lock must be already held and interrupts must be
    - * disabled upon calling this function.
    - *
    - * @param thread Thread to be interrupted.
    - *
    - */
    -void waitq_interrupt_sleep(thread_t *thread)
    -{
    -        bool do_wakeup = false;
    -        DEADLOCK_PROBE_INIT(p_wqlock);
    -
    -        /*
    -         * The thread is quaranteed to exist because
    -         * threads_lock is held.
    -         */
    -
    -grab_locks:
    -        irq_spinlock_lock(&thread->lock, false);
    -
    -        waitq_t *wq;
    -        if ((wq = thread->sleep_queue)) {  /* Assignment */
    -                if (!(thread->sleep_interruptible)) {
    -                        /*
    -                         * The sleep cannot be interrupted.
    -                         */
    -                        irq_spinlock_unlock(&thread->lock, false);
    -                        return;
    -                }
    -
    -                if (!irq_spinlock_trylock(&wq->lock)) {
    -                        /* Avoid deadlock */
    -                        irq_spinlock_unlock(&thread->lock, false);
    -                        DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
    -                        goto grab_locks;
    -                }
    -
    -                list_remove(&thread->wq_link);
    -                thread->saved_context = thread->sleep_interruption_context;
    -                if (thread->sleep_composable)
    -                        wq->ignore_wakeups++;
    -                do_wakeup = true;
    -                thread->sleep_queue = NULL;
    -                irq_spinlock_unlock(&wq->lock, false);
    -        }
    -
    -        irq_spinlock_unlock(&thread->lock, false);
    -
    -        if (do_wakeup)
    -                thread_ready(thread);
    +        waitq_initialize(wq);
    +        wq->wakeup_balance = count;
     }
    
    …
     errno_t waitq_sleep(waitq_t *wq)
     {
    -        return waitq_sleep_timeout(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE, NULL);
    +        return _waitq_sleep_timeout(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
    +}
    +
    +errno_t waitq_sleep_timeout(waitq_t *wq, uint32_t usec)
    +{
    +        return _waitq_sleep_timeout(wq, usec, SYNCH_FLAGS_NON_BLOCKING);
     }
    
     /** Sleep until either wakeup, timeout or interruption occurs
      *
    - * This is a sleep implementation which allows itself to time out or to be
    - * interrupted from the sleep, restoring a failover context.
    - *
      * Sleepers are organised in a FIFO fashion in a structure called wait queue.
      *
    - * This function is really basic in that other functions as waitq_sleep()
    - * and all the *_timeout() functions use it.
    + * Other functions as waitq_sleep() and all the *_timeout() functions are
    + * implemented using this function.
      *
      * @param wq    Pointer to wait queue.
    …
      * @param flags Specify mode of the sleep.
      *
    - * @param[out] blocked  On return, regardless of the return code,
    - *                      `*blocked` is set to `true` iff the thread went to
    - *                      sleep.
    - *
      * The sleep can be interrupted only if the
      * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
    …
      * call will immediately return, reporting either success or failure.
      *
    - * @return EAGAIN, meaning that the sleep failed because it was requested
    - *                 as SYNCH_FLAGS_NON_BLOCKING, but there was no pending wakeup.
    - * @return ETIMEOUT, meaning that the sleep timed out.
    - * @return EINTR, meaning that somebody interrupted the sleeping
    - *         thread. Check the value of `*blocked` to see if the thread slept,
    - *         or if a pending interrupt forced it to return immediately.
    + * @return ETIMEOUT, meaning that the sleep timed out, or a nonblocking call
    + *                   returned unsuccessfully.
    + * @return EINTR, meaning that somebody interrupted the sleeping thread.
      * @return EOK, meaning that none of the above conditions occured, and the
    - *              thread was woken up successfuly by `waitq_wakeup()`. Check
    - *              the value of `*blocked` to see if the thread slept or if
    - *              the wakeup was already pending.
    - *
    - */
    -errno_t waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked)
    + *              thread was woken up successfuly by `waitq_wake_*()`.
    + *
    + */
    +errno_t _waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags)
     {
             assert((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));
    -
    -        ipl_t ipl = waitq_sleep_prepare(wq);
    -        bool nblocked;
    -        errno_t rc = waitq_sleep_timeout_unsafe(wq, usec, flags, &nblocked);
    -        waitq_sleep_finish(wq, nblocked, ipl);
    -
    -        if (blocked != NULL) {
    -                *blocked = nblocked;
    -        }
    -        return rc;
    +        return waitq_sleep_timeout_unsafe(wq, usec, flags, waitq_sleep_prepare(wq));
     }
    
    …
      *
      */
    -ipl_t waitq_sleep_prepare(waitq_t *wq)
    +wait_guard_t waitq_sleep_prepare(waitq_t *wq)
     {
             ipl_t ipl = interrupts_disable();
             irq_spinlock_lock(&wq->lock, false);
    -        return ipl;
    -}
    -
    -/** Finish waiting in a wait queue.
    - *
    - * This function restores interrupts to the state that existed prior
    - * to the call to waitq_sleep_prepare(). If necessary, the wait queue
    - * lock is released.
    - *
    - * @param wq       Wait queue.
    - * @param blocked  Out parameter of waitq_sleep_timeout_unsafe().
    - * @param ipl      Interrupt level returned by waitq_sleep_prepare().
    - *
    - */
    -void waitq_sleep_finish(waitq_t *wq, bool blocked, ipl_t ipl)
    -{
    -        if (blocked) {
    -                /*
    -                 * Wait for a waitq_wakeup() or waitq_unsleep() to complete
    -                 * before returning from waitq_sleep() to the caller. Otherwise
    -                 * the caller might expect that the wait queue is no longer used
    -                 * and deallocate it (although the wakeup on a another cpu has
    -                 * not yet completed and is using the wait queue).
    -                 *
    -                 * Note that we have to do this for EOK and EINTR, but not
    -                 * necessarily for ETIMEOUT where the timeout handler stops
    -                 * using the waitq before waking us up. To be on the safe side,
    -                 * ensure the waitq is not in use anymore in this case as well.
    -                 */
    -                waitq_complete_wakeup(wq);
    -        } else {
    -                irq_spinlock_unlock(&wq->lock, false);
    -        }
    -
    -        interrupts_restore(ipl);
    -}
    -
    -errno_t waitq_sleep_unsafe(waitq_t *wq, bool *blocked)
    -{
    -        return waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE, blocked);
    +        return (wait_guard_t) {
    +                .ipl = ipl,
    +        };
    +}
    +
    +errno_t waitq_sleep_unsafe(waitq_t *wq, wait_guard_t guard)
    +{
    +        return waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE, guard);
     }
    
    …
      *
      * This function implements logic of sleeping in a wait queue.
    - * This call must be preceded by a call to waitq_sleep_prepare()
    - * and followed by a call to waitq_sleep_finish().
    + * This call must be preceded by a call to waitq_sleep_prepare().
      *
      * @param wq    See waitq_sleep_timeout().
    …
      *
      */
    -errno_t waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked)
    -{
    -        *blocked = false;
    +errno_t waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags, wait_guard_t guard)
    +{
    +        errno_t rc;
    +
    +        /*
    +         * If true, and this thread's sleep returns without a wakeup
    +         * (timed out or interrupted), waitq ignores the next wakeup.
    +         * This is necessary for futex to be able to handle those conditions.
    +         */
    +        bool sleep_composable = (flags & SYNCH_FLAGS_FUTEX);
    +        bool interruptible = (flags & SYNCH_FLAGS_INTERRUPTIBLE);
    +
    +        if (wq->closed) {
    +                rc = EOK;
    +                goto exit;
    +        }
    
             /* Checks whether to go to sleep at all */
    -        if (wq->missed_wakeups) {
    -                wq->missed_wakeups--;
    -                return EOK;
    -        } else {
    -                if (PARAM_NON_BLOCKING(flags, usec)) {
    -                        /* Return immediately instead of going to sleep */
    -                        return EAGAIN;
    +        if (wq->wakeup_balance > 0) {
    +                wq->wakeup_balance--;
    +
    +                rc = EOK;
    +                goto exit;
    +        }
    +
    +        if (PARAM_NON_BLOCKING(flags, usec)) {
    +                /* Return immediately instead of going to sleep */
    +                rc = ETIMEOUT;
    +                goto exit;
    +        }
    +
    +        /* Just for debugging output. */
    +        atomic_store_explicit(&THREAD->sleep_queue, wq, memory_order_relaxed);
    +
    +        /*
    +         * This thread_t field is synchronized exclusively via
    +         * waitq lock of the waitq currently listing it.
    +         */
    +        list_append(&THREAD->wq_link, &wq->sleepers);
    +
    +        /* Needs to be run when interrupts are still disabled. */
    +        deadline_t deadline = usec > 0 ?
    +            timeout_deadline_in_usec(usec) : DEADLINE_NEVER;
    +
    +        while (true) {
    +                bool terminating = (thread_wait_start() == THREAD_TERMINATING);
    +                if (terminating && interruptible) {
    +                        rc = EINTR;
    +                        goto exit;
                     }
    -        }
    -
    -        /*
    -         * Now we are firmly decided to go to sleep.
    -         *
    -         */
    -        irq_spinlock_lock(&THREAD->lock, false);
    -
    -        timeout_t timeout;
    -        timeout_initialize(&timeout);
    -
    -        THREAD->sleep_composable = (flags & SYNCH_FLAGS_FUTEX);
    -
    -        if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {
    +
    +                irq_spinlock_unlock(&wq->lock, false);
    +
    +                bool timed_out = (thread_wait_finish(deadline) == THREAD_WAIT_TIMEOUT);
    +
                     /*
    -                 * If the thread was already interrupted,
    -                 * don't go to sleep at all.
    +                 * We always need to re-lock the WQ, since concurrently running
    +                 * waitq_wakeup() may still not have exitted.
    +                 * If we didn't always do this, we'd risk waitq_wakeup() that woke us
    +                 * up still running on another CPU even after this function returns,
    +                 * and that would be an issue if the waitq is allocated locally to
    +                 * wait for a one-off asynchronous event. We'd need more external
    +                 * synchronization in that case, and that would be a pain.
    +                 *
    +                 * On the plus side, always regaining a lock simplifies cleanup.
                      */
    -                if (THREAD->interrupted) {
    -                        irq_spinlock_unlock(&THREAD->lock, false);
    -                        return EINTR;
    +                irq_spinlock_lock(&wq->lock, false);
    +
    +                if (!link_in_use(&THREAD->wq_link)) {
    +                        /*
    +                         * We were woken up by the desired event. Return success,
    +                         * regardless of any concurrent timeout or interruption.
    +                         */
    +                        rc = EOK;
    +                        goto exit;
                     }
    
    -                /*
    -                 * Set context that will be restored if the sleep
    -                 * of this thread is ever interrupted.
    -                 */
    -                THREAD->sleep_interruptible = true;
    -                if (!context_save(&THREAD->sleep_interruption_context)) {
    -                        /* Short emulation of scheduler() return code. */
    -                        THREAD->last_cycle = get_cycle();
    -                        irq_spinlock_unlock(&THREAD->lock, false);
    -                        if (usec) {
    -                                timeout_unregister(&timeout);
    -                        }
    -                        return EINTR;
    +                if (timed_out) {
    +                        rc = ETIMEOUT;
    +                        goto exit;
                     }
    -        } else
    -                THREAD->sleep_interruptible = false;
    -
    -        if (usec) {
    -                /* We use the timeout variant. */
    -                if (!context_save(&THREAD->sleep_timeout_context)) {
    -                        /* Short emulation of scheduler() return code. */
    -                        THREAD->last_cycle = get_cycle();
    -                        irq_spinlock_unlock(&THREAD->lock, false);
    -                        return ETIMEOUT;
    -                }
    -
    -                timeout_register(&timeout, (uint64_t) usec, waitq_sleep_timed_out, THREAD);
    -        }
    -
    -        list_append(&THREAD->wq_link, &wq->sleepers);
    -
    -        /*
    -         * Suspend execution.
    -         *
    -         */
    -        THREAD->state = Sleeping;
    -        THREAD->sleep_queue = wq;
    -
    -        /*
    -         * Must be before entry to scheduler, because there are multiple
    -         * return vectors.
    -         */
    -        *blocked = true;
    -
    -        irq_spinlock_unlock(&THREAD->lock, false);
    -
    -        /* wq->lock is released in scheduler_separated_stack() */
    -        scheduler();
    -
    -        if (usec) {
    -                timeout_unregister(&timeout);
    -        }
    -
    -        return EOK;
    -}
    -
    -/** Wake up first thread sleeping in a wait queue
    - *
    - * Wake up first thread sleeping in a wait queue. This is the SMP- and IRQ-safe
    - * wrapper meant for general use.
    - *
    - * Besides its 'normal' wakeup operation, it attempts to unregister possible
    - * timeout.
    - *
    - * @param wq   Pointer to wait queue.
    - * @param mode Wakeup mode.
    - *
    - */
    -void waitq_wakeup(waitq_t *wq, wakeup_mode_t mode)
    +
    +                /* Interrupted for some other reason. */
    +        }
    +
    +exit:
    +        if (THREAD)
    +                list_remove(&THREAD->wq_link);
    +
    +        if (rc != EOK && sleep_composable)
    +                wq->wakeup_balance--;
    +
    +        if (THREAD)
    +                atomic_store_explicit(&THREAD->sleep_queue, NULL, memory_order_relaxed);
    +
    +        irq_spinlock_unlock(&wq->lock, false);
    +        interrupts_restore(guard.ipl);
    +        return rc;
    +}
    +
    +static void _wake_one(waitq_t *wq)
    +{
    +        /* Pop one thread from the queue and wake it up. */
    +        thread_t *thread = list_get_instance(list_first(&wq->sleepers), thread_t, wq_link);
    +        list_remove(&thread->wq_link);
    +        thread_wakeup(thread);
    +}
    +
    +/**
    + * Meant for implementing condvar signal.
    + * Always wakes one thread if there are any sleeping,
    + * has no effect if no threads are waiting for wakeup.
    + */
    +void waitq_signal(waitq_t *wq)
     {
             irq_spinlock_lock(&wq->lock, true);
    -        _waitq_wakeup_unsafe(wq, mode);
    +
    +        if (!list_empty(&wq->sleepers))
    +                _wake_one(wq);
    +
             irq_spinlock_unlock(&wq->lock, true);
     }
    
    -/** If there is a wakeup in progress actively waits for it to complete.
    - *
    - * The function returns once the concurrently running waitq_wakeup()
    - * exits. It returns immediately if there are no concurrent wakeups
    - * at the time.
    - *
    - * Interrupts must be disabled.
    - *
    - * Example usage:
    - * @code
    - * void callback(waitq *wq)
    - * {
    - *     // Do something and notify wait_for_completion() that we're done.
    - *     waitq_wakeup(wq);
    - * }
    - * void wait_for_completion(void)
    - * {
    - *     waitq wg;
    - *     waitq_initialize(&wq);
    - *     // Run callback() in the background, pass it wq.
    - *     do_asynchronously(callback, &wq);
    - *     // Wait for callback() to complete its work.
    - *     waitq_sleep(&wq);
    - *     // callback() completed its work, but it may still be accessing
    - *     // wq in waitq_wakeup(). Therefore it is not yet safe to return
    - *     // from waitq_sleep() or it would clobber up our stack (where wq
    - *     // is stored). waitq_sleep() ensures the wait queue is no longer
    - *     // in use by invoking waitq_complete_wakeup() internally.
    - *
    - *     // waitq_sleep() returned, it is safe to free wq.
    - * }
    - * @endcode
    - *
    - * @param wq  Pointer to a wait queue.
    - */
    -static void waitq_complete_wakeup(waitq_t *wq)
    -{
    -        assert(interrupts_disabled());
    -
    -        irq_spinlock_lock(&wq->lock, false);
    -        irq_spinlock_unlock(&wq->lock, false);
    -}
    -
    -/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
    - *
    - * This is the internal SMP- and IRQ-unsafe version of waitq_wakeup(). It
    - * assumes wq->lock is already locked and interrupts are already disabled.
    - *
    - * @param wq   Pointer to wait queue.
    - * @param mode If mode is WAKEUP_FIRST, then the longest waiting
    - *             thread, if any, is woken up. If mode is WAKEUP_ALL, then
    - *             all waiting threads, if any, are woken up. If there are
    - *             no waiting threads to be woken up, the missed wakeup is
    - *             recorded in the wait queue.
    - *
    - */
    -void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode)
    -{
    -        size_t count = 0;
    -
    -        assert(interrupts_disabled());
    -        assert(irq_spinlock_locked(&wq->lock));
    -
    -        if (wq->ignore_wakeups > 0) {
    -                if (mode == WAKEUP_FIRST) {
    -                        wq->ignore_wakeups--;
    -                        return;
    -                }
    -                wq->ignore_wakeups = 0;
    -        }
    -
    -loop:
    -        if (list_empty(&wq->sleepers)) {
    -                if (mode == WAKEUP_CLOSE) {
    -                        // FIXME: this can technically fail if we get two billion sleeps after the wakeup call.
    -                        wq->missed_wakeups = INT_MAX;
    -                } else if (mode != WAKEUP_ALL) {
    -                        wq->missed_wakeups++;
    -                }
    -
    -                return;
    -        }
    -
    -        count++;
    -        thread_t *thread = list_get_instance(list_first(&wq->sleepers),
    -            thread_t, wq_link);
    -
    -        /*
    -         * Lock the thread prior to removing it from the wq.
    -         * This is not necessary because of mutual exclusion
    -         * (the link belongs to the wait queue), but because
    -         * of synchronization with waitq_sleep_timed_out()
    -         * and thread_interrupt_sleep().
    -         *
    -         * In order for these two functions to work, the following
    -         * invariant must hold:
    -         *
    -         * thread->sleep_queue != NULL <=> thread sleeps in a wait queue
    -         *
    -         * For an observer who locks the thread, the invariant
    -         * holds only when the lock is held prior to removing
    -         * it from the wait queue.
    -         *
    -         */
    -        irq_spinlock_lock(&thread->lock, false);
    -        list_remove(&thread->wq_link);
    -
    -        thread->sleep_queue = NULL;
    -        irq_spinlock_unlock(&thread->lock, false);
    -
    -        thread_ready(thread);
    -
    -        if (mode == WAKEUP_ALL)
    -                goto loop;
    +/**
    + * Wakes up one thread sleeping on this waitq.
    + * If there are no threads waiting, saves the wakeup so that the next sleep
    + * returns immediately. If a previous failure in sleep created a wakeup debt
    + * (see SYNCH_FLAGS_FUTEX) this debt is annulled and no thread is woken up.
    + */
    +void waitq_wake_one(waitq_t *wq)
    +{
    +        irq_spinlock_lock(&wq->lock, true);
    +
    +        if (!wq->closed) {
    +                if (wq->wakeup_balance < 0 || list_empty(&wq->sleepers))
    +                        wq->wakeup_balance++;
    +                else
    +                        _wake_one(wq);
    +        }
    +
    +        irq_spinlock_unlock(&wq->lock, true);
    +}
    +
    +static void _wake_all(waitq_t *wq)
    +{
    +        while (!list_empty(&wq->sleepers))
    +                _wake_one(wq);
    +}
    +
    +/**
    + * Wakes up all threads currently waiting on this waitq
    + * and makes all future sleeps return instantly.
    + */
    +void waitq_close(waitq_t *wq)
    +{
    +        irq_spinlock_lock(&wq->lock, true);
    +        wq->wakeup_balance = 0;
    +        wq->closed = true;
    +        _wake_all(wq);
    +        irq_spinlock_unlock(&wq->lock, true);
    +}
    +
    +/**
    + * Wakes up all threads currently waiting on this waitq
    + */
    +void waitq_wake_all(waitq_t *wq)
    +{
    +        irq_spinlock_lock(&wq->lock, true);
    +        wq->wakeup_balance = 0;
    +        _wake_all(wq);
    +        irq_spinlock_unlock(&wq->lock, true);
     }
    
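
To summarize the wakeup_balance bookkeeping introduced above (an illustrative
sketch of the documented semantics, not code from the changeset):

    waitq_t wq;
    waitq_initialize_with_count(&wq, 1);  /* one wakeup already queued */

    errno_t rc = waitq_sleep(&wq);        /* EOK immediately; balance 1 -> 0 */

    waitq_wake_one(&wq);                  /* no sleeper: balance 0 -> 1 */

    /* A futex-style sleep (SYNCH_FLAGS_FUTEX) that times out decrements the
     * balance, possibly below zero; a later waitq_wake_one() then only annuls
     * that debt instead of waking or queueing anything. */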
  • kernel/generic/src/time/timeout.c

    r76e17d7c → r111b9b9
     }
    
    -/** Register timeout
    - *
    - * Insert timeout handler f (with argument arg)
    - * to timeout list and make it execute in
    - * time microseconds (or slightly more).
    - *
    - * @param timeout Timeout structure.
    - * @param time    Number of usec in the future to execute the handler.
    - * @param handler Timeout handler function.
    - * @param arg     Timeout handler argument.
    - *
    - */
    -void timeout_register(timeout_t *timeout, uint64_t time,
    +/* Only call when interrupts are disabled. */
    +deadline_t timeout_deadline_in_usec(uint32_t usec)
    +{
    +        if (usec == 0)
    +                return 0;
    +
    +        return CPU->current_clock_tick + us2ticks(usec);
    +}
    +
    +static void timeout_register_deadline_locked(timeout_t *timeout, deadline_t deadline,
         timeout_handler_t handler, void *arg)
     {
    -        irq_spinlock_lock(&CPU->timeoutlock, true);
    -
             assert(!link_in_use(&timeout->link));
    
             *timeout = (timeout_t) {
                     .cpu = CPU,
    -                .deadline = CPU->current_clock_tick + us2ticks(time),
    +                .deadline = deadline,
                     .handler = handler,
                     .arg = arg,
    …
                     }
             }
    +}
    
    +/** Register timeout
    + *
    + * Insert timeout handler f (with argument arg)
    + * to timeout list and make it execute in
    + * time microseconds (or slightly more).
    + *
    + * @param timeout Timeout structure.
    + * @param time    Number of usec in the future to execute the handler.
    + * @param handler Timeout handler function.
    + * @param arg     Timeout handler argument.
    + *
    + */
    +void timeout_register(timeout_t *timeout, uint64_t time,
    +    timeout_handler_t handler, void *arg)
    +{
    +        irq_spinlock_lock(&CPU->timeoutlock, true);
    +        timeout_register_deadline_locked(timeout, timeout_deadline_in_usec(time), handler, arg);
    +        irq_spinlock_unlock(&CPU->timeoutlock, true);
    +}
    +
    +void timeout_register_deadline(timeout_t *timeout, deadline_t deadline,
    +    timeout_handler_t handler, void *arg)
    +{
    +        irq_spinlock_lock(&CPU->timeoutlock, true);
    +        timeout_register_deadline_locked(timeout, deadline, handler, arg);
             irq_spinlock_unlock(&CPU->timeoutlock, true);
     }
  • kernel/generic/src/udebug/udebug.c

    r76e17d7c → r111b9b9
                                     /*
                                      * thread's lock must not be held when calling
    -                                 * waitq_wakeup.
    +                                 * waitq_close.
                                      *
                                      */
    -                                waitq_wakeup(&thread->udebug.go_wq, WAKEUP_ALL);
    +                                waitq_close(&thread->udebug.go_wq);
                             }
    
  • kernel/generic/src/udebug/udebug_ops.c

    r76e17d7c → r111b9b9
              *
              */
    -        waitq_wakeup(&thread->udebug.go_wq, WAKEUP_ALL);
    +        waitq_wake_all(&thread->udebug.go_wq);
    
             _thread_op_end(thread);
  • kernel/test/synch/semaphore1.c

    r76e17d7c → r111b9b9
    
                     thread_sleep(1);
    -                waitq_wakeup(&can_start, WAKEUP_ALL);
    +                waitq_wake_all(&can_start);
    
                     while ((items_consumed != consumers) || (items_produced != producers)) {
  • kernel/test/synch/semaphore2.c

    r76e17d7c → r111b9b9
    
             thread_usleep(20000);
    -        waitq_wakeup(&can_start, WAKEUP_ALL);
    +        waitq_wake_all(&can_start);
    
             return NULL;