Changeset 111b9b9 in mainline for kernel/generic/src/proc/thread.c


Ignore:
Timestamp:
2023-02-11T19:13:44Z (2 years ago)
Author:
Jiří Zárevúcky <zarevucky.jiri@…>
Branches:
master, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
4777e02
Parents:
76e17d7c
git-author:
Jiří Zárevúcky <zarevucky.jiri@…> (2022-08-15 17:46:39)
git-committer:
Jiří Zárevúcky <zarevucky.jiri@…> (2023-02-11 19:13:44)
Message:

Reimplement waitq using thread_wait/wakeup

This adds a few functions to the thread API which can be
summarized as "stop running until woken up by others".
The ordering and context-switching concerns are thus yeeted
to this abstraction and waitq only deals with maintaining
the queues. Overall, this makes the control flow in waitq
much easier to navigate.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • kernel/generic/src/proc/thread.c

    r76e17d7c r111b9b9  
    6969#include <errno.h>
    7070#include <debug.h>
     71#include <halt.h>
    7172
    7273/** Thread states */
     
    7980        "Exiting",
    8081        "Lingering"
     82};
     83
     84enum sleep_state {
     85        SLEEP_INITIAL,
     86        SLEEP_ASLEEP,
     87        SLEEP_WOKE,
    8188};
    8289
     
    365372        thread->state = Entering;
    366373
    367         thread->sleep_interruptible = false;
    368         thread->sleep_composable = false;
    369         thread->sleep_queue = NULL;
     374        atomic_init(&thread->sleep_queue, NULL);
    370375
    371376        thread->in_copy_from_uspace = false;
     
    373378
    374379        thread->interrupted = false;
     380        atomic_init(&thread->sleep_state, SLEEP_INITIAL);
     381
    375382        waitq_initialize(&thread->join_wq);
    376383
     
    545552 * @param thread A valid thread object.
    546553 */
    547 void thread_interrupt(thread_t *thread, bool irq_dis)
     554void thread_interrupt(thread_t *thread)
    548555{
    549556        assert(thread != NULL);
    550 
    551         irq_spinlock_lock(&thread->lock, irq_dis);
    552 
    553557        thread->interrupted = true;
    554         bool sleeping = (thread->state == Sleeping);
    555 
    556         irq_spinlock_unlock(&thread->lock, irq_dis);
    557 
    558         if (sleeping)
    559                 waitq_interrupt_sleep(thread);
    560 
    561         thread_put(thread);
     558        thread_wakeup(thread);
     559}
     560
     561/** Prepare for putting the thread to sleep.
     562 *
     563 * @returns whether the thread is currently terminating. If THREAD_OK
     564 * is returned, the thread is guaranteed to be woken up instantly if the thread
     565 * is terminated at any time between this function's return and
     566 * thread_wait_finish(). If THREAD_TERMINATING is returned, the thread can still
     567 * go to sleep, but doing so will delay termination.
     568 */
     569thread_termination_state_t thread_wait_start(void)
     570{
     571        assert(THREAD != NULL);
     572
     573        /*
     574         * This is an exchange rather than a store so that we can use the acquire
     575         * semantics, which is needed to ensure that code after this operation sees
     576         * memory ops made before thread_wakeup() in other thread, if that wakeup
     577         * was reset by this operation.
     578         *
     579         * In particular, we need this to ensure we can't miss the thread being
     580         * terminated concurrently with a synchronization primitive preparing to
     581         * sleep.
     582         */
     583        (void) atomic_exchange_explicit(&THREAD->sleep_state, SLEEP_INITIAL,
     584            memory_order_acquire);
     585
     586        return THREAD->interrupted ? THREAD_TERMINATING : THREAD_OK;
     587}
     588
     589static void thread_wait_internal(void)
     590{
     591        assert(THREAD != NULL);
     592
     593        ipl_t ipl = interrupts_disable();
     594
     595        if (atomic_load(&haltstate))
     596                halt();
     597
     598        /*
     599         * Lock here to prevent a race between entering the scheduler and another
     600         * thread rescheduling this thread.
     601         */
     602        irq_spinlock_lock(&THREAD->lock, false);
     603
     604        int expected = SLEEP_INITIAL;
     605
      606        /* Only set SLEEP_ASLEEP in sleep_state if it's still in initial state */
     607        if (atomic_compare_exchange_strong_explicit(&THREAD->sleep_state, &expected,
     608            SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire)) {
     609                THREAD->state = Sleeping;
     610                scheduler_locked(ipl);
     611        } else {
     612                assert(expected == SLEEP_WOKE);
     613                /* Return immediately. */
     614                irq_spinlock_unlock(&THREAD->lock, false);
     615                interrupts_restore(ipl);
     616        }
     617}
     618
     619static void thread_wait_timeout_callback(void *arg)
     620{
     621        thread_wakeup(arg);
     622}
     623
     624/**
     625 * Suspends this thread's execution until thread_wakeup() is called on it,
     626 * or deadline is reached.
     627 *
      628 * The way this would normally be used is that the current thread calls
     629 * thread_wait_start(), and if interruption has not been signaled, stores
     630 * a reference to itself in a synchronized structure (such as waitq).
     631 * After that, it releases any spinlocks it might hold and calls this function.
     632 *
     633 * The thread doing the wakeup will acquire the thread's reference from said
      634 * synchronized structure and call thread_wakeup() on it.
     635 *
     636 * Notably, there can be more than one thread performing wakeup.
     637 * The number of performed calls to thread_wakeup(), or their relative
     638 * ordering with thread_wait_finish(), does not matter. However, calls to
     639 * thread_wakeup() are expected to be synchronized with thread_wait_start()
     640 * with which they are associated, otherwise wakeups may be missed.
     641 * However, the operation of thread_wakeup() is defined at any time,
     642 * synchronization notwithstanding (in the sense of C un/defined behavior),
     643 * and is in fact used to interrupt waiting threads by external events.
     644 * The waiting thread must operate correctly in face of spurious wakeups,
     645 * and clean up its reference in the synchronization structure if necessary.
     646 *
     647 * Returns THREAD_WAIT_TIMEOUT if timeout fired, which is a necessary condition
      648 * for it to have been woken up by the timeout, but the caller must assume
     649 * that proper wakeups, timeouts and interrupts may occur concurrently, so
     650 * the fact timeout has been registered does not necessarily mean the thread
     651 * has not been woken up or interrupted.
     652 */
     653thread_wait_result_t thread_wait_finish(deadline_t deadline)
     654{
     655        assert(THREAD != NULL);
     656
     657        timeout_t timeout;
     658
     659        if (deadline != DEADLINE_NEVER) {
     660                /* Extra check to avoid setting up a deadline if we don't need to. */
     661                if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
     662                    SLEEP_INITIAL)
     663                        return THREAD_WAIT_SUCCESS;
     664
     665                timeout_initialize(&timeout);
     666                timeout_register_deadline(&timeout, deadline,
     667                    thread_wait_timeout_callback, THREAD);
     668        }
     669
     670        thread_wait_internal();
     671
     672        if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
     673                return THREAD_WAIT_TIMEOUT;
     674        } else {
     675                return THREAD_WAIT_SUCCESS;
     676        }
     677}
     678
     679void thread_wakeup(thread_t *thread)
     680{
     681        assert(thread != NULL);
     682
     683        int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
     684            memory_order_release);
     685
     686        if (state == SLEEP_ASLEEP) {
     687                /*
     688                 * Only one thread gets to do this.
     689                 * The reference consumed here is the reference implicitly passed to
     690                 * the waking thread by the sleeper in thread_wait_finish().
     691                 */
     692                thread_ready(thread);
     693        }
    562694}
    563695
     
    628760                return EOK;
    629761        } else {
    630                 return waitq_sleep_timeout(&thread->join_wq, usec,
    631                     SYNCH_FLAGS_NON_BLOCKING, NULL);
     762                return _waitq_sleep_timeout(&thread->join_wq, usec, flags);
    632763        }
    633764}
     
    646777        waitq_initialize(&wq);
    647778
    648         (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
     779        (void) waitq_sleep_timeout(&wq, usec);
    649780}
    650781
     
    8901021
    8911022        if (sleeping)
    892                 waitq_interrupt_sleep(thread);
     1023                thread_wakeup(thread);
    8931024
    8941025        thread_put(thread);
Note: See TracChangeset for help on using the changeset viewer.