Changeset 5663872 in mainline for kernel/generic/src/proc/thread.c


Timestamp: 2024-01-14T18:24:05Z
Author: Jiří Zárevúcky <zarevucky.jiri@…>
Branches: master
Children: 23f36a3
Parents: 4760793
git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2023-02-18 13:37:30)
git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2024-01-14 18:24:05)
Message:

Move stuff around for thread sleep

Only mark the thread as ready for wakeup after we switch to
another context. This way, soundness of the synchronization
does not depend on the thread lock being held across the context
switch, which gives us more freedom.
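
As a minimal sketch of the handshake this refers to, assuming C11 atomics and reusing the names from the code below (sleep_state, SLEEP_*); everything else is illustrative, not the actual HelenOS implementation:

#include <stdatomic.h>

enum sleep_state {
        SLEEP_INITIAL,  /* neither sleep nor wakeup recorded yet */
        SLEEP_ASLEEP,   /* sleeper committed to sleeping */
        SLEEP_WOKE,     /* wakeup recorded; a sleep attempt returns at once */
};

static _Atomic int sleep_state = SLEEP_INITIAL;

/* Sleeper: commit to sleeping only if no wakeup has arrived yet. */
static void sleeper(void)
{
        int expected = SLEEP_INITIAL;
        if (atomic_compare_exchange_strong_explicit(&sleep_state, &expected,
            SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire)) {
                /* Won the race: safe to block until a waker sees SLEEP_ASLEEP. */
        } else {
                /* expected == SLEEP_WOKE: the wakeup came first, so return. */
        }
}

/* Waker: record the wakeup unconditionally. The exchange is a
 * read-modify-write, so acq_rel both publishes the waker's prior writes
 * (release half) and lets it observe everything the sleeper wrote before
 * committing to sleep (acquire half) before requeueing the thread. */
static void waker(void)
{
        int state = atomic_exchange_explicit(&sleep_state, SLEEP_WOKE,
            memory_order_acq_rel);
        if (state == SLEEP_ASLEEP) {
                /* The sleeper really went to sleep: make it runnable again. */
        }
}

The commit's point is that this atomic transition alone decides who wins the race, so correctness no longer requires the sleeper to hold its thread lock across the context switch.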

File:
1 edited

  • kernel/generic/src/proc/thread.c

--- kernel/generic/src/proc/thread.c (r4760793)
+++ kernel/generic/src/proc/thread.c (r5663872)
@@ -82,10 +82,4 @@
 };
 
-enum sleep_state {
-        SLEEP_INITIAL,
-        SLEEP_ASLEEP,
-        SLEEP_WOKE,
-};
-
 /** Lock protecting the @c threads ordered dictionary .
  *
@@ -579,34 +573,4 @@
 }
 
-static void thread_wait_internal(void)
-{
-        assert(THREAD != NULL);
-
-        ipl_t ipl = interrupts_disable();
-
-        if (atomic_load(&haltstate))
-                halt();
-
-        /*
-         * Lock here to prevent a race between entering the scheduler and another
-         * thread rescheduling this thread.
-         */
-        irq_spinlock_lock(&THREAD->lock, false);
-
-        int expected = SLEEP_INITIAL;
-
-        /* Only set SLEEP_ASLEEP in sleep pad if it's still in initial state */
-        if (atomic_compare_exchange_strong_explicit(&THREAD->sleep_state, &expected,
-            SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire)) {
-                THREAD->state = Sleeping;
-                scheduler_locked(ipl);
-        } else {
-                assert(expected == SLEEP_WOKE);
-                /* Return immediately. */
-                irq_spinlock_unlock(&THREAD->lock, false);
-                interrupts_restore(ipl);
-        }
-}
-
 static void thread_wait_timeout_callback(void *arg)
 {
@@ -649,10 +613,10 @@
         timeout_t timeout;
 
+        /* Extra check to avoid going to scheduler if we don't need to. */
+        if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
+            SLEEP_INITIAL)
+                return THREAD_WAIT_SUCCESS;
+
         if (deadline != DEADLINE_NEVER) {
-                /* Extra check to avoid setting up a deadline if we don't need to. */
-                if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
-                    SLEEP_INITIAL)
-                        return THREAD_WAIT_SUCCESS;
-
                 timeout_initialize(&timeout);
                 timeout_register_deadline(&timeout, deadline,
@@ -660,5 +624,8 @@
         }
 
-        thread_wait_internal();
+        ipl_t ipl = interrupts_disable();
+        irq_spinlock_lock(&THREAD->lock, false);
+        THREAD->state = Sleeping;
+        scheduler_locked(ipl);
 
         if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
@@ -674,5 +641,5 @@
 
         int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
-            memory_order_release);
+            memory_order_acq_rel);
 
         if (state == SLEEP_ASLEEP) {
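
Putting the hunks together, the post-patch sleep path in thread_wait() reads roughly as follows. This is a hedged reconstruction from the fragments above only: the return type, the parameter name, and the timeout_register_deadline arguments hidden between the hunks are assumptions, marked as such.

/* Hypothetical consolidation; signature and elided arguments assumed. */
thread_wait_result_t thread_wait(deadline_t deadline)
{
        timeout_t timeout;

        /* Extra check to avoid going to scheduler if we don't need to. */
        if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
            SLEEP_INITIAL)
                return THREAD_WAIT_SUCCESS;

        if (deadline != DEADLINE_NEVER) {
                timeout_initialize(&timeout);
                timeout_register_deadline(&timeout, deadline,
                    /* ... arguments elided in this changeset view ... */);
        }

        /* Enter the scheduler as Sleeping; per the commit message, the
         * SLEEP_ASLEEP transition now happens only after the switch to
         * another context (outside this file). */
        ipl_t ipl = interrupts_disable();
        irq_spinlock_lock(&THREAD->lock, false);
        THREAD->state = Sleeping;
        scheduler_locked(ipl);

        if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
                /* ... timeout path, unchanged by this changeset ... */
        }
        /* ... remainder unchanged ... */
}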