Changeset 5663872 in mainline


Timestamp:
2024-01-14T18:24:05Z (4 months ago)
Author:
Jiří Zárevúcky <zarevucky.jiri@…>
Branches:
master
Children:
23f36a3
Parents:
4760793
git-author:
Jiří Zárevúcky <zarevucky.jiri@…> (2023-02-18 13:37:30)
git-committer:
Jiří Zárevúcky <zarevucky.jiri@…> (2024-01-14 18:24:05)
Message:

Move stuff around for thread sleep

Only mark the thread as ready for wakeup after we switch to
another context. This way, soundness of the synchronization
does not depend on the thread lock being held across the context
switch, which gives us more freedom.
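
The handshake this introduces is a small three-state protocol (see the new enum sleep_state and the reworked Sleeping case in the diffs below). As a rough illustration, here is a minimal, self-contained C11 sketch of the scheduler side; my_thread, requeue() and commit_sleep() are hypothetical stand-ins, not HelenOS APIs, and the real code additionally manages THREAD->lock, the thread priority and the interrupt state.

    #include <stdatomic.h>
    #include <assert.h>

    /* Mirrors the enum added to thread.h in this changeset. */
    enum sleep_state { SLEEP_INITIAL, SLEEP_ASLEEP, SLEEP_WOKE };

    /* Toy thread representation; not the kernel's thread_t. */
    struct my_thread {
            _Atomic int sleep_state;   /* starts out as SLEEP_INITIAL (0) */
            int on_run_queue;          /* toy substitute for the run queues */
    };

    /* Stand-in for thread_ready(): just records that the thread is runnable. */
    static void requeue(struct my_thread *t)
    {
            t->on_run_queue = 1;
    }

    /*
     * Models the new Sleeping case in the scheduler, which in the kernel runs
     * only after the CPU has already switched away from the sleeping thread.
     */
    static void commit_sleep(struct my_thread *t)
    {
            int expected = SLEEP_INITIAL;

            if (atomic_compare_exchange_strong_explicit(&t->sleep_state,
                &expected, SLEEP_ASLEEP,
                memory_order_acq_rel, memory_order_acquire)) {
                    /* Committed to sleep; a later waker will requeue the thread. */
                    return;
            }

            /* A waker got there first; the only other legal value is SLEEP_WOKE. */
            assert(expected == SLEEP_WOKE);
            requeue(t);
    }

Because the CAS only happens after the switch to another context, neither side of the race needs the thread lock to be held across the context switch, which is the freedom the message above refers to.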

Location:
kernel/generic
Files:
3 edited

Legend:

    (no prefix)  unmodified
    +            added in this changeset
    -            removed in this changeset
  • kernel/generic/include/proc/thread.h

    r4760793 r5663872

      extern void thread_interrupt(thread_t *);

    + enum sleep_state {
    +         SLEEP_INITIAL,
    +         SLEEP_ASLEEP,
    +         SLEEP_WOKE,
    + };
    +
      typedef enum {
              THREAD_OK,
  • kernel/generic/src/proc/scheduler.c

    r4760793 r5663872

              after_thread_ran();

    +         int expected;
    +
              switch (THREAD->state) {
              case Running:
    …

              case Sleeping:
    -                 /*
    -                  * Prefer the thread after it's woken up.
    -                  */
    -                 THREAD->priority = -1;
    -                 irq_spinlock_unlock(&THREAD->lock, false);
    +                 expected = SLEEP_INITIAL;
    +
    +                 /* Only set SLEEP_ASLEEP in sleep pad if it's still in initial state */
    +                 if (atomic_compare_exchange_strong_explicit(&THREAD->sleep_state,
    +                     &expected, SLEEP_ASLEEP,
    +                     memory_order_acq_rel, memory_order_acquire)) {
    +
    +                         /* Prefer the thread after it's woken up. */
    +                         THREAD->priority = -1;
    +                         irq_spinlock_unlock(&THREAD->lock, false);
    +                 } else {
    +                         assert(expected == SLEEP_WOKE);
    +                         /* The thread has already been woken up, requeue immediately. */
    +                         irq_spinlock_unlock(&THREAD->lock, false);
    +                         thread_ready(THREAD);
    +                 }
    +
                      break;

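The code this scheduler hunk races against is the wakeup path (thread_wakeup() in the thread.c diff below). Continuing the toy model sketched above, and again only as a hedged approximation of the real code, the waker side looks roughly like this:

    /*
     * Models the wakeup side: unconditionally mark the thread as woken,
     * then requeue it only if it had already committed to sleep.
     */
    static void wake_up(struct my_thread *t)
    {
            int prev = atomic_exchange_explicit(&t->sleep_state, SLEEP_WOKE,
                memory_order_acq_rel);

            if (prev == SLEEP_ASLEEP) {
                    /* The sleeper already went through the scheduler; requeue it. */
                    requeue(t);
            }

            /*
             * If prev was SLEEP_INITIAL, the scheduler's CAS has not run yet;
             * it will now fail with SLEEP_WOKE and the scheduler requeues the
             * thread itself, so there is nothing more to do here.
             */
    }

Whichever of the two sides loses the race is the one that puts the thread back on a run queue, so the thread is requeued exactly once.
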
  • kernel/generic/src/proc/thread.c

    r4760793 r5663872

      };

    - enum sleep_state {
    -         SLEEP_INITIAL,
    -         SLEEP_ASLEEP,
    -         SLEEP_WOKE,
    - };
    -
      /** Lock protecting the @c threads ordered dictionary .
       *
    …
      }

    - static void thread_wait_internal(void)
    - {
    -         assert(THREAD != NULL);
    -
    -         ipl_t ipl = interrupts_disable();
    -
    -         if (atomic_load(&haltstate))
    -                 halt();
    -
    -         /*
    -          * Lock here to prevent a race between entering the scheduler and another
    -          * thread rescheduling this thread.
    -          */
    -         irq_spinlock_lock(&THREAD->lock, false);
    -
    -         int expected = SLEEP_INITIAL;
    -
    -         /* Only set SLEEP_ASLEEP in sleep pad if it's still in initial state */
    -         if (atomic_compare_exchange_strong_explicit(&THREAD->sleep_state, &expected,
    -             SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire)) {
    -                 THREAD->state = Sleeping;
    -                 scheduler_locked(ipl);
    -         } else {
    -                 assert(expected == SLEEP_WOKE);
    -                 /* Return immediately. */
    -                 irq_spinlock_unlock(&THREAD->lock, false);
    -                 interrupts_restore(ipl);
    -         }
    - }
    -
      static void thread_wait_timeout_callback(void *arg)
      {
    …
              timeout_t timeout;

    +         /* Extra check to avoid going to scheduler if we don't need to. */
    +         if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
    +             SLEEP_INITIAL)
    +                 return THREAD_WAIT_SUCCESS;
    +
              if (deadline != DEADLINE_NEVER) {
    -                 /* Extra check to avoid setting up a deadline if we don't need to. */
    -                 if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
    -                     SLEEP_INITIAL)
    -                         return THREAD_WAIT_SUCCESS;
    -
                      timeout_initialize(&timeout);
                      timeout_register_deadline(&timeout, deadline,
    …
              }

    -         thread_wait_internal();
    +         ipl_t ipl = interrupts_disable();
    +         irq_spinlock_lock(&THREAD->lock, false);
    +         THREAD->state = Sleeping;
    +         scheduler_locked(ipl);

              if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
    …

              int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
    -             memory_order_release);
    +             memory_order_acq_rel);

              if (state == SLEEP_ASLEEP) {
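
The last hunk also strengthens the wakeup exchange from memory_order_release to memory_order_acq_rel. The changeset does not explain why; one plausible reading is that the acquire half pairs with the release half of the scheduler's acq_rel CAS, so a waker that observes SLEEP_ASLEEP also observes the sleeping thread's earlier writes before requeuing it. To see both interleavings of the toy model end to end, a small harness like the following can be used (pthreads appear only for the demonstration and have nothing to do with the changeset):

    #include <pthread.h>
    #include <stdio.h>

    static struct my_thread thr;    /* from the sketches above */

    static void *waker(void *arg)
    {
            (void) arg;
            wake_up(&thr);
            return NULL;
    }

    int main(void)
    {
            pthread_t tid;
            pthread_create(&tid, NULL, waker, NULL);

            /* Scheduler side, racing against the waker thread. */
            commit_sleep(&thr);

            pthread_join(tid, NULL);

            /* Whichever side lost the race did the requeue. */
            printf("on_run_queue = %d\n", thr.on_run_queue);
            return 0;
    }

Built together with the sketches above (gcc -pthread), either interleaving prints on_run_queue = 1.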