Changeset a5b5f17 in mainline for kernel/generic/include/proc/thread.h


Timestamp:
2024-01-21T16:36:15Z
Author:
Jiří Zárevúcky <zarevucky.jiri@…>
Branches:
master
Children:
1a1e124
Parents:
ed7e057 (diff), d23712e (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge scheduler refactoring to remove the need for thread structure lock

All necessary synchronization is already a product of other operations
that enforce ordering (that is, runqueue manipulation and
thread_sleep()/thread_wakeup()). Some fields formally become atomic,
which is only needed because they are read from other threads to print
out statistics. These atomic operations are limited to relaxed
individual reads/writes of native-sized fields, which should, at least
in theory, compile identically to regular volatile variable accesses;
the only difference is that concurrent accesses from different threads
are not undefined behavior by definition.
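
As a rough illustration (not the actual HelenOS code; atomic_time_stat_t
and its helpers differ), a relaxed-atomic statistics field in C11 can
look like this:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Hypothetical stand-in for an atomic accounting field. */
    typedef atomic_uint_fast64_t cycle_stat_t;

    /* Writer side: only the owning context updates the counter, so a
     * relaxed load followed by a relaxed store (no read-modify-write)
     * is enough. */
    static inline void cycle_stat_add(cycle_stat_t *stat, uint64_t delta)
    {
        uint64_t v = atomic_load_explicit(stat, memory_order_relaxed);
        atomic_store_explicit(stat, v + delta, memory_order_relaxed);
    }

    /* Reader side: a statistics printout running in another thread gets
     * some well-defined (possibly stale) value, whereas a concurrent
     * read of a plain uint64_t would be a data race. */
    static inline uint64_t cycle_stat_get(cycle_stat_t *stat)
    {
        return atomic_load_explicit(stat, memory_order_relaxed);
    }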

Additionally, it is now possible to switch directly to the new thread's
context instead of going through a separate scheduler stack. A separate
context is only needed and used when no runnable thread is immediately
available, which means we optimize switching in the limiting case where
many threads are waiting for execution. Switching is also avoided
altogether when there is only one runnable thread and it is being
preempted. Originally, the scheduler would switch to a separate stack,
requeue the thread that was running, retrieve that same thread from the
queue, and switch to it again; all of that work is now avoided.
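
The two fast paths can be sketched as follows; every helper name here
is hypothetical, and this is not the actual HelenOS scheduler code:

    /* Sketch of the preemption fast paths. Assumes it runs in the
     * context of `current` with interrupts disabled. */
    void preempt_sketch(thread_t *current)
    {
        /* Hypothetical helper: take the next runnable thread, if any. */
        thread_t *next = runqueue_take();

        if (next == NULL) {
            /* `current` is the only runnable thread: requeueing it and
             * then dequeuing and resuming it again would be pure
             * overhead, so no switch happens at all. */
            return;
        }

        /* Another thread is immediately available: put `current` back
         * on a runqueue and jump straight into `next`'s saved context,
         * without a detour through a separate scheduler stack. */
        runqueue_put(current);   /* hypothetical helper */
        context_swap(&current->saved_context, &next->saved_context);
    }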

File:
1 edited

Legend:

  ' '  unmodified line
  '+'  added line
  '-'  removed line
  • kernel/generic/include/proc/thread.h

--- kernel/generic/include/proc/thread.h (r ed7e057)
+++ kernel/generic/include/proc/thread.h (r a5b5f17)
@@ -95,11 +95,32 @@
         waitq_t join_wq;
 
-        /** Lock protecting thread structure.
+        /** Thread accounting. */
+        atomic_time_stat_t ucycles;
+        atomic_time_stat_t kcycles;
+
+        /** Architecture-specific data. */
+        thread_arch_t arch;
+
+#ifdef CONFIG_UDEBUG
+        /**
+         * If true, the scheduler will print a stack trace
+         * to the kernel console upon scheduling this thread.
+         */
+        atomic_int_fast8_t btrace;
+
+        /** Debugging stuff */
+        udebug_thread_t udebug;
+#endif /* CONFIG_UDEBUG */
+
+        /*
+         * Immutable fields.
          *
-         * Protects the whole thread structure except fields listed above.
-         */
-        IRQ_SPINLOCK_DECLARE(lock);
-
-        char name[THREAD_NAME_BUFLEN];
+         * These fields are only modified during initialization, and are not
+         * changed at any time between initialization and destruction.
+         * Can be accessed without synchronization in most places.
+         */
+
+        /** Thread ID. */
+        thread_id_t tid;
 
         /** Function implementing the thread. */
@@ -108,4 +129,32 @@
         void *thread_arg;
 
+        char name[THREAD_NAME_BUFLEN];
+
+        /** Thread is executed in user space. */
+        bool uspace;
+
+        /** Thread doesn't affect accumulated accounting. */
+        bool uncounted;
+
+        /** Containing task. */
+        task_t *task;
+
+        /** Thread's kernel stack. */
+        uint8_t *kstack;
+
+        /*
+         * Local fields.
+         *
+         * These fields can be safely accessed from code that _controls execution_
+         * of this thread. Code controls execution of a thread if either:
+         *  - it runs in the context of said thread AND interrupts are disabled
+         *    (interrupts can and will access these fields)
+         *  - the thread is not running, and the code accessing it can legally
+         *    add/remove the thread to/from a runqueue, i.e., either:
+         *    - it is allowed to enqueue thread in a new runqueue
+         *    - it holds the lock to the runqueue containing the thread
+         *
+         */
+
         /**
          * From here, the stored context is restored
@@ -114,4 +163,6 @@
         context_t saved_context;
 
+        // TODO: we only need one of the two bools below
+
         /**
          * True if this thread is executing copy_from_uspace().
@@ -126,4 +177,9 @@
         bool in_copy_to_uspace;
 
+        /*
+         * FPU context is a special case. If lazy FPU switching is disabled,
+         * it acts as a regular local field. However, if lazy switching is enabled,
+         * the context is synchronized via CPU->fpu_lock
+         */
 #ifdef CONFIG_FPU
         fpu_context_t fpu_context;
@@ -134,45 +190,22 @@
         unsigned int nomigrate;
 
-        /** Thread state. */
-        state_t state;
-
-        /** Thread CPU. */
-        cpu_t *cpu;
-        /** Containing task. */
-        task_t *task;
         /** Thread was migrated to another CPU and has not run yet. */
         bool stolen;
-        /** Thread is executed in user space. */
-        bool uspace;
-
-        /** Thread accounting. */
-        uint64_t ucycles;
-        uint64_t kcycles;
+
+        /**
+         * Thread state (state_t).
+         * This is atomic because we read it via some commands for debug output,
+         * otherwise it could just be a regular local.
+         */
+        atomic_int_fast32_t state;
+
+        /** Thread CPU. */
+        _Atomic(cpu_t *) cpu;
+
+        /** Thread's priority. Implemented as index to CPU->rq */
+        atomic_int_fast32_t priority;
+
         /** Last sampled cycle. */
         uint64_t last_cycle;
-        /** Thread doesn't affect accumulated accounting. */
-        bool uncounted;
-
-        /** Thread's priority. Implemented as index to CPU->rq */
-        int priority;
-        /** Thread ID. */
-        thread_id_t tid;
-
-        /** Architecture-specific data. */
-        thread_arch_t arch;
-
-        /** Thread's kernel stack. */
-        uint8_t *kstack;
-
-#ifdef CONFIG_UDEBUG
-        /**
-         * If true, the scheduler will print a stack trace
-         * to the kernel console upon scheduling this thread.
-         */
-        bool btrace;
-
-        /** Debugging stuff */
-        udebug_thread_t udebug;
-#endif /* CONFIG_UDEBUG */
 } thread_t;
 
@@ -186,5 +219,5 @@
 extern void thread_attach(thread_t *, task_t *);
 extern void thread_start(thread_t *);
-extern void thread_ready(thread_t *);
+extern void thread_requeue_sleeping(thread_t *);
 extern void thread_exit(void) __attribute__((noreturn));
 extern void thread_interrupt(thread_t *);
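
The "Local fields" comment above replaces the old spinlock with an
ownership rule. As an illustration of its first clause (this fragment
is illustrative, not taken from the source), code running in the
context of the thread itself may touch these fields once interrupts
are disabled:

    /* Illustrative only: THREAD denotes the currently running thread.
     * Because this code runs in THREAD's own context with interrupts
     * disabled, it "controls execution" of the thread and may access
     * its local fields without taking any lock. */
    ipl_t ipl = interrupts_disable();
    THREAD->nomigrate++;    /* e.g., temporarily pin the thread to its CPU */
    interrupts_restore(ipl);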