Changeset a5b5f17 in mainline for kernel/generic/src/sysinfo/stats.c


Ignore:
Timestamp:
2024-01-21T16:36:15Z (4 months ago)
Author:
Jiří Zárevúcky <zarevucky.jiri@…>
Branches:
master
Children:
1a1e124
Parents:
ed7e057 (diff), d23712e (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge scheduler refactoring to remove the need for thread structure lock

All necessary synchronization is already a product of other operations
that enforce ordering (that is, runqueue manipulation and
thread_sleep()/thread_wakeup()). Some fields formally become atomic, which is only
needed because they are read from other threads to print out statistics.
These atomic operations are limited to relaxed individual reads/writes
to native-sized fields, which should at least in theory be compiled
identically to regular volatile variable accesses, the only difference
being that concurrent accesses from different threads are not undefined
behavior by definition.

Additionally, it is now made possible to switch directly to new thread
context instead of going through a separate scheduler stack. A separate
context is only needed and used when no runnable thread is immediately
available, which means we optimize switching in the limiting case where
many threads are waiting for execution. Switching is also avoided
altogether when there's only one runnable thread and it is being
preempted. Originally, the scheduler would switch to a separate stack,
requeue the thread that was running, retrieve that same thread from the
queue, and switch to it again; all that work is now avoided.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • kernel/generic/src/sysinfo/stats.c

    red7e057 ra5b5f17  
    299299{
    300300        assert(interrupts_disabled());
    301         assert(irq_spinlock_locked(&thread->lock));
    302301
    303302        stats_thread->thread_id = thread->tid;
    304303        stats_thread->task_id = thread->task->taskid;
    305         stats_thread->state = thread->state;
    306         stats_thread->priority = thread->priority;
    307         stats_thread->ucycles = thread->ucycles;
    308         stats_thread->kcycles = thread->kcycles;
    309 
    310         if (thread->cpu != NULL) {
     304        stats_thread->state = atomic_get_unordered(&thread->state);
     305        stats_thread->priority = atomic_get_unordered(&thread->priority);
     306        stats_thread->ucycles = atomic_time_read(&thread->ucycles);
     307        stats_thread->kcycles = atomic_time_read(&thread->kcycles);
     308
     309        cpu_t *cpu = atomic_get_unordered(&thread->cpu);
     310
     311        if (cpu != NULL) {
    311312                stats_thread->on_cpu = true;
    312                 stats_thread->cpu = thread->cpu->id;
     313                stats_thread->cpu = cpu->id;
    313314        } else
    314315                stats_thread->on_cpu = false;
     
    361362        thread_t *thread = thread_first();
    362363        while (thread != NULL) {
    363                 /* Interrupts are already disabled */
    364                 irq_spinlock_lock(&thread->lock, false);
    365 
    366364                /* Record the statistics and increment the index */
    367365                produce_stats_thread(thread, &stats_threads[i]);
    368366                i++;
    369 
    370                 irq_spinlock_unlock(&thread->lock, false);
    371367
    372368                thread = thread_next(thread);
     
    624620                ret.data.size = sizeof(stats_thread_t);
    625621
    626                 /*
    627                  * Replaced hand-over-hand locking with regular nested sections
    628                  * to avoid weak reference leak issues.
    629                  */
    630                 irq_spinlock_lock(&thread->lock, false);
    631622                produce_stats_thread(thread, stats_thread);
    632                 irq_spinlock_unlock(&thread->lock, false);
    633623
    634624                irq_spinlock_unlock(&threads_lock, true);
Note: See TracChangeset for help on using the changeset viewer.