Changeset efed95a3 in mainline


Timestamp: 2024-01-20T17:09:00Z (3 months ago)
Author: Jiří Zárevúcky <zarevucky.jiri@…>
Branches: master
Children: 3d84734
Parents: 286da52
git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2024-01-20 16:12:46)
git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2024-01-20 17:09:00)
Message: Make thread→cpu weakly atomic, to avoid need for thread lock

Location: kernel/generic
Files: 5 edited

  • kernel/generic/include/atomic.h

    r286da52 → refed95a3

     #include <typedefs.h>
     #include <stdatomic.h>
    +
    +/*
    + * Shorthand for relaxed atomic read/write, something that's needed to formally
    + * avoid undefined behavior in cases where we need to read a variable in
    + * different threads and we don't particularly care about ordering
    + * (e.g. statistic printouts). This is most likely translated into the same
    + * assembly instructions as regular read/writes.
    + */
    +#define atomic_set_unordered(var, val) atomic_store_explicit((var), (val), memory_order_relaxed)
    +#define atomic_get_unordered(var) atomic_load_explicit((var), memory_order_relaxed)

     #define atomic_predec(val) \
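
Both helpers are thin wrappers over C11 <stdatomic.h>: a store and a load with memory_order_relaxed. As a rough standalone illustration of how they are meant to be used on an _Atomic pointer field (the cpu_t/worker_t definitions below are invented for the example and are not HelenOS code; only the two macros are taken from the hunk above):

    #include <stdatomic.h>
    #include <stdio.h>

    #define atomic_set_unordered(var, val) atomic_store_explicit((var), (val), memory_order_relaxed)
    #define atomic_get_unordered(var) atomic_load_explicit((var), memory_order_relaxed)

    typedef struct {
            unsigned id;
    } cpu_t;

    typedef struct {
            /* May be read from other threads without holding any lock. */
            _Atomic(cpu_t *) cpu;
    } worker_t;

    int main(void)
    {
            cpu_t cpu0 = { .id = 0 };
            worker_t w;

            atomic_init(&w.cpu, NULL);            /* non-atomic initialization */
            atomic_set_unordered(&w.cpu, &cpu0);  /* relaxed store */

            cpu_t *seen = atomic_get_unordered(&w.cpu);  /* relaxed load */
            printf("cpu: %u\n", seen ? seen->id : 0u);
            return 0;
    }

As the new comment says, on typical hardware these compile to the same plain load and store instructions as a non-atomic access; the relaxed atomics only tell the compiler that concurrent access is intentional, so it may not tear, fuse, or invent the accesses.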
  • kernel/generic/include/proc/thread.h

    r286da52 → refed95a3

             /** Thread CPU. */
    -        cpu_t *cpu;
    +        _Atomic(cpu_t *) cpu;
             /** Containing task. */
             task_t *task;
  • kernel/generic/src/proc/scheduler.c

    r286da52 → refed95a3

             irq_spinlock_lock(&THREAD->lock, false);
    -        assert(THREAD->cpu == CPU);
    +        assert(atomic_get_unordered(&THREAD->cpu) == CPU);

             THREAD->state = Running;
    …
             assert(thread->state == Running);
    -        assert(thread->cpu == CPU);
    +        assert(atomic_get_unordered(&thread->cpu) == CPU);

             int i = (thread->priority < RQ_COUNT - 1) ?
    …
             /* Prefer the CPU on which the thread ran last */
    -        if (!thread->cpu)
    -                thread->cpu = CPU;
    -
    -        cpu_t *cpu = thread->cpu;
    +        cpu_t *cpu = atomic_get_unordered(&thread->cpu);
    +
    +        if (!cpu) {
    +                cpu = CPU;
    +                atomic_set_unordered(&thread->cpu, CPU);
    +        }

             irq_spinlock_unlock(&thread->lock, false);
    …
                     thread->stolen = true;
    -                thread->cpu = CPU;
    +                atomic_set_unordered(&thread->cpu, CPU);

                     irq_spinlock_unlock(&thread->lock, false);
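
In the scheduler hunks the thread lock is still held around every write, so writers of thread->cpu remain serialized exactly as before; the relaxed accessors only make it formally legal for code on other CPUs to read the field without the lock. A minimal sketch of the resulting write-side pattern, assuming made-up thread_t/cpu_t types and a pthread mutex standing in for the kernel's irq spinlock (this is not the actual scheduler code):

    #include <stdatomic.h>
    #include <pthread.h>

    #define atomic_set_unordered(var, val) atomic_store_explicit((var), (val), memory_order_relaxed)
    #define atomic_get_unordered(var) atomic_load_explicit((var), memory_order_relaxed)

    typedef struct cpu cpu_t;

    typedef struct {
            pthread_mutex_t lock;     /* stand-in for thread->lock */
            _Atomic(cpu_t *) cpu;     /* last CPU the thread ran on, or NULL */
    } thread_t;

    /* Pick the CPU whose run queue the thread should go to, preferring the
     * CPU it last ran on, and record the choice in thread->cpu. */
    cpu_t *select_cpu(thread_t *thread, cpu_t *current_cpu)
    {
            pthread_mutex_lock(&thread->lock);

            /* Read the field once into a local and work with the snapshot. */
            cpu_t *cpu = atomic_get_unordered(&thread->cpu);

            if (!cpu) {
                    /* Never ran anywhere yet: adopt the current CPU. */
                    cpu = current_cpu;
                    atomic_set_unordered(&thread->cpu, current_cpu);
            }

            pthread_mutex_unlock(&thread->lock);
            return cpu;
    }

Because every store still happens under the lock, two writers can never race each other; what changes is only that lock-free readers (such as the print and stats paths below) no longer constitute a data race in the C sense.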
  • kernel/generic/src/proc/thread.c

    r286da52 → refed95a3

     {
             irq_spinlock_lock(&thread->lock, true);
    -        thread->cpu = cpu;
    +        atomic_set_unordered(&thread->cpu, cpu);
             thread->nomigrate++;
             irq_spinlock_unlock(&thread->lock, true);
    …
                 ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
             thread->priority = -1;          /* Start in rq[0] */
    -        thread->cpu = NULL;
    +        atomic_init(&thread->cpu, NULL);
             thread->stolen = false;
             thread->uspace =
    …
             /* Clear cpu->fpu_owner if set to this thread. */
     #ifdef CONFIG_FPU_LAZY
    -        if (thread->cpu) {
    +        cpu_t *cpu = atomic_get_unordered(&thread->cpu);
    +        if (cpu) {
                     /*
                      * We need to lock for this because the old CPU can concurrently try
    …
                      * it to finish. An atomic compare-and-swap wouldn't be enough.
                      */
    -                irq_spinlock_lock(&thread->cpu->fpu_lock, false);
    -
    -                thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner,
    -                    memory_order_relaxed);
    -
    -                if (owner == thread) {
    -                        atomic_store_explicit(&thread->cpu->fpu_owner, NULL,
    -                            memory_order_relaxed);
    -                }
    -
    -                irq_spinlock_unlock(&thread->cpu->fpu_lock, false);
    +                irq_spinlock_lock(&cpu->fpu_lock, false);
    +
    +                if (atomic_get_unordered(&cpu->fpu_owner) == thread)
    +                        atomic_set_unordered(&cpu->fpu_owner, NULL);
    +
    +                irq_spinlock_unlock(&cpu->fpu_lock, false);
             }
     #endif
    …
             if (additional) {
    -                if (thread->cpu)
    -                        printf("%-5u", thread->cpu->id);
    +                cpu_t *cpu = atomic_get_unordered(&thread->cpu);
    +                if (cpu)
    +                        printf("%-5u", cpu->id);
                     else
                             printf("none ");
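
The CONFIG_FPU_LAZY hunk is the one place where the new accessors are combined with a lock: the thread's last CPU is snapshotted with a single relaxed load, and the compare-and-clear of cpu->fpu_owner is then done under that CPU's fpu_lock, because (as the retained comment explains) a bare compare-and-swap would not be enough to wait out a concurrent lazy FPU switch. A sketch of that shape, with invented types and a pthread mutex standing in for the irq spinlock (not the actual kernel code):

    #include <stdatomic.h>
    #include <pthread.h>
    #include <stddef.h>

    #define atomic_set_unordered(var, val) atomic_store_explicit((var), (val), memory_order_relaxed)
    #define atomic_get_unordered(var) atomic_load_explicit((var), memory_order_relaxed)

    typedef struct thread thread_t;

    typedef struct {
            pthread_mutex_t fpu_lock;        /* stand-in for cpu->fpu_lock */
            _Atomic(thread_t *) fpu_owner;   /* thread whose FPU state is loaded */
    } cpu_t;

    struct thread {
            _Atomic(cpu_t *) cpu;            /* CPU the thread last ran on, or NULL */
    };

    /* Called while destroying a thread: make sure no CPU still names it
     * as the lazy FPU owner. */
    void fpu_disown(thread_t *thread)
    {
            /* One relaxed load; the rest of the function uses the snapshot. */
            cpu_t *cpu = atomic_get_unordered(&thread->cpu);
            if (cpu == NULL)
                    return;

            /* Taking the lock (rather than doing a lone CAS) is what makes us
             * wait for the old CPU if it is in the middle of an FPU switch. */
            pthread_mutex_lock(&cpu->fpu_lock);

            if (atomic_get_unordered(&cpu->fpu_owner) == thread)
                    atomic_set_unordered(&cpu->fpu_owner, NULL);

            pthread_mutex_unlock(&cpu->fpu_lock);
    }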
  • kernel/generic/src/sysinfo/stats.c

    r286da52 → refed95a3

             stats_thread->kcycles = thread->kcycles;

    -        if (thread->cpu != NULL) {
    +        cpu_t *cpu = atomic_get_unordered(&thread->cpu);
    +
    +        if (cpu != NULL) {
                     stats_thread->on_cpu = true;
    -                stats_thread->cpu = thread->cpu->id;
    +                stats_thread->cpu = cpu->id;
             } else
                     stats_thread->on_cpu = false;
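
The stats path (like the thread listing above) is the kind of "statistic printout" reader the new atomic.h comment has in mind. The detail worth noting is that thread->cpu is loaded once into a local before the NULL check, so the dereference a moment later works on the same snapshot as the check even if no lock is held. A compact sketch of that snapshot-then-use pattern (invented names, not the kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define atomic_get_unordered(var) atomic_load_explicit((var), memory_order_relaxed)

    typedef struct {
            unsigned id;
    } cpu_t;

    typedef struct {
            _Atomic(cpu_t *) cpu;
    } thread_t;

    typedef struct {
            bool on_cpu;
            unsigned cpu;
    } stats_t;

    /* Fill in CPU info for a stats record without taking any thread lock. */
    void fill_cpu_stats(stats_t *stats, thread_t *thread)
    {
            /* Single relaxed load; the check and the dereference see one value. */
            cpu_t *cpu = atomic_get_unordered(&thread->cpu);

            if (cpu != NULL) {
                    stats->on_cpu = true;
                    stats->cpu = cpu->id;
            } else {
                    stats->on_cpu = false;
            }
    }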