Changeset efed95a3 in mainline for kernel/generic/src/proc/thread.c


Ignore:
Timestamp:
2024-01-20T17:09:00Z (4 months ago)
Author:
Jiří Zárevúcky <zarevucky.jiri@…>
Branches:
master
Children:
3d84734
Parents:
286da52
git-author:
Jiří Zárevúcky <zarevucky.jiri@…> (2024-01-20 16:12:46)
git-committer:
Jiří Zárevúcky <zarevucky.jiri@…> (2024-01-20 17:09:00)
Message:

Make thread->cpu weakly atomic, to avoid need for thread lock

File:
1 edited

Legend:

Unmodified
Added
Removed
  • kernel/generic/src/proc/thread.c

    r286da52 refed95a3  
    198198{
    199199        irq_spinlock_lock(&thread->lock, true);
    200         thread->cpu = cpu;
     200        atomic_set_unordered(&thread->cpu, cpu);
    201201        thread->nomigrate++;
    202202        irq_spinlock_unlock(&thread->lock, true);
     
    263263            ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
    264264        thread->priority = -1;          /* Start in rq[0] */
    265         thread->cpu = NULL;
     265        atomic_init(&thread->cpu, NULL);
    266266        thread->stolen = false;
    267267        thread->uspace =
     
    343343        /* Clear cpu->fpu_owner if set to this thread. */
    344344#ifdef CONFIG_FPU_LAZY
    345         if (thread->cpu) {
     345        cpu_t *cpu = atomic_get_unordered(&thread->cpu);
     346        if (cpu) {
    346347                /*
    347348                 * We need to lock for this because the old CPU can concurrently try
     
    349350                 * it to finish. An atomic compare-and-swap wouldn't be enough.
    350351                 */
    351                 irq_spinlock_lock(&thread->cpu->fpu_lock, false);
    352 
    353                 thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner,
    354                     memory_order_relaxed);
    355 
    356                 if (owner == thread) {
    357                         atomic_store_explicit(&thread->cpu->fpu_owner, NULL,
    358                             memory_order_relaxed);
    359                 }
    360 
    361                 irq_spinlock_unlock(&thread->cpu->fpu_lock, false);
     352                irq_spinlock_lock(&cpu->fpu_lock, false);
     353
     354                if (atomic_get_unordered(&cpu->fpu_owner) == thread)
     355                        atomic_set_unordered(&cpu->fpu_owner, NULL);
     356
     357                irq_spinlock_unlock(&cpu->fpu_lock, false);
    362358        }
    363359#endif
     
    707703
    708704        if (additional) {
    709                 if (thread->cpu)
    710                         printf("%-5u", thread->cpu->id);
     705                cpu_t *cpu = atomic_get_unordered(&thread->cpu);
     706                if (cpu)
     707                        printf("%-5u", cpu->id);
    711708                else
    712709                        printf("none ");
Note: See TracChangeset for help on using the changeset viewer.