Changeset f3dbe27 in mainline
- Timestamp:
- 2023-04-18T17:33:02Z
- Branches:
- master, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 117ad5a2
- Parents:
- 06f81c4
- git-author:
- Jiří Zárevúcky <zarevucky.jiri@…> (2023-04-18 17:27:32)
- git-committer:
- Jiří Zárevúcky <zarevucky.jiri@…> (2023-04-18 17:33:02)
- Location:
- kernel
- Files:
- 5 edited
Legend:
- Unmodified (context lines, no prefix)
- Added (prefixed with +)
- Removed (prefixed with -)
kernel/arch/amd64/src/cpu/cpu.c
--- r06f81c4
+++ rf3dbe27
@@ -99,4 +99,3 @@
 	CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] -
 	    ((uint8_t *) CPU->arch.tss);
-	CPU->fpu_owner = NULL;
 }
kernel/arch/ia32/src/cpu/cpu.c
--- r06f81c4
+++ rf3dbe27
@@ -87,5 +87,3 @@
 	CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - ((uint8_t *) CPU->arch.tss);
 
-	CPU->fpu_owner = NULL;
-
 	cpuid(INTEL_CPUID_STANDARD, &info);
kernel/generic/include/cpu.h
--- r06f81c4
+++ rf3dbe27
@@ -98,7 +98,8 @@
 
 #ifdef CONFIG_FPU_LAZY
+	/* For synchronization between FPU trap and thread destructor. */
 	IRQ_SPINLOCK_DECLARE(fpu_lock);
-	struct thread *fpu_owner;
 #endif
+	_Atomic(struct thread *) fpu_owner;
 
 /**
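This hunk pairs the new lock-free field with the existing spinlock, and moves fpu_owner outside #ifdef CONFIG_FPU_LAZY so the field now exists unconditionally while the lock remains lazy-FPU-only. A minimal user-space sketch of the pattern, using C11 <stdatomic.h> with a pthread mutex standing in for the kernel's IRQ spinlock (the names here are hypothetical, not HelenOS API):

#include <stdatomic.h>
#include <pthread.h>

struct thread;

typedef struct cpu {
	/* Stand-in for IRQ_SPINLOCK_DECLARE(fpu_lock); taken only where a
	 * writer must wait out a concurrent FPU context save. */
	pthread_mutex_t fpu_lock;

	/* Readable without the lock; relaxed ordering suffices because
	 * cross-CPU visibility is provided by other locks (see the
	 * scheduler.c and thread.c hunks below). */
	_Atomic(struct thread *) fpu_owner;
} cpu_t;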
kernel/generic/src/proc/scheduler.c
--- r06f81c4
+++ rf3dbe27
@@ -84,12 +84,15 @@
 
 #ifdef CONFIG_FPU_LAZY
-	irq_spinlock_lock(&CPU->fpu_lock, true);
-
-	if (THREAD == CPU->fpu_owner)
+	/*
+	 * The only concurrent modification possible for fpu_owner here is
+	 * another thread changing it from itself to NULL in its destructor.
+	 */
+	thread_t *owner = atomic_load_explicit(&CPU->fpu_owner,
+	    memory_order_relaxed);
+
+	if (THREAD == owner)
 		fpu_enable();
 	else
 		fpu_disable();
-
-	irq_spinlock_unlock(&CPU->fpu_lock, true);
 #elif defined CONFIG_FPU
 	fpu_enable();
@@ -133,11 +136,16 @@
 {
 	fpu_enable();
+
+	/* We need this lock to ensure synchronization with thread destructor. */
 	irq_spinlock_lock(&CPU->fpu_lock, false);
 
 	/* Save old context */
-	if (CPU->fpu_owner != NULL) {
-		fpu_context_save(&CPU->fpu_owner->fpu_context);
-		CPU->fpu_owner = NULL;
-	}
+	thread_t *owner = atomic_load_explicit(&CPU->fpu_owner, memory_order_relaxed);
+	if (owner != NULL) {
+		fpu_context_save(&owner->fpu_context);
+		atomic_store_explicit(&CPU->fpu_owner, NULL, memory_order_relaxed);
+	}
+
+	irq_spinlock_unlock(&CPU->fpu_lock, false);
 
 	if (THREAD->fpu_context_exists) {
@@ -148,7 +156,5 @@
 	}
 
-	CPU->fpu_owner = THREAD;
-
-	irq_spinlock_unlock(&CPU->fpu_lock, false);
+	atomic_store_explicit(&CPU->fpu_owner, THREAD, memory_order_relaxed);
 }
 #endif /* CONFIG_FPU_LAZY */
@@ -499,28 +505,4 @@
 #ifdef CONFIG_SMP
 
-static inline void fpu_owner_lock(cpu_t *cpu)
-{
-#ifdef CONFIG_FPU_LAZY
-	irq_spinlock_lock(&cpu->fpu_lock, false);
-#endif
-}
-
-static inline void fpu_owner_unlock(cpu_t *cpu)
-{
-#ifdef CONFIG_FPU_LAZY
-	irq_spinlock_unlock(&cpu->fpu_lock, false);
-#endif
-}
-
-static inline thread_t *fpu_owner(cpu_t *cpu)
-{
-#ifdef CONFIG_FPU_LAZY
-	assert(irq_spinlock_locked(&cpu->fpu_lock));
-	return cpu->fpu_owner;
-#else
-	return NULL;
-#endif
-}
-
 static thread_t *steal_thread_from(cpu_t *old_cpu, int i)
 {
@@ -530,6 +512,12 @@
 	ipl_t ipl = interrupts_disable();
 
-	fpu_owner_lock(old_cpu);
 	irq_spinlock_lock(&old_rq->lock, false);
+
+	/*
+	 * If fpu_owner is any thread in the list, its store is seen here thanks to
+	 * the runqueue lock.
+	 */
+	thread_t *fpu_owner = atomic_load_explicit(&old_cpu->fpu_owner,
+	    memory_order_relaxed);
 
 	/* Search rq from the back */
@@ -545,10 +533,8 @@
 	 */
 	if (thread->stolen || thread->nomigrate ||
-	    thread == fpu_owner(old_cpu)) {
+	    thread == fpu_owner) {
 		irq_spinlock_unlock(&thread->lock, false);
 		continue;
 	}
-
-	fpu_owner_unlock(old_cpu);
 
 	thread->stolen = true;
@@ -587,5 +573,4 @@
 
 	irq_spinlock_unlock(&old_rq->lock, false);
-	fpu_owner_unlock(old_cpu);
 	interrupts_restore(ipl);
 	return NULL;
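Two reader paths coexist after this change: the pre-run check loads fpu_owner with no lock at all, while the FPU trap path holds fpu_lock only around the save of the previous owner's context. A condensed user-space sketch of that protocol, valid under the invariant stated in the comment above (the only racing writer changes fpu_owner from itself to NULL); cpu_t and the function names are hypothetical stand-ins, not the kernel's API:

#include <stdatomic.h>
#include <pthread.h>
#include <stddef.h>

struct thread { int fpu_context; };

typedef struct cpu {
	pthread_mutex_t fpu_lock;
	_Atomic(struct thread *) fpu_owner;
} cpu_t;

/* Pre-run check: lock-free. If fpu_owner == current, the value cannot
 * change under us (current is running here, so its destructor cannot be);
 * any other value merely means "disable the FPU and take the trap". */
static int fpu_should_enable(cpu_t *cpu, struct thread *current)
{
	return current == atomic_load_explicit(&cpu->fpu_owner,
	    memory_order_relaxed);
}

/* Trap path: the lock is held while the previous owner's context is
 * saved, so a concurrent thread destructor cannot free that memory
 * mid-save. */
static void fpu_trap_sketch(cpu_t *cpu, struct thread *current)
{
	pthread_mutex_lock(&cpu->fpu_lock);
	struct thread *owner = atomic_load_explicit(&cpu->fpu_owner,
	    memory_order_relaxed);
	if (owner != NULL) {
		owner->fpu_context = 0;  /* stand-in for fpu_context_save() */
		atomic_store_explicit(&cpu->fpu_owner, NULL,
		    memory_order_relaxed);
	}
	pthread_mutex_unlock(&cpu->fpu_lock);

	/* Installing the new owner needs no lock: only the local CPU ever
	 * sets its own fpu_owner to a non-NULL value. */
	atomic_store_explicit(&cpu->fpu_owner, current, memory_order_relaxed);
}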
kernel/generic/src/proc/thread.c
--- r06f81c4
+++ rf3dbe27
@@ -415,7 +415,19 @@
 #ifdef CONFIG_FPU_LAZY
 	if (thread->cpu) {
+		/*
+		 * We need to lock for this because the old CPU can concurrently try
+		 * to dump this thread's FPU state, in which case we need to wait for
+		 * it to finish. An atomic compare-and-swap wouldn't be enough.
+		 */
 		irq_spinlock_lock(&thread->cpu->fpu_lock, false);
-		if (thread->cpu->fpu_owner == thread)
-			thread->cpu->fpu_owner = NULL;
+
+		thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner,
+		    memory_order_relaxed);
+
+		if (owner == thread) {
+			atomic_store_explicit(&thread->cpu->fpu_owner, NULL,
+			    memory_order_relaxed);
+		}
+
		irq_spinlock_unlock(&thread->cpu->fpu_lock, false);
 	}
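The destructor comment explains why a bare compare-and-swap could not replace the lock here: the old CPU may be inside fpu_context_save() for this very thread, and the dying thread must wait for that read to finish before its memory is reclaimed. A hypothetical user-space sketch of the same wait-via-lock idiom (pthread mutex standing in for the IRQ spinlock, names invented for illustration):

#include <stdatomic.h>
#include <pthread.h>
#include <stddef.h>

struct thread;

typedef struct cpu {
	pthread_mutex_t fpu_lock;
	_Atomic(struct thread *) fpu_owner;
} cpu_t;

/* Called while destroying `thread`. Acquiring fpu_lock does double duty:
 * it orders the NULL store against the owner check on the old CPU, and it
 * blocks until any in-progress context save on that CPU has finished
 * reading this thread's memory. A lone CAS would clear ownership but
 * could not wait for such a save. */
static void fpu_disown_on_destroy(cpu_t *cpu, struct thread *thread)
{
	pthread_mutex_lock(&cpu->fpu_lock);

	if (atomic_load_explicit(&cpu->fpu_owner,
	    memory_order_relaxed) == thread) {
		atomic_store_explicit(&cpu->fpu_owner, NULL,
		    memory_order_relaxed);
	}

	pthread_mutex_unlock(&cpu->fpu_lock);
	/* Past this point no CPU will touch the thread's FPU context again. */
}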