Changeset f3dbe27 in mainline for kernel/generic/src/proc/scheduler.c
- Timestamp: 2023-04-18T17:33:02Z (2 years ago)
- Branches: master, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 117ad5a2
- Parents: 06f81c4
- git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2023-04-18 17:27:32)
- git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2023-04-18 17:33:02)
- File: 1 edited
Legend:
- Unmodified lines appear as plain context
- Added lines are prefixed with "+"
- Removed lines are prefixed with "-"
kernel/generic/src/proc/scheduler.c
--- kernel/generic/src/proc/scheduler.c (r06f81c4)
+++ kernel/generic/src/proc/scheduler.c (rf3dbe27)
@@ -84,12 +84,15 @@
 
 #ifdef CONFIG_FPU_LAZY
-    irq_spinlock_lock(&CPU->fpu_lock, true);
-
-    if (THREAD == CPU->fpu_owner)
+    /*
+     * The only concurrent modification possible for fpu_owner here is
+     * another thread changing it from itself to NULL in its destructor.
+     */
+    thread_t *owner = atomic_load_explicit(&CPU->fpu_owner,
+        memory_order_relaxed);
+
+    if (THREAD == owner)
         fpu_enable();
     else
         fpu_disable();
-
-    irq_spinlock_unlock(&CPU->fpu_lock, true);
 #elif defined CONFIG_FPU
     fpu_enable();
@@ -133,11 +136,16 @@
 {
     fpu_enable();
+
+    /* We need this lock to ensure synchronization with thread destructor. */
     irq_spinlock_lock(&CPU->fpu_lock, false);
 
     /* Save old context */
-    if (CPU->fpu_owner != NULL) {
-        fpu_context_save(&CPU->fpu_owner->fpu_context);
-        CPU->fpu_owner = NULL;
-    }
+    thread_t *owner = atomic_load_explicit(&CPU->fpu_owner, memory_order_relaxed);
+    if (owner != NULL) {
+        fpu_context_save(&owner->fpu_context);
+        atomic_store_explicit(&CPU->fpu_owner, NULL, memory_order_relaxed);
+    }
+
+    irq_spinlock_unlock(&CPU->fpu_lock, false);
 
     if (THREAD->fpu_context_exists) {
@@ -148,7 +156,5 @@
     }
 
-    CPU->fpu_owner = THREAD;
-
-    irq_spinlock_unlock(&CPU->fpu_lock, false);
+    atomic_store_explicit(&CPU->fpu_owner, THREAD, memory_order_relaxed);
 }
 #endif /* CONFIG_FPU_LAZY */
@@ -499,28 +505,4 @@
 #ifdef CONFIG_SMP
 
-static inline void fpu_owner_lock(cpu_t *cpu)
-{
-#ifdef CONFIG_FPU_LAZY
-    irq_spinlock_lock(&cpu->fpu_lock, false);
-#endif
-}
-
-static inline void fpu_owner_unlock(cpu_t *cpu)
-{
-#ifdef CONFIG_FPU_LAZY
-    irq_spinlock_unlock(&cpu->fpu_lock, false);
-#endif
-}
-
-static inline thread_t *fpu_owner(cpu_t *cpu)
-{
-#ifdef CONFIG_FPU_LAZY
-    assert(irq_spinlock_locked(&cpu->fpu_lock));
-    return cpu->fpu_owner;
-#else
-    return NULL;
-#endif
-}
-
 static thread_t *steal_thread_from(cpu_t *old_cpu, int i)
 {
@@ -530,6 +512,12 @@
     ipl_t ipl = interrupts_disable();
 
-    fpu_owner_lock(old_cpu);
     irq_spinlock_lock(&old_rq->lock, false);
+
+    /*
+     * If fpu_owner is any thread in the list, its store is seen here thanks to
+     * the runqueue lock.
+     */
+    thread_t *fpu_owner = atomic_load_explicit(&old_cpu->fpu_owner,
+        memory_order_relaxed);
 
     /* Search rq from the back */
@@ -545,10 +533,8 @@
          */
         if (thread->stolen || thread->nomigrate ||
-            thread == fpu_owner(old_cpu)) {
+            thread == fpu_owner) {
             irq_spinlock_unlock(&thread->lock, false);
             continue;
         }
-
-        fpu_owner_unlock(old_cpu);
 
         thread->stolen = true;
@@ -587,5 +573,4 @@
 
     irq_spinlock_unlock(&old_rq->lock, false);
-    fpu_owner_unlock(old_cpu);
     interrupts_restore(ipl);
     return NULL;
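What the diff amounts to: CPU->fpu_owner is now read and written with relaxed atomic operations instead of always being accessed under fpu_lock, and the lock is kept only where the scheduler's save-and-clear of the old owner must be ordered against a dying thread clearing itself to NULL in its destructor; the work-stealing path drops its fpu_owner locking entirely because the run-queue lock already makes the relevant store visible. The stand-alone C11 sketch below restates that protocol in user space so it can be compiled and read on its own. It is an illustration under stated assumptions, not HelenOS code: the pthread mutex stands in for the kernel's irq_spinlock fpu_lock, the function names fpu_switch_sketch and thread_destructor_sketch are invented for the example, and the thread destructor itself is not part of this changeset.

/*
 * Stand-alone user-space sketch (NOT HelenOS code) of the ownership protocol
 * this changeset adopts: cpu->fpu_owner is an atomic pointer accessed with
 * relaxed loads/stores, and a lock is retained only where the scheduler's
 * save-and-clear must be ordered against a dying thread clearing itself out.
 * All names here are illustrative.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

typedef struct thread {
    int fpu_context;            /* placeholder for the real FPU context */
} thread_t;

typedef struct cpu {
    _Atomic(thread_t *) fpu_owner;
    pthread_mutex_t fpu_lock;   /* stand-in for irq_spinlock_t fpu_lock */
} cpu_t;

/* Scheduler side: hand lazy FPU ownership to the incoming thread. */
void fpu_switch_sketch(cpu_t *cpu, thread_t *incoming)
{
    /* The lock orders this save-and-clear against the destructor below. */
    pthread_mutex_lock(&cpu->fpu_lock);

    thread_t *owner = atomic_load_explicit(&cpu->fpu_owner,
        memory_order_relaxed);
    if (owner != NULL) {
        /* fpu_context_save(&owner->fpu_context) would go here. */
        atomic_store_explicit(&cpu->fpu_owner, NULL, memory_order_relaxed);
    }

    pthread_mutex_unlock(&cpu->fpu_lock);

    /* ... restore or initialize incoming->fpu_context here ... */

    /* Only this CPU's scheduler ever publishes a non-NULL owner. */
    atomic_store_explicit(&cpu->fpu_owner, incoming, memory_order_relaxed);
}

/* Destructor side: a thread that still owns the FPU clears itself to NULL. */
void thread_destructor_sketch(cpu_t *cpu, thread_t *self)
{
    pthread_mutex_lock(&cpu->fpu_lock);
    if (atomic_load_explicit(&cpu->fpu_owner, memory_order_relaxed) == self)
        atomic_store_explicit(&cpu->fpu_owner, NULL, memory_order_relaxed);
    pthread_mutex_unlock(&cpu->fpu_lock);
}

int main(void)
{
    cpu_t cpu = { .fpu_owner = NULL };
    pthread_mutex_init(&cpu.fpu_lock, NULL);

    thread_t a = { 0 }, b = { 0 };
    fpu_switch_sketch(&cpu, &a);         /* a becomes the lazy FPU owner */
    thread_destructor_sketch(&cpu, &a);  /* a exits and clears itself    */
    fpu_switch_sketch(&cpu, &b);         /* b takes over ownership       */

    pthread_mutex_destroy(&cpu.fpu_lock);
    return 0;
}

The design point mirrored here is the one the new in-tree comments make: the only writer that can race with the scheduler is a thread setting its own entry to NULL in its destructor, which is why the relaxed loads in the interrupt path and in steal_thread_from() need no fpu_lock of their own.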