Changes in kernel/generic/src/proc/scheduler.c [43ac0cc:26aafe8] in mainline
Files:
- 1 edited
Legend:
- Unmodified (context, prefixed with a space)
- Added (prefixed with +)
- Removed (prefixed with -)
kernel/generic/src/proc/scheduler.c
--- kernel/generic/src/proc/scheduler.c (r43ac0cc)
+++ kernel/generic/src/proc/scheduler.c (r26aafe8)
@@ -586,4 +586,5 @@
 	 * Searching least priority queues on all CPU's first and most priority
 	 * queues on all CPU's last.
+	 *
 	 */
 	size_t acpu;
@@ -619,24 +620,23 @@
 
 	while (link != &(cpu->rq[rq].rq_head)) {
-		thread = (thread_t *) list_get_instance(link,
-		    thread_t, rq_link);
+		thread = (thread_t *) list_get_instance(link, thread_t, rq_link);
 
 		/*
-		 * Do not steal CPU-wired threads, threads
-		 * already stolen, threads for which migration
-		 * was temporarily disabled or threads whose
-		 * FPU context is still in the CPU.
+		 * We don't want to steal CPU-wired threads
+		 * neither threads already stolen. The latter
+		 * prevents threads from migrating between CPU's
+		 * without ever being run. We don't want to
+		 * steal threads whose FPU context is still in
+		 * CPU.
+		 *
 		 */
 		irq_spinlock_lock(&thread->lock, false);
 
-		if (!(thread->flags & THREAD_FLAG_WIRED) &&
-		    !(thread->flags & THREAD_FLAG_STOLEN) &&
-		    !thread->nomigrate &&
-		    !thread->fpu_context_engaged) {
+		if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)))
+		    && (!(thread->fpu_context_engaged))) {
 			/*
 			 * Remove thread from ready queue.
 			 */
-			irq_spinlock_unlock(&thread->lock,
-			    false);
+			irq_spinlock_unlock(&thread->lock, false);
 
 			atomic_dec(&cpu->nrdy);
@@ -660,6 +660,5 @@
 			 */
 
-			irq_spinlock_pass(&(cpu->rq[rq].lock),
-			    &thread->lock);
+			irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock);
 
 #ifdef KCPULB_VERBOSE
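The substantive part of this hunk is the thread-stealing eligibility test in the load balancer: the r43ac0cc side checks four separate conditions, including thread->nomigrate (migration temporarily disabled), while the r26aafe8 side folds THREAD_FLAG_WIRED and THREAD_FLAG_STOLEN into a single mask test and carries no nomigrate check. The following is a minimal standalone sketch contrasting the two predicates, not the kernel code itself; the stub type and the flag bit values are illustrative assumptions, not HelenOS's actual declarations.

#include <stdbool.h>

/* Illustrative bit values; the kernel's real layout may differ. */
#define THREAD_FLAG_WIRED   (1 << 0)
#define THREAD_FLAG_STOLEN  (1 << 1)

/* Hypothetical stub standing in for the relevant thread_t fields. */
typedef struct {
	unsigned int flags;
	bool nomigrate;           /* migration temporarily disabled */
	bool fpu_context_engaged; /* FPU state still lives on the CPU */
} thread_stub_t;

/* Eligibility test as on the r43ac0cc side: four separate conditions,
 * including the nomigrate check. */
static bool can_steal_r43ac0cc(const thread_stub_t *t)
{
	return !(t->flags & THREAD_FLAG_WIRED) &&
	    !(t->flags & THREAD_FLAG_STOLEN) &&
	    !t->nomigrate &&
	    !t->fpu_context_engaged;
}

/* Eligibility test as on the r26aafe8 side: WIRED and STOLEN combined
 * into one mask test, and no nomigrate check in this revision. */
static bool can_steal_r26aafe8(const thread_stub_t *t)
{
	return !(t->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)) &&
	    !t->fpu_context_engaged;
}

As the in-tree comment explains, refusing to steal an already-stolen thread is what keeps a thread from being bounced between CPUs by competing balancers without ever being run: it must execute at least once on its new CPU before it becomes a candidate for stealing again.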