Changes in kernel/generic/src/proc/scheduler.c [26aafe8:43ac0cc] in mainline
Files:
- kernel/generic/src/proc/scheduler.c (1 edited)
Legend:
- Unmodified: no prefix
- Added: lines prefixed with +
- Removed: lines prefixed with -
kernel/generic/src/proc/scheduler.c
--- r26aafe8
+++ r43ac0cc
@@ -586,5 +586,4 @@
 	 * Searching least priority queues on all CPU's first and most priority
 	 * queues on all CPU's last.
-	 *
 	 */
 	size_t acpu;
@@ -620,23 +619,24 @@
 
 			while (link != &(cpu->rq[rq].rq_head)) {
-				thread = (thread_t *) list_get_instance(link, thread_t, rq_link);
+				thread = (thread_t *) list_get_instance(link,
+				    thread_t, rq_link);
 
 				/*
-				 * We don't want to steal CPU-wired threads
-				 * neither threads already stolen. The latter
-				 * prevents threads from migrating between CPU's
-				 * without ever being run. We don't want to
-				 * steal threads whose FPU context is still in
-				 * CPU.
-				 *
+				 * Do not steal CPU-wired threads, threads
+				 * already stolen, threads for which migration
+				 * was temporarily disabled or threads whose
+				 * FPU context is still in the CPU.
 				 */
 				irq_spinlock_lock(&thread->lock, false);
 
-				if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)))
-				    && (!(thread->fpu_context_engaged))) {
+				if (!(thread->flags & THREAD_FLAG_WIRED) &&
+				    !(thread->flags & THREAD_FLAG_STOLEN) &&
+				    !thread->nomigrate &&
+				    !thread->fpu_context_engaged) {
					/*
					 * Remove thread from ready queue.
					 */
-					irq_spinlock_unlock(&thread->lock, false);
+					irq_spinlock_unlock(&thread->lock,
+					    false);
 
 					atomic_dec(&cpu->nrdy);
@@ -660,5 +660,6 @@
 				 */
 
-				irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock);
+				irq_spinlock_pass(&(cpu->rq[rq].lock),
+				    &thread->lock);
 
 #ifdef KCPULB_VERBOSE
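The substantive change is the stealing condition in kcpulb(), the kernel's load-balancer thread: besides wired, already-stolen, and FPU-engaged threads, it now also skips threads whose nomigrate count is non-zero, i.e. threads that have temporarily disabled migration. Below is a minimal, self-contained sketch of the post-change predicate; the helper name thread_is_stealable and the simplified thread_t are ours for illustration only and do not appear in the HelenOS tree, and in the kernel the real check runs with thread->lock held.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for the kernel's thread flags and thread_t;
     * the names mirror scheduler.c, the definitions here do not. */
    #define THREAD_FLAG_WIRED   (1 << 0)  /* thread pinned to its CPU */
    #define THREAD_FLAG_STOLEN  (1 << 1)  /* thread was already migrated once */

    typedef struct {
        unsigned int flags;
        unsigned int nomigrate;     /* > 0 while migration is disabled */
        bool fpu_context_engaged;   /* lazy FPU state still lives on the CPU */
    } thread_t;

    /* Hypothetical helper: the stealing condition from r43ac0cc expressed
     * as one predicate. In the kernel this runs under thread->lock. */
    static bool thread_is_stealable(const thread_t *thread)
    {
        return !(thread->flags & THREAD_FLAG_WIRED) &&
            !(thread->flags & THREAD_FLAG_STOLEN) &&
            !thread->nomigrate &&
            !thread->fpu_context_engaged;
    }

    int main(void)
    {
        thread_t t = { .flags = 0, .nomigrate = 1, .fpu_context_engaged = false };

        /* With nomigrate set, the balancer must leave the thread on its CPU;
         * this is exactly the case the changeset adds. */
        printf("stealable: %s\n", thread_is_stealable(&t) ? "yes" : "no");
        return 0;
    }

Note also that the old code folded the WIRED and STOLEN tests into a single mask; the new code tests each condition on its own line, which reads more clearly and makes room for the added nomigrate check.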