Changeset 32fffef0 in mainline for kernel/generic/src/proc/scheduler.c
- Timestamp: 2006-08-29T11:06:57Z (19 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 0fa6044
- Parents: c8ea4a8b
- File: 1 edited
Legend:
- Unmodified lines carry no prefix
- Added lines are prefixed with "+"
- Removed lines are prefixed with "-"
kernel/generic/src/proc/scheduler.c
--- kernel/generic/src/proc/scheduler.c (revision c8ea4a8b)
+++ kernel/generic/src/proc/scheduler.c (revision 32fffef0)
@@ -142,6 +142,5 @@
 	spinlock_unlock(&THREAD->lock);
 	spinlock_unlock(&CPU->lock);
-	THREAD->saved_fpu_context = slab_alloc(fpu_context_slab,
-	    0);
+	THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
 	/* We may have switched CPUs during slab_alloc */
 	goto restart;
@@ -236,7 +235,8 @@
 
 	/*
-	 * Clear the X_STOLEN flag so that t can be migrated when load balancing needs emerge.
+	 * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
+	 * when load balancing needs emerge.
 	 */
-	t->flags &= ~X_STOLEN;
+	t->flags &= ~THREAD_FLAG_STOLEN;
 	spinlock_unlock(&t->lock);
 
@@ -350,5 +350,6 @@
 	 */
 	context_save(&CPU->saved_context);
-	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (uintptr_t) CPU->stack, CPU_STACK_SIZE);
+	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
+	    (uintptr_t) CPU->stack, CPU_STACK_SIZE);
 	context_restore(&CPU->saved_context);
 	/* not reached */
@@ -484,5 +485,6 @@
 
 #ifdef SCHEDULER_VERBOSE
-	printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
+	printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n",
+	    CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
 #endif
 
@@ -557,5 +559,5 @@
 	/*
 	 * Not interested in ourselves.
-	 * Doesn't require interrupt disabling for kcpulb is X_WIRED.
+	 * Doesn't require interrupt disabling for kcpulb has THREAD_FLAG_WIRED.
 	 */
 	if (CPU == cpu)
@@ -578,10 +580,12 @@
 	t = list_get_instance(l, thread_t, rq_link);
 	/*
-	 * We don't want to steal CPU-wired threads neither threads already stolen.
-	 * The latter prevents threads from migrating between CPU's without ever being run.
-	 * We don't want to steal threads whose FPU context is still in CPU.
+	 * We don't want to steal CPU-wired threads neither threads already
+	 * stolen. The latter prevents threads from migrating between CPU's
+	 * without ever being run. We don't want to steal threads whose FPU
+	 * context is still in CPU.
 	 */
 	spinlock_lock(&t->lock);
-	if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) {
+	if ((!(t->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN))) &&
+	    (!(t->fpu_context_engaged)) ) {
 	/*
 	 * Remove t from r.
@@ -609,7 +613,9 @@
 	spinlock_lock(&t->lock);
 #ifdef KCPULB_VERBOSE
-	printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
+	printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n",
+	    CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy),
+	    atomic_get(&nrdy) / config.cpu_active);
 #endif
-	t->flags |= X_STOLEN;
+	t->flags |= THREAD_FLAG_STOLEN;
 	t->state = Entering;
 	spinlock_unlock(&t->lock);
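
Aside from line rewrapping, the substantive change in this revision is a rename: the thread flags X_WIRED and X_STOLEN become THREAD_FLAG_WIRED and THREAD_FLAG_STOLEN; the logic they guard is untouched. Below is a minimal standalone sketch of the steal test that the kcpulb hunks touch. It is not the HelenOS sources: the flag bit values, the reduced thread_t layout, and the helper name thread_is_stealable are illustrative assumptions; only the names THREAD_FLAG_WIRED, THREAD_FLAG_STOLEN, and fpu_context_engaged come from the diff itself.

#include <stdbool.h>
#include <stdio.h>

#define THREAD_FLAG_WIRED   (1 << 0)  /* assumed bit value, for illustration */
#define THREAD_FLAG_STOLEN  (1 << 1)  /* assumed bit value, for illustration */

/* Reduced, hypothetical thread_t: just the fields the steal test reads. */
typedef struct {
	unsigned int flags;           /* THREAD_FLAG_* bits */
	bool fpu_context_engaged;     /* lazy FPU state still parked in a CPU */
} thread_t;

/*
 * Mirrors the condition in the hunk at old line 585: a thread may be
 * stolen only if it is not wired to its CPU, has not already been
 * stolen (so it cannot bounce between CPUs without ever running), and
 * its FPU context is not engaged on its current CPU.
 */
static bool thread_is_stealable(const thread_t *t)
{
	if (t->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN))
		return false;
	return !t->fpu_context_engaged;
}

int main(void)
{
	thread_t plain = { 0, false };
	thread_t wired = { THREAD_FLAG_WIRED, false };

	printf("plain: %d, wired: %d\n",
	    thread_is_stealable(&plain), thread_is_stealable(&wired));
	return 0;
}

Note how this pairs with the hunk at old line 240: once a stolen thread is actually picked to run, THREAD_FLAG_STOLEN is cleared again, so the thread becomes an ordinary steal candidate the next time load balancing needs emerge.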