Changes in kernel/generic/src/proc/scheduler.c [df58e44:55b77d9] in mainline
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/proc/scheduler.c
rdf58e44 r55b77d9 102 102 #endif 103 103 104 #ifdef CONFIG_UDEBUG 104 105 if (THREAD->btrace) { 105 106 istate_t *istate = THREAD->udebug.uspace_state; … … 111 112 THREAD->btrace = false; 112 113 } 114 #endif 113 115 } 114 116 … … 235 237 * Take the first thread from the queue. 236 238 */ 237 thread_t *thread = 238 list_ get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link);239 thread_t *thread = list_get_instance( 240 list_first(&CPU->rq[i].rq), thread_t, rq_link); 239 241 list_remove(&thread->rq_link); 240 242 … … 271 273 static void relink_rq(int start) 272 274 { 273 li nk_t head;274 275 list_initialize(& head);275 list_t list; 276 277 list_initialize(&list); 276 278 irq_spinlock_lock(&CPU->lock, false); 277 279 … … 282 284 283 285 irq_spinlock_lock(&CPU->rq[i + 1].lock, false); 284 list_concat(& head, &CPU->rq[i + 1].rq_head);286 list_concat(&list, &CPU->rq[i + 1].rq); 285 287 size_t n = CPU->rq[i + 1].n; 286 288 CPU->rq[i + 1].n = 0; … … 290 292 291 293 irq_spinlock_lock(&CPU->rq[i].lock, false); 292 list_concat(&CPU->rq[i].rq _head, &head);294 list_concat(&CPU->rq[i].rq, &list); 293 295 CPU->rq[i].n += n; 294 296 irq_spinlock_unlock(&CPU->rq[i].lock, false); … … 352 354 353 355 /* 354 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM356 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, AS 355 357 * and preemption counter. At this point THE could be coming either 356 358 * from THREAD's or CPU's stack. … … 374 376 context_save(&CPU->saved_context); 375 377 context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), 376 (uintptr_t) CPU->stack, CPU_STACK_SIZE);378 (uintptr_t) CPU->stack, STACK_SIZE); 377 379 context_restore(&CPU->saved_context); 378 380 … … 584 586 * Searching least priority queues on all CPU's first and most priority 585 587 * queues on all CPU's last. 
586 *587 588 */ 588 589 size_t acpu; … … 615 616 616 617 /* Search rq from the back */ 617 link_t *link = cpu->rq[rq].rq_head.prev; 618 619 while (link != &(cpu->rq[rq].rq_head)) { 620 thread = (thread_t *) list_get_instance(link, thread_t, rq_link); 618 link_t *link = cpu->rq[rq].rq.head.prev; 619 620 while (link != &(cpu->rq[rq].rq.head)) { 621 thread = (thread_t *) list_get_instance(link, 622 thread_t, rq_link); 621 623 622 624 /* 623 * We don't want to steal CPU-wired threads 624 * neither threads already stolen. The latter 625 * prevents threads from migrating between CPU's 626 * without ever being run. We don't want to 627 * steal threads whose FPU context is still in 628 * CPU. 629 * 625 * Do not steal CPU-wired threads, threads 626 * already stolen, threads for which migration 627 * was temporarily disabled or threads whose 628 * FPU context is still in the CPU. 630 629 */ 631 630 irq_spinlock_lock(&thread->lock, false); 632 631 633 if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN))) 634 && (!(thread->fpu_context_engaged))) { 632 if (!(thread->flags & THREAD_FLAG_WIRED) && 633 !(thread->flags & THREAD_FLAG_STOLEN) && 634 !thread->nomigrate && 635 !thread->fpu_context_engaged) { 635 636 /* 636 637 * Remove thread from ready queue. 
637 638 */ 638 irq_spinlock_unlock(&thread->lock, false); 639 irq_spinlock_unlock(&thread->lock, 640 false); 639 641 640 642 atomic_dec(&cpu->nrdy); … … 658 660 */ 659 661 660 irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock); 662 irq_spinlock_pass(&(cpu->rq[rq].lock), 663 &thread->lock); 661 664 662 665 #ifdef KCPULB_VERBOSE … … 737 740 738 741 printf("\trq[%u]: ", i); 739 link_t *cur; 740 for (cur = cpus[cpu].rq[i].rq_head.next; 741 cur != &(cpus[cpu].rq[i].rq_head); 742 cur = cur->next) { 743 thread_t *thread = list_get_instance(cur, thread_t, rq_link); 742 list_foreach(cpus[cpu].rq[i].rq, cur) { 743 thread_t *thread = list_get_instance(cur, 744 thread_t, rq_link); 744 745 printf("%" PRIu64 "(%s) ", thread->tid, 745 746 thread_states[thread->state]);
Note:
See TracChangeset
for help on using the changeset viewer.