Changeset fbaf6ac in mainline
- Timestamp: 2023-04-16T12:39:04Z (18 months ago)
- Branches: master, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 06f81c4
- Parents: 169815e
- git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2023-03-27 17:01:08)
- git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2023-04-16 12:39:04)
- Location: kernel/generic/src/proc
- Files: 2 edited
Legend: in the unified diffs below, lines beginning with '+' were added, lines beginning with '-' were removed, and all other lines are unmodified context.
kernel/generic/src/proc/scheduler.c
--- kernel/generic/src/proc/scheduler.c (r169815e)
+++ kernel/generic/src/proc/scheduler.c (rfbaf6ac)
@@ -502,4 +502,67 @@
 
 #ifdef CONFIG_SMP
+
+static thread_t *steal_thread_from(cpu_t *old_cpu, int i)
+{
+    runq_t *old_rq = &old_cpu->rq[i];
+    runq_t *new_rq = &CPU->rq[i];
+
+    irq_spinlock_lock(&old_rq->lock, true);
+
+    /* Search rq from the back */
+    list_foreach_rev(old_rq->rq, rq_link, thread_t, thread) {
+
+        irq_spinlock_lock(&thread->lock, false);
+
+        /*
+         * Do not steal CPU-wired threads, threads
+         * already stolen, threads for which migration
+         * was temporarily disabled or threads whose
+         * FPU context is still in the CPU.
+         */
+        if (thread->stolen || thread->nomigrate || thread->fpu_context_engaged) {
+            irq_spinlock_unlock(&thread->lock, false);
+            continue;
+        }
+
+        thread->stolen = true;
+        thread->cpu = CPU;
+
+        irq_spinlock_unlock(&thread->lock, false);
+
+        /*
+         * Ready thread on local CPU
+         */
+
+#ifdef KCPULB_VERBOSE
+        log(LF_OTHER, LVL_DEBUG,
+            "kcpulb%u: TID %" PRIu64 " -> cpu%u, "
+            "nrdy=%ld, avg=%ld", CPU->id, thread->tid,
+            CPU->id, atomic_load(&CPU->nrdy),
+            atomic_load(&nrdy) / config.cpu_active);
+#endif
+
+        /* Remove thread from ready queue. */
+        old_rq->n--;
+        list_remove(&thread->rq_link);
+
+        irq_spinlock_pass(&old_rq->lock, &new_rq->lock);
+
+        /* Append thread to local queue. */
+        list_append(&thread->rq_link, &new_rq->rq);
+        new_rq->n++;
+
+        irq_spinlock_unlock(&new_rq->lock, true);
+
+        atomic_dec(&old_cpu->nrdy);
+        atomic_inc(&CPU->nrdy);
+
+        return thread;
+    }
+
+    irq_spinlock_unlock(&old_rq->lock, true);
+    return NULL;
+}
+
 /** Load balancing thread
  *
@@ -541,10 +604,9 @@
      */
     size_t acpu;
-    size_t acpu_bias = 0;
     int rq;
 
     for (rq = RQ_COUNT - 1; rq >= 0; rq--) {
         for (acpu = 0; acpu < config.cpu_active; acpu++) {
-            cpu_t *cpu = &cpus[(acpu + acpu_bias) % config.cpu_active];
+            cpu_t *cpu = &cpus[acpu];
 
             /*
@@ -560,87 +622,6 @@
                 continue;
 
-            irq_spinlock_lock(&(cpu->rq[rq].lock), true);
-            if (cpu->rq[rq].n == 0) {
-                irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
-                continue;
-            }
-
-            thread_t *thread = NULL;
-
-            /* Search rq from the back */
-            link_t *link = list_last(&cpu->rq[rq].rq);
-
-            while (link != NULL) {
-                thread = (thread_t *) list_get_instance(link,
-                    thread_t, rq_link);
-
-                /*
-                 * Do not steal CPU-wired threads, threads
-                 * already stolen, threads for which migration
-                 * was temporarily disabled or threads whose
-                 * FPU context is still in the CPU.
-                 */
-                irq_spinlock_lock(&thread->lock, false);
-
-                if ((!thread->stolen) &&
-                    (!thread->nomigrate) &&
-                    (!thread->fpu_context_engaged)) {
-                    /*
-                     * Remove thread from ready queue.
-                     */
-                    irq_spinlock_unlock(&thread->lock,
-                        false);
-
-                    atomic_dec(&cpu->nrdy);
-                    atomic_dec(&nrdy);
-
-                    cpu->rq[rq].n--;
-                    list_remove(&thread->rq_link);
-
-                    break;
-                }
-
-                irq_spinlock_unlock(&thread->lock, false);
-
-                link = list_prev(link, &cpu->rq[rq].rq);
-                thread = NULL;
-            }
-
-            if (thread) {
-                /*
-                 * Ready thread on local CPU
-                 */
-
-                irq_spinlock_pass(&(cpu->rq[rq].lock),
-                    &thread->lock);
-
-#ifdef KCPULB_VERBOSE
-                log(LF_OTHER, LVL_DEBUG,
-                    "kcpulb%u: TID %" PRIu64 " -> cpu%u, "
-                    "nrdy=%ld, avg=%ld", CPU->id, thread->tid,
-                    CPU->id, atomic_load(&CPU->nrdy),
-                    atomic_load(&nrdy) / config.cpu_active);
-#endif
-
-                thread->stolen = true;
-                thread->state = Entering;
-
-                irq_spinlock_unlock(&thread->lock, true);
-                thread_ready(thread);
-
-                if (--count == 0)
-                    goto satisfied;
-
-                /*
-                 * We are not satisfied yet, focus on another
-                 * CPU next time.
-                 *
-                 */
-                acpu_bias++;
-
-                continue;
-            } else
-                irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
-
+            if (steal_thread_from(cpu, rq) && --count == 0)
+                goto satisfied;
         }
     }
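The scheduler.c change factors the body of kcpulb()'s innermost stealing loop out into the new steal_thread_from() helper. Two behavioral details are worth noting: the stolen thread is now moved directly from the victim CPU's run queue to the local one, with the lock passed between the two queues, instead of being re-enqueued through thread_ready(), and the helper records the migration immediately by setting thread->cpu = CPU. The acpu_bias rotation, which previously shifted the starting victim after each successful steal, is dropped in favor of a plain in-order scan of the active CPUs.

Below is a minimal user-space sketch of the same tail-first stealing pattern. All names here (toy_steal, toy_rq_t, toy_thread_t) are hypothetical stand-ins: pthread mutexes replace HelenOS irq spinlocks, the intrusive list is reduced to a plain doubly linked list, and the fpu_context_engaged check is omitted. It illustrates the shape of the algorithm, not the kernel's actual API.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct toy_thread {
    int tid;
    bool stolen;                /* already migrated once          */
    bool nomigrate;             /* migration temporarily disabled */
    struct toy_thread *prev, *next;
} toy_thread_t;

typedef struct {
    pthread_mutex_t lock;       /* stands in for an irq spinlock */
    toy_thread_t *head, *tail;
    size_t n;
} toy_rq_t;

static void rq_unlink(toy_rq_t *rq, toy_thread_t *t)
{
    if (t->prev) t->prev->next = t->next; else rq->head = t->next;
    if (t->next) t->next->prev = t->prev; else rq->tail = t->prev;
    t->prev = t->next = NULL;
    rq->n--;
}

static void rq_append(toy_rq_t *rq, toy_thread_t *t)
{
    t->prev = rq->tail;
    t->next = NULL;
    if (rq->tail) rq->tail->next = t; else rq->head = t;
    rq->tail = t;
    rq->n++;
}

/* Mirrors steal_thread_from(): returns the stolen entry, or NULL. */
static toy_thread_t *toy_steal(toy_rq_t *victim, toy_rq_t *local)
{
    pthread_mutex_lock(&victim->lock);

    /* Search the victim queue from the back, as the kernel does. */
    for (toy_thread_t *t = victim->tail; t != NULL; t = t->prev) {
        if (t->stolen || t->nomigrate)
            continue;           /* not eligible for migration */

        t->stolen = true;
        rq_unlink(victim, t);

        /* Hand the lock over, like irq_spinlock_pass(): release
         * the victim queue, then take the local one. */
        pthread_mutex_unlock(&victim->lock);
        pthread_mutex_lock(&local->lock);

        rq_append(local, t);
        pthread_mutex_unlock(&local->lock);
        return t;
    }

    pthread_mutex_unlock(&victim->lock);
    return NULL;
}

int main(void)
{
    toy_rq_t a = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL, 0 };
    toy_rq_t b = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL, 0 };
    toy_thread_t t1 = { .tid = 1 };
    toy_thread_t t2 = { .tid = 2, .nomigrate = true };

    rq_append(&a, &t1);
    rq_append(&a, &t2);

    toy_thread_t *got = toy_steal(&a, &b);
    printf("stole TID %d; victim n=%zu, local n=%zu\n",
        got ? got->tid : -1, a.n, b.n);
    return 0;
}

Compiled with cc -pthread, this prints "stole TID 1; victim n=1, local n=1": the tail entry is skipped because it is marked non-migratable, and its predecessor is stolen instead. The unlock/lock hand-over in the middle mirrors irq_spinlock_pass(&old_rq->lock, &new_rq->lock); assuming that call behaves as unlock-then-lock with the interrupt state carried across (which is how its use here reads), the balancer never holds two run-queue locks at once, and the thread can safely be moved in between because it is already unlinked and flagged stolen. Scanning from the back plausibly targets the threads that have waited longest and are least likely to still have a warm cache on the victim CPU.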
kernel/generic/src/proc/thread.c
--- kernel/generic/src/proc/thread.c (r169815e)
+++ kernel/generic/src/proc/thread.c (rfbaf6ac)
@@ -259,19 +259,6 @@
         ++thread->priority : thread->priority;
 
-    cpu_t *cpu;
-    if (thread->nomigrate || thread->fpu_context_engaged) {
-        /* Cannot ready to another CPU */
-        assert(thread->cpu != NULL);
-        cpu = thread->cpu;
-    } else if (thread->stolen) {
-        /* Ready to the stealing CPU */
-        cpu = CPU;
-    } else if (thread->cpu) {
-        /* Prefer the CPU on which the thread ran last */
-        assert(thread->cpu != NULL);
-        cpu = thread->cpu;
-    } else {
-        cpu = CPU;
-    }
+    /* Prefer the CPU on which the thread ran last */
+    cpu_t *cpu = thread->cpu ? thread->cpu : CPU;
 
     thread->state = Ready;
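The thread.c simplification is a direct consequence of the scheduler.c change: because steal_thread_from() now rewrites thread->cpu at steal time, the old "ready to the stealing CPU" branch is subsumed by "prefer the CPU on which the thread ran last", and the nomigrate/fpu_context_engaged branches already resolved to thread->cpu. The small harness below checks that the collapsed expression agrees with the removed branchy version. The sim_* types and the main() driver are hypothetical scaffolding, not HelenOS code, and the equivalence rests on the assumption, guaranteed by the new stealing code, that a stolen thread's cpu field already points at the stealing CPU.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct { int id; } sim_cpu_t;

typedef struct {
    sim_cpu_t *cpu;             /* last CPU; rewritten on steal */
    bool stolen;
    bool nomigrate;
    bool fpu_context_engaged;
} sim_thread_t;

/* The branchy selection removed by the changeset. */
static sim_cpu_t *pick_old(sim_thread_t *t, sim_cpu_t *current)
{
    if (t->nomigrate || t->fpu_context_engaged) {
        assert(t->cpu != NULL); /* cannot ready to another CPU */
        return t->cpu;
    }
    if (t->stolen)
        return current;         /* ready to the stealing CPU */
    return t->cpu ? t->cpu : current;
}

/* The collapsed selection introduced by the changeset. */
static sim_cpu_t *pick_new(sim_thread_t *t, sim_cpu_t *current)
{
    return t->cpu ? t->cpu : current;
}

int main(void)
{
    sim_cpu_t cur = { 0 }, other = { 1 };

    /* Stolen thread: the stealer already set t->cpu to itself. */
    sim_thread_t stolen = { .cpu = &cur, .stolen = true };
    assert(pick_old(&stolen, &cur) == pick_new(&stolen, &cur));

    /* Thread that last ran on some other CPU. */
    sim_thread_t plain = { .cpu = &other };
    assert(pick_old(&plain, &cur) == pick_new(&plain, &cur));

    /* Fresh thread with no CPU history. */
    sim_thread_t fresh = { .cpu = NULL };
    assert(pick_old(&fresh, &cur) == pick_new(&fresh, &cur));

    return 0;
}

Each assert compares the two policies for a stolen thread, a thread with ordinary last-ran affinity, and a fresh thread with no CPU history; the program exits silently when they agree.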