Changeset a35b458 in mainline for kernel/generic/src/proc/scheduler.c
- Timestamp: 2018-03-02T20:10:49Z (7 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: f1380b7
- Parents: 3061bc1
- git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2018-02-28 17:38:31)
- git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2018-03-02 20:10:49)
- File: 1 edited
kernel/generic/src/proc/scheduler.c
r3061bc1  ra35b458

90 90      before_thread_runs_arch();
91 91      rcu_before_thread_runs();
92 92
93 93  #ifdef CONFIG_FPU_LAZY
94 94      if (THREAD == CPU->fpu_owner)
… …
105 105      }
106 106  #endif
107 107
108 108  #ifdef CONFIG_UDEBUG
109 109      if (THREAD->btrace) {
… …
113 113              stack_trace_istate(istate);
114 114          }
115 115
116 116          THREAD->btrace = false;
117 117      }
… …
141 141      fpu_enable();
142 142      irq_spinlock_lock(&CPU->lock, false);
143 143
144 144      /* Save old context */
145 145      if (CPU->fpu_owner != NULL) {
146 146          irq_spinlock_lock(&CPU->fpu_owner->lock, false);
147 147          fpu_context_save(CPU->fpu_owner->saved_fpu_context);
148 148
149 149          /* Don't prevent migration */
150 150          CPU->fpu_owner->fpu_context_engaged = false;
… …
152 152          CPU->fpu_owner = NULL;
153 153      }
154 154
155 155      irq_spinlock_lock(&THREAD->lock, false);
156 156      if (THREAD->fpu_context_exists) {
… …
164 164              THREAD->saved_fpu_context =
165 165                  (fpu_context_t *) slab_alloc(fpu_context_cache, 0);
166 166
167 167              /* We may have switched CPUs during slab_alloc */
168 168              goto restart;
… …
171 171          THREAD->fpu_context_exists = true;
172 172      }
173 173
174 174      CPU->fpu_owner = THREAD;
175 175      THREAD->fpu_context_engaged = true;
176 176      irq_spinlock_unlock(&THREAD->lock, false);
177 177
178 178      irq_spinlock_unlock(&CPU->lock, false);
179 179  }
… …
201 201  {
202 202      assert(CPU != NULL);
203 203
204 204  loop:
205 205
206 206      if (atomic_get(&CPU->nrdy) == 0) {
207 207          /*
… …
214 214          irq_spinlock_unlock(&CPU->lock, false);
215 215          interrupts_enable();
216 216
217 217          /*
218 218           * An interrupt might occur right now and wake up a thread.
… …
226 226
227 227      assert(!CPU->idle);
228 228
229 229      unsigned int i;
230 230      for (i = 0; i < RQ_COUNT; i++) {
… …
237 237              continue;
238 238          }
239 239
240 240          atomic_dec(&CPU->nrdy);
241 241          atomic_dec(&nrdy);
242 242          CPU->rq[i].n--;
243 243
244 244          /*
245 245           * Take the first thread from the queue.
… …
248 248              list_first(&CPU->rq[i].rq), thread_t, rq_link);
249 249          list_remove(&thread->rq_link);
250 250
251 251          irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);
252 252
253 253          thread->cpu = CPU;
254 254          thread->ticks = us2ticks((i + 1) * 10000);
255 255          thread->priority = i;  /* Correct rq index */
256 256
257 257          /*
258 258           * Clear the stolen flag so that it can be migrated
… …
261 261          thread->stolen = false;
262 262          irq_spinlock_unlock(&thread->lock, false);
263 263
264 264          return thread;
265 265      }
266 266
267 267      goto loop;
268 268  }
… …
282 282  {
283 283      list_t list;
284 284
285 285      list_initialize(&list);
286 286      irq_spinlock_lock(&CPU->lock, false);
287 287
288 288      if (CPU->needs_relink > NEEDS_RELINK_MAX) {
289 289          int i;
290 290          for (i = start; i < RQ_COUNT - 1; i++) {
291 291              /* Remember and empty rq[i + 1] */
292 292
293 293              irq_spinlock_lock(&CPU->rq[i + 1].lock, false);
294 294              list_concat(&list, &CPU->rq[i + 1].rq);
… …
296 296              CPU->rq[i + 1].n = 0;
297 297              irq_spinlock_unlock(&CPU->rq[i + 1].lock, false);
298 298
299 299              /* Append rq[i + 1] to rq[i] */
300 300
301 301              irq_spinlock_lock(&CPU->rq[i].lock, false);
302 302              list_concat(&CPU->rq[i].rq, &list);
… …
304 304              irq_spinlock_unlock(&CPU->rq[i].lock, false);
305 305          }
306 306
307 307          CPU->needs_relink = 0;
308 308      }
309 309
310 310      irq_spinlock_unlock(&CPU->lock, false);
311 311  }
… …
321 321  {
322 322      volatile ipl_t ipl;
323 323
324 324      assert(CPU != NULL);
325 325
326 326      ipl = interrupts_disable();
327 327
328 328      if (atomic_get(&haltstate))
329 329          halt();
330 330
331 331      if (THREAD) {
332 332          irq_spinlock_lock(&THREAD->lock, false);
333 333
334 334          /* Update thread kernel accounting */
335 335          THREAD->kcycles += get_cycle() - THREAD->last_cycle;
336 336
337 337  #if (defined CONFIG_FPU) && (!defined CONFIG_FPU_LAZY)
338 338          fpu_context_save(THREAD->saved_fpu_context);
… …
342 342               * This is the place where threads leave scheduler();
343 343               */
344 344
345 345              /* Save current CPU cycle */
346 346              THREAD->last_cycle = get_cycle();
347 347
348 348              irq_spinlock_unlock(&THREAD->lock, false);
349 349              interrupts_restore(THREAD->saved_context.ipl);
350 350
351 351              return;
352 352          }
353 353
354 354          /*
355 355           * Interrupt priority level of preempted thread is recorded
… …
360 360          THREAD->saved_context.ipl = ipl;
361 361      }
362 362
363 363      /*
364 364       * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, AS
… …
368 368       */
369 369      the_copy(THE, (the_t *) CPU->stack);
370 370
371 371      /*
372 372       * We may not keep the old stack.
… …
386 386          (uintptr_t) CPU->stack, STACK_SIZE);
387 387      context_restore(&CPU->saved_context);
388 388
389 389      /* Not reached */
390 390  }
… …
402 402      task_t *old_task = TASK;
403 403      as_t *old_as = AS;
404 404
405 405      assert((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
406 406      assert(CPU != NULL);
407 407      assert(interrupts_disabled());
408 408
409 409      /*
410 410       * Hold the current task and the address space to prevent their
… …
414 414      if (old_task)
415 415          task_hold(old_task);
416 416
417 417      if (old_as)
418 418          as_hold(old_as);
419 419
420 420      if (THREAD) {
421 421          /* Must be run after the switch to scheduler stack */
422 422          after_thread_ran();
423 423
424 424          switch (THREAD->state) {
425 425          case Running:
… …
427 427              thread_ready(THREAD);
428 428              break;
429 429
430 430          case Exiting:
431 431              rcu_thread_exiting();
… …
452 452                      WAKEUP_FIRST);
453 453                  irq_spinlock_unlock(&THREAD->join_wq.lock, false);
454 454
455 455                  THREAD->state = Lingering;
456 456                  irq_spinlock_unlock(&THREAD->lock, false);
457 457              }
458 458              break;
459 459
460 460          case Sleeping:
461 461              /*
… …
463 463               */
464 464              THREAD->priority = -1;
465 465
466 466              /*
467 467               * We need to release wq->lock which we locked in
… …
470 470               */
471 471              irq_spinlock_unlock(&THREAD->sleep_queue->lock, false);
472 472
473 473              irq_spinlock_unlock(&THREAD->lock, false);
474 474              break;
475 475
476 476          default:
477 477              /*
… …
482 482              break;
483 483          }
484 484
485 485          THREAD = NULL;
486 486      }
487 487
488 488      THREAD = find_best_thread();
489 489
490 490      irq_spinlock_lock(&THREAD->lock, false);
491 491      int priority = THREAD->priority;
492 492      irq_spinlock_unlock(&THREAD->lock, false);
493 493
494 494      relink_rq(priority);
495 495
496 496      /*
497 497       * If both the old and the new task are the same,
… …
500 500      if (TASK != THREAD->task) {
501 501          as_t *new_as = THREAD->task->as;
502 502
503 503          /*
504 504           * Note that it is possible for two tasks
… …
512 512              as_switch(old_as, new_as);
513 513          }
514 514
515 515          TASK = THREAD->task;
516 516          before_task_runs();
517 517      }
518 518
519 519      if (old_task)
520 520          task_release(old_task);
521 521
522 522      if (old_as)
523 523          as_release(old_as);
524 524
525 525      irq_spinlock_lock(&THREAD->lock, false);
526 526      THREAD->state = Running;
527 527
528 528  #ifdef SCHEDULER_VERBOSE
529 529      log(LF_OTHER, LVL_DEBUG,
… …
532 532          THREAD->ticks, atomic_get(&CPU->nrdy));
533 533  #endif
534 534
535 535      /*
536 536       * Some architectures provide late kernel PA2KA(identity)
… …
542 542       */
543 543      before_thread_runs();
544 544
545 545      /*
546 546       * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
… …
548 548       */
549 549      the_copy(THE, (the_t *) THREAD->kstack);
550 550
551 551      context_restore(&THREAD->saved_context);
552 552
553 553      /* Not reached */
554 554  }
… …
567 567      atomic_count_t average;
568 568      atomic_count_t rdy;
569 569
570 570      /*
571 571       * Detach kcpulb as nobody will call thread_join_timeout() on it.
572 572       */
573 573      thread_detach(THREAD);
574 574
575 575  loop:
576 576      /*
… …
578 578       */
579 579      thread_sleep(1);
580 580
581 581  not_satisfied:
582 582      /*
… …
588 588      average = atomic_get(&nrdy) / config.cpu_active + 1;
589 589      rdy = atomic_get(&CPU->nrdy);
590 590
591 591      if (average <= rdy)
592 592          goto satisfied;
593 593
594 594      atomic_count_t count = average - rdy;
595 595
596 596      /*
597 597       * Searching least priority queues on all CPU's first and most priority
… …
601 601      size_t acpu_bias = 0;
602 602      int rq;
603 603
604 604      for (rq = RQ_COUNT - 1; rq >= 0; rq--) {
605 605          for (acpu = 0; acpu < config.cpu_active; acpu++) {
606 606              cpu_t *cpu = &cpus[(acpu + acpu_bias) % config.cpu_active];
607 607
608 608              /*
609 609               * Not interested in ourselves.
… …
614 614              if (CPU == cpu)
615 615                  continue;
616 616
617 617              if (atomic_get(&cpu->nrdy) <= average)
618 618                  continue;
619 619
620 620              irq_spinlock_lock(&(cpu->rq[rq].lock), true);
621 621              if (cpu->rq[rq].n == 0) {
… …
623 623                  continue;
624 624              }
625 625
626 626              thread_t *thread = NULL;
627 627
628 628              /* Search rq from the back */
629 629              link_t *link = cpu->rq[rq].rq.head.prev;
630 630
631 631              while (link != &(cpu->rq[rq].rq.head)) {
632 632                  thread = (thread_t *) list_get_instance(link,
633 633                      thread_t, rq_link);
634 634
635 635                  /*
636 636                   * Do not steal CPU-wired threads, threads
… …
640 640                   */
641 641                  irq_spinlock_lock(&thread->lock, false);
642 642
643 643                  if ((!thread->wired) && (!thread->stolen) &&
644 644                      (!thread->nomigrate) &&
… …
649 649                      irq_spinlock_unlock(&thread->lock,
650 650                          false);
651 651
652 652                      atomic_dec(&cpu->nrdy);
653 653                      atomic_dec(&nrdy);
654 654
655 655                      cpu->rq[rq].n--;
656 656                      list_remove(&thread->rq_link);
657 657
658 658                      break;
659 659                  }
660 660
661 661                  irq_spinlock_unlock(&thread->lock, false);
662 662
663 663                  link = link->prev;
664 664                  thread = NULL;
665 665              }
666 666
667 667              if (thread) {
668 668                  /*
669 669                   * Ready thread on local CPU
670 670                   */
671 671
672 672                  irq_spinlock_pass(&(cpu->rq[rq].lock),
673 673                      &thread->lock);
674 674
675 675  #ifdef KCPULB_VERBOSE
676 676                  log(LF_OTHER, LVL_DEBUG,
… …
680 680                      atomic_get(&nrdy) / config.cpu_active);
681 681  #endif
682 682
683 683                  thread->stolen = true;
684 684                  thread->state = Entering;
685 685
686 686                  irq_spinlock_unlock(&thread->lock, true);
687 687                  thread_ready(thread);
688 688
689 689                  if (--count == 0)
690 690                      goto satisfied;
691 691
692 692                  /*
693 693                   * We are not satisfied yet, focus on another
… …
696 696                   */
697 697                  acpu_bias++;
698 698
699 699                  continue;
700 700              } else
701 701                  irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
702
703          }
704      }
705
    702
    703          }
    704      }
    705
706 706      if (atomic_get(&CPU->nrdy)) {
707 707          /*
… …
718 718          goto loop;
719 719      }
720 720
721 721      goto not_satisfied;
722 722
723 723  satisfied:
724 724      goto loop;
… …
735 735          if (!cpus[cpu].active)
736 736              continue;
737 737
738 738          irq_spinlock_lock(&cpus[cpu].lock, true);
739 739
740 740          printf("cpu%u: address=%p, nrdy=%" PRIua ", needs_relink=%zu\n",
741 741              cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
742 742              cpus[cpu].needs_relink);
743 743
744 744          unsigned int i;
745 745          for (i = 0; i < RQ_COUNT; i++) {
… …
749 749                  continue;
750 750              }
751 751
752 752              printf("\trq[%u]: ", i);
753 753              list_foreach(cpus[cpu].rq[i].rq, rq_link, thread_t,
… …
757 757              }
758 758              printf("\n");
759 759
760 760              irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
761 761          }
762 762
763 763          irq_spinlock_unlock(&cpus[cpu].lock, true);
764 764      }
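For context, find_best_thread() in the listing above selects the next thread by scanning the CPU's priority run queues from the highest priority (index 0) downwards, dequeuing the first thread it finds and deriving its time slice from the queue index. The following is a minimal, self-contained sketch of that selection pattern only; the names rq_t, toy_thread_t, toy_us2ticks and pick_next are illustrative stand-ins rather than HelenOS APIs, and all locking, atomics and per-CPU bookkeeping are deliberately omitted.

    /*
     * Minimal sketch of the run-queue selection pattern used by
     * find_best_thread().  All names here are illustrative stand-ins,
     * not HelenOS APIs; locking and per-CPU state are omitted.
     */
    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    #define RQ_COUNT  16                /* number of priority queues; 0 is highest */

    typedef struct toy_thread {
        struct toy_thread *next;        /* run-queue link */
        int priority;                   /* current run-queue index */
        uint64_t ticks;                 /* assigned time slice, in ticks */
    } toy_thread_t;

    typedef struct {
        toy_thread_t *head;             /* FIFO: dequeue from the head */
        toy_thread_t *tail;
        size_t n;                       /* number of ready threads in the queue */
    } rq_t;

    /* Stand-in for us2ticks(); assumes a 10 us scheduler tick. */
    static uint64_t toy_us2ticks(uint64_t us)
    {
        return us / 10;
    }

    /*
     * Scan the queues from the highest priority (index 0) downwards and
     * dequeue the first ready thread, mirroring the loop in
     * find_best_thread().  Lower-priority queues get proportionally longer
     * time slices: (i + 1) * 10000 us.  Returns NULL when every queue is
     * empty (the real code idles the CPU instead).
     */
    static toy_thread_t *pick_next(rq_t rq[RQ_COUNT])
    {
        for (unsigned int i = 0; i < RQ_COUNT; i++) {
            if (rq[i].n == 0)
                continue;

            toy_thread_t *thread = rq[i].head;
            rq[i].head = thread->next;
            if (rq[i].head == NULL)
                rq[i].tail = NULL;
            rq[i].n--;

            thread->ticks = toy_us2ticks((i + 1) * 10000);
            thread->priority = (int) i;
            return thread;
        }

        return NULL;
    }

    int main(void)
    {
        rq_t rq[RQ_COUNT] = { 0 };
        toy_thread_t t = { .next = NULL, .priority = 3, .ticks = 0 };

        /* Make the thread ready in queue 3, then pick it. */
        rq[3].head = rq[3].tail = &t;
        rq[3].n = 1;

        toy_thread_t *next = pick_next(rq);
        printf("picked priority %d, ticks %llu\n", next->priority,
            (unsigned long long) next->ticks);
        return 0;
    }

In the real scheduler the dequeue is guarded by the per-queue spinlock and the nrdy counters are decremented atomically, as the diff context above shows.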