Changeset 4e33b6b in mainline for kernel/generic/src/proc/scheduler.c
- Timestamp: 2007-01-07T14:44:33Z (18 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: d78d603
- Parents: c109dd0
- File: 1 edited
kernel/generic/src/proc/scheduler.c
--- kernel/generic/src/proc/scheduler.c (revision c109dd0)
+++ kernel/generic/src/proc/scheduler.c (revision 4e33b6b)
@@ -1,4 +1,4 @@
 /*
- * Copyright (C) 2001-2004 Jakub Jermar
+ * Copyright (C) 2001-2007 Jakub Jermar
  * All rights reserved.
  *
@@ -143,5 +143,6 @@
         spinlock_unlock(&THREAD->lock);
         spinlock_unlock(&CPU->lock);
-        THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
+        THREAD->saved_fpu_context =
+            slab_alloc(fpu_context_slab, 0);
         /* We may have switched CPUs during slab_alloc */
         goto restart;
@@ -232,5 +233,5 @@
     t->cpu = CPU;
 
-    t->ticks = us2ticks((i+1)*10000);
+    t->ticks = us2ticks((i + 1) * 10000);
     t->priority = i;    /* correct rq index */
 
@@ -268,5 +269,5 @@
     spinlock_lock(&CPU->lock);
     if (CPU->needs_relink > NEEDS_RELINK_MAX) {
-        for (i = start; i<RQ_COUNT-1; i++) {
+        for (i = start; i < RQ_COUNT - 1; i++) {
             /* remember and empty rq[i + 1] */
             r = &CPU->rq[i + 1];
@@ -332,7 +333,7 @@
 
     /*
-     * Interrupt priority level of preempted thread is recorded here
-     * to facilitate scheduler() invocations from interrupts_disable()'d
-     * code (e.g. waitq_sleep_timeout()).
+     * Interrupt priority level of preempted thread is recorded
+     * here to facilitate scheduler() invocations from
+     * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
     */
     THREAD->saved_context.ipl = ipl;
@@ -395,6 +396,6 @@
     } else {
         /*
-         * The thread structure is kept allocated until somebody
-         * calls thread_detach() on it.
+         * The thread structure is kept allocated until
+         * somebody calls thread_detach() on it.
         */
         if (!spinlock_trylock(&THREAD->join_wq.lock)) {
@@ -422,11 +423,13 @@
 
         /*
-         * We need to release wq->lock which we locked in waitq_sleep().
-         * Address of wq->lock is kept in THREAD->sleep_queue.
+         * We need to release wq->lock which we locked in
+         * waitq_sleep(). Address of wq->lock is kept in
+         * THREAD->sleep_queue.
         */
         spinlock_unlock(&THREAD->sleep_queue->lock);
 
         /*
-         * Check for possible requests for out-of-context invocation.
+         * Check for possible requests for out-of-context
+         * invocation.
         */
         if (THREAD->call_me) {
@@ -444,4 +447,5 @@
         * Entering state is unexpected.
         */
-        panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
+        panic("tid%d: unexpected state %s\n", THREAD->tid,
+            thread_states[THREAD->state]);
         break;
@@ -460,5 +464,6 @@
 
     /*
-     * If both the old and the new task are the same, lots of work is avoided.
+     * If both the old and the new task are the same, lots of work is
+     * avoided.
     */
     if (TASK != THREAD->task) {
@@ -477,5 +482,6 @@
 
         /*
-         * Note that it is possible for two tasks to share one address space.
+         * Note that it is possible for two tasks to share one address
+         * space.
         */
         if (as1 != as2) {
@@ -494,5 +500,6 @@
 
 #ifdef SCHEDULER_VERBOSE
-    printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n",
-        CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
+    printf("cpu%d: tid %d (priority=%d, ticks=%lld, nrdy=%ld)\n",
+        CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
+        atomic_get(&CPU->nrdy));
 #endif
@@ -509,5 +516,6 @@
 
     /*
-     * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
+     * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
+     * thread's stack.
     */
     the_copy(THE, (the_t *) THREAD->kstack);
@@ -556,8 +564,9 @@
 
     /*
-     * Searching least priority queues on all CPU's first and most priority queues on all CPU's last.
-     */
-    for (j=RQ_COUNT-1; j >= 0; j--) {
-        for (i=0; i < config.cpu_active; i++) {
+     * Searching least priority queues on all CPU's first and most priority
+     * queues on all CPU's last.
+     */
+    for (j = RQ_COUNT - 1; j >= 0; j--) {
+        for (i = 0; i < config.cpu_active; i++) {
             link_t *l;
             runq_t *r;
@@ -568,5 +577,6 @@
             /*
             * Not interested in ourselves.
-             * Doesn't require interrupt disabling for kcpulb has THREAD_FLAG_WIRED.
+             * Doesn't require interrupt disabling for kcpulb has
+             * THREAD_FLAG_WIRED.
             */
             if (CPU == cpu)
@@ -589,11 +599,14 @@
                 t = list_get_instance(l, thread_t, rq_link);
                 /*
-                 * We don't want to steal CPU-wired threads neither threads already
-                 * stolen. The latter prevents threads from migrating between CPU's
-                 * without ever being run. We don't want to steal threads whose FPU
-                 * context is still in CPU.
+                 * We don't want to steal CPU-wired threads
+                 * neither threads already stolen. The latter
+                 * prevents threads from migrating between CPU's
+                 * without ever being run. We don't want to
+                 * steal threads whose FPU context is still in
+                 * CPU.
                 */
                 spinlock_lock(&t->lock);
-                if ((!(t->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN))) &&
+                if ((!(t->flags & (THREAD_FLAG_WIRED |
+                    THREAD_FLAG_STOLEN))) &&
                     (!(t->fpu_context_engaged)) ) {
                     /*
@@ -622,6 +635,7 @@
                 spinlock_lock(&t->lock);
 #ifdef KCPULB_VERBOSE
-                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n",
-                    CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy),
+                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, "
+                    "avg=%nd\n", CPU->id, t->tid, CPU->id,
+                    atomic_get(&CPU->nrdy),
                     atomic_get(&nrdy) / config.cpu_active);
 #endif
@@ -638,5 +652,6 @@
 
                 /*
-                 * We are not satisfied yet, focus on another CPU next time.
+                 * We are not satisfied yet, focus on another
+                 * CPU next time.
                 */
                 k++;
@@ -689,7 +704,8 @@
         spinlock_lock(&cpus[cpu].lock);
         printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
-            cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);
+            cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
+            cpus[cpu].needs_relink);
 
-        for (i=0; i<RQ_COUNT; i++) {
+        for (i = 0; i < RQ_COUNT; i++) {
             r = &cpus[cpu].rq[i];
             spinlock_lock(&r->lock);
@@ -699,8 +715,9 @@
             }
             printf("\trq[%d]: ", i);
-            for (cur=r->rq_head.next; cur!=&r->rq_head; cur=cur->next) {
+            for (cur = r->rq_head.next; cur != &r->rq_head;
+                cur = cur->next) {
                 t = list_get_instance(cur, thread_t, rq_link);
                 printf("%d(%s) ", t->tid,
                     thread_states[t->state]);
             }
             printf("\n");
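
A note on the t->ticks = us2ticks((i + 1) * 10000) hunk: a thread dequeued from rq[i] receives a time quantum proportional to i + 1, so lower-priority queues are scheduled less often but run longer once picked. A minimal userspace sketch of that relationship; the 100 Hz tick rate, the 16-queue RQ_COUNT, and the us2ticks() conversion below are illustrative assumptions, not the kernel's definitions:

#include <stdio.h>
#include <stdint.h>

#define RQ_COUNT 16   /* assumed number of run queues */
#define HZ       100  /* assumed clock ticks per second */

/* Assumed conversion from microseconds to clock ticks. */
static uint64_t us2ticks(uint64_t us)
{
    return us * HZ / 1000000;
}

int main(void)
{
    int i;

    /* Mirrors the diff's t->ticks = us2ticks((i + 1) * 10000):
     * rq[0] gets a 10 ms quantum, rq[1] 20 ms, ... rq[15] 160 ms. */
    for (i = 0; i < RQ_COUNT; i++)
        printf("rq[%d]: quantum = %llu ticks\n", i,
            (unsigned long long) us2ticks((uint64_t) (i + 1) * 10000));
    return 0;
}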
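The needs_relink hunk belongs to the scheduler's aging pass: once the CPU has gone NEEDS_RELINK_MAX quanta without servicing its low-priority queues, each rq[i + 1] is emptied and appended to rq[i], so starved threads drift one priority level toward the front. A toy model of that one-level shift, with queues reduced to plain thread counts (the real loop splices linked lists under r->lock):

#include <stdio.h>

#define RQ_COUNT 16  /* assumed number of run queues */

int main(void)
{
    /* Assumed snapshot: number of ready threads per queue. */
    int rq[RQ_COUNT] = { 0, 2, 0, 5, 1 };
    int i, start = 0;

    /* Mirrors the relink loop: remember and empty rq[i + 1] into
     * rq[i]. Because rq[i + 1] is emptied before the next iteration
     * refills it, every queue's threads move up exactly one level. */
    for (i = start; i < RQ_COUNT - 1; i++) {
        rq[i] += rq[i + 1];
        rq[i + 1] = 0;
    }

    for (i = 0; i < RQ_COUNT; i++)
        printf("rq[%d]: %d thread(s)\n", i, rq[i]);
    return 0;
}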
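Finally, the kcpulb hunk rewraps the eligibility test the load balancer applies before stealing a thread from another CPU's run queue: wired threads, threads already stolen once (which could otherwise bounce between CPUs without ever running), and threads whose FPU context is still resident on the remote CPU are all skipped. A standalone sketch of that predicate, with the flag encodings and the thread_t layout simplified to assumptions:

#include <stdbool.h>
#include <stdio.h>

#define THREAD_FLAG_WIRED  (1 << 0)  /* assumed flag encoding */
#define THREAD_FLAG_STOLEN (1 << 1)  /* assumed flag encoding */

/* Assumed, heavily reduced thread descriptor. */
typedef struct {
    unsigned flags;
    bool fpu_context_engaged;
} thread_t;

/* Mirrors the condition in the diff:
 * !(t->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)) &&
 * !t->fpu_context_engaged */
static bool can_steal(const thread_t *t)
{
    return !(t->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)) &&
        !t->fpu_context_engaged;
}

int main(void)
{
    thread_t wired = { THREAD_FLAG_WIRED, false };
    thread_t fpu_busy = { 0, true };
    thread_t plain = { 0, false };

    printf("wired: %d, fpu-engaged: %d, plain: %d\n",
        can_steal(&wired), can_steal(&fpu_busy), can_steal(&plain));
    return 0;
}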