Changeset 04803bf in mainline for kernel/generic/src/proc/scheduler.c
- Timestamp: 2011-03-21T22:00:17Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 143932e3
- Parents: b50b5af2 (diff), 7308e84 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- File: 1 edited
- kernel/generic/src/proc/scheduler.c (modified) (30 diffs)
Legend: unmodified context lines are prefixed with a space, added lines with "+", removed lines with "-". Elided context is marked with "…".
kernel/generic/src/proc/scheduler.c
--- kernel/generic/src/proc/scheduler.c (revision b50b5af2)
+++ kernel/generic/src/proc/scheduler.c (revision 04803bf)

 /*
- * Copyright (c) 2001-2007 Jakub Jermar
+ * Copyright (c) 2010 Jakub Jermar
  * All rights reserved.
  *
…
 /**
  * @file
- * @brief	Scheduler and load balancing.
+ * @brief Scheduler and load balancing.
  *
  * This file contains the scheduler and kcpulb kernel thread which
…
 #include <print.h>
 #include <debug.h>
-
-static void before_task_runs(void);
-static void before_thread_runs(void);
-static void after_thread_ran(void);
+#include <stacktrace.h>
+
 static void scheduler_separated_stack(void);
 
-atomic_t nrdy;	/**< Number of ready threads in the system. */
+atomic_t nrdy;  /**< Number of ready threads in the system. */
 
 /** Carry out actions before new task runs. */
-void before_task_runs(void)
+static void before_task_runs(void)
 {
 	before_task_runs_arch();
…
  * Perform actions that need to be
  * taken before the newly selected
- * tread is passed control.
+ * thread is passed control.
  *
  * THREAD->lock is locked on entry
  *
  */
-void before_thread_runs(void)
+static void before_thread_runs(void)
 {
 	before_thread_runs_arch();
+	
 #ifdef CONFIG_FPU_LAZY
-	if (THREAD == CPU->fpu_owner)
+	if (THREAD == CPU->fpu_owner)
 		fpu_enable();
 	else
-		fpu_disable();
+		fpu_disable();
 #else
 	fpu_enable();
…
 	}
 #endif
+	
+#ifdef CONFIG_UDEBUG
+	if (THREAD->btrace) {
+		istate_t *istate = THREAD->udebug.uspace_state;
+		if (istate != NULL) {
+			printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
+			stack_trace_istate(istate);
+		}
+		
+		THREAD->btrace = false;
+	}
+#endif
 }
…
  *
  */
-void after_thread_ran(void)
+static void after_thread_ran(void)
 {
 	after_thread_ran_arch();
…
 restart:
 	fpu_enable();
-	spinlock_lock(&CPU->lock);
+	irq_spinlock_lock(&CPU->lock, false);
 	
 	/* Save old context */
-	if (CPU->fpu_owner != NULL) {
-		spinlock_lock(&CPU->fpu_owner->lock);
+	if (CPU->fpu_owner != NULL) {
+		irq_spinlock_lock(&CPU->fpu_owner->lock, false);
 		fpu_context_save(CPU->fpu_owner->saved_fpu_context);
-		/* don't prevent migration */
+		
+		/* Don't prevent migration */
 		CPU->fpu_owner->fpu_context_engaged = 0;
-		spinlock_unlock(&CPU->fpu_owner->lock);
+		irq_spinlock_unlock(&CPU->fpu_owner->lock, false);
 		CPU->fpu_owner = NULL;
 	}
-
-	spinlock_lock(&THREAD->lock);
+	
+	irq_spinlock_lock(&THREAD->lock, false);
 	if (THREAD->fpu_context_exists) {
 		fpu_context_restore(THREAD->saved_fpu_context);
…
 		if (!THREAD->saved_fpu_context) {
 			/* Might sleep */
-			spinlock_unlock(&THREAD->lock);
-			spinlock_unlock(&CPU->lock);
+			irq_spinlock_unlock(&THREAD->lock, false);
+			irq_spinlock_unlock(&CPU->lock, false);
 			THREAD->saved_fpu_context =
 			    (fpu_context_t *) slab_alloc(fpu_context_slab, 0);
+			
 			/* We may have switched CPUs during slab_alloc */
-			goto restart;
+			goto restart;
 		}
 		fpu_init();
 		THREAD->fpu_context_exists = 1;
 	}
+	
 	CPU->fpu_owner = THREAD;
 	THREAD->fpu_context_engaged = 1;
-	spinlock_unlock(&THREAD->lock);
-
-	spinlock_unlock(&CPU->lock);
-}
-#endif
+	irq_spinlock_unlock(&THREAD->lock, false);
+	
+	irq_spinlock_unlock(&CPU->lock, false);
+}
+#endif /* CONFIG_FPU_LAZY */
 
 /** Initialize scheduler
…
 static thread_t *find_best_thread(void)
 {
-	thread_t *t;
-	runq_t *r;
-	int i;
-
 	ASSERT(CPU != NULL);
 	
 loop:
-	interrupts_enable();
 	
 	if (atomic_get(&CPU->nrdy) == 0) {
…
 		 * This improves energy saving and hyperthreading.
 		 */
+		irq_spinlock_lock(&CPU->lock, false);
+		CPU->idle = true;
+		irq_spinlock_unlock(&CPU->lock, false);
+		interrupts_enable();
+		
 		/*
 		 * An interrupt might occur right now and wake up a thread.
…
 		 * even though there is a runnable thread.
 		 */
-
-		cpu_sleep();
-		goto loop;
-	}
-
-	interrupts_disable();
-
+		cpu_sleep();
+		interrupts_disable();
+		goto loop;
+	}
+	
+	unsigned int i;
 	for (i = 0; i < RQ_COUNT; i++) {
-		r = &CPU->rq[i];
-		spinlock_lock(&r->lock);
-		if (r->n == 0) {
+		irq_spinlock_lock(&(CPU->rq[i].lock), false);
+		if (CPU->rq[i].n == 0) {
 			/*
 			 * If this queue is empty, try a lower-priority queue.
 			 */
-			spinlock_unlock(&r->lock);
+			irq_spinlock_unlock(&(CPU->rq[i].lock), false);
 			continue;
 		}
 		
 		atomic_dec(&CPU->nrdy);
 		atomic_dec(&nrdy);
-		r->n--;
-
+		CPU->rq[i].n--;
+		
 		/*
 		 * Take the first thread from the queue.
 		 */
-		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
-		list_remove(&t->rq_link);
-
-		spinlock_unlock(&r->lock);
-
-		spinlock_lock(&t->lock);
-		t->cpu = CPU;
-
-		t->ticks = us2ticks((i + 1) * 10000);
-		t->priority = i;	/* correct rq index */
-
+		thread_t *thread =
+		    list_get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link);
+		list_remove(&thread->rq_link);
+		
+		irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);
+		
+		thread->cpu = CPU;
+		thread->ticks = us2ticks((i + 1) * 10000);
+		thread->priority = i;  /* Correct rq index */
+		
 		/*
 		 * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
 		 * when load balancing needs emerge.
 		 */
-		t->flags &= ~THREAD_FLAG_STOLEN;
-		spinlock_unlock(&t->lock);
-
-		return t;
-	}
+		thread->flags &= ~THREAD_FLAG_STOLEN;
+		irq_spinlock_unlock(&thread->lock, false);
+		
+		return thread;
+	}
+	
 	goto loop;
-
 }
…
 {
 	link_t head;
-	runq_t *r;
-	int i, n;
-
+	
 	list_initialize(&head);
-	spinlock_lock(&CPU->lock);
+	irq_spinlock_lock(&CPU->lock, false);
+	
 	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
+		int i;
 		for (i = start; i < RQ_COUNT - 1; i++) {
-			/* remember and empty rq[i + 1] */
-			r = &CPU->rq[i + 1];
-			spinlock_lock(&r->lock);
-			list_concat(&head, &r->rq_head);
-			n = r->n;
-			r->n = 0;
-			spinlock_unlock(&r->lock);
-
-			/* append rq[i + 1] to rq[i] */
-			r = &CPU->rq[i];
-			spinlock_lock(&r->lock);
-			list_concat(&r->rq_head, &head);
-			r->n += n;
-			spinlock_unlock(&r->lock);
-		}
+			/* Remember and empty rq[i + 1] */
+			
+			irq_spinlock_lock(&CPU->rq[i + 1].lock, false);
+			list_concat(&head, &CPU->rq[i + 1].rq_head);
+			size_t n = CPU->rq[i + 1].n;
+			CPU->rq[i + 1].n = 0;
+			irq_spinlock_unlock(&CPU->rq[i + 1].lock, false);
+			
+			/* Append rq[i + 1] to rq[i] */
+			
+			irq_spinlock_lock(&CPU->rq[i].lock, false);
+			list_concat(&CPU->rq[i].rq_head, &head);
+			CPU->rq[i].n += n;
+			irq_spinlock_unlock(&CPU->rq[i].lock, false);
+		}
+		
 		CPU->needs_relink = 0;
 	}
-	spinlock_unlock(&CPU->lock);
-
+	
+	irq_spinlock_unlock(&CPU->lock, false);
 }
…
 {
 	volatile ipl_t ipl;
 	
 	ASSERT(CPU != NULL);
 	
 	ipl = interrupts_disable();
 	
 	if (atomic_get(&haltstate))
 		halt();
 	
 	if (THREAD) {
-		spinlock_lock(&THREAD->lock);
-
-		/* Update thread accounting */
-		THREAD->cycles += get_cycle() - THREAD->last_cycle;
+		irq_spinlock_lock(&THREAD->lock, false);
+		
+		/* Update thread kernel accounting */
+		THREAD->kcycles += get_cycle() - THREAD->last_cycle;
 		
 #ifndef CONFIG_FPU_LAZY
…
 			THREAD->last_cycle = get_cycle();
 			
-			spinlock_unlock(&THREAD->lock);
+			irq_spinlock_unlock(&THREAD->lock, false);
 			interrupts_restore(THREAD->saved_context.ipl);
 			
 			return;
 		}
 		
 		/*
 		 * Interrupt priority level of preempted thread is recorded
 		 * here to facilitate scheduler() invocations from
-		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
+		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
+		 *
 		 */
 		THREAD->saved_context.ipl = ipl;
 	}
 	
 	/*
 	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
 	 * and preemption counter. At this point THE could be coming either
 	 * from THREAD's or CPU's stack.
+	 *
 	 */
 	the_copy(THE, (the_t *) CPU->stack);
 	
 	/*
 	 * We may not keep the old stack.
…
 	 * Therefore the scheduler() function continues in
 	 * scheduler_separated_stack().
+	 *
 	 */
 	context_save(&CPU->saved_context);
…
 	    (uintptr_t) CPU->stack, CPU_STACK_SIZE);
 	context_restore(&CPU->saved_context);
-	/* not reached */
+	
+	/* Not reached */
 }
…
  * switch to a new thread.
  *
- * Assume THREAD->lock is held.
  */
 void scheduler_separated_stack(void)
 {
-	int priority;
 	DEADLOCK_PROBE_INIT(p_joinwq);
 	task_t *old_task = TASK;
+	as_t *old_as = AS;
+	
+	ASSERT((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
 	ASSERT(CPU != NULL);
 	
+	/*
+	 * Hold the current task and the address space to prevent their
+	 * possible destruction should thread_destroy() be called on this or any
+	 * other processor while the scheduler is still using them.
+	 */
+	if (old_task)
+		task_hold(old_task);
+	
+	if (old_as)
+		as_hold(old_as);
+	
 	if (THREAD) {
-		/* must be run after the switch to scheduler stack */
+		/* Must be run after the switch to scheduler stack */
 		after_thread_ran();
 		
 		switch (THREAD->state) {
 		case Running:
-			spinlock_unlock(&THREAD->lock);
+			irq_spinlock_unlock(&THREAD->lock, false);
 			thread_ready(THREAD);
 			break;
 		
 		case Exiting:
 repeat:
 			if (THREAD->detached) {
-				thread_destroy(THREAD);
+				thread_destroy(THREAD, false);
 			} else {
 				/*
…
 				 * somebody calls thread_detach() on it.
 				 */
-				if (!spinlock_trylock(&THREAD->join_wq.lock)) {
+				if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) {
 					/*
 					 * Avoid deadlock.
 					 */
-					spinlock_unlock(&THREAD->lock);
+					irq_spinlock_unlock(&THREAD->lock, false);
 					delay(HZ);
-					spinlock_lock(&THREAD->lock);
+					irq_spinlock_lock(&THREAD->lock, false);
 					DEADLOCK_PROBE(p_joinwq,
 					    DEADLOCK_THRESHOLD);
…
 				_waitq_wakeup_unsafe(&THREAD->join_wq,
 				    WAKEUP_FIRST);
-				spinlock_unlock(&THREAD->join_wq.lock);
+				irq_spinlock_unlock(&THREAD->join_wq.lock, false);
 				
 				THREAD->state = Lingering;
-				spinlock_unlock(&THREAD->lock);
+				irq_spinlock_unlock(&THREAD->lock, false);
 			}
 			break;
…
 			 */
 			THREAD->priority = -1;
 			
 			/*
 			 * We need to release wq->lock which we locked in
…
 			 * THREAD->sleep_queue.
 			 */
-			spinlock_unlock(&THREAD->sleep_queue->lock);
-
-			/*
-			 * Check for possible requests for out-of-context
-			 * invocation.
-			 */
-			if (THREAD->call_me) {
-				THREAD->call_me(THREAD->call_me_with);
-				THREAD->call_me = NULL;
-				THREAD->call_me_with = NULL;
-			}
-
-			spinlock_unlock(&THREAD->lock);
-
+			irq_spinlock_unlock(&THREAD->sleep_queue->lock, false);
+			
+			irq_spinlock_unlock(&THREAD->lock, false);
 			break;
 		
 		default:
 			/*
…
 			break;
 		}
 		
 		THREAD = NULL;
 	}
 	
 	THREAD = find_best_thread();
 	
-	spinlock_lock(&THREAD->lock);
-	priority = THREAD->priority;
-	spinlock_unlock(&THREAD->lock);
-
-	relink_rq(priority);
-
-	/*
-	 * If both the old and the new task are the same, lots of work is
-	 * avoided.
+	irq_spinlock_lock(&THREAD->lock, false);
+	int priority = THREAD->priority;
+	irq_spinlock_unlock(&THREAD->lock, false);
+	
+	relink_rq(priority);
+	
+	/*
+	 * If both the old and the new task are the same,
+	 * lots of work is avoided.
 	 */
 	if (TASK != THREAD->task) {
-		as_t *as1 = NULL;
-		as_t *as2;
-
-		if (TASK) {
-			spinlock_lock(&TASK->lock);
-			as1 = TASK->as;
-			spinlock_unlock(&TASK->lock);
-		}
-
-		spinlock_lock(&THREAD->task->lock);
-		as2 = THREAD->task->as;
-		spinlock_unlock(&THREAD->task->lock);
+		as_t *new_as = THREAD->task->as;
 		
 		/*
-		 * Note that it is possible for two tasks to share one address
-		 * space.
+		 * Note that it is possible for two tasks
+		 * to share one address space.
 		 */
-		if (as1 != as2) {
+		if (old_as != new_as) {
 			/*
 			 * Both tasks and address spaces are different.
 			 * Replace the old one with the new one.
 			 */
-			as_switch(as1, as2);
-		}
+			as_switch(old_as, new_as);
+		}
+		
 		TASK = THREAD->task;
 		before_task_runs();
 	}
-
-	spinlock_lock(&THREAD->lock);
+	
+	if (old_task)
+		task_release(old_task);
+	
+	if (old_as)
+		as_release(old_as);
+	
+	irq_spinlock_lock(&THREAD->lock, false);
 	THREAD->state = Running;
 	
 #ifdef SCHEDULER_VERBOSE
 	printf("cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
 	    ", nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority,
 	    THREAD->ticks, atomic_get(&CPU->nrdy));
-#endif
-
+#endif
+	
 	/*
 	 * Some architectures provide late kernel PA2KA(identity)
…
 	 */
 	before_thread_runs();
 	
 	/*
 	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
…
 	
 	context_restore(&THREAD->saved_context);
-	/* not reached */
+	
+	/* Not reached */
 }
…
 void kcpulb(void *arg)
 {
-	thread_t *t;
-	int count, average, j, k = 0;
-	unsigned int i;
-	ipl_t ipl;
-
+	atomic_count_t average;
+	atomic_count_t rdy;
+	
 	/*
 	 * Detach kcpulb as nobody will call thread_join_timeout() on it.
…
 	 */
 	thread_sleep(1);
 	
 not_satisfied:
 	/*
…
 	 * other CPU's. Note that situation can have changed between two
 	 * passes. Each time get the most up to date counts.
+	 *
 	 */
 	average = atomic_get(&nrdy) / config.cpu_active + 1;
-	count = average - atomic_get(&CPU->nrdy);
-
-	if (count <= 0)
+	rdy = atomic_get(&CPU->nrdy);
+	
+	if (average <= rdy)
 		goto satisfied;
-
+	
+	atomic_count_t count = average - rdy;
+	
 	/*
 	 * Searching least priority queues on all CPU's first and most priority
 	 * queues on all CPU's last.
-	 */
-	for (j = RQ_COUNT - 1; j >= 0; j--) {
-		for (i = 0; i < config.cpu_active; i++) {
-			link_t *l;
-			runq_t *r;
-			cpu_t *cpu;
-
-			cpu = &cpus[(i + k) % config.cpu_active];
-
+	 *
+	 */
+	size_t acpu;
+	size_t acpu_bias = 0;
+	int rq;
+	
+	for (rq = RQ_COUNT - 1; rq >= 0; rq--) {
+		for (acpu = 0; acpu < config.cpu_active; acpu++) {
+			cpu_t *cpu = &cpus[(acpu + acpu_bias) % config.cpu_active];
+			
 			/*
 			 * Not interested in ourselves.
 			 * Doesn't require interrupt disabling for kcpulb has
 			 * THREAD_FLAG_WIRED.
+			 *
 			 */
 			if (CPU == cpu)
 				continue;
+			
 			if (atomic_get(&cpu->nrdy) <= average)
 				continue;
-
-			ipl = interrupts_disable();
-			r = &cpu->rq[j];
-			spinlock_lock(&r->lock);
-			if (r->n == 0) {
-				spinlock_unlock(&r->lock);
-				interrupts_restore(ipl);
+			
+			irq_spinlock_lock(&(cpu->rq[rq].lock), true);
+			if (cpu->rq[rq].n == 0) {
+				irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
 				continue;
 			}
-
-			t = NULL;
-			l = r->rq_head.prev;	/* search rq from the back */
-			while (l != &r->rq_head) {
-				t = list_get_instance(l, thread_t, rq_link);
+			
+			thread_t *thread = NULL;
+			
+			/* Search rq from the back */
+			link_t *link = cpu->rq[rq].rq_head.prev;
+			
+			while (link != &(cpu->rq[rq].rq_head)) {
+				thread = (thread_t *) list_get_instance(link, thread_t, rq_link);
+				
 				/*
 				 * We don't want to steal CPU-wired threads
…
 				 * steal threads whose FPU context is still in
 				 * CPU.
+				 *
 				 */
-				spinlock_lock(&t->lock);
-				if ((!(t->flags & (THREAD_FLAG_WIRED |
-				    THREAD_FLAG_STOLEN))) &&
-				    (!(t->fpu_context_engaged))) {
+				irq_spinlock_lock(&thread->lock, false);
+				
+				if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)))
+				    && (!(thread->fpu_context_engaged))) {
 					/*
-					 * Remove t from r.
+					 * Remove thread from ready queue.
 					 */
-					spinlock_unlock(&t->lock);
+					irq_spinlock_unlock(&thread->lock, false);
 					
 					atomic_dec(&cpu->nrdy);
 					atomic_dec(&nrdy);
-
-					r->n--;
-					list_remove(&t->rq_link);
-
+					
+					cpu->rq[rq].n--;
+					list_remove(&thread->rq_link);
+					
 					break;
 				}
-				spinlock_unlock(&t->lock);
-				l = l->prev;
-				t = NULL;
+				
+				irq_spinlock_unlock(&thread->lock, false);
+				
+				link = link->prev;
+				thread = NULL;
 			}
-			spinlock_unlock(&r->lock);
-
-			if (t) {
+			
+			if (thread) {
 				/*
-				 * Ready t on local CPU
+				 * Ready thread on local CPU
 				 */
-				spinlock_lock(&t->lock);
+				
+				irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock);
+				
 #ifdef KCPULB_VERBOSE
 				printf("kcpulb%u: TID %" PRIu64 " -> cpu%u, "
…
 				    atomic_get(&nrdy) / config.cpu_active);
 #endif
-				t->flags |= THREAD_FLAG_STOLEN;
-				t->state = Entering;
-				spinlock_unlock(&t->lock);
-
-				thread_ready(t);
-
-				interrupts_restore(ipl);
-
+				
+				thread->flags |= THREAD_FLAG_STOLEN;
+				thread->state = Entering;
+				
+				irq_spinlock_unlock(&thread->lock, true);
+				thread_ready(thread);
+				
 				if (--count == 0)
 					goto satisfied;
-
+				
 				/*
 				 * We are not satisfied yet, focus on another
 				 * CPU next time.
+				 *
 				 */
-				k++;
+				acpu_bias++;
 				
 				continue;
-			}
-			interrupts_restore(ipl);
-		}
-	}
-
+			} else
+				irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
+			
+		}
+	}
+	
 	if (atomic_get(&CPU->nrdy)) {
 		/*
 		 * Be a little bit light-weight and let migrated threads run.
+		 *
 		 */
 		scheduler();
…
 		 * We failed to migrate a single thread.
 		 * Give up this turn.
+		 *
 		 */
 		goto loop;
 	}
 	
 	goto not_satisfied;
 	
 satisfied:
 	goto loop;
 }
-
 #endif /* CONFIG_SMP */
 
-
-/** Print information about threads & scheduler queues */
+/** Print information about threads & scheduler queues
+ *
+ */
 void sched_print_list(void)
 {
-	ipl_t ipl;
-	unsigned int cpu, i;
-	runq_t *r;
-	thread_t *t;
-	link_t *cur;
-
-	/* We are going to mess with scheduler structures,
-	 * let's not be interrupted */
-	ipl = interrupts_disable();
+	size_t cpu;
 	for (cpu = 0; cpu < config.cpu_count; cpu++) {
-
 		if (!cpus[cpu].active)
 			continue;
-
-		spinlock_lock(&cpus[cpu].lock);
-		printf("cpu%u: address=%p, nrdy=%ld, needs_relink=%" PRIs "\n",
+		
+		irq_spinlock_lock(&cpus[cpu].lock, true);
+		
+		printf("cpu%u: address=%p, nrdy=%" PRIua ", needs_relink=%zu\n",
 		    cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
 		    cpus[cpu].needs_relink);
 		
+		unsigned int i;
 		for (i = 0; i < RQ_COUNT; i++) {
-			r = &cpus[cpu].rq[i];
-			spinlock_lock(&r->lock);
-			if (!r->n) {
-				spinlock_unlock(&r->lock);
+			irq_spinlock_lock(&(cpus[cpu].rq[i].lock), false);
+			if (cpus[cpu].rq[i].n == 0) {
+				irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
 				continue;
 			}
+			
 			printf("\trq[%u]: ", i);
-			for (cur = r->rq_head.next; cur != &r->rq_head;
-			    cur = cur->next) {
-				t = list_get_instance(cur, thread_t, rq_link);
-				printf("%" PRIu64 "(%s) ", t->tid,
-				    thread_states[t->state]);
+			link_t *cur;
+			for (cur = cpus[cpu].rq[i].rq_head.next;
+			    cur != &(cpus[cpu].rq[i].rq_head);
+			    cur = cur->next) {
+				thread_t *thread = list_get_instance(cur, thread_t, rq_link);
+				printf("%" PRIu64 "(%s) ", thread->tid,
+				    thread_states[thread->state]);
 			}
 			printf("\n");
-			spinlock_unlock(&r->lock);
-		}
-		spinlock_unlock(&cpus[cpu].lock);
-	}
-
-	interrupts_restore(ipl);
+			
+			irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
+		}
+		
+		irq_spinlock_unlock(&cpus[cpu].lock, true);
+	}
 }
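Most of this changeset replaces the scheduler's plain spinlock_t locking, where the caller had to bracket the critical section with interrupts_disable()/interrupts_restore(), with the irq_spinlock_t API: the second argument of irq_spinlock_lock()/irq_spinlock_unlock() states whether the lock operation should disable and later restore interrupts itself (true) or whether interrupts are already disabled (false). The following is a minimal sketch of that idiom, not code from the changeset; the guarded_t structure, the guarded_* functions, and the "guarded_lock" name are hypothetical, and only the irq_spinlock_*() calls mirror the API used in the diff above.

    #include <synch/spinlock.h>
    #include <typedefs.h>

    /* Hypothetical structure protected by an IRQ spinlock. */
    typedef struct {
    	irq_spinlock_t lock;
    	size_t counter;
    } guarded_t;

    static guarded_t guarded;

    static void guarded_init(void)
    {
    	irq_spinlock_initialize(&guarded.lock, "guarded_lock");
    	guarded.counter = 0;
    }

    /* Caller runs with interrupts enabled: passing true asks the lock
     * itself to disable interrupts and to restore them on unlock. */
    static void guarded_bump(void)
    {
    	irq_spinlock_lock(&guarded.lock, true);
    	guarded.counter++;
    	irq_spinlock_unlock(&guarded.lock, true);
    }

    /* Caller already runs with interrupts disabled (as in the scheduler
     * path): passing false takes only the spinlock. */
    static void guarded_bump_irq_disabled(void)
    {
    	irq_spinlock_lock(&guarded.lock, false);
    	guarded.counter++;
    	irq_spinlock_unlock(&guarded.lock, false);
    }

Where the old code dropped one lock and then took another, the new find_best_thread() and kcpulb() call irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock), which unlocks the run-queue lock and locks the thread lock in one step, carrying the recorded interrupt state over from the first lock to the second instead of restoring and re-saving it.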
