Changes in kernel/generic/src/proc/scheduler.c [7e752b2:98000fb] in mainline
kernel/generic/src/proc/scheduler.c (modified) (25 diffs)
Legend:
  Lines prefixed with '-' come from r7e752b2 (removed), lines prefixed with '+'
  come from r98000fb (added), and unprefixed lines are unmodified context.
  Each @@ header gives the first line number of the hunk in r7e752b2 and in
  r98000fb; an ellipsis (…) marks context omitted in the changeset view, and
  hunks labelled "whitespace-only" differ only in indentation or trailing
  whitespace.
--- kernel/generic/src/proc/scheduler.c (r7e752b2)
+++ kernel/generic/src/proc/scheduler.c (r98000fb)
@@ -1 +1 @@
 /*
- * Copyright (c) 2010 Jakub Jermar
+ * Copyright (c) 2001-2007 Jakub Jermar
  * All rights reserved.
  *
@@ -33 +33 @@ whitespace-only change
 /**
  * @file
  * @brief Scheduler and load balancing.
  *
  * This file contains the scheduler and kcpulb kernel thread which
@@ -68 +68 @@ whitespace-only change
 static void scheduler_separated_stack(void);
 
 atomic_t nrdy;	/**< Number of ready threads in the system. */
 
 /** Carry out actions before new task runs. */
@@ -89 +89 @@ whitespace-only changes
 	before_thread_runs_arch();
 #ifdef CONFIG_FPU_LAZY
 	if(THREAD == CPU->fpu_owner)
 		fpu_enable();
 	else
 		fpu_disable();
 #else
 	fpu_enable();
@@ -123 +123 @@
 restart:
 	fpu_enable();
-	irq_spinlock_lock(&CPU->lock, false);
+	spinlock_lock(&CPU->lock);
 
 	/* Save old context */
 	if (CPU->fpu_owner != NULL) {
-		irq_spinlock_lock(&CPU->fpu_owner->lock, false);
+		spinlock_lock(&CPU->fpu_owner->lock);
 		fpu_context_save(CPU->fpu_owner->saved_fpu_context);
-
-		/* Don't prevent migration */
+		/* don't prevent migration */
 		CPU->fpu_owner->fpu_context_engaged = 0;
-		irq_spinlock_unlock(&CPU->fpu_owner->lock, false);
+		spinlock_unlock(&CPU->fpu_owner->lock);
 		CPU->fpu_owner = NULL;
 	}
 
-	irq_spinlock_lock(&THREAD->lock, false);
+	spinlock_lock(&THREAD->lock);
 	if (THREAD->fpu_context_exists) {
 		fpu_context_restore(THREAD->saved_fpu_context);
@@ -143 +142 @@
 		if (!THREAD->saved_fpu_context) {
 			/* Might sleep */
-			irq_spinlock_unlock(&THREAD->lock, false);
-			irq_spinlock_unlock(&CPU->lock, false);
+			spinlock_unlock(&THREAD->lock);
+			spinlock_unlock(&CPU->lock);
 			THREAD->saved_fpu_context =
 			    (fpu_context_t *) slab_alloc(fpu_context_slab, 0);
-
 			/* We may have switched CPUs during slab_alloc */
 			goto restart;
 		}
 		fpu_init();
 		THREAD->fpu_context_exists = 1;
 	}
-
 	CPU->fpu_owner = THREAD;
 	THREAD->fpu_context_engaged = 1;
-	irq_spinlock_unlock(&THREAD->lock, false);
-
-	irq_spinlock_unlock(&CPU->lock, false);
-}
-#endif /* CONFIG_FPU_LAZY */
+	spinlock_unlock(&THREAD->lock);
+
+	spinlock_unlock(&CPU->lock);
+}
+#endif
 
@@ -183 +180 @@
 static thread_t *find_best_thread(void)
 {
+	thread_t *t;
+	runq_t *r;
+	int i;
+
 	ASSERT(CPU != NULL);
 
 loop:
+	interrupts_enable();
 
 	if (atomic_get(&CPU->nrdy) == 0) {
@@ -193 +195 @@
 		 * This improves energy saving and hyperthreading.
 		 */
-		irq_spinlock_lock(&CPU->lock, false);
-		CPU->idle = true;
-		irq_spinlock_unlock(&CPU->lock, false);
-		interrupts_enable();
 
 		/*
 		 * An interrupt might occur right now and wake up a thread.
@@ -203 +201 @@
 		 * even though there is a runnable thread.
 		 */
-		cpu_sleep();
-		interrupts_disable();
-		goto loop;
-	}
-
-	unsigned int i;
+
+		cpu_sleep();
+		goto loop;
+	}
+
+	interrupts_disable();
+
 	for (i = 0; i < RQ_COUNT; i++) {
-		irq_spinlock_lock(&(CPU->rq[i].lock), false);
-		if (CPU->rq[i].n == 0) {
+		r = &CPU->rq[i];
+		spinlock_lock(&r->lock);
+		if (r->n == 0) {
 			/*
 			 * If this queue is empty, try a lower-priority queue.
 			 */
-			irq_spinlock_unlock(&(CPU->rq[i].lock), false);
+			spinlock_unlock(&r->lock);
 			continue;
 		}
 
 		atomic_dec(&CPU->nrdy);
 		atomic_dec(&nrdy);
-		CPU->rq[i].n--;
+		r->n--;
 
 		/*
 		 * Take the first thread from the queue.
 		 */
-		thread_t *thread =
-		    list_get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link);
-		list_remove(&thread->rq_link);
-
-		irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);
-
-		thread->cpu = CPU;
-		thread->ticks = us2ticks((i + 1) * 10000);
-		thread->priority = i;	/* Correct rq index */
+		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
+		list_remove(&t->rq_link);
+
+		spinlock_unlock(&r->lock);
+
+		spinlock_lock(&t->lock);
+		t->cpu = CPU;
+
+		t->ticks = us2ticks((i + 1) * 10000);
+		t->priority = i;	/* correct rq index */
 
 		/*
 		 * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
 		 * when load balancing needs emerge.
 		 */
-		thread->flags &= ~THREAD_FLAG_STOLEN;
-		irq_spinlock_unlock(&thread->lock, false);
-
-		return thread;
+		t->flags &= ~THREAD_FLAG_STOLEN;
+		spinlock_unlock(&t->lock);
+
+		return t;
 	}
-
 	goto loop;
+
 }
@@ -263 +264 @@
 {
 	link_t head;
-
+	runq_t *r;
+	int i, n;
+
 	list_initialize(&head);
-	irq_spinlock_lock(&CPU->lock, false);
-
+	spinlock_lock(&CPU->lock);
 	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
-		int i;
 		for (i = start; i < RQ_COUNT - 1; i++) {
-			/* Remember and empty rq[i + 1] */
-
-			irq_spinlock_lock(&CPU->rq[i + 1].lock, false);
-			list_concat(&head, &CPU->rq[i + 1].rq_head);
-			size_t n = CPU->rq[i + 1].n;
-			CPU->rq[i + 1].n = 0;
-			irq_spinlock_unlock(&CPU->rq[i + 1].lock, false);
-
-			/* Append rq[i + 1] to rq[i] */
-
-			irq_spinlock_lock(&CPU->rq[i].lock, false);
-			list_concat(&CPU->rq[i].rq_head, &head);
-			CPU->rq[i].n += n;
-			irq_spinlock_unlock(&CPU->rq[i].lock, false);
-		}
-
+			/* remember and empty rq[i + 1] */
+			r = &CPU->rq[i + 1];
+			spinlock_lock(&r->lock);
+			list_concat(&head, &r->rq_head);
+			n = r->n;
+			r->n = 0;
+			spinlock_unlock(&r->lock);
+
+			/* append rq[i + 1] to rq[i] */
+			r = &CPU->rq[i];
+			spinlock_lock(&r->lock);
+			list_concat(&r->rq_head, &head);
+			r->n += n;
+			spinlock_unlock(&r->lock);
+		}
 		CPU->needs_relink = 0;
 	}
-
-	irq_spinlock_unlock(&CPU->lock, false);
+	spinlock_unlock(&CPU->lock);
+
 }
@@ -302 +302 @@
 {
 	volatile ipl_t ipl;
 
 	ASSERT(CPU != NULL);
 
 	ipl = interrupts_disable();
 
 	if (atomic_get(&haltstate))
 		halt();
 
 	if (THREAD) {
-		irq_spinlock_lock(&THREAD->lock, false);
+		spinlock_lock(&THREAD->lock);
 
-		/* Update thread kernel accounting */
-		THREAD->kcycles += get_cycle() - THREAD->last_cycle;
+		/* Update thread accounting */
+		THREAD->cycles += get_cycle() - THREAD->last_cycle;
 
 #ifndef CONFIG_FPU_LAZY
@@ -327 +327 @@
 			THREAD->last_cycle = get_cycle();
 
-			irq_spinlock_unlock(&THREAD->lock, false);
+			spinlock_unlock(&THREAD->lock);
 			interrupts_restore(THREAD->saved_context.ipl);
 
 			return;
 		}
 
 		/*
 		 * Interrupt priority level of preempted thread is recorded
 		 * here to facilitate scheduler() invocations from
 		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
-		 *
 		 */
 		THREAD->saved_context.ipl = ipl;
 	}
 
 	/*
 	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
 	 * and preemption counter. At this point THE could be coming either
 	 * from THREAD's or CPU's stack.
-	 *
 	 */
 	the_copy(THE, (the_t *) CPU->stack);
 
 	/*
 	 * We may not keep the old stack.
@@ -361 +359 @@
 	 * Therefore the scheduler() function continues in
 	 * scheduler_separated_stack().
-	 *
 	 */
 	context_save(&CPU->saved_context);
@@ -367 +364 @@
 	    (uintptr_t) CPU->stack, CPU_STACK_SIZE);
 	context_restore(&CPU->saved_context);
-
-	/* Not reached */
+	/* not reached */
 }
@@ -377 +373 @@
  * switch to a new thread.
  *
+ * Assume THREAD->lock is held.
  */
 void scheduler_separated_stack(void)
 {
+	int priority;
 	DEADLOCK_PROBE_INIT(p_joinwq);
-	task_t *old_task = TASK;
-	as_t *old_as = AS;
-
-	ASSERT((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
+
 	ASSERT(CPU != NULL);
 
-	/*
-	 * Hold the current task and the address space to prevent their
-	 * possible destruction should thread_destroy() be called on this or any
-	 * other processor while the scheduler is still using them.
-	 *
-	 */
-	if (old_task)
-		task_hold(old_task);
-
-	if (old_as)
-		as_hold(old_as);
-
 	if (THREAD) {
-		/* Must be run after the switch to scheduler stack */
+		/* must be run after the switch to scheduler stack */
 		after_thread_ran();
 
 		switch (THREAD->state) {
 		case Running:
-			irq_spinlock_unlock(&THREAD->lock, false);
+			spinlock_unlock(&THREAD->lock);
 			thread_ready(THREAD);
 			break;
 
 		case Exiting:
 repeat:
 			if (THREAD->detached) {
-				thread_destroy(THREAD, false);
+				thread_destroy(THREAD);
 			} else {
 				/*
 				 * The thread structure is kept allocated until
 				 * somebody calls thread_detach() on it.
-				 *
 				 */
-				if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) {
+				if (!spinlock_trylock(&THREAD->join_wq.lock)) {
 					/*
 					 * Avoid deadlock.
-					 *
 					 */
-					irq_spinlock_unlock(&THREAD->lock, false);
+					spinlock_unlock(&THREAD->lock);
 					delay(HZ);
-					irq_spinlock_lock(&THREAD->lock, false);
+					spinlock_lock(&THREAD->lock);
 					DEADLOCK_PROBE(p_joinwq,
 					    DEADLOCK_THRESHOLD);
@@ -433 +414 @@
 				_waitq_wakeup_unsafe(&THREAD->join_wq,
 				    WAKEUP_FIRST);
-				irq_spinlock_unlock(&THREAD->join_wq.lock, false);
+				spinlock_unlock(&THREAD->join_wq.lock);
 
 				THREAD->state = Lingering;
-				irq_spinlock_unlock(&THREAD->lock, false);
+				spinlock_unlock(&THREAD->lock);
 			}
 			break;
@@ -443 +424 @@
 			/*
 			 * Prefer the thread after it's woken up.
-			 *
 			 */
 			THREAD->priority = -1;
 
 			/*
 			 * We need to release wq->lock which we locked in
 			 * waitq_sleep(). Address of wq->lock is kept in
 			 * THREAD->sleep_queue.
-			 *
 			 */
-			irq_spinlock_unlock(&THREAD->sleep_queue->lock, false);
-
-			irq_spinlock_unlock(&THREAD->lock, false);
+			spinlock_unlock(&THREAD->sleep_queue->lock);
+
+			/*
+			 * Check for possible requests for out-of-context
+			 * invocation.
+			 */
+			if (THREAD->call_me) {
+				THREAD->call_me(THREAD->call_me_with);
+				THREAD->call_me = NULL;
+				THREAD->call_me_with = NULL;
+			}
+
+			spinlock_unlock(&THREAD->lock);
+
 			break;
 
 		default:
 			/*
 			 * Entering state is unexpected.
-			 *
 			 */
 			panic("tid%" PRIu64 ": unexpected state %s.",
@@ -467 +456 @@
 			break;
 		}
 
 		THREAD = NULL;
 	}
 
 	THREAD = find_best_thread();
 
-	irq_spinlock_lock(&THREAD->lock, false);
-	int priority = THREAD->priority;
-	irq_spinlock_unlock(&THREAD->lock, false);
+	spinlock_lock(&THREAD->lock);
+	priority = THREAD->priority;
+	spinlock_unlock(&THREAD->lock);
 
 	relink_rq(priority);
 
 	/*
 	 * If both the old and the new task are the same, lots of work is
 	 * avoided.
-	 *
 	 */
 	if (TASK != THREAD->task) {
-		as_t *new_as = THREAD->task->as;
+		as_t *as1 = NULL;
+		as_t *as2;
+
+		if (TASK) {
+			spinlock_lock(&TASK->lock);
+			as1 = TASK->as;
+			spinlock_unlock(&TASK->lock);
+		}
+
+		spinlock_lock(&THREAD->task->lock);
+		as2 = THREAD->task->as;
+		spinlock_unlock(&THREAD->task->lock);
 
 		/*
 		 * Note that it is possible for two tasks to share one address
 		 * space.
-		 *
 		 */
-		if (old_as != new_as) {
+		if (as1 != as2) {
 			/*
 			 * Both tasks and address spaces are different.
 			 * Replace the old one with the new one.
-			 *
 			 */
-			as_switch(old_as, new_as);
+			as_switch(as1, as2);
 		}
-
 		TASK = THREAD->task;
 		before_task_runs();
 	}
 
-	if (old_task)
-		task_release(old_task);
-
-	if (old_as)
-		as_release(old_as);
-
-	irq_spinlock_lock(&THREAD->lock, false);
+	spinlock_lock(&THREAD->lock);
 	THREAD->state = Running;
 
 #ifdef SCHEDULER_VERBOSE
 	printf("cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
 	    ", nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority,
 	    THREAD->ticks, atomic_get(&CPU->nrdy));
 #endif
 
 	/*
 	 * Some architectures provide late kernel PA2KA(identity)
@@ -527 +517 @@
 	 * necessary, is to be mapped in before_thread_runs(). This
 	 * function must be executed before the switch to the new stack.
-	 *
 	 */
 	before_thread_runs();
 
 	/*
 	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
 	 * thread's stack.
-	 *
 	 */
 	the_copy(THE, (the_t *) THREAD->kstack);
 
 	context_restore(&THREAD->saved_context);
-
-	/* Not reached */
+	/* not reached */
 }
@@ -554 +541 @@
 void kcpulb(void *arg)
 {
-	atomic_count_t average;
-	atomic_count_t rdy;
-
+	thread_t *t;
+	int count, average, j, k = 0;
+	unsigned int i;
+	ipl_t ipl;
+
 	/*
 	 * Detach kcpulb as nobody will call thread_join_timeout() on it.
@@ -567 +556 @@
 	 */
 	thread_sleep(1);
 
 not_satisfied:
 	/*
…
 	 * other CPU's. Note that situation can have changed between two
 	 * passes. Each time get the most up to date counts.
-	 *
 	 */
 	average = atomic_get(&nrdy) / config.cpu_active + 1;
-	rdy = atomic_get(&CPU->nrdy);
-
-	if (average <= rdy)
+	count = average - atomic_get(&CPU->nrdy);
+
+	if (count <= 0)
 		goto satisfied;
-
-	atomic_count_t count = average - rdy;
-
+
 	/*
 	 * Searching least priority queues on all CPU's first and most priority
 	 * queues on all CPU's last.
-	 *
 	 */
-	size_t acpu;
-	size_t acpu_bias = 0;
-	int rq;
-
-	for (rq = RQ_COUNT - 1; rq >= 0; rq--) {
-		for (acpu = 0; acpu < config.cpu_active; acpu++) {
-			cpu_t *cpu = &cpus[(acpu + acpu_bias) % config.cpu_active];
-
+	for (j = RQ_COUNT - 1; j >= 0; j--) {
+		for (i = 0; i < config.cpu_active; i++) {
+			link_t *l;
+			runq_t *r;
+			cpu_t *cpu;
+
+			cpu = &cpus[(i + k) % config.cpu_active];
+
 			/*
 			 * Not interested in ourselves.
 			 * Doesn't require interrupt disabling for kcpulb has
 			 * THREAD_FLAG_WIRED.
-			 *
 			 */
 			if (CPU == cpu)
 				continue;
-
 			if (atomic_get(&cpu->nrdy) <= average)
 				continue;
-
-			irq_spinlock_lock(&(cpu->rq[rq].lock), true);
-			if (cpu->rq[rq].n == 0) {
-				irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
+
+			ipl = interrupts_disable();
+			r = &cpu->rq[j];
+			spinlock_lock(&r->lock);
+			if (r->n == 0) {
+				spinlock_unlock(&r->lock);
+				interrupts_restore(ipl);
 				continue;
 			}
 
-			thread_t *thread = NULL;
-
-			/* Search rq from the back */
-			link_t *link = cpu->rq[rq].rq_head.prev;
-
-			while (link != &(cpu->rq[rq].rq_head)) {
-				thread = (thread_t *) list_get_instance(link, thread_t, rq_link);
-
+			t = NULL;
+			l = r->rq_head.prev;	/* search rq from the back */
+			while (l != &r->rq_head) {
+				t = list_get_instance(l, thread_t, rq_link);
 				/*
 				 * We don't want to steal CPU-wired threads
@@ -629 +611 @@
 				 * steal threads whose FPU context is still in
 				 * CPU.
-				 *
 				 */
-				irq_spinlock_lock(&thread->lock, false);
-
-				if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)))
-				    && (!(thread->fpu_context_engaged))) {
+				spinlock_lock(&t->lock);
+				if ((!(t->flags & (THREAD_FLAG_WIRED |
+				    THREAD_FLAG_STOLEN))) &&
+				    (!(t->fpu_context_engaged))) {
 					/*
-					 * Remove thread from ready queue.
+					 * Remove t from r.
 					 */
-					irq_spinlock_unlock(&thread->lock, false);
+					spinlock_unlock(&t->lock);
 
 					atomic_dec(&cpu->nrdy);
 					atomic_dec(&nrdy);
 
-					cpu->rq[rq].n--;
-					list_remove(&thread->rq_link);
+					r->n--;
+					list_remove(&t->rq_link);
 
 					break;
 				}
-
-				irq_spinlock_unlock(&thread->lock, false);
-
-				link = link->prev;
-				thread = NULL;
+				spinlock_unlock(&t->lock);
+				l = l->prev;
+				t = NULL;
 			}
-
-			if (thread) {
+			spinlock_unlock(&r->lock);
+
+			if (t) {
 				/*
-				 * Ready thread on local CPU
-				 *
+				 * Ready t on local CPU
 				 */
-
-				irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock);
-
+				spinlock_lock(&t->lock);
 #ifdef KCPULB_VERBOSE
 				printf("kcpulb%u: TID %" PRIu64 " -> cpu%u, "
@@ -669 +646 @@
 				    atomic_get(&nrdy) / config.cpu_active);
 #endif
-
-				thread->flags |= THREAD_FLAG_STOLEN;
-				thread->state = Entering;
-
-				irq_spinlock_unlock(&thread->lock, true);
-				thread_ready(thread);
-
+				t->flags |= THREAD_FLAG_STOLEN;
+				t->state = Entering;
+				spinlock_unlock(&t->lock);
+
+				thread_ready(t);
+
+				interrupts_restore(ipl);
+
 				if (--count == 0)
 					goto satisfied;
 
 				/*
 				 * We are not satisfied yet, focus on another
 				 * CPU next time.
-				 *
 				 */
-				acpu_bias++;
+				k++;
 
 				continue;
-			} else
-				irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
-
-		}
-	}
-
+			}
+			interrupts_restore(ipl);
+		}
+	}
+
 	if (atomic_get(&CPU->nrdy)) {
 		/*
 		 * Be a little bit light-weight and let migrated threads run.
-		 *
 		 */
 		scheduler();
@@ -703 +678 @@
 		 * We failed to migrate a single thread.
 		 * Give up this turn.
-		 *
 		 */
 		goto loop;
 	}
 
 	goto not_satisfied;
 
 satisfied:
 	goto loop;
 }
+
 #endif /* CONFIG_SMP */
 
-/** Print information about threads & scheduler queues
- *
- */
+
+/** Print information about threads & scheduler queues */
 void sched_print_list(void)
@@ -721 +695 @@
 {
-	size_t cpu;
+	ipl_t ipl;
+	unsigned int cpu, i;
+	runq_t *r;
+	thread_t *t;
+	link_t *cur;
+
+	/* We are going to mess with scheduler structures,
+	 * let's not be interrupted */
+	ipl = interrupts_disable();
 	for (cpu = 0; cpu < config.cpu_count; cpu++) {
+
 		if (!cpus[cpu].active)
 			continue;
-
-		irq_spinlock_lock(&cpus[cpu].lock, true);
-
-		printf("cpu%u: address=%p, nrdy=%" PRIua ", needs_relink=%zu\n",
+
+		spinlock_lock(&cpus[cpu].lock);
+		printf("cpu%u: address=%p, nrdy=%ld, needs_relink=%" PRIs "\n",
 		    cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
 		    cpus[cpu].needs_relink);
 
-		unsigned int i;
 		for (i = 0; i < RQ_COUNT; i++) {
-			irq_spinlock_lock(&(cpus[cpu].rq[i].lock), false);
-			if (cpus[cpu].rq[i].n == 0) {
-				irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
+			r = &cpus[cpu].rq[i];
+			spinlock_lock(&r->lock);
+			if (!r->n) {
+				spinlock_unlock(&r->lock);
 				continue;
 			}
-
 			printf("\trq[%u]: ", i);
-			link_t *cur;
-			for (cur = cpus[cpu].rq[i].rq_head.next;
-			    cur != &(cpus[cpu].rq[i].rq_head);
-			    cur = cur->next) {
-				thread_t *thread = list_get_instance(cur, thread_t, rq_link);
-				printf("%" PRIu64 "(%s) ", thread->tid,
-				    thread_states[thread->state]);
+			for (cur = r->rq_head.next; cur != &r->rq_head;
+			    cur = cur->next) {
+				t = list_get_instance(cur, thread_t, rq_link);
+				printf("%" PRIu64 "(%s) ", t->tid,
+				    thread_states[t->state]);
 			}
 			printf("\n");
-
-			irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
-		}
-
-		irq_spinlock_unlock(&cpus[cpu].lock, true);
-	}
+			spinlock_unlock(&r->lock);
+		}
+		spinlock_unlock(&cpus[cpu].lock);
+	}
+
+	interrupts_restore(ipl);
 }
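Note: most of the hunks above are a mechanical conversion between two locking idioms. On the r7e752b2 side the scheduler structures are guarded by irq_spinlock_lock()/irq_spinlock_unlock(), whose boolean argument, judging by the call sites in this diff, controls whether the lock code itself handles interrupt state (scheduler() passes false after having already called interrupts_disable(); kcpulb() passes true). On the r98000fb side the same structures are guarded by plain spinlock_lock()/spinlock_unlock(), and interrupt state is saved and restored by hand, as in kcpulb() and sched_print_list(). The fragment below is an editorial sketch of the two styles, not code from the changeset: it uses only calls that appear in the diff, the demo_* identifiers are hypothetical, lock initialization is omitted, and it is only meaningful inside the HelenOS kernel tree with the same headers as scheduler.c.

/* Editorial sketch: the same critical section written in the two styles
 * this changeset converts between. demo_* names are hypothetical and the
 * locks are assumed to be initialized elsewhere. */

static spinlock_t demo_lock;          /* r98000fb style */
static irq_spinlock_t demo_irq_lock;  /* r7e752b2 style */

static void demo_plain_spinlock_style(void)
{
	/* Interrupt state is saved and restored explicitly around the lock,
	 * as kcpulb() and sched_print_list() do on the r98000fb side. */
	ipl_t ipl = interrupts_disable();
	spinlock_lock(&demo_lock);
	/* ... touch per-CPU scheduler structures here ... */
	spinlock_unlock(&demo_lock);
	interrupts_restore(ipl);
}

static void demo_irq_spinlock_style(void)
{
	/* The boolean argument tells the lock whether to manage interrupts
	 * itself; false mirrors the scheduler() call sites, where interrupts
	 * are already disabled by the caller. */
	irq_spinlock_lock(&demo_irq_lock, false);
	/* ... touch per-CPU scheduler structures here ... */
	irq_spinlock_unlock(&demo_irq_lock, false);
}

The r7e752b2 side additionally uses irq_spinlock_pass() in find_best_thread() and kcpulb() when moving from a run-queue lock to a thread lock, where the r98000fb side unlocks the run-queue spinlock and locks the thread spinlock as two separate steps.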