Changes in kernel/generic/src/proc/scheduler.c [98000fb:7e752b2] in mainline
- File: 1 edited
kernel/generic/src/proc/scheduler.c (modified) (25 diffs)
Legend:
- Unmodified (context) lines are shown with a leading space
- Added lines are shown with a leading "+"
- Removed lines are shown with a leading "-"
- "…" marks omitted unmodified context; a removed/added pair with identical text differs only in whitespace
kernel/generic/src/proc/scheduler.c
--- kernel/generic/src/proc/scheduler.c    (r98000fb)
+++ kernel/generic/src/proc/scheduler.c    (r7e752b2)

 /*
- * Copyright (c) 2001-2007 Jakub Jermar
+ * Copyright (c) 2010 Jakub Jermar
  * All rights reserved.
  *
…
 /**
  * @file
- * @brief Scheduler and load balancing.
+ * @brief Scheduler and load balancing.
  *
  * This file contains the scheduler and kcpulb kernel thread which
…
 static void scheduler_separated_stack(void);

-atomic_t nrdy;    /**< Number of ready threads in the system. */
+atomic_t nrdy;    /**< Number of ready threads in the system. */

 /** Carry out actions before new task runs. */
…
     before_thread_runs_arch();
 #ifdef CONFIG_FPU_LAZY
-    if(THREAD == CPU->fpu_owner)
+    if(THREAD == CPU->fpu_owner)
         fpu_enable();
     else
-        fpu_disable();
+        fpu_disable();
 #else
     fpu_enable();
…
 restart:
     fpu_enable();
-    spinlock_lock(&CPU->lock);
+    irq_spinlock_lock(&CPU->lock, false);

     /* Save old context */
-    if (CPU->fpu_owner != NULL) {
-        spinlock_lock(&CPU->fpu_owner->lock);
+    if (CPU->fpu_owner != NULL) {
+        irq_spinlock_lock(&CPU->fpu_owner->lock, false);
         fpu_context_save(CPU->fpu_owner->saved_fpu_context);
-        /* don't prevent migration */
+
+        /* Don't prevent migration */
         CPU->fpu_owner->fpu_context_engaged = 0;
-        spinlock_unlock(&CPU->fpu_owner->lock);
+        irq_spinlock_unlock(&CPU->fpu_owner->lock, false);
         CPU->fpu_owner = NULL;
     }

-    spinlock_lock(&THREAD->lock);
+    irq_spinlock_lock(&THREAD->lock, false);
     if (THREAD->fpu_context_exists) {
         fpu_context_restore(THREAD->saved_fpu_context);
…
         if (!THREAD->saved_fpu_context) {
             /* Might sleep */
-            spinlock_unlock(&THREAD->lock);
-            spinlock_unlock(&CPU->lock);
+            irq_spinlock_unlock(&THREAD->lock, false);
+            irq_spinlock_unlock(&CPU->lock, false);
             THREAD->saved_fpu_context =
                 (fpu_context_t *) slab_alloc(fpu_context_slab, 0);
+
             /* We may have switched CPUs during slab_alloc */
-            goto restart;
+            goto restart;
         }
         fpu_init();
         THREAD->fpu_context_exists = 1;
     }
+
     CPU->fpu_owner = THREAD;
     THREAD->fpu_context_engaged = 1;
-    spinlock_unlock(&THREAD->lock);
-
-    spinlock_unlock(&CPU->lock);
-}
-#endif
+    irq_spinlock_unlock(&THREAD->lock, false);
+
+    irq_spinlock_unlock(&CPU->lock, false);
+}
+#endif /* CONFIG_FPU_LAZY */

 /** Initialize scheduler
…
 static thread_t *find_best_thread(void)
 {
-    thread_t *t;
-    runq_t *r;
-    int i;
-
     ASSERT(CPU != NULL);

 loop:
-    interrupts_enable();
-
     if (atomic_get(&CPU->nrdy) == 0) {
…
          * This improves energy saving and hyperthreading.
          */
+        irq_spinlock_lock(&CPU->lock, false);
+        CPU->idle = true;
+        irq_spinlock_unlock(&CPU->lock, false);
+        interrupts_enable();
+
         /*
         * An interrupt might occur right now and wake up a thread.
…
          * even though there is a runnable thread.
          */
-        cpu_sleep();
-        goto loop;
-    }
-
-    interrupts_disable();
-
+        cpu_sleep();
+        interrupts_disable();
+        goto loop;
+    }
+
+    unsigned int i;
     for (i = 0; i < RQ_COUNT; i++) {
-        r = &CPU->rq[i];
-        spinlock_lock(&r->lock);
-        if (r->n == 0) {
+        irq_spinlock_lock(&(CPU->rq[i].lock), false);
+        if (CPU->rq[i].n == 0) {
             /*
              * If this queue is empty, try a lower-priority queue.
              */
-            spinlock_unlock(&r->lock);
+            irq_spinlock_unlock(&(CPU->rq[i].lock), false);
             continue;
         }

         atomic_dec(&CPU->nrdy);
         atomic_dec(&nrdy);
-        r->n--;
-
+        CPU->rq[i].n--;
+
         /*
          * Take the first thread from the queue.
          */
-        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
-        list_remove(&t->rq_link);
-
-        spinlock_unlock(&r->lock);
-
-        spinlock_lock(&t->lock);
-        t->cpu = CPU;
-
-        t->ticks = us2ticks((i + 1) * 10000);
-        t->priority = i;    /* correct rq index */
-
+        thread_t *thread =
+            list_get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link);
+        list_remove(&thread->rq_link);
+
+        irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);
+
+        thread->cpu = CPU;
+        thread->ticks = us2ticks((i + 1) * 10000);
+        thread->priority = i;    /* Correct rq index */
+
         /*
          * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
          * when load balancing needs emerge.
          */
-        t->flags &= ~THREAD_FLAG_STOLEN;
-        spinlock_unlock(&t->lock);
-
-        return t;
-    }
+        thread->flags &= ~THREAD_FLAG_STOLEN;
+        irq_spinlock_unlock(&thread->lock, false);
+
+        return thread;
+    }
+
     goto loop;
-
 }
…
 {
     link_t head;
-    runq_t *r;
-    int i, n;

     list_initialize(&head);
-    spinlock_lock(&CPU->lock);
+    irq_spinlock_lock(&CPU->lock, false);
+
     if (CPU->needs_relink > NEEDS_RELINK_MAX) {
+        int i;
         for (i = start; i < RQ_COUNT - 1; i++) {
-            /* remember and empty rq[i + 1] */
-            r = &CPU->rq[i + 1];
-            spinlock_lock(&r->lock);
-            list_concat(&head, &r->rq_head);
-            n = r->n;
-            r->n = 0;
-            spinlock_unlock(&r->lock);
-
-            /* append rq[i + 1] to rq[i] */
-            r = &CPU->rq[i];
-            spinlock_lock(&r->lock);
-            list_concat(&r->rq_head, &head);
-            r->n += n;
-            spinlock_unlock(&r->lock);
+            /* Remember and empty rq[i + 1] */
+
+            irq_spinlock_lock(&CPU->rq[i + 1].lock, false);
+            list_concat(&head, &CPU->rq[i + 1].rq_head);
+            size_t n = CPU->rq[i + 1].n;
+            CPU->rq[i + 1].n = 0;
+            irq_spinlock_unlock(&CPU->rq[i + 1].lock, false);
+
+            /* Append rq[i + 1] to rq[i] */
+
+            irq_spinlock_lock(&CPU->rq[i].lock, false);
+            list_concat(&CPU->rq[i].rq_head, &head);
+            CPU->rq[i].n += n;
+            irq_spinlock_unlock(&CPU->rq[i].lock, false);
         }
+
         CPU->needs_relink = 0;
     }
-    spinlock_unlock(&CPU->lock);
+
+    irq_spinlock_unlock(&CPU->lock, false);
 }
…
 {
     volatile ipl_t ipl;

     ASSERT(CPU != NULL);

     ipl = interrupts_disable();

     if (atomic_get(&haltstate))
         halt();

     if (THREAD) {
-        spinlock_lock(&THREAD->lock);
-
-        /* Update thread accounting */
-        THREAD->cycles += get_cycle() - THREAD->last_cycle;
+        irq_spinlock_lock(&THREAD->lock, false);
+
+        /* Update thread kernel accounting */
+        THREAD->kcycles += get_cycle() - THREAD->last_cycle;

 #ifndef CONFIG_FPU_LAZY
…
             THREAD->last_cycle = get_cycle();

-            spinlock_unlock(&THREAD->lock);
+            irq_spinlock_unlock(&THREAD->lock, false);
             interrupts_restore(THREAD->saved_context.ipl);

             return;
         }

         /*
          * Interrupt priority level of preempted thread is recorded
          * here to facilitate scheduler() invocations from
-         * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
+         * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
+         *
          */
         THREAD->saved_context.ipl = ipl;
     }

     /*
      * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
      * and preemption counter. At this point THE could be coming either
      * from THREAD's or CPU's stack.
+     *
      */
     the_copy(THE, (the_t *) CPU->stack);

     /*
      * We may not keep the old stack.
…
      * Therefore the scheduler() function continues in
      * scheduler_separated_stack().
+     *
      */
     context_save(&CPU->saved_context);
…
         (uintptr_t) CPU->stack, CPU_STACK_SIZE);
     context_restore(&CPU->saved_context);
-    /* not reached */
+
+    /* Not reached */
 }
…
  * switch to a new thread.
  *
- * Assume THREAD->lock is held.
  */
 void scheduler_separated_stack(void)
 {
-    int priority;
     DEADLOCK_PROBE_INIT(p_joinwq);
     task_t *old_task = TASK;
+    as_t *old_as = AS;
+
+    ASSERT((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
     ASSERT(CPU != NULL);

+    /*
+     * Hold the current task and the address space to prevent their
+     * possible destruction should thread_destroy() be called on this or any
+     * other processor while the scheduler is still using them.
+     *
+     */
+    if (old_task)
+        task_hold(old_task);
+
+    if (old_as)
+        as_hold(old_as);
+
     if (THREAD) {
-        /* must be run after the switch to scheduler stack */
+        /* Must be run after the switch to scheduler stack */
         after_thread_ran();

         switch (THREAD->state) {
         case Running:
-            spinlock_unlock(&THREAD->lock);
+            irq_spinlock_unlock(&THREAD->lock, false);
             thread_ready(THREAD);
             break;

         case Exiting:
 repeat:
             if (THREAD->detached) {
-                thread_destroy(THREAD);
+                thread_destroy(THREAD, false);
             } else {
                 /*
                  * The thread structure is kept allocated until
                  * somebody calls thread_detach() on it.
+                 *
                  */
-                if (!spinlock_trylock(&THREAD->join_wq.lock)) {
+                if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) {
                     /*
                      * Avoid deadlock.
+                     *
                      */
-                    spinlock_unlock(&THREAD->lock);
+                    irq_spinlock_unlock(&THREAD->lock, false);
                     delay(HZ);
-                    spinlock_lock(&THREAD->lock);
+                    irq_spinlock_lock(&THREAD->lock, false);
                     DEADLOCK_PROBE(p_joinwq,
                         DEADLOCK_THRESHOLD);
…
                 _waitq_wakeup_unsafe(&THREAD->join_wq,
                     WAKEUP_FIRST);
-                spinlock_unlock(&THREAD->join_wq.lock);
+                irq_spinlock_unlock(&THREAD->join_wq.lock, false);

                 THREAD->state = Lingering;
-                spinlock_unlock(&THREAD->lock);
+                irq_spinlock_unlock(&THREAD->lock, false);
             }
             break;
…
             /*
              * Prefer the thread after it's woken up.
+             *
              */
             THREAD->priority = -1;

             /*
              * We need to release wq->lock which we locked in
              * waitq_sleep(). Address of wq->lock is kept in
              * THREAD->sleep_queue.
+             *
              */
-            spinlock_unlock(&THREAD->sleep_queue->lock);
-
-            /*
-             * Check for possible requests for out-of-context
-             * invocation.
-             */
-            if (THREAD->call_me) {
-                THREAD->call_me(THREAD->call_me_with);
-                THREAD->call_me = NULL;
-                THREAD->call_me_with = NULL;
-            }
-
-            spinlock_unlock(&THREAD->lock);
-
+            irq_spinlock_unlock(&THREAD->sleep_queue->lock, false);
+
+            irq_spinlock_unlock(&THREAD->lock, false);
             break;

         default:
             /*
              * Entering state is unexpected.
+             *
              */
             panic("tid%" PRIu64 ": unexpected state %s.",
…
             break;
         }

         THREAD = NULL;
     }

     THREAD = find_best_thread();

-    spinlock_lock(&THREAD->lock);
-    priority = THREAD->priority;
-    spinlock_unlock(&THREAD->lock);
-
-    relink_rq(priority);
-
+    irq_spinlock_lock(&THREAD->lock, false);
+    int priority = THREAD->priority;
+    irq_spinlock_unlock(&THREAD->lock, false);
+
+    relink_rq(priority);
+
     /*
      * If both the old and the new task are the same, lots of work is
      * avoided.
+     *
      */
     if (TASK != THREAD->task) {
-        as_t *as1 = NULL;
-        as_t *as2;
-
-        if (TASK) {
-            spinlock_lock(&TASK->lock);
-            as1 = TASK->as;
-            spinlock_unlock(&TASK->lock);
-        }
-
-        spinlock_lock(&THREAD->task->lock);
-        as2 = THREAD->task->as;
-        spinlock_unlock(&THREAD->task->lock);
+        as_t *new_as = THREAD->task->as;

         /*
          * Note that it is possible for two tasks to share one address
          * space.
+         (
          */
-        if (as1 != as2) {
+        if (old_as != new_as) {
             /*
              * Both tasks and address spaces are different.
              * Replace the old one with the new one.
+             *
              */
-            as_switch(as1, as2);
+            as_switch(old_as, new_as);
         }
+
         TASK = THREAD->task;
         before_task_runs();
     }
-
-    spinlock_lock(&THREAD->lock);
+
+    if (old_task)
+        task_release(old_task);
+
+    if (old_as)
+        as_release(old_as);
+
+    irq_spinlock_lock(&THREAD->lock, false);
     THREAD->state = Running;

 #ifdef SCHEDULER_VERBOSE
     printf("cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
         ", nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority,
         THREAD->ticks, atomic_get(&CPU->nrdy));
-#endif
+#endif
+
     /*
      * Some architectures provide late kernel PA2KA(identity)
…
      * necessary, is to be mapped in before_thread_runs(). This
      * function must be executed before the switch to the new stack.
+     *
      */
     before_thread_runs();

     /*
      * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
      * thread's stack.
+     *
      */
     the_copy(THE, (the_t *) THREAD->kstack);

     context_restore(&THREAD->saved_context);
-    /* not reached */
+
+    /* Not reached */
 }
…
 void kcpulb(void *arg)
 {
-    thread_t *t;
-    int count, average, j, k = 0;
-    unsigned int i;
-    ipl_t ipl;
-
+    atomic_count_t average;
+    atomic_count_t rdy;
+
     /*
      * Detach kcpulb as nobody will call thread_join_timeout() on it.
…
      */
     thread_sleep(1);

 not_satisfied:
     /*
      * other CPU's. Note that situation can have changed between two
      * passes. Each time get the most up to date counts.
+     *
      */
     average = atomic_get(&nrdy) / config.cpu_active + 1;
-    count = average - atomic_get(&CPU->nrdy);
-
-    if (count <= 0)
+    rdy = atomic_get(&CPU->nrdy);
+
+    if (average <= rdy)
         goto satisfied;

+    atomic_count_t count = average - rdy;
+
     /*
      * Searching least priority queues on all CPU's first and most priority
      * queues on all CPU's last.
-     */
-    for (j = RQ_COUNT - 1; j >= 0; j--) {
-        for (i = 0; i < config.cpu_active; i++) {
-            link_t *l;
-            runq_t *r;
-            cpu_t *cpu;
-
-            cpu = &cpus[(i + k) % config.cpu_active];
-
+     *
+     */
+    size_t acpu;
+    size_t acpu_bias = 0;
+    int rq;
+
+    for (rq = RQ_COUNT - 1; rq >= 0; rq--) {
+        for (acpu = 0; acpu < config.cpu_active; acpu++) {
+            cpu_t *cpu = &cpus[(acpu + acpu_bias) % config.cpu_active];
+
             /*
              * Not interested in ourselves.
              * Doesn't require interrupt disabling for kcpulb has
              * THREAD_FLAG_WIRED.
+             *
              */
             if (CPU == cpu)
                 continue;
+
             if (atomic_get(&cpu->nrdy) <= average)
                 continue;
-
-            ipl = interrupts_disable();
-            r = &cpu->rq[j];
-            spinlock_lock(&r->lock);
-            if (r->n == 0) {
-                spinlock_unlock(&r->lock);
-                interrupts_restore(ipl);
+
+            irq_spinlock_lock(&(cpu->rq[rq].lock), true);
+            if (cpu->rq[rq].n == 0) {
+                irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
                 continue;
             }
-
-            t = NULL;
-            l = r->rq_head.prev;    /* search rq from the back */
-            while (l != &r->rq_head) {
-                t = list_get_instance(l, thread_t, rq_link);
+
+            thread_t *thread = NULL;
+
+            /* Search rq from the back */
+            link_t *link = cpu->rq[rq].rq_head.prev;
+
+            while (link != &(cpu->rq[rq].rq_head)) {
+                thread = (thread_t *) list_get_instance(link, thread_t, rq_link);
+
                 /*
                  * We don't want to steal CPU-wired threads
…
                  * steal threads whose FPU context is still in
                  * CPU.
+                 *
                  */
-                spinlock_lock(&t->lock);
-                if ((!(t->flags & (THREAD_FLAG_WIRED |
-                    THREAD_FLAG_STOLEN))) &&
-                    (!(t->fpu_context_engaged))) {
+                irq_spinlock_lock(&thread->lock, false);
+
+                if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)))
+                    && (!(thread->fpu_context_engaged))) {
                     /*
-                     * Remove t from r.
+                     * Remove thread from ready queue.
                      */
-                    spinlock_unlock(&t->lock);
+                    irq_spinlock_unlock(&thread->lock, false);

                     atomic_dec(&cpu->nrdy);
                     atomic_dec(&nrdy);
-
-                    r->n--;
-                    list_remove(&t->rq_link);
-
+
+                    cpu->rq[rq].n--;
+                    list_remove(&thread->rq_link);
+
                     break;
                 }
-                spinlock_unlock(&t->lock);
-                l = l->prev;
-                t = NULL;
+
+                irq_spinlock_unlock(&thread->lock, false);
+
+                link = link->prev;
+                thread = NULL;
             }
-            spinlock_unlock(&r->lock);
-
-            if (t) {
+
+            if (thread) {
                 /*
-                 * Ready t on local CPU
+                 * Ready thread on local CPU
+                 *
                  */
-                spinlock_lock(&t->lock);
+
+                irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock);
+
 #ifdef KCPULB_VERBOSE
                 printf("kcpulb%u: TID %" PRIu64 " -> cpu%u, "
…
                     atomic_get(&nrdy) / config.cpu_active);
 #endif

-                t->flags |= THREAD_FLAG_STOLEN;
-                t->state = Entering;
-                spinlock_unlock(&t->lock);
-
-                thread_ready(t);
-
+                thread->flags |= THREAD_FLAG_STOLEN;
+                thread->state = Entering;
+
+                irq_spinlock_unlock(&thread->lock, true);
+                thread_ready(thread);
+
                 if (--count == 0)
                     goto satisfied;

                 /*
                  * We are not satisfied yet, focus on another
                  * CPU next time.
+                 *
                  */
-                k++;
+                acpu_bias++;

                 continue;
-            }
-            interrupts_restore(ipl);
+            } else
+                irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
+
         }
     }

     if (atomic_get(&CPU->nrdy)) {
         /*
          * Be a little bit light-weight and let migrated threads run.
+         *
          */
         scheduler();
…
          * We failed to migrate a single thread.
          * Give up this turn.
+         *
          */
         goto loop;
     }

     goto not_satisfied;

 satisfied:
     goto loop;
 }
-
 #endif /* CONFIG_SMP */

-
-/** Print information about threads & scheduler queues */
+/** Print information about threads & scheduler queues
+ *
+ */
 void sched_print_list(void)
 {
-    ipl_t ipl;
-    unsigned int cpu, i;
-    runq_t *r;
-    thread_t *t;
-    link_t *cur;
-
-    /* We are going to mess with scheduler structures,
-     * let's not be interrupted */
-    ipl = interrupts_disable();
+    size_t cpu;
     for (cpu = 0; cpu < config.cpu_count; cpu++) {
-
         if (!cpus[cpu].active)
             continue;
-
-        spinlock_lock(&cpus[cpu].lock);
-        printf("cpu%u: address=%p, nrdy=%ld, needs_relink=%" PRIs "\n",
+
+        irq_spinlock_lock(&cpus[cpu].lock, true);
+
+        printf("cpu%u: address=%p, nrdy=%" PRIua ", needs_relink=%zu\n",
             cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
             cpus[cpu].needs_relink);

+        unsigned int i;
         for (i = 0; i < RQ_COUNT; i++) {
-            r = &cpus[cpu].rq[i];
-            spinlock_lock(&r->lock);
-            if (!r->n) {
-                spinlock_unlock(&r->lock);
+            irq_spinlock_lock(&(cpus[cpu].rq[i].lock), false);
+            if (cpus[cpu].rq[i].n == 0) {
+                irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
                 continue;
             }
+
             printf("\trq[%u]: ", i);
-            for (cur = r->rq_head.next; cur != &r->rq_head;
-                cur = cur->next) {
-                t = list_get_instance(cur, thread_t, rq_link);
-                printf("%" PRIu64 "(%s) ", t->tid,
-                    thread_states[t->state]);
+            link_t *cur;
+            for (cur = cpus[cpu].rq[i].rq_head.next;
+                cur != &(cpus[cpu].rq[i].rq_head);
+                cur = cur->next) {
+                thread_t *thread = list_get_instance(cur, thread_t,
+                    rq_link);
+                printf("%" PRIu64 "(%s) ", thread->tid,
+                    thread_states[thread->state]);
             }
             printf("\n");
-            spinlock_unlock(&r->lock);
+
+            irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
         }
-        spinlock_unlock(&cpus[cpu].lock);
-    }
-
-    interrupts_restore(ipl);
+
+        irq_spinlock_unlock(&cpus[cpu].lock, true);
+    }
 }