Changes in kernel/generic/src/proc/scheduler.c [481d4751:ee42e43] in mainline
kernel/generic/src/proc/scheduler.c
--- kernel/generic/src/proc/scheduler.c (r481d4751)
+++ kernel/generic/src/proc/scheduler.c (ree42e43)

@@ -33 +33 @@
 /**
  * @file
- * @brief
+ * @brief Scheduler and load balancing.
  *
  * This file contains the scheduler and kcpulb kernel thread which
@@ -68 +68 @@
 static void scheduler_separated_stack(void);
 
-atomic_t nrdy;
+atomic_t nrdy;  /**< Number of ready threads in the system. */
 
 /** Carry out actions before new task runs. */
@@ -89 +89 @@ (whitespace-only changes)
     before_thread_runs_arch();
 #ifdef CONFIG_FPU_LAZY
-    if(THREAD == CPU->fpu_owner)
+    if(THREAD == CPU->fpu_owner)
         fpu_enable();
     else
-        fpu_disable();
+        fpu_disable();
 #else
     fpu_enable();
@@ -123 +123 @@
 restart:
     fpu_enable();
-    spinlock_lock(&CPU->lock);
+    irq_spinlock_lock(&CPU->lock, false);
 
     /* Save old context */
     if (CPU->fpu_owner != NULL) {
-        spinlock_lock(&CPU->fpu_owner->lock);
+        irq_spinlock_lock(&CPU->fpu_owner->lock, false);
         fpu_context_save(CPU->fpu_owner->saved_fpu_context);
-        /* don't prevent migration */
+
+        /* Don't prevent migration */
         CPU->fpu_owner->fpu_context_engaged = 0;
-        spinlock_unlock(&CPU->fpu_owner->lock);
+        irq_spinlock_unlock(&CPU->fpu_owner->lock, false);
         CPU->fpu_owner = NULL;
     }
 
-    spinlock_lock(&THREAD->lock);
+    irq_spinlock_lock(&THREAD->lock, false);
     if (THREAD->fpu_context_exists) {
         fpu_context_restore(THREAD->saved_fpu_context);
 …
         if (!THREAD->saved_fpu_context) {
             /* Might sleep */
-            spinlock_unlock(&THREAD->lock);
-            spinlock_unlock(&CPU->lock);
+            irq_spinlock_unlock(&THREAD->lock, false);
+            irq_spinlock_unlock(&CPU->lock, false);
             THREAD->saved_fpu_context =
                 (fpu_context_t *) slab_alloc(fpu_context_slab, 0);
+
             /* We may have switched CPUs during slab_alloc */
             goto restart;
         }
         fpu_init();
         THREAD->fpu_context_exists = 1;
     }
+
     CPU->fpu_owner = THREAD;
     THREAD->fpu_context_engaged = 1;
-    spinlock_unlock(&THREAD->lock);
-
-    spinlock_unlock(&CPU->lock);
-}
-#endif
+    irq_spinlock_unlock(&THREAD->lock, false);
+
+    irq_spinlock_unlock(&CPU->lock, false);
+}
+#endif /* CONFIG_FPU_LAZY */
@@ -180 +183 @@
 static thread_t *find_best_thread(void)
 {
-    thread_t *t;
-    runq_t *r;
-    int i;
-
     ASSERT(CPU != NULL);
 
 loop:
 …
          * This improves energy saving and hyperthreading.
          */
-
-        /* Mark CPU as it was idle this clock tick */
-        spinlock_lock(&CPU->lock);
-        CPU->idle = true;
-        spinlock_unlock(&CPU->lock);
-
-        interrupts_enable();
-        /*
+        irq_spinlock_lock(&CPU->lock, false);
+        CPU->idle = true;
+        irq_spinlock_unlock(&CPU->lock, false);
+        interrupts_enable();
+
+        /*
          * An interrupt might occur right now and wake up a thread.
          * In such case, the CPU will continue to go to sleep
          * even though there is a runnable thread.
          */
         cpu_sleep();
         interrupts_disable();
         goto loop;
     }
 
+    unsigned int i;
     for (i = 0; i < RQ_COUNT; i++) {
-        r = &CPU->rq[i];
-        spinlock_lock(&r->lock);
-        if (r->n == 0) {
+        irq_spinlock_lock(&(CPU->rq[i].lock), false);
+        if (CPU->rq[i].n == 0) {
             /*
              * If this queue is empty, try a lower-priority queue.
              */
-            spinlock_unlock(&r->lock);
+            irq_spinlock_unlock(&(CPU->rq[i].lock), false);
             continue;
         }
 
         atomic_dec(&CPU->nrdy);
         atomic_dec(&nrdy);
-        r->n--;
+        CPU->rq[i].n--;
 
         /*
          * Take the first thread from the queue.
          */
-        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
-        list_remove(&t->rq_link);
-
-        spinlock_unlock(&r->lock);
-
-        spinlock_lock(&t->lock);
-        t->cpu = CPU;
-
-        t->ticks = us2ticks((i + 1) * 10000);
-        t->priority = i;    /* correct rq index */
-
+        thread_t *thread =
+            list_get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link);
+        list_remove(&thread->rq_link);
+
+        irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);
+
+        thread->cpu = CPU;
+        thread->ticks = us2ticks((i + 1) * 10000);
+        thread->priority = i;    /* Correct rq index */
+
         /*
          * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
          * when load balancing needs emerge.
          */
-        t->flags &= ~THREAD_FLAG_STOLEN;
-        spinlock_unlock(&t->lock);
-
-        return t;
-    }
+        thread->flags &= ~THREAD_FLAG_STOLEN;
+        irq_spinlock_unlock(&thread->lock, false);
+
+        return thread;
+    }
+
     goto loop;
 }
@@ -267 +263 @@
 {
     link_t head;
-    runq_t *r;
-    int i, n;
 
     list_initialize(&head);
-    spinlock_lock(&CPU->lock);
+    irq_spinlock_lock(&CPU->lock, false);
+
     if (CPU->needs_relink > NEEDS_RELINK_MAX) {
+        int i;
         for (i = start; i < RQ_COUNT - 1; i++) {
-            /* remember and empty rq[i + 1] */
-            r = &CPU->rq[i + 1];
-            spinlock_lock(&r->lock);
-            list_concat(&head, &r->rq_head);
-            n = r->n;
-            r->n = 0;
-            spinlock_unlock(&r->lock);
-
-            /* append rq[i + 1] to rq[i] */
-            r = &CPU->rq[i];
-            spinlock_lock(&r->lock);
-            list_concat(&r->rq_head, &head);
-            r->n += n;
-            spinlock_unlock(&r->lock);
+            /* Remember and empty rq[i + 1] */
+
+            irq_spinlock_lock(&CPU->rq[i + 1].lock, false);
+            list_concat(&head, &CPU->rq[i + 1].rq_head);
+            size_t n = CPU->rq[i + 1].n;
+            CPU->rq[i + 1].n = 0;
+            irq_spinlock_unlock(&CPU->rq[i + 1].lock, false);
+
+            /* Append rq[i + 1] to rq[i] */
+
+            irq_spinlock_lock(&CPU->rq[i].lock, false);
+            list_concat(&CPU->rq[i].rq_head, &head);
+            CPU->rq[i].n += n;
+            irq_spinlock_unlock(&CPU->rq[i].lock, false);
         }
+
         CPU->needs_relink = 0;
     }
-    spinlock_unlock(&CPU->lock);
-
+
+    irq_spinlock_unlock(&CPU->lock, false);
 }
@@ -305 +302 @@
 {
     volatile ipl_t ipl;
 
     ASSERT(CPU != NULL);
 
     ipl = interrupts_disable();
 
     if (atomic_get(&haltstate))
         halt();
 
     if (THREAD) {
-        spinlock_lock(&THREAD->lock);
+        irq_spinlock_lock(&THREAD->lock, false);
 
         /* Update thread kernel accounting */
 …
             THREAD->last_cycle = get_cycle();
 
-            spinlock_unlock(&THREAD->lock);
+            irq_spinlock_unlock(&THREAD->lock, false);
             interrupts_restore(THREAD->saved_context.ipl);
 
             return;
         }
 
         /*
          * Interrupt priority level of preempted thread is recorded
          * here to facilitate scheduler() invocations from
          * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
+         *
          */
         THREAD->saved_context.ipl = ipl;
     }
 
     /*
      * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
      * and preemption counter. At this point THE could be coming either
      * from THREAD's or CPU's stack.
+     *
      */
     the_copy(THE, (the_t *) CPU->stack);
 
     /*
      * We may not keep the old stack.
 …
      * Therefore the scheduler() function continues in
      * scheduler_separated_stack().
+     *
      */
     context_save(&CPU->saved_context);
 …
         (uintptr_t) CPU->stack, CPU_STACK_SIZE);
     context_restore(&CPU->saved_context);
-    /* not reached */
+
+    /* Not reached */
 }
@@ -376 +377 @@
  * switch to a new thread.
  *
- * Assume THREAD->lock is held.
  */
 void scheduler_separated_stack(void)
 {
-    int priority;
     DEADLOCK_PROBE_INIT(p_joinwq);
     task_t *old_task = TASK;
     as_t *old_as = AS;
 
+    ASSERT((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
     ASSERT(CPU != NULL);
 
 …
      * possible destruction should thread_destroy() be called on this or any
      * other processor while the scheduler is still using them.
+     *
      */
     if (old_task)
         task_hold(old_task);
+
     if (old_as)
         as_hold(old_as);
 
     if (THREAD) {
-        /* must be run after the switch to scheduler stack */
+        /* Must be run after the switch to scheduler stack */
         after_thread_ran();
 
         switch (THREAD->state) {
         case Running:
-            spinlock_unlock(&THREAD->lock);
+            irq_spinlock_unlock(&THREAD->lock, false);
             thread_ready(THREAD);
             break;
 
         case Exiting:
 repeat:
             if (THREAD->detached) {
-                thread_destroy(THREAD);
+                thread_destroy(THREAD, false);
             } else {
                 /*
                  * The thread structure is kept allocated until
                  * somebody calls thread_detach() on it.
+                 *
                  */
-                if (!spinlock_trylock(&THREAD->join_wq.lock)) {
+                if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) {
                     /*
                      * Avoid deadlock.
+                     *
                      */
-                    spinlock_unlock(&THREAD->lock);
+                    irq_spinlock_unlock(&THREAD->lock, false);
                     delay(HZ);
-                    spinlock_lock(&THREAD->lock);
+                    irq_spinlock_lock(&THREAD->lock, false);
                     DEADLOCK_PROBE(p_joinwq,
                         DEADLOCK_THRESHOLD);
 …
                 _waitq_wakeup_unsafe(&THREAD->join_wq,
                     WAKEUP_FIRST);
-                spinlock_unlock(&THREAD->join_wq.lock);
+                irq_spinlock_unlock(&THREAD->join_wq.lock, false);
 
                 THREAD->state = Lingering;
-                spinlock_unlock(&THREAD->lock);
+                irq_spinlock_unlock(&THREAD->lock, false);
             }
             break;
 
         case Sleeping:
             /*
              * Prefer the thread after it's woken up.
+             *
              */
             THREAD->priority = -1;
 
             /*
              * We need to release wq->lock which we locked in
              * waitq_sleep(). Address of wq->lock is kept in
              * THREAD->sleep_queue.
+             *
              */
-            spinlock_unlock(&THREAD->sleep_queue->lock);
-
-            /*
-             * Check for possible requests for out-of-context
-             * invocation.
-             */
-            if (THREAD->call_me) {
-                THREAD->call_me(THREAD->call_me_with);
-                THREAD->call_me = NULL;
-                THREAD->call_me_with = NULL;
-            }
-
-            spinlock_unlock(&THREAD->lock);
-
+            irq_spinlock_unlock(&THREAD->sleep_queue->lock, false);
+
+            irq_spinlock_unlock(&THREAD->lock, false);
             break;
 
         default:
             /*
              * Entering state is unexpected.
+             *
              */
             panic("tid%" PRIu64 ": unexpected state %s.",
 …
             break;
         }
 
         THREAD = NULL;
     }
 
     THREAD = find_best_thread();
 
-    spinlock_lock(&THREAD->lock);
-    priority = THREAD->priority;
-    spinlock_unlock(&THREAD->lock);
-
-    relink_rq(priority);
-
+    irq_spinlock_lock(&THREAD->lock, false);
+    int priority = THREAD->priority;
+    irq_spinlock_unlock(&THREAD->lock, false);
+
+    relink_rq(priority);
+
     /*
      * If both the old and the new task are the same, lots of work is
      * avoided.
+     *
      */
     if (TASK != THREAD->task) {
 …
          * Note that it is possible for two tasks to share one address
          * space.
+         *
          */
         if (old_as != new_as) {
             /*
              * Both tasks and address spaces are different.
              * Replace the old one with the new one.
+             *
              */
             as_switch(old_as, new_as);
         }
 
         TASK = THREAD->task;
         before_task_runs();
     }
 
     if (old_task)
         task_release(old_task);
+
     if (old_as)
         as_release(old_as);
 
-    spinlock_lock(&THREAD->lock);
+    irq_spinlock_lock(&THREAD->lock, false);
     THREAD->state = Running;
 
 #ifdef SCHEDULER_VERBOSE
     printf("cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
         ", nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority,
         THREAD->ticks, atomic_get(&CPU->nrdy));
 #endif
 
     /*
      * Some architectures provide late kernel PA2KA(identity)
 …
      * necessary, is to be mapped in before_thread_runs(). This
      * function must be executed before the switch to the new stack.
+     *
      */
     before_thread_runs();
 
     /*
      * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
      * thread's stack.
+     *
      */
     the_copy(THE, (the_t *) THREAD->kstack);
 
     context_restore(&THREAD->saved_context);
-    /* not reached */
+
+    /* Not reached */
 }
@@ -551 +554 @@
 void kcpulb(void *arg)
 {
-    thread_t *t;
-    int count;
     atomic_count_t average;
-    unsigned int i;
-    int j;
-    int k = 0;
-    ipl_t ipl;
+    atomic_count_t rdy;
 
     /*
      * Detach kcpulb as nobody will call thread_join_timeout() on it.
 …
      */
     thread_sleep(1);
 
 not_satisfied:
     /*
 …
      * other CPU's. Note that situation can have changed between two
      * passes. Each time get the most up to date counts.
+     *
      */
     average = atomic_get(&nrdy) / config.cpu_active + 1;
-    count = average - atomic_get(&CPU->nrdy);
-
-    if (count <= 0)
+    rdy = atomic_get(&CPU->nrdy);
+
+    if (average <= rdy)
         goto satisfied;
+
+    atomic_count_t count = average - rdy;
 
     /*
      * Searching least priority queues on all CPU's first and most priority
      * queues on all CPU's last.
-     */
-    for (j = RQ_COUNT - 1; j >= 0; j--) {
-        for (i = 0; i < config.cpu_active; i++) {
-            link_t *l;
-            runq_t *r;
-            cpu_t *cpu;
-
-            cpu = &cpus[(i + k) % config.cpu_active];
-
+     *
+     */
+    size_t acpu;
+    size_t acpu_bias = 0;
+    int rq;
+
+    for (rq = RQ_COUNT - 1; rq >= 0; rq--) {
+        for (acpu = 0; acpu < config.cpu_active; acpu++) {
+            cpu_t *cpu = &cpus[(acpu + acpu_bias) % config.cpu_active];
+
             /*
              * Not interested in ourselves.
              * Doesn't require interrupt disabling for kcpulb has
              * THREAD_FLAG_WIRED.
+             *
              */
             if (CPU == cpu)
                 continue;
+
             if (atomic_get(&cpu->nrdy) <= average)
                 continue;
 
-            ipl = interrupts_disable();
-            r = &cpu->rq[j];
-            spinlock_lock(&r->lock);
-            if (r->n == 0) {
-                spinlock_unlock(&r->lock);
-                interrupts_restore(ipl);
+            irq_spinlock_lock(&(cpu->rq[rq].lock), true);
+            if (cpu->rq[rq].n == 0) {
+                irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
                 continue;
             }
 
-            t = NULL;
-            l = r->rq_head.prev;    /* search rq from the back */
-            while (l != &r->rq_head) {
-                t = list_get_instance(l, thread_t, rq_link);
+            thread_t *thread = NULL;
+
+            /* Search rq from the back */
+            link_t *link = cpu->rq[rq].rq_head.prev;
+
+            while (link != &(cpu->rq[rq].rq_head)) {
+                thread = (thread_t *) list_get_instance(link, thread_t, rq_link);
+
                 /*
                  * We don't want to steal CPU-wired threads
 …
                  * steal threads whose FPU context is still in
                  * CPU.
+                 *
                  */
-                spinlock_lock(&t->lock);
-                if ((!(t->flags & (THREAD_FLAG_WIRED |
-                    THREAD_FLAG_STOLEN))) &&
-                    (!(t->fpu_context_engaged))) {
+                irq_spinlock_lock(&thread->lock, false);
+
+                if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)))
+                    && (!(thread->fpu_context_engaged))) {
                     /*
-                     * Remove t from r.
+                     * Remove thread from ready queue.
                      */
-                    spinlock_unlock(&t->lock);
+                    irq_spinlock_unlock(&thread->lock, false);
 
                     atomic_dec(&cpu->nrdy);
                     atomic_dec(&nrdy);
 
-                    r->n--;
-                    list_remove(&t->rq_link);
+                    cpu->rq[rq].n--;
+                    list_remove(&thread->rq_link);
 
                     break;
                 }
-                spinlock_unlock(&t->lock);
-                l = l->prev;
-                t = NULL;
+
+                irq_spinlock_unlock(&thread->lock, false);
+
+                link = link->prev;
+                thread = NULL;
             }
-            spinlock_unlock(&r->lock);
-
-            if (t) {
+
+            if (thread) {
                 /*
-                 * Ready t on local CPU
+                 * Ready thread on local CPU
+                 *
                  */
-                spinlock_lock(&t->lock);
+
+                irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock);
+
 #ifdef KCPULB_VERBOSE
                 printf("kcpulb%u: TID %" PRIu64 " -> cpu%u, "
 …
                     atomic_get(&nrdy) / config.cpu_active);
 #endif
-                t->flags |= THREAD_FLAG_STOLEN;
-                t->state = Entering;
-                spinlock_unlock(&t->lock);
-
-                thread_ready(t);
-
-                interrupts_restore(ipl);
-
+
+                thread->flags |= THREAD_FLAG_STOLEN;
+                thread->state = Entering;
+
+                irq_spinlock_unlock(&thread->lock, true);
+                thread_ready(thread);
+
                 if (--count == 0)
                     goto satisfied;
 
                 /*
                  * We are not satisfied yet, focus on another
                  * CPU next time.
+                 *
                  */
-                k++;
+                acpu_bias++;
 
                 continue;
-            }
-            interrupts_restore(ipl);
+            } else
+                irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
+
         }
     }
 
     if (atomic_get(&CPU->nrdy)) {
         /*
          * Be a little bit light-weight and let migrated threads run.
+         *
          */
         scheduler();
 …
          * We failed to migrate a single thread.
          * Give up this turn.
+         *
          */
         goto loop;
     }
 
     goto not_satisfied;
 
 satisfied:
     goto loop;
 }
-
 #endif /* CONFIG_SMP */
 
-
-/** Print information about threads & scheduler queues */
+/** Print information about threads & scheduler queues
+ *
+ */
 void sched_print_list(void)
 {
-    ipl_t ipl;
-    unsigned int cpu, i;
-    runq_t *r;
-    thread_t *t;
-    link_t *cur;
-
-    /* We are going to mess with scheduler structures,
-     * let's not be interrupted */
-    ipl = interrupts_disable();
+    size_t cpu;
     for (cpu = 0; cpu < config.cpu_count; cpu++) {
-
         if (!cpus[cpu].active)
             continue;
-
-        spinlock_lock(&cpus[cpu].lock);
+
+        irq_spinlock_lock(&cpus[cpu].lock, true);
+
         printf("cpu%u: address=%p, nrdy=%ld, needs_relink=%" PRIs "\n",
             cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
             cpus[cpu].needs_relink);
 
+        unsigned int i;
         for (i = 0; i < RQ_COUNT; i++) {
-            r = &cpus[cpu].rq[i];
-            spinlock_lock(&r->lock);
-            if (!r->n) {
-                spinlock_unlock(&r->lock);
+            irq_spinlock_lock(&(cpus[cpu].rq[i].lock), false);
+            if (cpus[cpu].rq[i].n == 0) {
+                irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
                 continue;
             }
+
             printf("\trq[%u]: ", i);
-            for (cur = r->rq_head.next; cur != &r->rq_head;
-                cur = cur->next) {
-                t = list_get_instance(cur, thread_t, rq_link);
-                printf("%" PRIu64 "(%s) ", t->tid,
-                    thread_states[t->state]);
+            link_t *cur;
+            for (cur = cpus[cpu].rq[i].rq_head.next;
+                cur != &(cpus[cpu].rq[i].rq_head);
+                cur = cur->next) {
+                thread_t *thread = list_get_instance(cur, thread_t, rq_link);
+                printf("%" PRIu64 "(%s) ", thread->tid,
+                    thread_states[thread->state]);
             }
             printf("\n");
-            spinlock_unlock(&r->lock);
+
+            irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
         }
-        spinlock_unlock(&cpus[cpu].lock);
-    }
-
-    interrupts_restore(ipl);
+
+        irq_spinlock_unlock(&cpus[cpu].lock, true);
+    }
 }
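The whole changeset applies one locking pattern: where the old code paired spinlock_lock()/spinlock_unlock() with explicit interrupts_disable()/interrupts_restore() and carried a local ipl_t for the saved state, the new code calls irq_spinlock_lock(&lock, bool) and irq_spinlock_unlock(&lock, bool), with the flag apparently selecting whether the lock itself disables and later restores interrupts, and irq_spinlock_pass() hands over from one lock to another without touching the interrupt state in between (see find_best_thread() and kcpulb() above). The sketch below is a minimal, user-space model of that pattern for illustration only; it is not the HelenOS implementation, and the names (irq_spinlock_model_t, fake_interrupts_enabled, model_*) are invented for the example.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model of the pattern introduced by the changeset: a spinlock that can
 * optionally disable a (simulated) interrupt state and remembers whether it
 * must restore that state on unlock.  Names and layout are illustrative. */

static bool fake_interrupts_enabled = true;   /* stands in for the CPU IPL */

typedef struct {
    atomic_flag flag;      /* the actual lock bit */
    bool guard;            /* true if this lock disabled interrupts itself */
    bool saved_state;      /* interrupt state to restore on unlock */
} irq_spinlock_model_t;

static void model_lock(irq_spinlock_model_t *l, bool irq_dis)
{
    bool saved = fake_interrupts_enabled;
    if (irq_dis)
        fake_interrupts_enabled = false;      /* like interrupts_disable() */
    while (atomic_flag_test_and_set(&l->flag))
        ;  /* spin */
    l->guard = irq_dis;
    l->saved_state = saved;
}

static void model_unlock(irq_spinlock_model_t *l, bool irq_res)
{
    bool guard = l->guard;
    bool saved = l->saved_state;
    atomic_flag_clear(&l->flag);
    if (irq_res && guard)
        fake_interrupts_enabled = saved;      /* like interrupts_restore() */
}

/* Hand-over-hand: release 'from' and acquire 'to' while carrying the saved
 * interrupt state along, the way irq_spinlock_pass() is used above when a
 * thread is taken off a run queue. */
static void model_pass(irq_spinlock_model_t *from, irq_spinlock_model_t *to)
{
    bool guard = from->guard;
    bool saved = from->saved_state;
    atomic_flag_clear(&from->flag);
    while (atomic_flag_test_and_set(&to->flag))
        ;  /* spin */
    to->guard = guard;
    to->saved_state = saved;
}

int main(void)
{
    irq_spinlock_model_t rq_lock = { .guard = false, .saved_state = true };
    irq_spinlock_model_t thread_lock = { .guard = false, .saved_state = true };
    atomic_flag_clear(&rq_lock.flag);
    atomic_flag_clear(&thread_lock.flag);

    /* kcpulb()-style usage: the lock manages the interrupt state itself. */
    model_lock(&rq_lock, true);
    model_pass(&rq_lock, &thread_lock);       /* as when a thread is stolen */
    model_unlock(&thread_lock, true);

    printf("interrupts enabled again: %d\n", fake_interrupts_enabled);
    return 0;
}

Folding the saved interrupt state into the lock object is what lets the new kcpulb() and sched_print_list() drop their local ipl_t variables and the matching interrupts_restore() calls in the diff above.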