Changes in kernel/generic/src/proc/scheduler.c [7e752b2:df58e44] in mainline
File: 1 edited (kernel/generic/src/proc/scheduler.c)
Legend (markers used in the diff below):
  ' '  Unmodified
  '+'  Added
  '-'  Removed
kernel/generic/src/proc/scheduler.c
--- kernel/generic/src/proc/scheduler.c (r7e752b2)
+++ kernel/generic/src/proc/scheduler.c (rdf58e44)

@@ -62,8 +62,6 @@
 #include <print.h>
 #include <debug.h>
-
-static void before_task_runs(void);
-static void before_thread_runs(void);
-static void after_thread_ran(void);
+#include <stacktrace.h>
+
 static void scheduler_separated_stack(void);
 
@@ -71,5 +69,5 @@
 
 /** Carry out actions before new task runs. */
-void before_task_runs(void)
+static void before_task_runs(void)
 {
     before_task_runs_arch();
@@ -80,14 +78,15 @@
  * Perform actions that need to be
  * taken before the newly selected
- * t read is passed control.
+ * thread is passed control.
  *
  * THREAD->lock is locked on entry
  *
  */
-void before_thread_runs(void)
+static void before_thread_runs(void)
 {
     before_thread_runs_arch();
+
 #ifdef CONFIG_FPU_LAZY
-    if (THREAD == CPU->fpu_owner)
+    if (THREAD == CPU->fpu_owner)
         fpu_enable();
     else
@@ -102,4 +101,14 @@
     }
 #endif
+
+    if (THREAD->btrace) {
+        istate_t *istate = THREAD->udebug.uspace_state;
+        if (istate != NULL) {
+            printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
+            stack_trace_istate(istate);
+        }
+
+        THREAD->btrace = false;
+    }
 }
 
@@ -113,5 +122,5 @@
  *
  */
-void after_thread_ran(void)
+static void after_thread_ran(void)
 {
     after_thread_ran_arch();
@@ -391,5 +400,4 @@
      * possible destruction should thread_destroy() be called on this or any
      * other processor while the scheduler is still using them.
-     *
      */
     if (old_task)
@@ -417,10 +425,8 @@
      * The thread structure is kept allocated until
      * somebody calls thread_detach() on it.
-     *
      */
     if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) {
         /*
          * Avoid deadlock.
-         *
         */
         irq_spinlock_unlock(&THREAD->lock, false);
@@ -443,5 +449,4 @@
     /*
      * Prefer the thread after it's woken up.
-     *
      */
     THREAD->priority = -1;
@@ -451,5 +456,4 @@
      * waitq_sleep(). Address of wq->lock is kept in
      * THREAD->sleep_queue.
-     *
      */
     irq_spinlock_unlock(&THREAD->sleep_queue->lock, false);
@@ -461,5 +465,4 @@
     /*
      * Entering state is unexpected.
-     *
      */
     panic("tid%" PRIu64 ": unexpected state %s.",
@@ -480,7 +483,6 @@
 
     /*
-     * If both the old and the new task are the same, lots of work is
-     * avoided.
-     *
+     * If both the old and the new task are the same,
+     * lots of work is avoided.
      */
     if (TASK != THREAD->task) {
@@ -488,7 +490,6 @@
 
     /*
-     * Note that it is possible for two tasks to share one address
-     * space.
-     (
+     * Note that it is possible for two tasks
+     * to share one address space.
      */
     if (old_as != new_as) {
@@ -496,5 +497,4 @@
      * Both tasks and address spaces are different.
      * Replace the old one with the new one.
-     *
      */
     as_switch(old_as, new_as);
@@ -527,5 +527,4 @@
      * necessary, is to be mapped in before_thread_runs(). This
      * function must be executed before the switch to the new stack.
-     *
      */
     before_thread_runs();
@@ -534,5 +533,4 @@
      * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
      * thread's stack.
-     *
      */
     the_copy(THE, (the_t *) THREAD->kstack);
@@ -658,5 +656,4 @@
     /*
      * Ready thread on local CPU
-     *
      */
 
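
The functional change in this revision is the btrace hook added to before_thread_runs() (new lines 104-112): when a thread is dispatched with its btrace flag set, the scheduler prints a userspace stack trace from the thread's saved state (THREAD->udebug.uspace_state) via stack_trace_istate() and then clears the flag. The sketch below shows how a caller could request such a trace; it is an illustration only, not part of this changeset. The helper name thread_request_btrace is hypothetical, and the locking discipline is assumed from the irq_spinlock calls visible elsewhere in the diff.

#include <proc/thread.h>
#include <synch/spinlock.h>

/* Hypothetical helper (illustration only, not in this changeset): flag a
 * thread so that before_thread_runs() prints its userspace stack trace the
 * next time the scheduler dispatches it. */
static void thread_request_btrace(thread_t *thread)
{
    /* Assumed locking: thread->lock is an irq_spinlock, as suggested by
     * irq_spinlock_unlock(&THREAD->lock, false) in the diff above. */
    irq_spinlock_lock(&thread->lock, true);
    thread->btrace = true;  /* consumed and reset by before_thread_runs() */
    irq_spinlock_unlock(&thread->lock, true);
}

Deferring the printing to the scheduler path, rather than doing it at request time, means the trace is produced the next time the thread is about to run, when its saved userspace register state (udebug.uspace_state) is at hand.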