Changes in kernel/generic/src/proc/scheduler.c [df58e44:7e752b2] in mainline
Files:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/proc/scheduler.c
rdf58e44 r7e752b2 62 62 #include <print.h> 63 63 #include <debug.h> 64 #include <stacktrace.h> 65 64 65 static void before_task_runs(void); 66 static void before_thread_runs(void); 67 static void after_thread_ran(void); 66 68 static void scheduler_separated_stack(void); 67 69 … … 69 71 70 72 /** Carry out actions before new task runs. */ 71 staticvoid before_task_runs(void)73 void before_task_runs(void) 72 74 { 73 75 before_task_runs_arch(); … … 78 80 * Perform actions that need to be 79 81 * taken before the newly selected 80 * t hread is passed control.82 * tread is passed control. 81 83 * 82 84 * THREAD->lock is locked on entry 83 85 * 84 86 */ 85 staticvoid before_thread_runs(void)87 void before_thread_runs(void) 86 88 { 87 89 before_thread_runs_arch(); 88 89 90 #ifdef CONFIG_FPU_LAZY 90 if 91 if(THREAD == CPU->fpu_owner) 91 92 fpu_enable(); 92 93 else … … 101 102 } 102 103 #endif 103 104 if (THREAD->btrace) {105 istate_t *istate = THREAD->udebug.uspace_state;106 if (istate != NULL) {107 printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);108 stack_trace_istate(istate);109 }110 111 THREAD->btrace = false;112 }113 104 } 114 105 … … 122 113 * 123 114 */ 124 staticvoid after_thread_ran(void)115 void after_thread_ran(void) 125 116 { 126 117 after_thread_ran_arch(); … … 400 391 * possible destruction should thread_destroy() be called on this or any 401 392 * other processor while the scheduler is still using them. 393 * 402 394 */ 403 395 if (old_task) … … 425 417 * The thread structure is kept allocated until 426 418 * somebody calls thread_detach() on it. 419 * 427 420 */ 428 421 if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) { 429 422 /* 430 423 * Avoid deadlock. 424 * 431 425 */ 432 426 irq_spinlock_unlock(&THREAD->lock, false); … … 449 443 /* 450 444 * Prefer the thread after it's woken up. 445 * 451 446 */ 452 447 THREAD->priority = -1; … … 456 451 * waitq_sleep(). Address of wq->lock is kept in 457 452 * THREAD->sleep_queue. 
453 * 458 454 */ 459 455 irq_spinlock_unlock(&THREAD->sleep_queue->lock, false); … … 465 461 /* 466 462 * Entering state is unexpected. 463 * 467 464 */ 468 465 panic("tid%" PRIu64 ": unexpected state %s.", … … 483 480 484 481 /* 485 * If both the old and the new task are the same, 486 * lots of work is avoided. 482 * If both the old and the new task are the same, lots of work is 483 * avoided. 484 * 487 485 */ 488 486 if (TASK != THREAD->task) { … … 490 488 491 489 /* 492 * Note that it is possible for two tasks 493 * to share one address space. 490 * Note that it is possible for two tasks to share one address 491 * space. 492 ( 494 493 */ 495 494 if (old_as != new_as) { … … 497 496 * Both tasks and address spaces are different. 498 497 * Replace the old one with the new one. 498 * 499 499 */ 500 500 as_switch(old_as, new_as); … … 527 527 * necessary, is to be mapped in before_thread_runs(). This 528 528 * function must be executed before the switch to the new stack. 529 * 529 530 */ 530 531 before_thread_runs(); … … 533 534 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to 534 535 * thread's stack. 536 * 535 537 */ 536 538 the_copy(THE, (the_t *) THREAD->kstack); … … 656 658 /* 657 659 * Ready thread on local CPU 660 * 658 661 */ 659 662
Note: See TracChangeset for help on using the changeset viewer.