Changeset 875c629 in mainline for kernel/generic/src/proc
- Timestamp:
- 2011-01-27T17:19:49Z (15 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 577f042a, f579760
- Parents:
- bf75e3cb (diff), 5b7a107 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Location:
- kernel/generic/src/proc
- Files:
-
- 3 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/proc/scheduler.c
rbf75e3cb r875c629 62 62 #include <print.h> 63 63 #include <debug.h> 64 65 static void before_task_runs(void); 66 static void before_thread_runs(void); 67 static void after_thread_ran(void); 64 #include <stacktrace.h> 65 68 66 static void scheduler_separated_stack(void); 69 67 … … 71 69 72 70 /** Carry out actions before new task runs. */ 73 void before_task_runs(void)71 static void before_task_runs(void) 74 72 { 75 73 before_task_runs_arch(); … … 80 78 * Perform actions that need to be 81 79 * taken before the newly selected 82 * t read is passed control.80 * thread is passed control. 83 81 * 84 82 * THREAD->lock is locked on entry 85 83 * 86 84 */ 87 void before_thread_runs(void)85 static void before_thread_runs(void) 88 86 { 89 87 before_thread_runs_arch(); 88 90 89 #ifdef CONFIG_FPU_LAZY 91 if (THREAD == CPU->fpu_owner)90 if (THREAD == CPU->fpu_owner) 92 91 fpu_enable(); 93 92 else … … 102 101 } 103 102 #endif 103 104 #ifdef CONFIG_UDEBUG 105 if (THREAD->btrace) { 106 istate_t *istate = THREAD->udebug.uspace_state; 107 if (istate != NULL) { 108 printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid); 109 stack_trace_istate(istate); 110 } 111 112 THREAD->btrace = false; 113 } 114 #endif 104 115 } 105 116 … … 113 124 * 114 125 */ 115 void after_thread_ran(void)126 static void after_thread_ran(void) 116 127 { 117 128 after_thread_ran_arch(); … … 391 402 * possible destruction should thread_destroy() be called on this or any 392 403 * other processor while the scheduler is still using them. 393 *394 404 */ 395 405 if (old_task) … … 417 427 * The thread structure is kept allocated until 418 428 * somebody calls thread_detach() on it. 419 *420 429 */ 421 430 if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) { 422 431 /* 423 432 * Avoid deadlock. 424 *425 433 */ 426 434 irq_spinlock_unlock(&THREAD->lock, false); … … 443 451 /* 444 452 * Prefer the thread after it's woken up. 445 *446 453 */ 447 454 THREAD->priority = -1; … … 451 458 * waitq_sleep(). 
Address of wq->lock is kept in 452 459 * THREAD->sleep_queue. 453 *454 460 */ 455 461 irq_spinlock_unlock(&THREAD->sleep_queue->lock, false); … … 461 467 /* 462 468 * Entering state is unexpected. 463 *464 469 */ 465 470 panic("tid%" PRIu64 ": unexpected state %s.", … … 480 485 481 486 /* 482 * If both the old and the new task are the same, lots of work is 483 * avoided. 484 * 487 * If both the old and the new task are the same, 488 * lots of work is avoided. 485 489 */ 486 490 if (TASK != THREAD->task) { … … 488 492 489 493 /* 490 * Note that it is possible for two tasks to share one address 491 * space. 492 ( 494 * Note that it is possible for two tasks 495 * to share one address space. 493 496 */ 494 497 if (old_as != new_as) { … … 496 499 * Both tasks and address spaces are different. 497 500 * Replace the old one with the new one. 498 *499 501 */ 500 502 as_switch(old_as, new_as); … … 527 529 * necessary, is to be mapped in before_thread_runs(). This 528 530 * function must be executed before the switch to the new stack. 529 *530 531 */ 531 532 before_thread_runs(); … … 534 535 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to 535 536 * thread's stack. 536 *537 537 */ 538 538 the_copy(THE, (the_t *) THREAD->kstack); … … 658 658 /* 659 659 * Ready thread on local CPU 660 *661 660 */ 662 661 -
kernel/generic/src/proc/task.c
rbf75e3cb r875c629 449 449 static void task_kill_internal(task_t *task) 450 450 { 451 irq_spinlock_lock(&task->lock, false); 452 irq_spinlock_lock(&threads_lock, false); 453 454 /* 455 * Interrupt all threads. 456 */ 457 451 458 link_t *cur; 452 453 /*454 * Interrupt all threads.455 */456 irq_spinlock_lock(&task->lock, false);457 459 for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) { 458 460 thread_t *thread = list_get_instance(cur, thread_t, th_link); … … 471 473 } 472 474 475 irq_spinlock_unlock(&threads_lock, false); 473 476 irq_spinlock_unlock(&task->lock, false); 474 477 } -
kernel/generic/src/proc/thread.c
rbf75e3cb r875c629 239 239 * Switch thread to the ready state. 240 240 * 241 * @param t Thread to make ready.241 * @param thread Thread to make ready. 242 242 * 243 243 */ … … 246 246 irq_spinlock_lock(&thread->lock, true); 247 247 248 ASSERT( !(thread->state == Ready));248 ASSERT(thread->state != Ready); 249 249 250 250 int i = (thread->priority < RQ_COUNT - 1) … … 350 350 351 351 #ifdef CONFIG_UDEBUG 352 /* Init debugging stuff */ 352 /* Initialize debugging stuff */ 353 thread->btrace = false; 353 354 udebug_thread_initialize(&thread->udebug); 354 355 #endif … … 535 536 /** Detach thread. 536 537 * 537 * Mark the thread as detached , if the thread is already in the Lingering538 * state, deallocate its resources.538 * Mark the thread as detached. If the thread is already 539 * in the Lingering state, deallocate its resources. 539 540 * 540 541 * @param thread Thread to be detached. … … 740 741 ASSERT(interrupts_disabled()); 741 742 ASSERT(irq_spinlock_locked(&threads_lock)); 742 743 743 744 thread_iterator_t iterator; 744 745 … … 751 752 } 752 753 754 #ifdef CONFIG_UDEBUG 755 756 void thread_stack_trace(thread_id_t thread_id) 757 { 758 irq_spinlock_lock(&threads_lock, true); 759 760 thread_t *thread = thread_find_by_id(thread_id); 761 if (thread == NULL) { 762 printf("No such thread.\n"); 763 irq_spinlock_unlock(&threads_lock, true); 764 return; 765 } 766 767 irq_spinlock_lock(&thread->lock, false); 768 769 /* 770 * Schedule a stack trace to be printed 771 * just before the thread is scheduled next. 772 * 773 * If the thread is sleeping then try to interrupt 774 * the sleep. Any request for printing an uspace stack 775 * trace from within the kernel should be always 776 * considered a last resort debugging means, therefore 777 * forcing the thread's sleep to be interrupted 778 * is probably justifiable. 
779 */ 780 781 bool sleeping = false; 782 istate_t *istate = thread->udebug.uspace_state; 783 if (istate != NULL) { 784 printf("Scheduling thread stack trace.\n"); 785 thread->btrace = true; 786 if (thread->state == Sleeping) 787 sleeping = true; 788 } else 789 printf("Thread interrupt state not available.\n"); 790 791 irq_spinlock_unlock(&thread->lock, false); 792 793 if (sleeping) 794 waitq_interrupt_sleep(thread); 795 796 irq_spinlock_unlock(&threads_lock, true); 797 } 798 799 #endif /* CONFIG_UDEBUG */ 753 800 754 801 /** Process syscall to create new thread. … … 793 840 * has already been created. We need to undo its 794 841 * creation now. 795 *796 842 */ 797 843 … … 815 861 * THREAD_B events for threads that already existed 816 862 * and could be detected with THREAD_READ before. 817 *818 863 */ 819 864 udebug_thread_b_event_attach(thread, TASK);
Note:
See TracChangeset
for help on using the changeset viewer.
