Changeset df58e44 in mainline for kernel/generic/src/proc
- Timestamp:
- 2011-01-27T16:36:35Z (15 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 5b7a107
- Parents:
- 0843f02
- Location:
- kernel/generic/src/proc
- Files:
- 3 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/proc/scheduler.c
r0843f02 rdf58e44 62 62 #include <print.h> 63 63 #include <debug.h> 64 #include <stacktrace.h> 64 65 65 66 static void scheduler_separated_stack(void); … … 77 78 * Perform actions that need to be 78 79 * taken before the newly selected 79 * t read is passed control.80 * thread is passed control. 80 81 * 81 82 * THREAD->lock is locked on entry … … 87 88 88 89 #ifdef CONFIG_FPU_LAZY 89 if (THREAD == CPU->fpu_owner)90 if (THREAD == CPU->fpu_owner) 90 91 fpu_enable(); 91 92 else … … 100 101 } 101 102 #endif 103 104 if (THREAD->btrace) { 105 istate_t *istate = THREAD->udebug.uspace_state; 106 if (istate != NULL) { 107 printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid); 108 stack_trace_istate(istate); 109 } 110 111 THREAD->btrace = false; 112 } 102 113 } 103 114 … … 645 656 /* 646 657 * Ready thread on local CPU 647 *648 658 */ 649 659 -
kernel/generic/src/proc/task.c
r0843f02 rdf58e44 449 449 static void task_kill_internal(task_t *task) 450 450 { 451 irq_spinlock_lock(&task->lock, false); 452 irq_spinlock_lock(&threads_lock, false); 453 454 /* 455 * Interrupt all threads. 456 */ 457 451 458 link_t *cur; 452 453 /*454 * Interrupt all threads.455 */456 irq_spinlock_lock(&task->lock, false);457 459 for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) { 458 460 thread_t *thread = list_get_instance(cur, thread_t, th_link); … … 471 473 } 472 474 475 irq_spinlock_unlock(&threads_lock, false); 473 476 irq_spinlock_unlock(&task->lock, false); 474 477 } -
kernel/generic/src/proc/thread.c
r0843f02 rdf58e44 239 239 * Switch thread to the ready state. 240 240 * 241 * @param t Thread to make ready.241 * @param thread Thread to make ready. 242 242 * 243 243 */ … … 246 246 irq_spinlock_lock(&thread->lock, true); 247 247 248 ASSERT( !(thread->state == Ready));248 ASSERT(thread->state != Ready); 249 249 250 250 int i = (thread->priority < RQ_COUNT - 1) … … 338 338 339 339 thread->interrupted = false; 340 thread->btrace = false; 340 341 thread->detached = false; 341 342 waitq_initialize(&thread->join_wq); … … 535 536 /** Detach thread. 536 537 * 537 * Mark the thread as detached , if the thread is already in the Lingering538 * state, deallocate its resources.538 * Mark the thread as detached. If the thread is already 539 * in the Lingering state, deallocate its resources. 539 540 * 540 541 * @param thread Thread to be detached. … … 740 741 ASSERT(interrupts_disabled()); 741 742 ASSERT(irq_spinlock_locked(&threads_lock)); 742 743 743 744 thread_iterator_t iterator; 744 745 … … 751 752 } 752 753 754 void thread_stack_trace(thread_id_t thread_id) 755 { 756 irq_spinlock_lock(&threads_lock, true); 757 758 thread_t *thread = thread_find_by_id(thread_id); 759 if (thread == NULL) { 760 printf("No such thread.\n"); 761 irq_spinlock_unlock(&threads_lock, true); 762 return; 763 } 764 765 irq_spinlock_lock(&thread->lock, false); 766 767 /* 768 * Schedule a stack trace to be printed 769 * just before the thread is scheduled next. 770 * 771 * If the thread is sleeping then try to interrupt 772 * the sleep. Any request for printing an uspace stack 773 * trace from within the kernel should be always 774 * considered a last resort debugging means, therefore 775 * forcing the thread's sleep to be interrupted 776 * is probably justifiable. 
777 */ 778 779 bool sleeping = false; 780 istate_t *istate = thread->udebug.uspace_state; 781 if (istate != NULL) { 782 printf("Scheduling thread stack trace.\n"); 783 thread->btrace = true; 784 if (thread->state == Sleeping) 785 sleeping = true; 786 } else 787 printf("Thread interrupt state not available.\n"); 788 789 irq_spinlock_unlock(&thread->lock, false); 790 791 if (sleeping) 792 waitq_interrupt_sleep(thread); 793 794 irq_spinlock_unlock(&threads_lock, true); 795 } 753 796 754 797 /** Process syscall to create new thread. … … 793 836 * has already been created. We need to undo its 794 837 * creation now. 795 *796 838 */ 797 839 … … 815 857 * THREAD_B events for threads that already existed 816 858 * and could be detected with THREAD_READ before. 817 *818 859 */ 819 860 udebug_thread_b_event_attach(thread, TASK);
Note: See TracChangeset for help on using the changeset viewer.