Changeset 12573db in mainline for kernel/generic/src/proc
- Timestamp: 2011-01-31T20:32:33Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 37cf3792
- Parents: 4fe94c66, 197ef43
Note: this is a merge changeset; the changes displayed below correspond to the merge itself, not to the full set of changes relative to each parent.
- Location: kernel/generic/src/proc
- Files: 3 edited
kernel/generic/src/proc/scheduler.c
--- scheduler.c (r4fe94c66)
+++ scheduler.c (r12573db)

 #include <print.h>
 #include <debug.h>
+#include <stacktrace.h>
 
-static void before_task_runs(void);
-static void before_thread_runs(void);
-static void after_thread_ran(void);
 static void scheduler_separated_stack(void);
…
 /** Carry out actions before new task runs. */
-void before_task_runs(void)
+static void before_task_runs(void)
 {
     before_task_runs_arch();
…
  * Perform actions that need to be
  * taken before the newly selected
- * tread is passed control.
+ * thread is passed control.
  *
  * THREAD->lock is locked on entry
  *
  */
-void before_thread_runs(void)
+static void before_thread_runs(void)
 {
     before_thread_runs_arch();
+
 #ifdef CONFIG_FPU_LAZY
-    if(THREAD == CPU->fpu_owner)
+    if (THREAD == CPU->fpu_owner)
         fpu_enable();
     else
…
     }
 #endif
+
+#ifdef CONFIG_UDEBUG
+    if (THREAD->btrace) {
+        istate_t *istate = THREAD->udebug.uspace_state;
+        if (istate != NULL) {
+            printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
+            stack_trace_istate(istate);
+        }
+
+        THREAD->btrace = false;
+    }
+#endif
 }
…
  *
  */
-void after_thread_ran(void)
+static void after_thread_ran(void)
 {
     after_thread_ran_arch();
…
      * possible destruction should thread_destroy() be called on this or any
      * other processor while the scheduler is still using them.
-     *
      */
     if (old_task)
…
      * The thread structure is kept allocated until
      * somebody calls thread_detach() on it.
-     *
      */
     if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) {
         /*
          * Avoid deadlock.
-          *
          */
         irq_spinlock_unlock(&THREAD->lock, false);
…
     /*
      * Prefer the thread after it's woken up.
-     *
      */
     THREAD->priority = -1;
…
      * waitq_sleep(). Address of wq->lock is kept in
      * THREAD->sleep_queue.
-     *
      */
     irq_spinlock_unlock(&THREAD->sleep_queue->lock, false);
…
     /*
      * Entering state is unexpected.
-     *
      */
     panic("tid%" PRIu64 ": unexpected state %s.",
…
     /*
-     * If both the old and the new task are the same, lots of work is
-     * avoided.
-     *
+     * If both the old and the new task are the same,
+     * lots of work is avoided.
      */
     if (TASK != THREAD->task) {
…
         /*
-         * Note that it is possible for two tasks to share one address
-         * space.
-         (
+         * Note that it is possible for two tasks
+         * to share one address space.
          */
         if (old_as != new_as) {
             /*
              * Both tasks and address spaces are different.
              * Replace the old one with the new one.
-             *
              */
             as_switch(old_as, new_as);
…
      * necessary, is to be mapped in before_thread_runs(). This
      * function must be executed before the switch to the new stack.
-     *
      */
     before_thread_runs();
…
      * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
      * thread's stack.
-     *
      */
     the_copy(THE, (the_t *) THREAD->kstack);
…
     /*
      * Ready thread on local CPU
-     *
      */
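The heart of this merge in scheduler.c is the new CONFIG_UDEBUG block in before_thread_runs(): a one-shot btrace flag on the thread (set by thread_stack_trace() in thread.c below) is checked just before the thread regains the CPU; if it is set and a saved userspace interrupt state exists, the stack trace is printed and the flag is cleared. Below is a minimal, userspace-compilable sketch of that flag handshake; the names thread_t, request_trace, and consume_trace are illustrative stand-ins, not HelenOS APIs.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's thread structure; illustrative only. */
typedef struct {
    unsigned long tid;
    bool btrace;         /* set by the requester, consumed by the scheduler */
    void *uspace_state;  /* saved interrupt state, NULL if not available */
} thread_t;

/* Requester side (cf. thread_stack_trace): just raise the flag. */
static void request_trace(thread_t *t)
{
    if (t->uspace_state != NULL)
        t->btrace = true;
}

/* Scheduler side (cf. before_thread_runs): print once, then clear. */
static void consume_trace(thread_t *t)
{
    if (t->btrace) {
        if (t->uspace_state != NULL)
            printf("Thread %lu stack trace would be printed here.\n", t->tid);
        t->btrace = false;  /* one-shot: the request is consumed */
    }
}

int main(void)
{
    int dummy_state;
    thread_t t = { .tid = 42, .btrace = false, .uspace_state = &dummy_state };
    request_trace(&t);  /* e.g. triggered from a kernel console command */
    consume_trace(&t);  /* runs just before the thread is given the CPU */
    return 0;
}

The one-shot design means a trace request survives until the target thread is next scheduled, which is also why thread_stack_trace() interrupts a sleeping thread: otherwise the request could sit unserviced indefinitely.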
kernel/generic/src/proc/task.c
--- task.c (r4fe94c66)
+++ task.c (r12573db)

 sysarg_t sys_task_set_name(const char *uspace_name, size_t name_len)
 {
-    int rc;
     char namebuf[TASK_NAME_BUFLEN];
 
     /* Cap length of name and copy it from userspace. */
-
     if (name_len > TASK_NAME_BUFLEN - 1)
         name_len = TASK_NAME_BUFLEN - 1;
 
-    rc = copy_from_uspace(namebuf, uspace_name, name_len);
+    int rc = copy_from_uspace(namebuf, uspace_name, name_len);
     if (rc != 0)
         return (sysarg_t) rc;
 
     namebuf[name_len] = '\0';
+
+    /*
+     * As the task name is referenced also from the
+     * threads, lock the threads' lock for the course
+     * of the update.
+     */
+
+    irq_spinlock_lock(&tasks_lock, true);
+    irq_spinlock_lock(&TASK->lock, false);
+    irq_spinlock_lock(&threads_lock, false);
+
+    /* Set task name */
     str_cpy(TASK->name, TASK_NAME_BUFLEN, namebuf);
 
+    irq_spinlock_unlock(&threads_lock, false);
+    irq_spinlock_unlock(&TASK->lock, false);
+    irq_spinlock_unlock(&tasks_lock, true);
+
     return EOK;
 }
…
 {
     task_id_t taskid;
-    int rc;
-
-    rc = copy_from_uspace(&taskid, uspace_taskid, sizeof(taskid));
+    int rc = copy_from_uspace(&taskid, uspace_taskid, sizeof(taskid));
     if (rc != 0)
         return (sysarg_t) rc;
 
     return (sysarg_t) task_kill(taskid);
 }
…
 static void task_kill_internal(task_t *task)
 {
+    irq_spinlock_lock(&task->lock, false);
+    irq_spinlock_lock(&threads_lock, false);
+
+    /*
+     * Interrupt all threads.
+     */
+
     link_t *cur;
-
-    /*
-     * Interrupt all threads.
-     */
-    irq_spinlock_lock(&task->lock, false);
     for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
         thread_t *thread = list_get_instance(cur, thread_t, th_link);
…
     }
 
+    irq_spinlock_unlock(&threads_lock, false);
     irq_spinlock_unlock(&task->lock, false);
 }
…
     irq_spinlock_unlock(&tasks_lock, true);
 
+    return EOK;
+}
+
+/** Kill the currently running task.
+ *
+ * @param notify Send out fault notifications.
+ *
+ * @return Zero on success or an error code from errno.h.
+ *
+ */
+void task_kill_self(bool notify)
+{
+    /*
+     * User space can subscribe for FAULT events to take action
+     * whenever a task faults (to take a dump, run a debugger, etc.).
+     * The notification is always available, but unless udebug is enabled,
+     * that's all you get.
+     */
+    if (notify) {
+        if (event_is_subscribed(EVENT_FAULT)) {
+            /* Notify the subscriber that a fault occurred. */
+            event_notify_3(EVENT_FAULT, LOWER32(TASK->taskid),
+                UPPER32(TASK->taskid), (sysarg_t) THREAD);
+
+#ifdef CONFIG_UDEBUG
+            /* Wait for a debugging session. */
+            udebug_thread_fault();
+#endif
+        }
+    }
+
+    irq_spinlock_lock(&tasks_lock, true);
+    task_kill_internal(TASK);
+    irq_spinlock_unlock(&tasks_lock, true);
+
+    thread_exit();
+}
+
+/** Process syscall to terminate the current task.
+ *
+ * @param notify Send out fault notifications.
+ *
+ */
+sysarg_t sys_task_exit(sysarg_t notify)
+{
+    task_kill_self(notify);
+
+    /* Unreachable */
     return EOK;
 }
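Two things stand out in task.c. First, sys_task_set_name() now performs the name update under a fixed lock hierarchy: tasks_lock outermost (with interrupts disabled), then TASK->lock, then threads_lock innermost, released in reverse order, since the name is also read via the threads. task_kill_internal() follows the same task->lock → threads_lock ordering. Here is a minimal sketch of that acquire-in-order, release-in-reverse discipline using ordinary pthread mutexes; it is a simplified model, not the kernel's irq_spinlock API.

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-ins for tasks_lock, TASK->lock and threads_lock. */
static pthread_mutex_t tasks_lock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t task_lock    = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t threads_lock = PTHREAD_MUTEX_INITIALIZER;

static char task_name[32] = "old";

/* Update the name while holding all three locks, taken in the fixed
 * global order and released in reverse -- the same discipline the new
 * sys_task_set_name() follows so readers never see a torn name. */
static void set_name(const char *name)
{
    pthread_mutex_lock(&tasks_lock);    /* outermost */
    pthread_mutex_lock(&task_lock);
    pthread_mutex_lock(&threads_lock);  /* innermost */

    snprintf(task_name, sizeof(task_name), "%s", name);

    pthread_mutex_unlock(&threads_lock);
    pthread_mutex_unlock(&task_lock);
    pthread_mutex_unlock(&tasks_lock);
}

int main(void)
{
    set_name("new_name");
    printf("task name: %s\n", task_name);
    return 0;
}

Keeping one global acquisition order across all code paths is what rules out deadlock between concurrent name updates and the thread-walking code that reads the name.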
kernel/generic/src/proc/thread.c
--- thread.c (r4fe94c66)
+++ thread.c (r12573db)

  * Switch thread to the ready state.
  *
- * @param t Thread to make ready.
+ * @param thread Thread to make ready.
  *
  */
…
     irq_spinlock_lock(&thread->lock, true);
 
-    ASSERT(!(thread->state == Ready));
+    ASSERT(thread->state != Ready);
 
     int i = (thread->priority < RQ_COUNT - 1)
…
 #ifdef CONFIG_UDEBUG
-    /* Init debugging stuff */
+    /* Initialize debugging stuff */
+    thread->btrace = false;
     udebug_thread_initialize(&thread->udebug);
 #endif
…
 /** Detach thread.
  *
- * Mark the thread as detached, if the thread is already in the Lingering
- * state, deallocate its resources.
+ * Mark the thread as detached. If the thread is already
+ * in the Lingering state, deallocate its resources.
  *
  * @param thread Thread to be detached.
…
     order_suffix(thread->kcycles, &kcycles, &ksuffix);
 
+    char *name;
+    if (str_cmp(thread->name, "uinit") == 0)
+        name = thread->task->name;
+    else
+        name = thread->name;
+
 #ifdef __32_BITS__
     if (*additional)
-        printf("%-8" PRIu64 " %10p %9" PRIu64 "%c %9" PRIu64 "%c ",
-            thread->tid, thread->kstack, ucycles, usuffix,
-            kcycles, ksuffix);
+        printf("%-8" PRIu64 " %10p %10p %9" PRIu64 "%c %9" PRIu64 "%c ",
+            thread->tid, thread->thread_code, thread->kstack,
+            ucycles, usuffix, kcycles, ksuffix);
     else
-        printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 " %10p\n",
-            thread->tid, thread->name, thread, thread_states[thread->state],
-            thread->task, thread->task->context, thread->thread_code);
+        printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 "\n",
+            thread->tid, name, thread, thread_states[thread->state],
+            thread->task, thread->task->context);
 #endif
 
 #ifdef __64_BITS__
     if (*additional)
         printf("%-8" PRIu64 " %18p %18p\n"
             "         %9" PRIu64 "%c %9" PRIu64 "%c ",
             thread->tid, thread->thread_code, thread->kstack,
             ucycles, usuffix, kcycles, ksuffix);
     else
         printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n",
-            thread->tid, thread->name, thread, thread_states[thread->state],
+            thread->tid, name, thread, thread_states[thread->state],
             thread->task, thread->task->context);
 #endif
…
 #ifdef __32_BITS__
     if (additional)
-        printf("[id    ] [stack   ] [ucycles ] [kcycles ] [cpu]"
-            " [waitqueue]\n");
+        printf("[id    ] [code    ] [stack   ] [ucycles ] [kcycles ]"
+            " [cpu] [waitqueue]\n");
     else
         printf("[id    ] [name        ] [address ] [state ] [task    ]"
-            " [ctx] [code    ]\n");
+            " [ctx]\n");
 #endif
…
     ASSERT(interrupts_disabled());
     ASSERT(irq_spinlock_locked(&threads_lock));
 
     thread_iterator_t iterator;
…
 }
 
+#ifdef CONFIG_UDEBUG
+
+void thread_stack_trace(thread_id_t thread_id)
+{
+    irq_spinlock_lock(&threads_lock, true);
+
+    thread_t *thread = thread_find_by_id(thread_id);
+    if (thread == NULL) {
+        printf("No such thread.\n");
+        irq_spinlock_unlock(&threads_lock, true);
+        return;
+    }
+
+    irq_spinlock_lock(&thread->lock, false);
+
+    /*
+     * Schedule a stack trace to be printed
+     * just before the thread is scheduled next.
+     *
+     * If the thread is sleeping then try to interrupt
+     * the sleep. Any request for printing an uspace stack
+     * trace from within the kernel should be always
+     * considered a last resort debugging means, therefore
+     * forcing the thread's sleep to be interrupted
+     * is probably justifiable.
+     */
+
+    bool sleeping = false;
+    istate_t *istate = thread->udebug.uspace_state;
+    if (istate != NULL) {
+        printf("Scheduling thread stack trace.\n");
+        thread->btrace = true;
+        if (thread->state == Sleeping)
+            sleeping = true;
+    } else
+        printf("Thread interrupt state not available.\n");
+
+    irq_spinlock_unlock(&thread->lock, false);
+
+    if (sleeping)
+        waitq_interrupt_sleep(thread);
+
+    irq_spinlock_unlock(&threads_lock, true);
+}
+
+#endif /* CONFIG_UDEBUG */
 
 /** Process syscall to create new thread.
…
      * has already been created. We need to undo its
      * creation now.
-     *
      */
…
      * THREAD_B events for threads that already existed
      * and could be detected with THREAD_READ before.
-     *
      */
     udebug_thread_b_event_attach(thread, TASK);
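A detail worth noting in thread_stack_trace(): whether the target is sleeping is only recorded while thread->lock is held, and the actual waitq_interrupt_sleep() call is made after that lock is dropped, so the wait-queue lock is never taken while the thread lock is held. Below is a small sketch of this decide-under-the-lock, act-after-release shape; kthread_t, schedule_trace, and interrupt_sleep are hypothetical stand-ins for the kernel types and calls.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef enum { RUNNING, SLEEPING } state_t;

typedef struct {
    pthread_mutex_t lock;
    state_t state;
    bool btrace;
} kthread_t;

/* Stand-in for waitq_interrupt_sleep(); must not run under t->lock. */
static void interrupt_sleep(kthread_t *t)
{
    printf("waking sleeping thread so the trace prints promptly\n");
    t->state = RUNNING;
}

static void schedule_trace(kthread_t *t)
{
    bool sleeping = false;

    pthread_mutex_lock(&t->lock);
    t->btrace = true;        /* request the trace */
    if (t->state == SLEEPING)
        sleeping = true;     /* remember the decision under the lock... */
    pthread_mutex_unlock(&t->lock);

    if (sleeping)            /* ...and act on it after releasing it */
        interrupt_sleep(t);
}

int main(void)
{
    kthread_t t = { PTHREAD_MUTEX_INITIALIZER, SLEEPING, false };
    schedule_trace(&t);
    printf("btrace=%d state=%d\n", t.btrace, t.state);
    return 0;
}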