Changes in kernel/generic/src/proc/thread.c [dfa4be62:b169619] in mainline
File:
- kernel/generic/src/proc/thread.c (1 edited)
Legend:
- Unmodified (no prefix)
- Added (prefix "+"; present only in b169619)
- Removed (prefix "-"; present only in dfa4be62)
- Hunk headers "@@ -N +M @@" give the first line of each hunk in dfa4be62 and b169619, respectively.
--- kernel/generic/src/proc/thread.c    (rdfa4be62)
+++ kernel/generic/src/proc/thread.c    (rb169619)

@@ -60 +60 @@
 #include <arch/interrupt.h>
 #include <smp/ipi.h>
+#include <arch/faddr.h>
 #include <atomic.h>
 #include <memw.h>

@@ -81 +82 @@
 };
 
+enum sleep_state {
+    SLEEP_INITIAL,
+    SLEEP_ASLEEP,
+    SLEEP_WOKE,
+};
+
 /** Lock protecting the @c threads ordered dictionary.
  *

@@ -108 +115 @@
 static int threads_cmp(void *, void *);
 
+/** Thread wrapper.
+ *
+ * This wrapper is provided to ensure that every thread makes a call to
+ * thread_exit() when its implementing function returns.
+ *
+ * interrupts_disable() is assumed.
+ *
+ */
+static void cushion(void)
+{
+    void (*f)(void *) = THREAD->thread_code;
+    void *arg = THREAD->thread_arg;
+    THREAD->last_cycle = get_cycle();
+
+    /* This is where each thread wakes up after its creation */
+    irq_spinlock_unlock(&THREAD->lock, false);
+    interrupts_enable();
+
+    f(arg);
+
+    thread_exit();
+
+    /* Not reached */
+}
+
 /** Initialization and allocation for thread_t structure
  *

@@ -115 +147 @@
     thread_t *thread = (thread_t *) obj;
 
+    irq_spinlock_initialize(&thread->lock, "thread_t_lock");
     link_initialize(&thread->rq_link);
     link_initialize(&thread->wq_link);

@@ -196 +229 @@
 void thread_wire(thread_t *thread, cpu_t *cpu)
 {
-    ipl_t ipl = interrupts_disable();
-    atomic_set_unordered(&thread->cpu, cpu);
+    irq_spinlock_lock(&thread->lock, true);
+    thread->cpu = cpu;
     thread->nomigrate++;
-    interrupts_restore(ipl);
-}
-
-/** Start a thread that wasn't started yet since it was created.
- *
- * @param thread A reference to the newly created thread.
- */
-void thread_start(thread_t *thread)
-{
-    assert(atomic_get_unordered(&thread->state) == Entering);
-    thread_requeue_sleeping(thread_ref(thread));
+    irq_spinlock_unlock(&thread->lock, true);
+}
+
+/** Invoked right before thread_ready() readies the thread. thread is locked. */
+static void before_thread_is_ready(thread_t *thread)
+{
+    assert(irq_spinlock_locked(&thread->lock));
+}
+
+/** Make thread ready
+ *
+ * Switch thread to the ready state. Consumes reference passed by the caller.
+ *
+ * @param thread Thread to make ready.
+ *
+ */
+void thread_ready(thread_t *thread)
+{
+    irq_spinlock_lock(&thread->lock, true);
+
+    assert(thread->state != Ready);
+
+    before_thread_is_ready(thread);
+
+    int i = (thread->priority < RQ_COUNT - 1) ?
+        ++thread->priority : thread->priority;
+
+    /* Prefer the CPU on which the thread ran last */
+    cpu_t *cpu = thread->cpu ? thread->cpu : CPU;
+
+    thread->state = Ready;
+
+    irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
+
+    /*
+     * Append thread to respective ready queue
+     * on respective processor.
+     */
+
+    list_append(&thread->rq_link, &cpu->rq[i].rq);
+    cpu->rq[i].n++;
+    irq_spinlock_unlock(&(cpu->rq[i].lock), true);
+
+    atomic_inc(&nrdy);
+    atomic_inc(&cpu->nrdy);
 }

@@ -248 +315 @@
     irq_spinlock_unlock(&tidlock, true);
 
-    context_create(&thread->saved_context, thread_main_func,
-        thread->kstack, STACK_SIZE);
+    memset(&thread->saved_context, 0, sizeof(thread->saved_context));
+    context_set(&thread->saved_context, FADDR(cushion),
+        (uintptr_t) thread->kstack, STACK_SIZE);
 
     current_initialize((current_t *) thread->kstack);
+
+    ipl_t ipl = interrupts_disable();
+    thread->saved_ipl = interrupts_read();
+    interrupts_restore(ipl);
 
     str_cpy(thread->name, THREAD_NAME_BUFLEN, name);

@@ -257 +329 @@
     thread->thread_code = func;
     thread->thread_arg = arg;
-    thread->ucycles = ATOMIC_TIME_INITIALIZER();
-    thread->kcycles = ATOMIC_TIME_INITIALIZER();
+    thread->ucycles = 0;
+    thread->kcycles = 0;
     thread->uncounted =
         ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
-    atomic_init(&thread->priority, 0);
-    atomic_init(&thread->cpu, NULL);
+    thread->priority = -1;          /* Start in rq[0] */
+    thread->cpu = NULL;
     thread->stolen = false;
     thread->uspace =

@@ -268 +340 @@
 
     thread->nomigrate = 0;
-    atomic_init(&thread->state, Entering);
+    thread->state = Entering;
 
     atomic_init(&thread->sleep_queue, NULL);

@@ -288 +360 @@
 #ifdef CONFIG_UDEBUG
     /* Initialize debugging stuff */
-    atomic_init(&thread->btrace, false);
+    thread->btrace = false;
     udebug_thread_initialize(&thread->udebug);
 #endif

@@ -332 +404 @@
 
     if (!thread->uncounted) {
-        thread->task->ucycles += atomic_time_read(&thread->ucycles);
-        thread->task->kcycles += atomic_time_read(&thread->kcycles);
+        thread->task->ucycles += thread->ucycles;
+        thread->task->kcycles += thread->kcycles;
     }
 
     irq_spinlock_unlock(&thread->task->lock, false);
 
-    assert((atomic_get_unordered(&thread->state) == Exiting) || (atomic_get_unordered(&thread->state) == Lingering));
+    assert((thread->state == Exiting) || (thread->state == Lingering));
 
     /* Clear cpu->fpu_owner if set to this thread. */
 #ifdef CONFIG_FPU_LAZY
-    cpu_t *cpu = atomic_get_unordered(&thread->cpu);
-    if (cpu) {
+    if (thread->cpu) {
         /*

@@ -349 +420 @@
          * it to finish. An atomic compare-and-swap wouldn't be enough.
          */
-        irq_spinlock_lock(&cpu->fpu_lock, false);
-
-        if (atomic_get_unordered(&cpu->fpu_owner) == thread)
-            atomic_set_unordered(&cpu->fpu_owner, NULL);
-
-        irq_spinlock_unlock(&cpu->fpu_lock, false);
+        irq_spinlock_lock(&thread->cpu->fpu_lock, false);
+
+        thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner,
+            memory_order_relaxed);
+
+        if (owner == thread) {
+            atomic_store_explicit(&thread->cpu->fpu_owner, NULL,
+                memory_order_relaxed);
+        }
+
+        irq_spinlock_unlock(&thread->cpu->fpu_lock, false);
     }
 #endif

@@ -449 +525 @@
     }
 
-    scheduler_enter(Exiting);
-    unreachable();
+    irq_spinlock_lock(&THREAD->lock, true);
+    THREAD->state = Exiting;
+    irq_spinlock_unlock(&THREAD->lock, true);
+
+    scheduler();
+
+    panic("should never be reached");
 }

@@ -496 +577 @@
 
     return THREAD->interrupted ? THREAD_TERMINATING : THREAD_OK;
+}
+
+static void thread_wait_internal(void)
+{
+    assert(THREAD != NULL);
+
+    ipl_t ipl = interrupts_disable();
+
+    if (atomic_load(&haltstate))
+        halt();
+
+    /*
+     * Lock here to prevent a race between entering the scheduler and another
+     * thread rescheduling this thread.
+     */
+    irq_spinlock_lock(&THREAD->lock, false);
+
+    int expected = SLEEP_INITIAL;
+
+    /* Only set SLEEP_ASLEEP in sleep pad if it's still in initial state */
+    if (atomic_compare_exchange_strong_explicit(&THREAD->sleep_state, &expected,
+        SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire)) {
+        THREAD->state = Sleeping;
+        scheduler_locked(ipl);
+    } else {
+        assert(expected == SLEEP_WOKE);
+        /* Return immediately. */
+        irq_spinlock_unlock(&THREAD->lock, false);
+        interrupts_restore(ipl);
+    }
 }

@@ -538 +649 @@
     timeout_t timeout;
 
-    /* Extra check to avoid going to scheduler if we don't need to. */
-    if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
-        SLEEP_INITIAL)
-        return THREAD_WAIT_SUCCESS;
-
     if (deadline != DEADLINE_NEVER) {
+        /* Extra check to avoid setting up a deadline if we don't need to. */
+        if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
+            SLEEP_INITIAL)
+            return THREAD_WAIT_SUCCESS;
+
         timeout_initialize(&timeout);
         timeout_register_deadline(&timeout, deadline,

@@ -549 +660 @@
     }
 
-    scheduler_enter(Sleeping);
+    thread_wait_internal();
 
     if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {

@@ -563 +674 @@
 
     int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
-        memory_order_acq_rel);
+        memory_order_release);
 
     if (state == SLEEP_ASLEEP) {

@@ -571 +682 @@
          * the waking thread by the sleeper in thread_wait_finish().
          */
-        thread_requeue_sleeping(thread);
+        thread_ready(thread);
     }
 }

@@ -578 +689 @@
 void thread_migration_disable(void)
 {
-    ipl_t ipl = interrupts_disable();
-
     assert(THREAD);
+
     THREAD->nomigrate++;
-
-    interrupts_restore(ipl);
 }

@@ -589 +697 @@
 void thread_migration_enable(void)
 {
-    ipl_t ipl = interrupts_disable();
-
     assert(THREAD);
     assert(THREAD->nomigrate > 0);

@@ -596 +702 @@
     if (THREAD->nomigrate > 0)
         THREAD->nomigrate--;
-
-    interrupts_restore(ipl);
 }

@@ -627 +731 @@
 
 /** Wait for another thread to exit.
- * After successful wait, the thread reference is destroyed.
+ * This function does not destroy the thread. Reference counting handles that.
  *
  * @param thread Thread to join on exit.

@@ -638 +742 @@
 errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
 {
-    assert(thread != NULL);
-
     if (thread == THREAD)
         return EINVAL;
 
-    errno_t rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
-
-    if (rc == EOK)
-        thread_put(thread);
-
-    return rc;
-}
-
-void thread_detach(thread_t *thread)
-{
-    thread_put(thread);
+    irq_spinlock_lock(&thread->lock, true);
+    state_t state = thread->state;
+    irq_spinlock_unlock(&thread->lock, true);
+
+    if (state == Exiting) {
+        return EOK;
+    } else {
+        return _waitq_sleep_timeout(&thread->join_wq, usec, flags);
+    }
 }

@@ -670 +770 @@
 
     (void) waitq_sleep_timeout(&wq, usec);
-}
-
-/** Allow other threads to run. */
-void thread_yield(void)
-{
-    assert(THREAD != NULL);
-    scheduler_enter(Running);
 }

@@ -683 +776 @@
     uint64_t ucycles, kcycles;
     char usuffix, ksuffix;
-    order_suffix(atomic_time_read(&thread->ucycles), &ucycles, &usuffix);
-    order_suffix(atomic_time_read(&thread->kcycles), &kcycles, &ksuffix);
-
-    state_t state = atomic_get_unordered(&thread->state);
+    order_suffix(thread->ucycles, &ucycles, &usuffix);
+    order_suffix(thread->kcycles, &kcycles, &ksuffix);
 
     char *name;

@@ -700 +791 @@
     else
         printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
-            thread->tid, name, thread, thread_states[state],
+            thread->tid, name, thread, thread_states[thread->state],
             thread->task, thread->task->container);
 
     if (additional) {
-        cpu_t *cpu = atomic_get_unordered(&thread->cpu);
-        if (cpu)
-            printf("%-5u", cpu->id);
+        if (thread->cpu)
+            printf("%-5u", thread->cpu->id);
         else
             printf("none ");
 
-        if (state == Sleeping) {
+        if (thread->state == Sleeping) {
             printf(" %p", thread->sleep_queue);
         }

@@ -789 +879 @@
 void thread_update_accounting(bool user)
 {
+    uint64_t time = get_cycle();
+
     assert(interrupts_disabled());
-
-    uint64_t time = get_cycle();
+    assert(irq_spinlock_locked(&THREAD->lock));
 
     if (user)
-        atomic_time_increment(&THREAD->ucycles, time - THREAD->last_cycle);
+        THREAD->ucycles += time - THREAD->last_cycle;
     else
-        atomic_time_increment(&THREAD->kcycles, time - THREAD->last_cycle);
+        THREAD->kcycles += time - THREAD->last_cycle;
 
     THREAD->last_cycle = time;

@@ -907 +998 @@
      */
 
-    printf("Scheduling thread stack trace.\n");
-    atomic_set_unordered(&thread->btrace, true);
-
-    thread_wakeup(thread);
+    irq_spinlock_lock(&thread->lock, true);
+
+    bool sleeping = false;
+    istate_t *istate = thread->udebug.uspace_state;
+    if (istate != NULL) {
+        printf("Scheduling thread stack trace.\n");
+        thread->btrace = true;
+        if (thread->state == Sleeping)
+            sleeping = true;
+    } else
+        printf("Thread interrupt state not available.\n");
+
+    irq_spinlock_unlock(&thread->lock, true);
+
+    if (sleeping)
+        thread_wakeup(thread);
+
     thread_put(thread);

@@ -1011 +1115 @@
     thread_attach(thread, TASK);
 #endif
-    thread_start(thread);
-    thread_put(thread);
+    thread_ready(thread);
 
     return 0;
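The most intricate part of the b169619 side above is the wakeup handshake over the new three-valued sleep_state: thread_wait_internal() commits to sleeping only if it wins a compare-and-swap from SLEEP_INITIAL to SLEEP_ASLEEP, while thread_wakeup() unconditionally exchanges the field to SLEEP_WOKE and re-queues the sleeper only when the previous value was SLEEP_ASLEEP, so a wakeup that arrives before the sleep simply makes the sleep return immediately. The standalone C11 sketch below models only that handshake; the sleeper()/waker() names, the pthread mutex/condvar standing in for the scheduler, and the main() driver are illustrative assumptions and not HelenOS code, which additionally manages THREAD->lock, interrupt levels and the run queues.

/*
 * Standalone model of the SLEEP_INITIAL / SLEEP_ASLEEP / SLEEP_WOKE handshake
 * used by thread_wait_internal() and thread_wakeup() in rb169619.
 * Hypothetical, simplified example - not HelenOS code.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum sleep_state { SLEEP_INITIAL, SLEEP_ASLEEP, SLEEP_WOKE };

static atomic_int sleep_state = SLEEP_INITIAL;

/* The mutex/condvar pair stands in for "enter the scheduler" / "ready the thread". */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int woken = 0;

static void *sleeper(void *arg)
{
    (void) arg;
    int expected = SLEEP_INITIAL;

    /* Commit to sleeping only if no wakeup has been recorded yet. */
    if (atomic_compare_exchange_strong_explicit(&sleep_state, &expected,
        SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire)) {
        /* Models THREAD->state = Sleeping; scheduler_locked(ipl); */
        pthread_mutex_lock(&lock);
        while (!woken)
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        puts("sleeper: slept, then was woken");
    } else {
        /* The waker won the race; return immediately. */
        puts("sleeper: wakeup already pending, not sleeping");
    }
    return NULL;
}

static void waker(void)
{
    /* Record the wakeup unconditionally... */
    int prev = atomic_exchange_explicit(&sleep_state, SLEEP_WOKE,
        memory_order_release);

    /* ...but only re-queue the sleeper if it actually went to sleep. */
    if (prev == SLEEP_ASLEEP) {
        pthread_mutex_lock(&lock);
        woken = 1;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
    }
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, sleeper, NULL);
    waker();
    pthread_join(t, NULL);
    return 0;
}

Built with a C11 compiler and -pthread, the program prints one of the two messages depending on which side wins the race, and the sleeper terminates in both cases, which is the property the kernel-side protocol relies on.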