File: kernel/generic/src/proc/thread.c (diff between changesets rdfa4be62 and rb169619)

--- kernel/generic/src/proc/thread.c   (rdfa4be62)
+++ kernel/generic/src/proc/thread.c   (rb169619)
@@ -60,4 +60,5 @@
 #include <arch/interrupt.h>
 #include <smp/ipi.h>
+#include <arch/faddr.h>
 #include <atomic.h>
 #include <memw.h>
     
@@ -81,4 +82,10 @@
 };
 
+enum sleep_state {
+        SLEEP_INITIAL,
+        SLEEP_ASLEEP,
+        SLEEP_WOKE,
+};
+
 /** Lock protecting the @c threads ordered dictionary .
  *
     
@@ -108,4 +115,29 @@
 static int threads_cmp(void *, void *);
 
+/** Thread wrapper.
+ *
+ * This wrapper is provided to ensure that every thread makes a call to
+ * thread_exit() when its implementing function returns.
+ *
+ * interrupts_disable() is assumed.
+ *
+ */
+static void cushion(void)
+{
+        void (*f)(void *) = THREAD->thread_code;
+        void *arg = THREAD->thread_arg;
+        THREAD->last_cycle = get_cycle();
+
+        /* This is where each thread wakes up after its creation */
+        irq_spinlock_unlock(&THREAD->lock, false);
+        interrupts_enable();
+
+        f(arg);
+
+        thread_exit();
+
+        /* Not reached */
+}
+
 /** Initialization and allocation for thread_t structure
  *
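
The added cushion() wrapper guarantees that thread_exit() runs when a thread's implementing function returns. A minimal userspace sketch of that trampoline pattern (illustrative only; struct start, cushion_demo and fake_thread_exit are names invented for this demo):

    #include <stdio.h>

    struct start { void (*code)(void *); void *arg; };

    /* Stand-in for thread_exit(); never skipped once code() returns. */
    static void fake_thread_exit(void) { puts("thread_exit() reached"); }

    static void cushion_demo(struct start *s)
    {
            s->code(s->arg);     /* run the thread's implementing function */
            fake_thread_exit();  /* always reached when it returns */
    }

    static void payload(void *arg) { printf("payload: %s\n", (char *) arg); }

    int main(void)
    {
            struct start s = { payload, "hello" };
            cushion_demo(&s);
            return 0;
    }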
     
@@ -115,4 +147,5 @@
         thread_t *thread = (thread_t *) obj;
 
+        irq_spinlock_initialize(&thread->lock, "thread_t_lock");
         link_initialize(&thread->rq_link);
         link_initialize(&thread->wq_link);
     
@@ -196,18 +229,52 @@
 void thread_wire(thread_t *thread, cpu_t *cpu)
 {
-        ipl_t ipl = interrupts_disable();
-        atomic_set_unordered(&thread->cpu, cpu);
+        irq_spinlock_lock(&thread->lock, true);
+        thread->cpu = cpu;
         thread->nomigrate++;
-        interrupts_restore(ipl);
-}
-
-/** Start a thread that wasn't started yet since it was created.
- *
- * @param thread A reference to the newly created thread.
- */
-void thread_start(thread_t *thread)
-{
-        assert(atomic_get_unordered(&thread->state) == Entering);
-        thread_requeue_sleeping(thread_ref(thread));
+        irq_spinlock_unlock(&thread->lock, true);
+}
+
+/** Invoked right before thread_ready() readies the thread. thread is locked. */
+static void before_thread_is_ready(thread_t *thread)
+{
+        assert(irq_spinlock_locked(&thread->lock));
+}
+
+/** Make thread ready
+ *
+ * Switch thread to the ready state. Consumes reference passed by the caller.
+ *
+ * @param thread Thread to make ready.
+ *
+ */
+void thread_ready(thread_t *thread)
+{
+        irq_spinlock_lock(&thread->lock, true);
+
+        assert(thread->state != Ready);
+
+        before_thread_is_ready(thread);
+
+        int i = (thread->priority < RQ_COUNT - 1) ?
+            ++thread->priority : thread->priority;
+
+        /* Prefer the CPU on which the thread ran last */
+        cpu_t *cpu = thread->cpu ? thread->cpu : CPU;
+
+        thread->state = Ready;
+
+        irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
+
+        /*
+         * Append thread to respective ready queue
+         * on respective processor.
+         */
+
+        list_append(&thread->rq_link, &cpu->rq[i].rq);
+        cpu->rq[i].n++;
+        irq_spinlock_unlock(&(cpu->rq[i].lock), true);
+
+        atomic_inc(&nrdy);
+        atomic_inc(&cpu->nrdy);
 }
 
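
The run-queue selection in thread_ready() above bumps the thread's priority by one on every readying, saturating at RQ_COUNT - 1; new threads are created with priority -1 (see the thread_create hunk below), so their first readying lands in rq[0]. A self-contained sketch of just that index logic (the RQ_COUNT value here is illustrative):

    #include <stdio.h>

    #define RQ_COUNT 16

    /* Mirrors: int i = (thread->priority < RQ_COUNT - 1) ?
     *     ++thread->priority : thread->priority; */
    static int select_rq(int *priority)
    {
            return (*priority < RQ_COUNT - 1) ? ++*priority : *priority;
    }

    int main(void)
    {
            int prio = -1;  /* new threads start at -1, first ready -> rq[0] */
            for (int n = 0; n < 20; n++)
                    printf("ready #%d -> rq[%d]\n", n, select_rq(&prio));
            return 0;
    }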
     
@@ -248,8 +315,13 @@
         irq_spinlock_unlock(&tidlock, true);
 
-        context_create(&thread->saved_context, thread_main_func,
-            thread->kstack, STACK_SIZE);
+        memset(&thread->saved_context, 0, sizeof(thread->saved_context));
+        context_set(&thread->saved_context, FADDR(cushion),
+            (uintptr_t) thread->kstack, STACK_SIZE);
 
         current_initialize((current_t *) thread->kstack);
+
+        ipl_t ipl = interrupts_disable();
+        thread->saved_ipl = interrupts_read();
+        interrupts_restore(ipl);
 
         str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
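
Here the saved context is zeroed and pointed at cushion() on the new thread's kernel stack, so the very first switch into the thread lands in the wrapper. context_set() and FADDR are architecture-specific; a rough userspace analogy using POSIX ucontext (illustrative only, not HelenOS code):

    #include <stdio.h>
    #include <ucontext.h>

    static ucontext_t main_ctx, thread_ctx;
    static char stack[64 * 1024];

    static void entry(void) { puts("running on the new stack"); }

    int main(void)
    {
            getcontext(&thread_ctx);
            thread_ctx.uc_stack.ss_sp = stack;
            thread_ctx.uc_stack.ss_size = sizeof(stack);
            thread_ctx.uc_link = &main_ctx;      /* where to go when entry() returns */
            makecontext(&thread_ctx, entry, 0);
            swapcontext(&main_ctx, &thread_ctx); /* first "switch into the thread" */
            puts("back in main");
            return 0;
    }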
     
@@ -257,10 +329,10 @@
         thread->thread_code = func;
         thread->thread_arg = arg;
-        thread->ucycles = ATOMIC_TIME_INITIALIZER();
-        thread->kcycles = ATOMIC_TIME_INITIALIZER();
+        thread->ucycles = 0;
+        thread->kcycles = 0;
         thread->uncounted =
             ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
-        atomic_init(&thread->priority, 0);
-        atomic_init(&thread->cpu, NULL);
+        thread->priority = -1;          /* Start in rq[0] */
+        thread->cpu = NULL;
         thread->stolen = false;
         thread->uspace =
     
@@ -268,5 +340,5 @@
 
         thread->nomigrate = 0;
-        atomic_init(&thread->state, Entering);
+        thread->state = Entering;
 
         atomic_init(&thread->sleep_queue, NULL);
     
@@ -288,5 +360,5 @@
 #ifdef CONFIG_UDEBUG
         /* Initialize debugging stuff */
-        atomic_init(&thread->btrace, false);
+        thread->btrace = false;
         udebug_thread_initialize(&thread->udebug);
 #endif
     
@@ -332,16 +404,15 @@
 
         if (!thread->uncounted) {
-                thread->task->ucycles += atomic_time_read(&thread->ucycles);
-                thread->task->kcycles += atomic_time_read(&thread->kcycles);
+                thread->task->ucycles += thread->ucycles;
+                thread->task->kcycles += thread->kcycles;
         }
 
         irq_spinlock_unlock(&thread->task->lock, false);
 
-        assert((atomic_get_unordered(&thread->state) == Exiting) || (atomic_get_unordered(&thread->state) == Lingering));
+        assert((thread->state == Exiting) || (thread->state == Lingering));
 
         /* Clear cpu->fpu_owner if set to this thread. */
 #ifdef CONFIG_FPU_LAZY
-        cpu_t *cpu = atomic_get_unordered(&thread->cpu);
-        if (cpu) {
+        if (thread->cpu) {
                 /*
                  * We need to lock for this because the old CPU can concurrently try
     
@@ -349,10 +420,15 @@
                  * it to finish. An atomic compare-and-swap wouldn't be enough.
                  */
-                irq_spinlock_lock(&cpu->fpu_lock, false);
-
-                if (atomic_get_unordered(&cpu->fpu_owner) == thread)
-                        atomic_set_unordered(&cpu->fpu_owner, NULL);
-
-                irq_spinlock_unlock(&cpu->fpu_lock, false);
+                irq_spinlock_lock(&thread->cpu->fpu_lock, false);
+
+                thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner,
+                    memory_order_relaxed);
+
+                if (owner == thread) {
+                        atomic_store_explicit(&thread->cpu->fpu_owner, NULL,
+                            memory_order_relaxed);
+                }
+
+                irq_spinlock_unlock(&thread->cpu->fpu_lock, false);
         }
 #endif
     
@@ -449,6 +525,11 @@
         }
 
-        scheduler_enter(Exiting);
-        unreachable();
+        irq_spinlock_lock(&THREAD->lock, true);
+        THREAD->state = Exiting;
+        irq_spinlock_unlock(&THREAD->lock, true);
+
+        scheduler();
+
+        panic("should never be reached");
 }
 
     
@@ -496,4 +577,34 @@
 
         return THREAD->interrupted ? THREAD_TERMINATING : THREAD_OK;
+}
+
+static void thread_wait_internal(void)
+{
+        assert(THREAD != NULL);
+
+        ipl_t ipl = interrupts_disable();
+
+        if (atomic_load(&haltstate))
+                halt();
+
+        /*
+         * Lock here to prevent a race between entering the scheduler and another
+         * thread rescheduling this thread.
+         */
+        irq_spinlock_lock(&THREAD->lock, false);
+
+        int expected = SLEEP_INITIAL;
+
+        /* Only set SLEEP_ASLEEP in sleep pad if it's still in initial state */
+        if (atomic_compare_exchange_strong_explicit(&THREAD->sleep_state, &expected,
+            SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire)) {
+                THREAD->state = Sleeping;
+                scheduler_locked(ipl);
+        } else {
+                assert(expected == SLEEP_WOKE);
+                /* Return immediately. */
+                irq_spinlock_unlock(&THREAD->lock, false);
+                interrupts_restore(ipl);
+        }
 }
 
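
Together with the SLEEP_WOKE exchange in thread_wakeup() below, this compare-and-swap forms a handshake with no lost wakeups: the sleeper commits to sleeping only if no wakeup arrived first, and the waker readies the sleeper only if it actually committed. A self-contained C11 sketch of the same protocol, with a mutex and condition variable standing in for the scheduler (all names here are invented for the demo):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <threads.h>

    enum { SLEEP_INITIAL, SLEEP_ASLEEP, SLEEP_WOKE };

    static atomic_int sleep_state = SLEEP_INITIAL;
    static mtx_t m;
    static cnd_t c;
    static int woken;

    static void tw_sleep(void)
    {
            int expected = SLEEP_INITIAL;
            /* Commit to sleeping only if no wakeup arrived yet. */
            if (atomic_compare_exchange_strong_explicit(&sleep_state,
                &expected, SLEEP_ASLEEP, memory_order_acq_rel,
                memory_order_acquire)) {
                    mtx_lock(&m);
                    while (!woken)
                            cnd_wait(&c, &m);  /* stands in for the scheduler */
                    mtx_unlock(&m);
            } else {
                    /* Wakeup won the race; return immediately. */
            }
    }

    static int waker(void *arg)
    {
            (void) arg;
            /* Publish the wakeup and learn whether the sleeper committed. */
            int prev = atomic_exchange_explicit(&sleep_state, SLEEP_WOKE,
                memory_order_release);
            if (prev == SLEEP_ASLEEP) {
                    mtx_lock(&m);
                    woken = 1;
                    cnd_signal(&c);            /* stands in for thread_ready() */
                    mtx_unlock(&m);
            }
            return 0;
    }

    int main(void)
    {
            mtx_init(&m, mtx_plain);
            cnd_init(&c);
            thrd_t t;
            thrd_create(&t, waker, NULL);
            tw_sleep();
            thrd_join(t, NULL);
            puts("woke up exactly once");
            return 0;
    }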
     
@@ -538,10 +649,10 @@
         timeout_t timeout;
 
-        /* Extra check to avoid going to scheduler if we don't need to. */
-        if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
-            SLEEP_INITIAL)
-                return THREAD_WAIT_SUCCESS;
-
         if (deadline != DEADLINE_NEVER) {
+                /* Extra check to avoid setting up a deadline if we don't need to. */
+                if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
+                    SLEEP_INITIAL)
+                        return THREAD_WAIT_SUCCESS;
+
                 timeout_initialize(&timeout);
                 timeout_register_deadline(&timeout, deadline,
     
@@ -549,5 +660,5 @@
         }
 
-        scheduler_enter(Sleeping);
+        thread_wait_internal();
 
         if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
     
@@ -563,5 +674,5 @@
 
         int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
-            memory_order_acq_rel);
+            memory_order_release);
 
         if (state == SLEEP_ASLEEP) {
     
@@ -571,5 +682,5 @@
                  * the waking thread by the sleeper in thread_wait_finish().
                  */
-                thread_requeue_sleeping(thread);
+                thread_ready(thread);
         }
 }
     
@@ -578,10 +689,7 @@
 void thread_migration_disable(void)
 {
-        ipl_t ipl = interrupts_disable();
-
         assert(THREAD);
+
         THREAD->nomigrate++;
-
-        interrupts_restore(ipl);
 }
 
     
@@ -589,6 +697,4 @@
 void thread_migration_enable(void)
 {
-        ipl_t ipl = interrupts_disable();
-
         assert(THREAD);
         assert(THREAD->nomigrate > 0);
     
@@ -596,6 +702,4 @@
         if (THREAD->nomigrate > 0)
                 THREAD->nomigrate--;
-
-        interrupts_restore(ipl);
 }
 
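
thread_migration_disable() and thread_migration_enable() implement a nesting counter: migration is re-allowed only once every disable has been matched by an enable. A trivial single-threaded sketch of that discipline (function names invented for the demo):

    #include <assert.h>
    #include <stdio.h>

    static int nomigrate;

    static void migration_disable(void) { nomigrate++; }

    static void migration_enable(void)
    {
            assert(nomigrate > 0);
            if (nomigrate > 0)
                    nomigrate--;
    }

    int main(void)
    {
            migration_disable();   /* outer no-migrate section */
            migration_disable();   /* nested section */
            migration_enable();    /* still pinned: counter is 1 */
            migration_enable();    /* migration allowed again */
            printf("nomigrate = %d\n", nomigrate);
            return 0;
    }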
     
@@ -627,5 +731,5 @@
 
 /** Wait for another thread to exit.
- * After successful wait, the thread reference is destroyed.
+ * This function does not destroy the thread. Reference counting handles that.
  *
  * @param thread Thread to join on exit.
     
@@ -638,20 +742,16 @@
 errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
 {
-        assert(thread != NULL);
-
         if (thread == THREAD)
                 return EINVAL;
 
-        errno_t rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
-
-        if (rc == EOK)
-                thread_put(thread);
-
-        return rc;
-}
-
-void thread_detach(thread_t *thread)
-{
-        thread_put(thread);
+        irq_spinlock_lock(&thread->lock, true);
+        state_t state = thread->state;
+        irq_spinlock_unlock(&thread->lock, true);
+
+        if (state == Exiting) {
+                return EOK;
+        } else {
+                return _waitq_sleep_timeout(&thread->join_wq, usec, flags);
+        }
 }
 
     
@@ -670,11 +770,4 @@
 
         (void) waitq_sleep_timeout(&wq, usec);
-}
-
-/** Allow other threads to run. */
-void thread_yield(void)
-{
-        assert(THREAD != NULL);
-        scheduler_enter(Running);
 }
 
     
@@ -683,8 +776,6 @@
         uint64_t ucycles, kcycles;
         char usuffix, ksuffix;
-        order_suffix(atomic_time_read(&thread->ucycles), &ucycles, &usuffix);
-        order_suffix(atomic_time_read(&thread->kcycles), &kcycles, &ksuffix);
-
-        state_t state = atomic_get_unordered(&thread->state);
+        order_suffix(thread->ucycles, &ucycles, &usuffix);
+        order_suffix(thread->kcycles, &kcycles, &ksuffix);
 
         char *name;
     
@@ -700,15 +791,14 @@
         else
                 printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
-                    thread->tid, name, thread, thread_states[state],
+                    thread->tid, name, thread, thread_states[thread->state],
                     thread->task, thread->task->container);
 
         if (additional) {
-                cpu_t *cpu = atomic_get_unordered(&thread->cpu);
-                if (cpu)
-                        printf("%-5u", cpu->id);
+                if (thread->cpu)
+                        printf("%-5u", thread->cpu->id);
                 else
                         printf("none ");
 
-                if (state == Sleeping) {
+                if (thread->state == Sleeping) {
                         printf(" %p", thread->sleep_queue);
                 }
     
@@ -789,12 +879,13 @@
 void thread_update_accounting(bool user)
 {
+        uint64_t time = get_cycle();
+
         assert(interrupts_disabled());
-
-        uint64_t time = get_cycle();
+        assert(irq_spinlock_locked(&THREAD->lock));
 
         if (user)
-                atomic_time_increment(&THREAD->ucycles, time - THREAD->last_cycle);
+                THREAD->ucycles += time - THREAD->last_cycle;
         else
-                atomic_time_increment(&THREAD->kcycles, time - THREAD->last_cycle);
+                THREAD->kcycles += time - THREAD->last_cycle;
 
         THREAD->last_cycle = time;
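
thread_update_accounting() charges the cycles elapsed since last_cycle to either the user or the kernel counter, then resets the baseline. A minimal single-threaded sketch of that delta accounting (get_cycle_demo() is a fake cycle counter invented for the demo):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t ucycles, kcycles, last_cycle;

    /* Fake, monotonically increasing cycle counter for the demo. */
    static uint64_t get_cycle_demo(void)
    {
            static uint64_t c;
            return c += 100;
    }

    static void update_accounting(int user)
    {
            uint64_t time = get_cycle_demo();
            if (user)
                    ucycles += time - last_cycle;  /* charge to userspace */
            else
                    kcycles += time - last_cycle;  /* charge to the kernel */
            last_cycle = time;                     /* reset the baseline */
    }

    int main(void)
    {
            last_cycle = get_cycle_demo();
            update_accounting(1);
            update_accounting(0);
            printf("u=%llu k=%llu\n", (unsigned long long) ucycles,
                (unsigned long long) kcycles);
            return 0;
    }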
     
@@ -907,8 +998,21 @@
          */
 
-        printf("Scheduling thread stack trace.\n");
-        atomic_set_unordered(&thread->btrace, true);
-
-        thread_wakeup(thread);
+        irq_spinlock_lock(&thread->lock, true);
+
+        bool sleeping = false;
+        istate_t *istate = thread->udebug.uspace_state;
+        if (istate != NULL) {
+                printf("Scheduling thread stack trace.\n");
+                thread->btrace = true;
+                if (thread->state == Sleeping)
+                        sleeping = true;
+        } else
+                printf("Thread interrupt state not available.\n");
+
+        irq_spinlock_unlock(&thread->lock, true);
+
+        if (sleeping)
+                thread_wakeup(thread);
+
         thread_put(thread);
 }
     
@@ -1011,6 +1115,5 @@
                 thread_attach(thread, TASK);
 #endif
-                thread_start(thread);
-                thread_put(thread);
+                thread_ready(thread);
 
                 return 0;