  • kernel/generic/src/proc/thread.c

    rdfa4be62 → r128359eb
    6060#include <arch/interrupt.h>
    6161#include <smp/ipi.h>
     62#include <arch/faddr.h>
    6263#include <atomic.h>
    63 #include <memw.h>
     64#include <mem.h>
    6465#include <stdio.h>
    6566#include <stdlib.h>
     
    6869#include <errno.h>
    6970#include <debug.h>
    70 #include <halt.h>
    7171
    7272/** Thread states */
     
    9494 *
    9595 * Members are of type thread_t.
    96  *
    97  * This structure contains weak references. Any reference from it must not leave
    98  * threads_lock critical section unless strengthened via thread_try_ref().
    9996 */
    10097odict_t threads;
     
    105102static slab_cache_t *thread_cache;
    106103
     104#ifdef CONFIG_FPU
     105slab_cache_t *fpu_context_cache;
     106#endif
     107
    107108static void *threads_getkey(odlink_t *);
    108109static int threads_cmp(void *, void *);
    109110
     111/** Thread wrapper.
     112 *
     113 * This wrapper is provided to ensure that every thread makes a call to
     114 * thread_exit() when its implementing function returns.
     115 *
     116 * interrupts_disable() is assumed.
     117 *
     118 */
     119static void cushion(void)
     120{
     121        void (*f)(void *) = THREAD->thread_code;
     122        void *arg = THREAD->thread_arg;
     123        THREAD->last_cycle = get_cycle();
     124
     125        /* This is where each thread wakes up after its creation */
     126        irq_spinlock_unlock(&THREAD->lock, false);
     127        interrupts_enable();
     128
     129        f(arg);
     130
     131        /* Accumulate accounting to the task */
     132        irq_spinlock_lock(&THREAD->lock, true);
     133        if (!THREAD->uncounted) {
     134                thread_update_accounting(true);
     135                uint64_t ucycles = THREAD->ucycles;
     136                THREAD->ucycles = 0;
     137                uint64_t kcycles = THREAD->kcycles;
     138                THREAD->kcycles = 0;
     139
     140                irq_spinlock_pass(&THREAD->lock, &TASK->lock);
     141                TASK->ucycles += ucycles;
     142                TASK->kcycles += kcycles;
     143                irq_spinlock_unlock(&TASK->lock, true);
     144        } else
     145                irq_spinlock_unlock(&THREAD->lock, true);
     146
     147        thread_exit();
     148
     149        /* Not reached */
     150}
     151
    110152/** Initialization and allocation for thread_t structure
    111153 *
     
    115157        thread_t *thread = (thread_t *) obj;
    116158
     159        irq_spinlock_initialize(&thread->lock, "thread_t_lock");
    117160        link_initialize(&thread->rq_link);
    118161        link_initialize(&thread->wq_link);
     
    121164        /* call the architecture-specific part of the constructor */
    122165        thr_constructor_arch(thread);
     166
     167#ifdef CONFIG_FPU
     168        thread->saved_fpu_context = slab_alloc(fpu_context_cache,
     169            FRAME_ATOMIC | kmflags);
     170        if (!thread->saved_fpu_context)
     171                return ENOMEM;
     172#endif /* CONFIG_FPU */
    123173
    124174        /*
     
    148198        uintptr_t stack_phys =
    149199            frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);
    150         if (!stack_phys)
     200        if (!stack_phys) {
     201#ifdef CONFIG_FPU
     202                assert(thread->saved_fpu_context);
     203                slab_free(fpu_context_cache, thread->saved_fpu_context);
     204#endif
    151205                return ENOMEM;
     206        }
    152207
    153208        thread->kstack = (uint8_t *) PA2KA(stack_phys);
     
    170225        frame_free(KA2PA(thread->kstack), STACK_FRAMES);
    171226
     227#ifdef CONFIG_FPU
     228        assert(thread->saved_fpu_context);
     229        slab_free(fpu_context_cache, thread->saved_fpu_context);
     230#endif
     231
    172232        return STACK_FRAMES;  /* number of frames freed */
    173233}
     
    183243
    184244        atomic_store(&nrdy, 0);
    185         thread_cache = slab_cache_create("thread_t", sizeof(thread_t), _Alignof(thread_t),
     245        thread_cache = slab_cache_create("thread_t", sizeof(thread_t), 0,
    186246            thr_constructor, thr_destructor, 0);
    187247
     248#ifdef CONFIG_FPU
     249        fpu_context_cache = slab_cache_create("fpu_context_t",
     250            sizeof(fpu_context_t), FPU_CONTEXT_ALIGN, NULL, NULL, 0);
     251#endif
     252
    188253        odict_initialize(&threads, threads_getkey, threads_cmp);
    189254}
     
    196261void thread_wire(thread_t *thread, cpu_t *cpu)
    197262{
    198         ipl_t ipl = interrupts_disable();
    199         atomic_set_unordered(&thread->cpu, cpu);
    200         thread->nomigrate++;
    201         interrupts_restore(ipl);
    202 }
    203 
    204 /** Start a thread that wasn't started yet since it was created.
    205  *
    206  * @param thread A reference to the newly created thread.
    207  */
    208 void thread_start(thread_t *thread)
    209 {
    210         assert(atomic_get_unordered(&thread->state) == Entering);
    211         thread_requeue_sleeping(thread_ref(thread));
     263        irq_spinlock_lock(&thread->lock, true);
     264        thread->cpu = cpu;
     265        thread->wired = true;
     266        irq_spinlock_unlock(&thread->lock, true);
     267}
     268
     269/** Invoked right before thread_ready() readies the thread. thread is locked. */
     270static void before_thread_is_ready(thread_t *thread)
     271{
     272        assert(irq_spinlock_locked(&thread->lock));
     273}
     274
     275/** Make thread ready
     276 *
     277 * Switch thread to the ready state.
     278 *
     279 * @param thread Thread to make ready.
     280 *
     281 */
     282void thread_ready(thread_t *thread)
     283{
     284        irq_spinlock_lock(&thread->lock, true);
     285
     286        assert(thread->state != Ready);
     287
     288        before_thread_is_ready(thread);
     289
     290        int i = (thread->priority < RQ_COUNT - 1) ?
     291            ++thread->priority : thread->priority;
     292
     293        cpu_t *cpu;
     294        if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) {
     295                /* Cannot ready to another CPU */
     296                assert(thread->cpu != NULL);
     297                cpu = thread->cpu;
     298        } else if (thread->stolen) {
     299                /* Ready to the stealing CPU */
     300                cpu = CPU;
     301        } else if (thread->cpu) {
     302                /* Prefer the CPU on which the thread ran last */
     303                assert(thread->cpu != NULL);
     304                cpu = thread->cpu;
     305        } else {
     306                cpu = CPU;
     307        }
     308
     309        thread->state = Ready;
     310
     311        irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
     312
     313        /*
     314         * Append thread to respective ready queue
     315         * on respective processor.
     316         */
     317
     318        list_append(&thread->rq_link, &cpu->rq[i].rq);
     319        cpu->rq[i].n++;
     320        irq_spinlock_unlock(&(cpu->rq[i].lock), true);
     321
     322        atomic_inc(&nrdy);
     323        atomic_inc(&cpu->nrdy);
    212324}
    213325
     
    234346                return NULL;
    235347
    236         refcount_init(&thread->refcount);
    237 
    238348        if (thread_create_arch(thread, flags) != EOK) {
    239349                slab_free(thread_cache, thread);
     
    248358        irq_spinlock_unlock(&tidlock, true);
    249359
    250         context_create(&thread->saved_context, thread_main_func,
    251             thread->kstack, STACK_SIZE);
     360        memset(&thread->saved_context, 0, sizeof(thread->saved_context));
     361        context_set(&thread->saved_context, FADDR(cushion),
     362            (uintptr_t) thread->kstack, STACK_SIZE);
    252363
    253364        current_initialize((current_t *) thread->kstack);
     365
     366        ipl_t ipl = interrupts_disable();
     367        thread->saved_context.ipl = interrupts_read();
     368        interrupts_restore(ipl);
    254369
    255370        str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
     
    257372        thread->thread_code = func;
    258373        thread->thread_arg = arg;
    259         thread->ucycles = ATOMIC_TIME_INITIALIZER();
    260         thread->kcycles = ATOMIC_TIME_INITIALIZER();
     374        thread->ticks = -1;
     375        thread->ucycles = 0;
     376        thread->kcycles = 0;
    261377        thread->uncounted =
    262378            ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
    263         atomic_init(&thread->priority, 0);
    264         atomic_init(&thread->cpu, NULL);
     379        thread->priority = -1;          /* Start in rq[0] */
     380        thread->cpu = NULL;
     381        thread->wired = false;
    265382        thread->stolen = false;
    266383        thread->uspace =
     
    268385
    269386        thread->nomigrate = 0;
    270         atomic_init(&thread->state, Entering);
    271 
    272         atomic_init(&thread->sleep_queue, NULL);
     387        thread->state = Entering;
     388
     389        timeout_initialize(&thread->sleep_timeout);
     390        thread->sleep_interruptible = false;
     391        thread->sleep_composable = false;
     392        thread->sleep_queue = NULL;
     393        thread->timeout_pending = false;
    273394
    274395        thread->in_copy_from_uspace = false;
     
    276397
    277398        thread->interrupted = false;
    278         atomic_init(&thread->sleep_state, SLEEP_INITIAL);
    279 
     399        thread->detached = false;
    280400        waitq_initialize(&thread->join_wq);
    281401
     
    283403
    284404        thread->fpu_context_exists = false;
     405        thread->fpu_context_engaged = false;
    285406
    286407        odlink_initialize(&thread->lthreads);
     
    288409#ifdef CONFIG_UDEBUG
    289410        /* Initialize debugging stuff */
    290         atomic_init(&thread->btrace, false);
     411        thread->btrace = false;
    291412        udebug_thread_initialize(&thread->udebug);
    292413#endif
     
    302423 * Detach thread from all queues, cpus etc. and destroy it.
    303424 *
    304  * @param obj  Thread to be destroyed.
    305  *
    306  */
    307 static void thread_destroy(void *obj)
    308 {
    309         thread_t *thread = (thread_t *) obj;
    310 
    311         assert_link_not_used(&thread->rq_link);
    312         assert_link_not_used(&thread->wq_link);
    313 
     425 * @param thread  Thread to be destroyed.
     426 * @param irq_res Indicate whether it should unlock thread->lock
     427 *                in interrupts-restore mode.
     428 *
     429 */
     430void thread_destroy(thread_t *thread, bool irq_res)
     431{
     432        assert(irq_spinlock_locked(&thread->lock));
     433        assert((thread->state == Exiting) || (thread->state == Lingering));
    314434        assert(thread->task);
    315 
    316         ipl_t ipl = interrupts_disable();
    317 
    318         /* Remove thread from global list. */
    319         irq_spinlock_lock(&threads_lock, false);
     435        assert(thread->cpu);
     436
     437        irq_spinlock_lock(&thread->cpu->lock, false);
     438        if (thread->cpu->fpu_owner == thread)
     439                thread->cpu->fpu_owner = NULL;
     440        irq_spinlock_unlock(&thread->cpu->lock, false);
     441
     442        irq_spinlock_pass(&thread->lock, &threads_lock);
     443
    320444        odict_remove(&thread->lthreads);
    321         irq_spinlock_unlock(&threads_lock, false);
    322 
    323         /* Remove thread from task's list and accumulate accounting. */
    324         irq_spinlock_lock(&thread->task->lock, false);
    325 
     445
     446        irq_spinlock_pass(&threads_lock, &thread->task->lock);
     447
     448        /*
     449         * Detach from the containing task.
     450         */
    326451        list_remove(&thread->th_link);
    327 
    328         /*
    329          * No other CPU has access to this thread anymore, so we don't need
    330          * thread->lock for accessing thread's fields after this point.
    331          */
    332 
    333         if (!thread->uncounted) {
    334                 thread->task->ucycles += atomic_time_read(&thread->ucycles);
    335                 thread->task->kcycles += atomic_time_read(&thread->kcycles);
    336         }
    337 
    338         irq_spinlock_unlock(&thread->task->lock, false);
    339 
    340         assert((atomic_get_unordered(&thread->state) == Exiting) || (atomic_get_unordered(&thread->state) == Lingering));
    341 
    342         /* Clear cpu->fpu_owner if set to this thread. */
    343 #ifdef CONFIG_FPU_LAZY
    344         cpu_t *cpu = atomic_get_unordered(&thread->cpu);
    345         if (cpu) {
    346                 /*
    347                  * We need to lock for this because the old CPU can concurrently try
    348                  * to dump this thread's FPU state, in which case we need to wait for
    349                  * it to finish. An atomic compare-and-swap wouldn't be enough.
    350                  */
    351                 irq_spinlock_lock(&cpu->fpu_lock, false);
    352 
    353                 if (atomic_get_unordered(&cpu->fpu_owner) == thread)
    354                         atomic_set_unordered(&cpu->fpu_owner, NULL);
    355 
    356                 irq_spinlock_unlock(&cpu->fpu_lock, false);
    357         }
    358 #endif
    359 
    360         interrupts_restore(ipl);
     452        irq_spinlock_unlock(&thread->task->lock, irq_res);
    361453
    362454        /*
     
    364456         */
    365457        task_release(thread->task);
    366         thread->task = NULL;
    367 
    368458        slab_free(thread_cache, thread);
    369 }
    370 
    371 void thread_put(thread_t *thread)
    372 {
    373         if (refcount_down(&thread->refcount)) {
    374                 thread_destroy(thread);
    375         }
    376459}
    377460
     
    387470void thread_attach(thread_t *thread, task_t *task)
    388471{
    389         ipl_t ipl = interrupts_disable();
    390 
    391472        /*
    392473         * Attach to the specified task.
    393474         */
    394         irq_spinlock_lock(&task->lock, false);
     475        irq_spinlock_lock(&task->lock, true);
    395476
    396477        /* Hold a reference to the task. */
     
    403484        list_append(&thread->th_link, &task->threads);
    404485
    405         irq_spinlock_unlock(&task->lock, false);
     486        irq_spinlock_pass(&task->lock, &threads_lock);
    406487
    407488        /*
    408489         * Register this thread in the system-wide dictionary.
    409490         */
    410         irq_spinlock_lock(&threads_lock, false);
    411491        odict_insert(&thread->lthreads, &threads, NULL);
    412         irq_spinlock_unlock(&threads_lock, false);
    413 
    414         interrupts_restore(ipl);
     492        irq_spinlock_unlock(&threads_lock, true);
    415493}
    416494
     
    449527        }
    450528
    451         scheduler_enter(Exiting);
    452         unreachable();
     529restart:
     530        irq_spinlock_lock(&THREAD->lock, true);
     531        if (THREAD->timeout_pending) {
     532                /* Busy waiting for timeouts in progress */
     533                irq_spinlock_unlock(&THREAD->lock, true);
     534                goto restart;
     535        }
     536
     537        THREAD->state = Exiting;
     538        irq_spinlock_unlock(&THREAD->lock, true);
     539
     540        scheduler();
     541
     542        /* Not reached */
     543        while (true)
     544                ;
    453545}
    454546
     
    459551 * blocking call was interruptable. See waitq_sleep_timeout().
    460552 *
     553 * The caller must guarantee the thread object is valid during the entire
     554 * function, eg by holding the threads_lock lock.
     555 *
    461556 * Interrupted threads automatically exit when returning back to user space.
    462557 *
    463  * @param thread A valid thread object.
     558 * @param thread A valid thread object. The caller must guarantee it
     559 *               will remain valid until thread_interrupt() exits.
    464560 */
    465561void thread_interrupt(thread_t *thread)
    466562{
    467563        assert(thread != NULL);
     564
     565        irq_spinlock_lock(&thread->lock, true);
     566
    468567        thread->interrupted = true;
    469         thread_wakeup(thread);
    470 }
    471 
    472 /** Prepare for putting the thread to sleep.
    473  *
    474  * @returns whether the thread is currently terminating. If THREAD_OK
    475  * is returned, the thread is guaranteed to be woken up instantly if the thread
    476  * is terminated at any time between this function's return and
    477  * thread_wait_finish(). If THREAD_TERMINATING is returned, the thread can still
    478  * go to sleep, but doing so will delay termination.
    479  */
    480 thread_termination_state_t thread_wait_start(void)
    481 {
    482         assert(THREAD != NULL);
    483 
    484         /*
    485          * This is an exchange rather than a store so that we can use the acquire
    486          * semantics, which is needed to ensure that code after this operation sees
    487          * memory ops made before thread_wakeup() in other thread, if that wakeup
    488          * was reset by this operation.
    489          *
    490          * In particular, we need this to ensure we can't miss the thread being
    491          * terminated concurrently with a synchronization primitive preparing to
    492          * sleep.
    493          */
    494         (void) atomic_exchange_explicit(&THREAD->sleep_state, SLEEP_INITIAL,
    495             memory_order_acquire);
    496 
    497         return THREAD->interrupted ? THREAD_TERMINATING : THREAD_OK;
    498 }
    499 
    500 static void thread_wait_timeout_callback(void *arg)
    501 {
    502         thread_wakeup(arg);
    503 }
    504 
    505 /**
    506  * Suspends this thread's execution until thread_wakeup() is called on it,
    507  * or deadline is reached.
    508  *
    509  * The way this would normally be used is that the current thread call
    510  * thread_wait_start(), and if interruption has not been signaled, stores
    511  * a reference to itself in a synchronized structure (such as waitq).
    512  * After that, it releases any spinlocks it might hold and calls this function.
    513  *
    514  * The thread doing the wakeup will acquire the thread's reference from said
    515  * synchronized structure and calls thread_wakeup() on it.
    516  *
    517  * Notably, there can be more than one thread performing wakeup.
    518  * The number of performed calls to thread_wakeup(), or their relative
    519  * ordering with thread_wait_finish(), does not matter. However, calls to
    520  * thread_wakeup() are expected to be synchronized with thread_wait_start()
    521  * with which they are associated, otherwise wakeups may be missed.
    522  * However, the operation of thread_wakeup() is defined at any time,
    523  * synchronization notwithstanding (in the sense of C un/defined behavior),
    524  * and is in fact used to interrupt waiting threads by external events.
    525  * The waiting thread must operate correctly in face of spurious wakeups,
    526  * and clean up its reference in the synchronization structure if necessary.
    527  *
    528  * Returns THREAD_WAIT_TIMEOUT if timeout fired, which is a necessary condition
    529  * for it to have been waken up by the timeout, but the caller must assume
    530  * that proper wakeups, timeouts and interrupts may occur concurrently, so
    531  * the fact timeout has been registered does not necessarily mean the thread
    532  * has not been woken up or interrupted.
    533  */
    534 thread_wait_result_t thread_wait_finish(deadline_t deadline)
    535 {
    536         assert(THREAD != NULL);
    537 
    538         timeout_t timeout;
    539 
    540         /* Extra check to avoid going to scheduler if we don't need to. */
    541         if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
    542             SLEEP_INITIAL)
    543                 return THREAD_WAIT_SUCCESS;
    544 
    545         if (deadline != DEADLINE_NEVER) {
    546                 timeout_initialize(&timeout);
    547                 timeout_register_deadline(&timeout, deadline,
    548                     thread_wait_timeout_callback, THREAD);
    549         }
    550 
    551         scheduler_enter(Sleeping);
    552 
    553         if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
    554                 return THREAD_WAIT_TIMEOUT;
    555         } else {
    556                 return THREAD_WAIT_SUCCESS;
    557         }
    558 }
    559 
    560 void thread_wakeup(thread_t *thread)
     568        bool sleeping = (thread->state == Sleeping);
     569
     570        irq_spinlock_unlock(&thread->lock, true);
     571
     572        if (sleeping)
     573                waitq_interrupt_sleep(thread);
     574}
     575
     576/** Returns true if the thread was interrupted.
     577 *
     578 * @param thread A valid thread object. User must guarantee it will
     579 *               be alive during the entire call.
     580 * @return true if the thread was already interrupted via thread_interrupt().
     581 */
     582bool thread_interrupted(thread_t *thread)
    561583{
    562584        assert(thread != NULL);
    563585
    564         int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
    565             memory_order_acq_rel);
    566 
    567         if (state == SLEEP_ASLEEP) {
    568                 /*
    569                  * Only one thread gets to do this.
    570                  * The reference consumed here is the reference implicitly passed to
    571                  * the waking thread by the sleeper in thread_wait_finish().
    572                  */
    573                 thread_requeue_sleeping(thread);
    574         }
     586        bool interrupted;
     587
     588        irq_spinlock_lock(&thread->lock, true);
     589        interrupted = thread->interrupted;
     590        irq_spinlock_unlock(&thread->lock, true);
     591
     592        return interrupted;
    575593}
    576594
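
For reference, the removed thread_wait_start()/thread_wait_finish() documentation above describes the sleep/wakeup protocol without showing a caller. Below is a minimal sketch of how a synchronization primitive might use it; my_queue_t, my_queue_insert(), my_queue_remove() and my_queue_pop() are hypothetical helpers, while thread_wait_start(), thread_wait_finish(), thread_wakeup(), thread_ref() and thread_put() are the interfaces from the rdfa4be62 side of this changeset.

    /*
     * Sketch only, not part of the changeset. Sleeper side of a hypothetical
     * synchronization primitive built on the documented protocol.
     */
    static errno_t my_primitive_sleep(my_queue_t *q, deadline_t deadline)
    {
            /* Announce the intent to sleep; bail out if termination was signaled. */
            if (thread_wait_start() == THREAD_TERMINATING)
                    return EINTR;

            /* Publish a strong reference to ourselves so a waker can find us. */
            irq_spinlock_lock(&q->lock, true);
            my_queue_insert(q, thread_ref(THREAD));
            irq_spinlock_unlock(&q->lock, true);

            /* Sleep until thread_wakeup() or the deadline; wakeups may be spurious. */
            if (thread_wait_finish(deadline) == THREAD_WAIT_TIMEOUT) {
                    /*
                     * A timeout can race with a real wakeup: only drop the
                     * published reference if our entry is still queued.
                     */
                    irq_spinlock_lock(&q->lock, true);
                    bool queued = my_queue_remove(q, THREAD);
                    irq_spinlock_unlock(&q->lock, true);
                    if (queued)
                            thread_put(THREAD);
                    return ETIMEOUT;
            }

            /* On success the waker already took the queued reference. */
            return EOK;
    }

    /* Waker side: pop one sleeper's reference and wake it. */
    static void my_primitive_wake_one(my_queue_t *q)
    {
            irq_spinlock_lock(&q->lock, true);
            thread_t *t = my_queue_pop(q);
            irq_spinlock_unlock(&q->lock, true);

            if (t != NULL) {
                    thread_wakeup(t);   /* safe even if the sleeper already woke up */
                    thread_put(t);      /* drop the reference taken from the queue */
            }
    }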
     
    578596void thread_migration_disable(void)
    579597{
    580         ipl_t ipl = interrupts_disable();
    581 
    582598        assert(THREAD);
     599
    583600        THREAD->nomigrate++;
    584 
    585         interrupts_restore(ipl);
    586601}
    587602
     
    589604void thread_migration_enable(void)
    590605{
    591         ipl_t ipl = interrupts_disable();
    592 
    593606        assert(THREAD);
    594607        assert(THREAD->nomigrate > 0);
     
    596609        if (THREAD->nomigrate > 0)
    597610                THREAD->nomigrate--;
    598 
    599         interrupts_restore(ipl);
    600611}
    601612
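
On both sides of this change, nomigrate is a nesting counter, so thread_migration_disable()/thread_migration_enable() pairs may nest. A minimal usage sketch (the CPU-local work in the middle is illustrative only):

    /* Keep the current thread on its CPU while it inspects CPU-local state. */
    thread_migration_disable();

    /* CPU refers to the same cpu_t until migration is re-enabled. */
    printf("running on cpu %u\n", CPU->id);

    thread_migration_enable();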
     
    621632}
    622633
    623 errno_t thread_join(thread_t *thread)
    624 {
    625         return thread_join_timeout(thread, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
    626 }
    627 
    628634/** Wait for another thread to exit.
    629  * After successful wait, the thread reference is destroyed.
    630635 *
    631636 * @param thread Thread to join on exit.
     
    638643errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
    639644{
    640         assert(thread != NULL);
    641 
    642645        if (thread == THREAD)
    643646                return EINVAL;
    644647
    645         errno_t rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
    646 
    647         if (rc == EOK)
    648                 thread_put(thread);
    649 
    650         return rc;
    651 }
    652 
     648        /*
     649         * Since thread join can only be called once on an undetached thread,
     650         * the thread pointer is guaranteed to be still valid.
     651         */
     652
     653        irq_spinlock_lock(&thread->lock, true);
     654        assert(!thread->detached);
     655        irq_spinlock_unlock(&thread->lock, true);
     656
     657        return waitq_sleep_timeout(&thread->join_wq, usec, flags, NULL);
     658
     659        // FIXME: join should deallocate the thread.
     660        //        Current code calls detach after join, that's contrary to how
     661        //        join is used in other threading APIs.
     662}
     663
     664/** Detach thread.
     665 *
     666 * Mark the thread as detached. If the thread is already
     667 * in the Lingering state, deallocate its resources.
     668 *
     669 * @param thread Thread to be detached.
     670 *
     671 */
    653672void thread_detach(thread_t *thread)
    654673{
    655         thread_put(thread);
     674        /*
     675         * Since the thread is expected not to be already detached,
     676         * pointer to it must be still valid.
     677         */
     678        irq_spinlock_lock(&thread->lock, true);
     679        assert(!thread->detached);
     680
     681        if (thread->state == Lingering) {
     682                /*
     683                 * Unlock &thread->lock and restore
     684                 * interrupts in thread_destroy().
     685                 */
     686                thread_destroy(thread, true);
     687                return;
     688        } else {
     689                thread->detached = true;
     690        }
     691
     692        irq_spinlock_unlock(&thread->lock, true);
    656693}
    657694
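
The FIXME above records that, on the r128359eb side, joining a thread does not free it; callers are expected to detach it afterwards. A minimal sketch of that calling pattern follows; worker() is a hypothetical entry function, everything else is taken from this file.

    /* Hypothetical thread entry function. */
    static void worker(void *arg)
    {
            (void) arg;
            /* ... do some work, then return (cushion() calls thread_exit()) ... */
    }

    static void spawn_and_reap(void)
    {
            thread_t *t = thread_create(worker, NULL, TASK, THREAD_FLAG_NONE,
                "worker");
            if (t == NULL)
                    return;

            thread_attach(t, TASK);
            thread_ready(t);

            /* Wait for the worker to reach the Lingering state... */
            if (thread_join_timeout(t, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE) == EOK) {
                    /* ...then detach it; detaching is what frees its resources. */
                    thread_detach(t);
            }
    }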
     
    669706        waitq_initialize(&wq);
    670707
    671         (void) waitq_sleep_timeout(&wq, usec);
    672 }
    673 
    674 /** Allow other threads to run. */
    675 void thread_yield(void)
    676 {
    677         assert(THREAD != NULL);
    678         scheduler_enter(Running);
     708        (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
    679709}
    680710
     
    683713        uint64_t ucycles, kcycles;
    684714        char usuffix, ksuffix;
    685         order_suffix(atomic_time_read(&thread->ucycles), &ucycles, &usuffix);
    686         order_suffix(atomic_time_read(&thread->kcycles), &kcycles, &ksuffix);
    687 
    688         state_t state = atomic_get_unordered(&thread->state);
     715        order_suffix(thread->ucycles, &ucycles, &usuffix);
     716        order_suffix(thread->kcycles, &kcycles, &ksuffix);
    689717
    690718        char *name;
     
    694722                name = thread->name;
    695723
     724#ifdef __32_BITS__
    696725        if (additional)
    697                 printf("%-8" PRIu64 " %p %p %9" PRIu64 "%c %9" PRIu64 "%c ",
     726                printf("%-8" PRIu64 " %10p %10p %9" PRIu64 "%c %9" PRIu64 "%c ",
    698727                    thread->tid, thread->thread_code, thread->kstack,
    699728                    ucycles, usuffix, kcycles, ksuffix);
    700729        else
    701                 printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
    702                     thread->tid, name, thread, thread_states[state],
     730                printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 "\n",
     731                    thread->tid, name, thread, thread_states[thread->state],
    703732                    thread->task, thread->task->container);
     733#endif
     734
     735#ifdef __64_BITS__
     736        if (additional)
     737                printf("%-8" PRIu64 " %18p %18p\n"
     738                    "         %9" PRIu64 "%c %9" PRIu64 "%c ",
     739                    thread->tid, thread->thread_code, thread->kstack,
     740                    ucycles, usuffix, kcycles, ksuffix);
     741        else
     742                printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n",
     743                    thread->tid, name, thread, thread_states[thread->state],
     744                    thread->task, thread->task->container);
     745#endif
    704746
    705747        if (additional) {
    706                 cpu_t *cpu = atomic_get_unordered(&thread->cpu);
    707                 if (cpu)
    708                         printf("%-5u", cpu->id);
     748                if (thread->cpu)
     749                        printf("%-5u", thread->cpu->id);
    709750                else
    710751                        printf("none ");
    711752
    712                 if (state == Sleeping) {
    713                         printf(" %p", thread->sleep_queue);
     753                if (thread->state == Sleeping) {
     754#ifdef __32_BITS__
     755                        printf(" %10p", thread->sleep_queue);
     756#endif
     757
     758#ifdef __64_BITS__
     759                        printf(" %18p", thread->sleep_queue);
     760#endif
    714761                }
    715762
     
    727774        thread_t *thread;
    728775
    729         /* Accessing system-wide threads list through thread_first()/thread_next(). */
     776        /* Messing with thread structures, avoid deadlock */
    730777        irq_spinlock_lock(&threads_lock, true);
    731778
    732         if (sizeof(void *) <= 4) {
    733                 if (additional)
    734                         printf("[id    ] [code    ] [stack   ] [ucycles ] [kcycles ]"
    735                             " [cpu] [waitqueue]\n");
    736                 else
    737                         printf("[id    ] [name        ] [address ] [state ] [task    ]"
    738                             " [ctn]\n");
    739         } else {
    740                 if (additional) {
    741                         printf("[id    ] [code            ] [stack           ] [ucycles ] [kcycles ]"
    742                             " [cpu] [waitqueue       ]\n");
    743                 } else
    744                         printf("[id    ] [name        ] [address         ] [state ]"
    745                             " [task            ] [ctn]\n");
    746         }
     779#ifdef __32_BITS__
     780        if (additional)
     781                printf("[id    ] [code    ] [stack   ] [ucycles ] [kcycles ]"
     782                    " [cpu] [waitqueue]\n");
     783        else
     784                printf("[id    ] [name        ] [address ] [state ] [task    ]"
     785                    " [ctn]\n");
     786#endif
     787
     788#ifdef __64_BITS__
     789        if (additional) {
     790                printf("[id    ] [code            ] [stack           ]\n"
     791                    "         [ucycles ] [kcycles ] [cpu] [waitqueue       ]\n");
     792        } else
     793                printf("[id    ] [name        ] [address         ] [state ]"
     794                    " [task            ] [ctn]\n");
     795#endif
    747796
    748797        thread = thread_first();
     
    755804}
    756805
    757 static bool thread_exists(thread_t *thread)
    758 {
     806/** Check whether thread exists.
     807 *
     808 * Note that threads_lock must be already held and
     809 * interrupts must be already disabled.
     810 *
     811 * @param thread Pointer to thread.
     812 *
     813 * @return True if thread t is known to the system, false otherwise.
     814 *
     815 */
     816bool thread_exists(thread_t *thread)
     817{
     818        assert(interrupts_disabled());
     819        assert(irq_spinlock_locked(&threads_lock));
     820
    759821        odlink_t *odlink = odict_find_eq(&threads, thread, NULL);
    760822        return odlink != NULL;
    761823}
    762824
    763 /** Check whether the thread exists, and if so, return a reference to it.
    764  */
    765 thread_t *thread_try_get(thread_t *thread)
    766 {
    767         irq_spinlock_lock(&threads_lock, true);
    768 
    769         if (thread_exists(thread)) {
    770                 /* Try to strengthen the reference. */
    771                 thread = thread_try_ref(thread);
    772         } else {
    773                 thread = NULL;
    774         }
    775 
    776         irq_spinlock_unlock(&threads_lock, true);
    777 
    778         return thread;
    779 }
    780 
    781825/** Update accounting of current thread.
    782826 *
     
    789833void thread_update_accounting(bool user)
    790834{
     835        uint64_t time = get_cycle();
     836
    791837        assert(interrupts_disabled());
    792 
    793         uint64_t time = get_cycle();
     838        assert(irq_spinlock_locked(&THREAD->lock));
    794839
    795840        if (user)
    796                 atomic_time_increment(&THREAD->ucycles, time - THREAD->last_cycle);
     841                THREAD->ucycles += time - THREAD->last_cycle;
    797842        else
    798                 atomic_time_increment(&THREAD->kcycles, time - THREAD->last_cycle);
     843                THREAD->kcycles += time - THREAD->last_cycle;
    799844
    800845        THREAD->last_cycle = time;
     
    805850 * The threads_lock must be already held by the caller of this function and
    806851 * interrupts must be disabled.
    807  *
    808  * The returned reference is weak.
    809  * If the caller needs to keep it, thread_try_ref() must be used to upgrade
    810  * to a strong reference _before_ threads_lock is released.
    811852 *
    812853 * @param id Thread ID.
     
    887928{
    888929        irq_spinlock_lock(&threads_lock, true);
    889         thread_t *thread = thread_try_ref(thread_find_by_id(thread_id));
    890         irq_spinlock_unlock(&threads_lock, true);
    891 
     930
     931        thread_t *thread = thread_find_by_id(thread_id);
    892932        if (thread == NULL) {
    893933                printf("No such thread.\n");
     934                irq_spinlock_unlock(&threads_lock, true);
    894935                return;
    895936        }
     937
     938        irq_spinlock_lock(&thread->lock, false);
    896939
    897940        /*
     
    907950         */
    908951
    909         printf("Scheduling thread stack trace.\n");
    910         atomic_set_unordered(&thread->btrace, true);
    911 
    912         thread_wakeup(thread);
    913         thread_put(thread);
     952        bool sleeping = false;
     953        istate_t *istate = thread->udebug.uspace_state;
     954        if (istate != NULL) {
     955                printf("Scheduling thread stack trace.\n");
     956                thread->btrace = true;
     957                if (thread->state == Sleeping)
     958                        sleeping = true;
     959        } else
     960                printf("Thread interrupt state not available.\n");
     961
     962        irq_spinlock_unlock(&thread->lock, false);
     963
     964        if (sleeping)
     965                waitq_interrupt_sleep(thread);
     966
     967        irq_spinlock_unlock(&threads_lock, true);
    914968}
    915969
     
    10111065                thread_attach(thread, TASK);
    10121066#endif
    1013                 thread_start(thread);
    1014                 thread_put(thread);
     1067                thread_ready(thread);
    10151068
    10161069                return 0;
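
Finally, the hunk above shows that spawning a kernel thread differs between the two revisions: on the rdfa4be62 side thread objects are reference counted and the creator starts the thread and then drops its own reference, while on the r128359eb side the new thread is simply made ready. A minimal sketch of both sequences, with a hypothetical worker() entry function:

    /* rdfa4be62 side: explicit reference counting. */
    thread_t *t_new = thread_create(worker, NULL, TASK, THREAD_FLAG_NONE, "worker");
    if (t_new != NULL) {
            thread_attach(t_new, TASK);
            thread_start(t_new);    /* takes its own reference via thread_ref() */
            thread_put(t_new);      /* drop the creation reference */
    }

    /* r128359eb side: no reference counting; just enqueue the new thread. */
    thread_t *t_old = thread_create(worker, NULL, TASK, THREAD_FLAG_NONE, "worker");
    if (t_old != NULL) {
            thread_attach(t_old, TASK);
            thread_ready(t_old);
    }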