File:
1 edited

Legend:

Unmodified lines carry both an old and a new line number; added lines carry only a new number; removed lines carry only an old number.
  • kernel/generic/src/proc/thread.c

    r128359eb r597fa24  
    11/*
     2 * Copyright (c) 2025 Jiri Svoboda
    23 * Copyright (c) 2010 Jakub Jermar
    3  * Copyright (c) 2018 Jiri Svoboda
    44 * All rights reserved.
    55 *
     
    6060#include <arch/interrupt.h>
    6161#include <smp/ipi.h>
    62 #include <arch/faddr.h>
    6362#include <atomic.h>
    64 #include <mem.h>
     63#include <memw.h>
    6564#include <stdio.h>
    6665#include <stdlib.h>
     
    6968#include <errno.h>
    7069#include <debug.h>
     70#include <halt.h>
    7171
    7272/** Thread states */
     
    9494 *
    9595 * Members are of type thread_t.
     96 *
     97 * This structure contains weak references. Any reference from it must not leave
      98 * the threads_lock critical section unless strengthened via thread_try_ref().
    9699 */
    97100odict_t threads;
     
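
The comment above states the weak-reference rule for the global threads dictionary: a pointer obtained from it is only valid inside the threads_lock critical section unless it is strengthened first. A minimal sketch of the pattern, assuming it lives next to the thread.c internals (threads_lock, thread_find_by_id()); the helper name is hypothetical, and the in-tree equivalents are thread_try_get() and the by-id lookup near the end of this file:

    #include <proc/thread.h>
    #include <synch/spinlock.h>

    /* Hypothetical helper: look up a thread by ID and return a strong
     * reference, or NULL if it no longer exists. */
    static thread_t *demo_get_thread_by_id(thread_id_t id)
    {
            irq_spinlock_lock(&threads_lock, true);

            /* Weak reference, valid only inside this critical section. */
            thread_t *thread = thread_find_by_id(id);

            /*
             * Upgrade to a strong reference before dropping the lock.
             * As in the usage near the end of this file, a NULL argument
             * simply yields NULL.
             */
            thread = thread_try_ref(thread);

            irq_spinlock_unlock(&threads_lock, true);

            /* A non-NULL result must later be released with thread_put(). */
            return thread;
    }
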
    102105static slab_cache_t *thread_cache;
    103106
    104 #ifdef CONFIG_FPU
    105 slab_cache_t *fpu_context_cache;
    106 #endif
    107 
    108107static void *threads_getkey(odlink_t *);
    109108static int threads_cmp(void *, void *);
    110109
    111 /** Thread wrapper.
    112  *
    113  * This wrapper is provided to ensure that every thread makes a call to
    114  * thread_exit() when its implementing function returns.
    115  *
    116  * interrupts_disable() is assumed.
    117  *
    118  */
    119 static void cushion(void)
    120 {
    121         void (*f)(void *) = THREAD->thread_code;
    122         void *arg = THREAD->thread_arg;
    123         THREAD->last_cycle = get_cycle();
    124 
    125         /* This is where each thread wakes up after its creation */
    126         irq_spinlock_unlock(&THREAD->lock, false);
    127         interrupts_enable();
    128 
    129         f(arg);
    130 
    131         /* Accumulate accounting to the task */
    132         irq_spinlock_lock(&THREAD->lock, true);
    133         if (!THREAD->uncounted) {
    134                 thread_update_accounting(true);
    135                 uint64_t ucycles = THREAD->ucycles;
    136                 THREAD->ucycles = 0;
    137                 uint64_t kcycles = THREAD->kcycles;
    138                 THREAD->kcycles = 0;
    139 
    140                 irq_spinlock_pass(&THREAD->lock, &TASK->lock);
    141                 TASK->ucycles += ucycles;
    142                 TASK->kcycles += kcycles;
    143                 irq_spinlock_unlock(&TASK->lock, true);
    144         } else
    145                 irq_spinlock_unlock(&THREAD->lock, true);
    146 
    147         thread_exit();
    148 
    149         /* Not reached */
    150 }
    151 
    152110/** Initialization and allocation for thread_t structure
    153111 *
     
    157115        thread_t *thread = (thread_t *) obj;
    158116
    159         irq_spinlock_initialize(&thread->lock, "thread_t_lock");
    160117        link_initialize(&thread->rq_link);
    161118        link_initialize(&thread->wq_link);
     
    164121        /* call the architecture-specific part of the constructor */
    165122        thr_constructor_arch(thread);
    166 
    167 #ifdef CONFIG_FPU
    168         thread->saved_fpu_context = slab_alloc(fpu_context_cache,
    169             FRAME_ATOMIC | kmflags);
    170         if (!thread->saved_fpu_context)
    171                 return ENOMEM;
    172 #endif /* CONFIG_FPU */
    173123
    174124        /*
     
    198148        uintptr_t stack_phys =
    199149            frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);
    200         if (!stack_phys) {
    201 #ifdef CONFIG_FPU
    202                 assert(thread->saved_fpu_context);
    203                 slab_free(fpu_context_cache, thread->saved_fpu_context);
    204 #endif
     150        if (!stack_phys)
    205151                return ENOMEM;
    206         }
    207152
    208153        thread->kstack = (uint8_t *) PA2KA(stack_phys);
     
    225170        frame_free(KA2PA(thread->kstack), STACK_FRAMES);
    226171
    227 #ifdef CONFIG_FPU
    228         assert(thread->saved_fpu_context);
    229         slab_free(fpu_context_cache, thread->saved_fpu_context);
    230 #endif
    231 
    232172        return STACK_FRAMES;  /* number of frames freed */
    233173}
     
    243183
    244184        atomic_store(&nrdy, 0);
    245         thread_cache = slab_cache_create("thread_t", sizeof(thread_t), 0,
     185        thread_cache = slab_cache_create("thread_t", sizeof(thread_t), _Alignof(thread_t),
    246186            thr_constructor, thr_destructor, 0);
    247187
    248 #ifdef CONFIG_FPU
    249         fpu_context_cache = slab_cache_create("fpu_context_t",
    250             sizeof(fpu_context_t), FPU_CONTEXT_ALIGN, NULL, NULL, 0);
    251 #endif
    252 
    253188        odict_initialize(&threads, threads_getkey, threads_cmp);
    254189}
     
    261196void thread_wire(thread_t *thread, cpu_t *cpu)
    262197{
    263         irq_spinlock_lock(&thread->lock, true);
    264         thread->cpu = cpu;
    265         thread->wired = true;
    266         irq_spinlock_unlock(&thread->lock, true);
    267 }
    268 
    269 /** Invoked right before thread_ready() readies the thread. thread is locked. */
    270 static void before_thread_is_ready(thread_t *thread)
    271 {
    272         assert(irq_spinlock_locked(&thread->lock));
    273 }
    274 
    275 /** Make thread ready
    276  *
    277  * Switch thread to the ready state.
    278  *
    279  * @param thread Thread to make ready.
    280  *
    281  */
    282 void thread_ready(thread_t *thread)
    283 {
    284         irq_spinlock_lock(&thread->lock, true);
    285 
    286         assert(thread->state != Ready);
    287 
    288         before_thread_is_ready(thread);
    289 
    290         int i = (thread->priority < RQ_COUNT - 1) ?
    291             ++thread->priority : thread->priority;
    292 
    293         cpu_t *cpu;
    294         if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) {
    295                 /* Cannot ready to another CPU */
    296                 assert(thread->cpu != NULL);
    297                 cpu = thread->cpu;
    298         } else if (thread->stolen) {
    299                 /* Ready to the stealing CPU */
    300                 cpu = CPU;
    301         } else if (thread->cpu) {
    302                 /* Prefer the CPU on which the thread ran last */
    303                 assert(thread->cpu != NULL);
    304                 cpu = thread->cpu;
    305         } else {
    306                 cpu = CPU;
    307         }
    308 
    309         thread->state = Ready;
    310 
    311         irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
    312 
    313         /*
    314          * Append thread to respective ready queue
    315          * on respective processor.
    316          */
    317 
    318         list_append(&thread->rq_link, &cpu->rq[i].rq);
    319         cpu->rq[i].n++;
    320         irq_spinlock_unlock(&(cpu->rq[i].lock), true);
    321 
    322         atomic_inc(&nrdy);
    323         atomic_inc(&cpu->nrdy);
     198        ipl_t ipl = interrupts_disable();
     199        atomic_set_unordered(&thread->cpu, cpu);
     200        thread->nomigrate++;
     201        interrupts_restore(ipl);
     202}
     203
      204/** Start a thread that has not yet been started since its creation.
     205 *
     206 * @param thread A reference to the newly created thread.
     207 */
     208void thread_start(thread_t *thread)
     209{
     210        assert(atomic_get_unordered(&thread->state) == Entering);
     211        thread_requeue_sleeping(thread_ref(thread));
    324212}
    325213
     
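
With thread_start() and the reference-counted lifecycle replacing the old thread_ready() path, a caller creates, attaches, starts and then either joins or drops its reference. A hedged sketch modeled on the tail of sys_thread_create() in this same file; the worker and wrapper names are hypothetical, the explicit thread_attach() mirrors the THREAD_FLAG_NOATTACH path used there, and TASK/EOK come from the usual kernel headers:

    #include <proc/thread.h>
    #include <proc/task.h>
    #include <errno.h>

    /* Hypothetical worker; returning from it terminates the thread. */
    static void demo_worker(void *arg)
    {
            /* ... do the work ... */
    }

    static errno_t demo_spawn_and_join(void *arg)
    {
            thread_t *thread = thread_create(demo_worker, arg, TASK,
                THREAD_FLAG_NOATTACH, "demo_worker");
            if (thread == NULL)
                    return ENOMEM;

            thread_attach(thread, TASK);   /* make it visible to the system */
            thread_start(thread);          /* queue it for its first run */

            /* A successful join consumes the creation reference. */
            errno_t rc = thread_join(thread);
            if (rc != EOK)
                    thread_put(thread);    /* still ours on failure; drop it */

            return rc;
    }
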
    346234                return NULL;
    347235
     236        refcount_init(&thread->refcount);
     237
    348238        if (thread_create_arch(thread, flags) != EOK) {
    349239                slab_free(thread_cache, thread);
     
    358248        irq_spinlock_unlock(&tidlock, true);
    359249
    360         memset(&thread->saved_context, 0, sizeof(thread->saved_context));
    361         context_set(&thread->saved_context, FADDR(cushion),
    362             (uintptr_t) thread->kstack, STACK_SIZE);
     250        context_create(&thread->saved_context, thread_main_func,
     251            thread->kstack, STACK_SIZE);
    363252
    364253        current_initialize((current_t *) thread->kstack);
    365 
    366         ipl_t ipl = interrupts_disable();
    367         thread->saved_context.ipl = interrupts_read();
    368         interrupts_restore(ipl);
    369254
    370255        str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
     
    372257        thread->thread_code = func;
    373258        thread->thread_arg = arg;
    374         thread->ticks = -1;
    375         thread->ucycles = 0;
    376         thread->kcycles = 0;
     259        thread->ucycles = ATOMIC_TIME_INITIALIZER();
     260        thread->kcycles = ATOMIC_TIME_INITIALIZER();
    377261        thread->uncounted =
    378262            ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
    379         thread->priority = -1;          /* Start in rq[0] */
    380         thread->cpu = NULL;
    381         thread->wired = false;
     263        atomic_init(&thread->priority, 0);
     264        atomic_init(&thread->cpu, NULL);
    382265        thread->stolen = false;
    383266        thread->uspace =
     
    385268
    386269        thread->nomigrate = 0;
    387         thread->state = Entering;
    388 
    389         timeout_initialize(&thread->sleep_timeout);
    390         thread->sleep_interruptible = false;
    391         thread->sleep_composable = false;
    392         thread->sleep_queue = NULL;
    393         thread->timeout_pending = false;
     270        atomic_init(&thread->state, Entering);
     271
     272        atomic_init(&thread->sleep_queue, NULL);
    394273
    395274        thread->in_copy_from_uspace = false;
     
    397276
    398277        thread->interrupted = false;
    399         thread->detached = false;
     278        atomic_init(&thread->sleep_state, SLEEP_INITIAL);
     279
    400280        waitq_initialize(&thread->join_wq);
    401281
     
    403283
    404284        thread->fpu_context_exists = false;
    405         thread->fpu_context_engaged = false;
    406285
    407286        odlink_initialize(&thread->lthreads);
     
    409288#ifdef CONFIG_UDEBUG
    410289        /* Initialize debugging stuff */
    411         thread->btrace = false;
     290        atomic_init(&thread->btrace, false);
    412291        udebug_thread_initialize(&thread->udebug);
    413292#endif
     
    423302 * Detach thread from all queues, cpus etc. and destroy it.
    424303 *
    425  * @param thread  Thread to be destroyed.
    426  * @param irq_res Indicate whether it should unlock thread->lock
    427  *                in interrupts-restore mode.
    428  *
    429  */
    430 void thread_destroy(thread_t *thread, bool irq_res)
    431 {
    432         assert(irq_spinlock_locked(&thread->lock));
    433         assert((thread->state == Exiting) || (thread->state == Lingering));
     304 * @param obj  Thread to be destroyed.
     305 *
     306 */
     307static void thread_destroy(void *obj)
     308{
     309        thread_t *thread = (thread_t *) obj;
     310
     311        assert_link_not_used(&thread->rq_link);
     312        assert_link_not_used(&thread->wq_link);
     313
    434314        assert(thread->task);
    435         assert(thread->cpu);
    436 
    437         irq_spinlock_lock(&thread->cpu->lock, false);
    438         if (thread->cpu->fpu_owner == thread)
    439                 thread->cpu->fpu_owner = NULL;
    440         irq_spinlock_unlock(&thread->cpu->lock, false);
    441 
    442         irq_spinlock_pass(&thread->lock, &threads_lock);
    443 
     315
     316        ipl_t ipl = interrupts_disable();
     317
     318        /* Remove thread from global list. */
     319        irq_spinlock_lock(&threads_lock, false);
    444320        odict_remove(&thread->lthreads);
    445 
    446         irq_spinlock_pass(&threads_lock, &thread->task->lock);
    447 
    448         /*
    449          * Detach from the containing task.
    450          */
     321        irq_spinlock_unlock(&threads_lock, false);
     322
     323        /* Remove thread from task's list and accumulate accounting. */
     324        irq_spinlock_lock(&thread->task->lock, false);
     325
    451326        list_remove(&thread->th_link);
    452         irq_spinlock_unlock(&thread->task->lock, irq_res);
     327
     328        /*
     329         * No other CPU has access to this thread anymore, so we don't need
     330         * thread->lock for accessing thread's fields after this point.
     331         */
     332
     333        if (!thread->uncounted) {
     334                thread->task->ucycles += atomic_time_read(&thread->ucycles);
     335                thread->task->kcycles += atomic_time_read(&thread->kcycles);
     336        }
     337
     338        irq_spinlock_unlock(&thread->task->lock, false);
     339
     340        assert((atomic_get_unordered(&thread->state) == Entering) ||
     341            (atomic_get_unordered(&thread->state) == Exiting) ||
     342            (atomic_get_unordered(&thread->state) == Lingering));
     343
     344        /* Clear cpu->fpu_owner if set to this thread. */
     345#ifdef CONFIG_FPU_LAZY
     346        cpu_t *cpu = atomic_get_unordered(&thread->cpu);
     347        if (cpu) {
     348                /*
     349                 * We need to lock for this because the old CPU can concurrently try
     350                 * to dump this thread's FPU state, in which case we need to wait for
     351                 * it to finish. An atomic compare-and-swap wouldn't be enough.
     352                 */
     353                irq_spinlock_lock(&cpu->fpu_lock, false);
     354
     355                if (atomic_get_unordered(&cpu->fpu_owner) == thread)
     356                        atomic_set_unordered(&cpu->fpu_owner, NULL);
     357
     358                irq_spinlock_unlock(&cpu->fpu_lock, false);
     359        }
     360#endif
     361
     362        interrupts_restore(ipl);
    453363
    454364        /*
     
    456366         */
    457367        task_release(thread->task);
     368        thread->task = NULL;
     369
    458370        slab_free(thread_cache, thread);
     371}
     372
     373void thread_put(thread_t *thread)
     374{
     375        if (refcount_down(&thread->refcount)) {
     376                thread_destroy(thread);
     377        }
    459378}
    460379
     
    470389void thread_attach(thread_t *thread, task_t *task)
    471390{
     391        ipl_t ipl = interrupts_disable();
     392
    472393        /*
    473394         * Attach to the specified task.
    474395         */
    475         irq_spinlock_lock(&task->lock, true);
     396        irq_spinlock_lock(&task->lock, false);
    476397
    477398        /* Hold a reference to the task. */
     
    484405        list_append(&thread->th_link, &task->threads);
    485406
    486         irq_spinlock_pass(&task->lock, &threads_lock);
     407        irq_spinlock_unlock(&task->lock, false);
    487408
    488409        /*
    489410         * Register this thread in the system-wide dictionary.
    490411         */
     412        irq_spinlock_lock(&threads_lock, false);
    491413        odict_insert(&thread->lthreads, &threads, NULL);
    492         irq_spinlock_unlock(&threads_lock, true);
     414        irq_spinlock_unlock(&threads_lock, false);
     415
     416        interrupts_restore(ipl);
    493417}
    494418
     
    527451        }
    528452
    529 restart:
    530         irq_spinlock_lock(&THREAD->lock, true);
    531         if (THREAD->timeout_pending) {
    532                 /* Busy waiting for timeouts in progress */
    533                 irq_spinlock_unlock(&THREAD->lock, true);
    534                 goto restart;
    535         }
    536 
    537         THREAD->state = Exiting;
    538         irq_spinlock_unlock(&THREAD->lock, true);
    539 
    540         scheduler();
    541 
    542         /* Not reached */
    543         while (true)
    544                 ;
     453        scheduler_enter(Exiting);
     454        unreachable();
    545455}
    546456
     
    551461 * blocking call was interruptable. See waitq_sleep_timeout().
    552462 *
    553  * The caller must guarantee the thread object is valid during the entire
    554  * function, eg by holding the threads_lock lock.
    555  *
    556463 * Interrupted threads automatically exit when returning back to user space.
    557464 *
    558  * @param thread A valid thread object. The caller must guarantee it
    559  *               will remain valid until thread_interrupt() exits.
     465 * @param thread A valid thread object.
    560466 */
    561467void thread_interrupt(thread_t *thread)
    562468{
    563469        assert(thread != NULL);
    564 
    565         irq_spinlock_lock(&thread->lock, true);
    566 
    567470        thread->interrupted = true;
    568         bool sleeping = (thread->state == Sleeping);
    569 
    570         irq_spinlock_unlock(&thread->lock, true);
    571 
    572         if (sleeping)
    573                 waitq_interrupt_sleep(thread);
    574 }
    575 
    576 /** Returns true if the thread was interrupted.
    577  *
    578  * @param thread A valid thread object. User must guarantee it will
    579  *               be alive during the entire call.
    580  * @return true if the thread was already interrupted via thread_interrupt().
    581  */
    582 bool thread_interrupted(thread_t *thread)
     471        thread_wakeup(thread);
     472}
     473
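
Because thread_interrupt() now only requires a valid reference rather than the removed thread lock, interrupting a thread through a remembered (possibly stale) pointer reduces to the following hedged sketch; the helper name is hypothetical and thread_try_get() is defined further down in this file:

    /* Ask another thread to terminate. 'candidate' is a pointer recorded
     * earlier and may already be stale. */
    static void demo_request_exit(thread_t *candidate)
    {
            thread_t *thread = thread_try_get(candidate);
            if (thread == NULL)
                    return;               /* thread no longer exists */

            thread_interrupt(thread);     /* mark it interrupted and wake it */
            thread_put(thread);
    }
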
     474/** Prepare for putting the thread to sleep.
     475 *
     476 * @returns whether the thread is currently terminating. If THREAD_OK
     477 * is returned, the thread is guaranteed to be woken up instantly if the thread
     478 * is terminated at any time between this function's return and
     479 * thread_wait_finish(). If THREAD_TERMINATING is returned, the thread can still
     480 * go to sleep, but doing so will delay termination.
     481 */
     482thread_termination_state_t thread_wait_start(void)
     483{
     484        assert(THREAD != NULL);
     485
     486        /*
     487         * This is an exchange rather than a store so that we can use the acquire
     488         * semantics, which is needed to ensure that code after this operation sees
      489         * memory ops made before thread_wakeup() in the other thread, if that wakeup
     490         * was reset by this operation.
     491         *
     492         * In particular, we need this to ensure we can't miss the thread being
     493         * terminated concurrently with a synchronization primitive preparing to
     494         * sleep.
     495         */
     496        (void) atomic_exchange_explicit(&THREAD->sleep_state, SLEEP_INITIAL,
     497            memory_order_acquire);
     498
     499        return THREAD->interrupted ? THREAD_TERMINATING : THREAD_OK;
     500}
     501
     502static void thread_wait_timeout_callback(void *arg)
     503{
     504        thread_wakeup(arg);
     505}
     506
     507/**
     508 * Suspends this thread's execution until thread_wakeup() is called on it,
     509 * or deadline is reached.
     510 *
      511 * The way this would normally be used is that the current thread calls
      512 * thread_wait_start(), and if interruption has not been signaled, stores
      513 * a reference to itself in a synchronized structure (such as a waitq).
      514 * After that, it releases any spinlocks it might hold and calls this function.
      515 *
      516 * The thread doing the wakeup will acquire the thread's reference from said
      517 * synchronized structure and call thread_wakeup() on it.
     518 *
     519 * Notably, there can be more than one thread performing wakeup.
     520 * The number of performed calls to thread_wakeup(), or their relative
     521 * ordering with thread_wait_finish(), does not matter. However, calls to
     522 * thread_wakeup() are expected to be synchronized with thread_wait_start()
     523 * with which they are associated, otherwise wakeups may be missed.
     524 * However, the operation of thread_wakeup() is defined at any time,
     525 * synchronization notwithstanding (in the sense of C un/defined behavior),
     526 * and is in fact used to interrupt waiting threads by external events.
      527 * The waiting thread must operate correctly in the face of spurious wakeups,
     528 * and clean up its reference in the synchronization structure if necessary.
     529 *
     530 * Returns THREAD_WAIT_TIMEOUT if timeout fired, which is a necessary condition
      531 * for it to have been woken up by the timeout, but the caller must assume
     532 * that proper wakeups, timeouts and interrupts may occur concurrently, so
      533 * the fact that the timeout fired does not necessarily mean the thread
     534 * has not been woken up or interrupted.
     535 */
     536thread_wait_result_t thread_wait_finish(deadline_t deadline)
     537{
     538        assert(THREAD != NULL);
     539
     540        timeout_t timeout;
     541
     542        /* Extra check to avoid going to scheduler if we don't need to. */
     543        if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
     544            SLEEP_INITIAL)
     545                return THREAD_WAIT_SUCCESS;
     546
     547        if (deadline != DEADLINE_NEVER) {
     548                timeout_initialize(&timeout);
     549                timeout_register_deadline(&timeout, deadline,
     550                    thread_wait_timeout_callback, THREAD);
     551        }
     552
     553        scheduler_enter(Sleeping);
     554
     555        if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
     556                return THREAD_WAIT_TIMEOUT;
     557        } else {
     558                return THREAD_WAIT_SUCCESS;
     559        }
     560}
     561
     562void thread_wakeup(thread_t *thread)
    583563{
    584564        assert(thread != NULL);
    585565
    586         bool interrupted;
    587 
    588         irq_spinlock_lock(&thread->lock, true);
    589         interrupted = thread->interrupted;
    590         irq_spinlock_unlock(&thread->lock, true);
    591 
    592         return interrupted;
     566        int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
     567            memory_order_acq_rel);
     568
     569        if (state == SLEEP_ASLEEP) {
     570                /*
     571                 * Only one thread gets to do this.
     572                 * The reference consumed here is the reference implicitly passed to
     573                 * the waking thread by the sleeper in thread_wait_finish().
     574                 */
     575                thread_requeue_sleeping(thread);
     576        }
    593577}
    594578
     
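
thread_wait_start(), thread_wait_finish() and thread_wakeup() form the low-level sleep protocol described in the comments above; the in-tree consumer is the waitq code. As an illustration only, a minimal hedged sketch of a one-shot event for a single waiter built on the same protocol; the type and function names are invented for this example, the spinlock is assumed to be initialized with irq_spinlock_initialize(), and THREAD comes from the usual kernel headers:

    #include <proc/thread.h>
    #include <synch/spinlock.h>

    typedef struct {
            irq_spinlock_t lock;
            thread_t *waiter;    /* strong reference handed over to the waker */
            bool signaled;
    } demo_event_t;

    static void demo_event_wait(demo_event_t *ev)
    {
            /* Arm the sleep; do not block if we are being terminated. */
            if (thread_wait_start() == THREAD_TERMINATING)
                    return;

            /* Publish a strong reference to ourselves for the waker. */
            irq_spinlock_lock(&ev->lock, true);
            if (ev->signaled) {
                    irq_spinlock_unlock(&ev->lock, true);
                    return;
            }
            ev->waiter = thread_ref(THREAD);
            irq_spinlock_unlock(&ev->lock, true);

            /* Sleep. With a real deadline, THREAD_WAIT_TIMEOUT may be
             * returned, but a proper wakeup can still have raced with it. */
            (void) thread_wait_finish(DEADLINE_NEVER);

            /* Wakeups may be spurious: if our reference is still registered,
             * take it back and drop it ourselves. */
            irq_spinlock_lock(&ev->lock, true);
            if (ev->waiter == THREAD) {
                    ev->waiter = NULL;
                    thread_put(THREAD);
            }
            irq_spinlock_unlock(&ev->lock, true);
    }

    static void demo_event_signal(demo_event_t *ev)
    {
            irq_spinlock_lock(&ev->lock, true);
            ev->signaled = true;
            thread_t *thread = ev->waiter;
            ev->waiter = NULL;
            irq_spinlock_unlock(&ev->lock, true);

            if (thread != NULL) {
                    thread_wakeup(thread);  /* defined at any time */
                    thread_put(thread);     /* drop the waiter's stored reference */
            }
    }
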
    596580void thread_migration_disable(void)
    597581{
     582        ipl_t ipl = interrupts_disable();
     583
    598584        assert(THREAD);
    599 
    600585        THREAD->nomigrate++;
     586
     587        interrupts_restore(ipl);
    601588}
    602589
     
    604591void thread_migration_enable(void)
    605592{
     593        ipl_t ipl = interrupts_disable();
     594
    606595        assert(THREAD);
    607596        assert(THREAD->nomigrate > 0);
     
    609598        if (THREAD->nomigrate > 0)
    610599                THREAD->nomigrate--;
     600
     601        interrupts_restore(ipl);
    611602}
    612603
     
    632623}
    633624
     625errno_t thread_join(thread_t *thread)
     626{
     627        return thread_join_timeout(thread, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
     628}
     629
    634630/** Wait for another thread to exit.
     631 * After successful wait, the thread reference is destroyed.
    635632 *
    636633 * @param thread Thread to join on exit.
     
    643640errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
    644641{
     642        assert(thread != NULL);
     643
    645644        if (thread == THREAD)
    646645                return EINVAL;
    647646
    648         /*
    649          * Since thread join can only be called once on an undetached thread,
    650          * the thread pointer is guaranteed to be still valid.
    651          */
    652 
    653         irq_spinlock_lock(&thread->lock, true);
    654         assert(!thread->detached);
    655         irq_spinlock_unlock(&thread->lock, true);
    656 
    657         return waitq_sleep_timeout(&thread->join_wq, usec, flags, NULL);
    658 
    659         // FIXME: join should deallocate the thread.
    660         //        Current code calls detach after join, that's contrary to how
    661         //        join is used in other threading APIs.
    662 }
    663 
    664 /** Detach thread.
    665  *
    666  * Mark the thread as detached. If the thread is already
    667  * in the Lingering state, deallocate its resources.
    668  *
    669  * @param thread Thread to be detached.
    670  *
    671  */
     647        errno_t rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
     648
     649        if (rc == EOK)
     650                thread_put(thread);
     651
     652        return rc;
     653}
     654
    672655void thread_detach(thread_t *thread)
    673656{
    674         /*
    675          * Since the thread is expected not to be already detached,
    676          * pointer to it must be still valid.
    677          */
    678         irq_spinlock_lock(&thread->lock, true);
    679         assert(!thread->detached);
    680 
    681         if (thread->state == Lingering) {
    682                 /*
    683                  * Unlock &thread->lock and restore
    684                  * interrupts in thread_destroy().
    685                  */
    686                 thread_destroy(thread, true);
    687                 return;
    688         } else {
    689                 thread->detached = true;
    690         }
    691 
    692         irq_spinlock_unlock(&thread->lock, true);
     657        thread_put(thread);
    693658}
    694659
     
    702667void thread_usleep(uint32_t usec)
    703668{
    704         waitq_t wq;
    705 
    706         waitq_initialize(&wq);
    707 
    708         (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
     669        WAITQ_INITIALIZE(wq);
     670        (void) waitq_sleep_timeout(&wq, usec);
     671}
     672
     673/** Allow other threads to run. */
     674void thread_yield(void)
     675{
     676        assert(THREAD != NULL);
     677        scheduler_enter(Running);
    709678}
    710679
     
    713682        uint64_t ucycles, kcycles;
    714683        char usuffix, ksuffix;
    715         order_suffix(thread->ucycles, &ucycles, &usuffix);
    716         order_suffix(thread->kcycles, &kcycles, &ksuffix);
     684        order_suffix(atomic_time_read(&thread->ucycles), &ucycles, &usuffix);
     685        order_suffix(atomic_time_read(&thread->kcycles), &kcycles, &ksuffix);
     686
     687        state_t state = atomic_get_unordered(&thread->state);
    717688
    718689        char *name;
     
    722693                name = thread->name;
    723694
    724 #ifdef __32_BITS__
    725695        if (additional)
    726                 printf("%-8" PRIu64 " %10p %10p %9" PRIu64 "%c %9" PRIu64 "%c ",
     696                printf("%-8" PRIu64 " %p %p %9" PRIu64 "%c %9" PRIu64 "%c ",
    727697                    thread->tid, thread->thread_code, thread->kstack,
    728698                    ucycles, usuffix, kcycles, ksuffix);
    729699        else
    730                 printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 "\n",
    731                     thread->tid, name, thread, thread_states[thread->state],
     700                printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
     701                    thread->tid, name, thread, thread_states[state],
    732702                    thread->task, thread->task->container);
    733 #endif
    734 
    735 #ifdef __64_BITS__
    736         if (additional)
    737                 printf("%-8" PRIu64 " %18p %18p\n"
    738                     "         %9" PRIu64 "%c %9" PRIu64 "%c ",
    739                     thread->tid, thread->thread_code, thread->kstack,
    740                     ucycles, usuffix, kcycles, ksuffix);
    741         else
    742                 printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n",
    743                     thread->tid, name, thread, thread_states[thread->state],
    744                     thread->task, thread->task->container);
    745 #endif
    746703
    747704        if (additional) {
    748                 if (thread->cpu)
    749                         printf("%-5u", thread->cpu->id);
     705                cpu_t *cpu = atomic_get_unordered(&thread->cpu);
     706                if (cpu)
     707                        printf("%-5u", cpu->id);
    750708                else
    751709                        printf("none ");
    752710
    753                 if (thread->state == Sleeping) {
    754 #ifdef __32_BITS__
    755                         printf(" %10p", thread->sleep_queue);
    756 #endif
    757 
    758 #ifdef __64_BITS__
    759                         printf(" %18p", thread->sleep_queue);
    760 #endif
     711                if (state == Sleeping) {
     712                        printf(" %p", thread->sleep_queue);
    761713                }
    762714
     
    774726        thread_t *thread;
    775727
    776         /* Messing with thread structures, avoid deadlock */
     728        /* Accessing system-wide threads list through thread_first()/thread_next(). */
    777729        irq_spinlock_lock(&threads_lock, true);
    778730
    779 #ifdef __32_BITS__
    780         if (additional)
    781                 printf("[id    ] [code    ] [stack   ] [ucycles ] [kcycles ]"
    782                     " [cpu] [waitqueue]\n");
    783         else
    784                 printf("[id    ] [name        ] [address ] [state ] [task    ]"
    785                     " [ctn]\n");
    786 #endif
    787 
    788 #ifdef __64_BITS__
    789         if (additional) {
    790                 printf("[id    ] [code            ] [stack           ]\n"
    791                     "         [ucycles ] [kcycles ] [cpu] [waitqueue       ]\n");
    792         } else
    793                 printf("[id    ] [name        ] [address         ] [state ]"
    794                     " [task            ] [ctn]\n");
    795 #endif
     731        if (sizeof(void *) <= 4) {
     732                if (additional)
     733                        printf("[id    ] [code    ] [stack   ] [ucycles ] [kcycles ]"
     734                            " [cpu] [waitqueue]\n");
     735                else
     736                        printf("[id    ] [name        ] [address ] [state ] [task    ]"
     737                            " [ctn]\n");
     738        } else {
     739                if (additional) {
     740                        printf("[id    ] [code            ] [stack           ] [ucycles ] [kcycles ]"
     741                            " [cpu] [waitqueue       ]\n");
     742                } else
     743                        printf("[id    ] [name        ] [address         ] [state ]"
     744                            " [task            ] [ctn]\n");
     745        }
    796746
    797747        thread = thread_first();
     
    804754}
    805755
    806 /** Check whether thread exists.
    807  *
    808  * Note that threads_lock must be already held and
    809  * interrupts must be already disabled.
    810  *
    811  * @param thread Pointer to thread.
    812  *
    813  * @return True if thread t is known to the system, false otherwise.
    814  *
    815  */
    816 bool thread_exists(thread_t *thread)
    817 {
    818         assert(interrupts_disabled());
    819         assert(irq_spinlock_locked(&threads_lock));
    820 
     756static bool thread_exists(thread_t *thread)
     757{
    821758        odlink_t *odlink = odict_find_eq(&threads, thread, NULL);
    822759        return odlink != NULL;
    823760}
    824761
     762/** Check whether the thread exists, and if so, return a reference to it.
     763 */
     764thread_t *thread_try_get(thread_t *thread)
     765{
     766        irq_spinlock_lock(&threads_lock, true);
     767
     768        if (thread_exists(thread)) {
     769                /* Try to strengthen the reference. */
     770                thread = thread_try_ref(thread);
     771        } else {
     772                thread = NULL;
     773        }
     774
     775        irq_spinlock_unlock(&threads_lock, true);
     776
     777        return thread;
     778}
     779
    825780/** Update accounting of current thread.
    826781 *
     
    833788void thread_update_accounting(bool user)
    834789{
     790        assert(interrupts_disabled());
     791
    835792        uint64_t time = get_cycle();
    836793
    837         assert(interrupts_disabled());
    838         assert(irq_spinlock_locked(&THREAD->lock));
    839 
    840794        if (user)
    841                 THREAD->ucycles += time - THREAD->last_cycle;
     795                atomic_time_increment(&THREAD->ucycles, time - THREAD->last_cycle);
    842796        else
    843                 THREAD->kcycles += time - THREAD->last_cycle;
     797                atomic_time_increment(&THREAD->kcycles, time - THREAD->last_cycle);
    844798
    845799        THREAD->last_cycle = time;
     
    850804 * The threads_lock must be already held by the caller of this function and
    851805 * interrupts must be disabled.
     806 *
     807 * The returned reference is weak.
     808 * If the caller needs to keep it, thread_try_ref() must be used to upgrade
     809 * to a strong reference _before_ threads_lock is released.
    852810 *
    853811 * @param id Thread ID.
     
    928886{
    929887        irq_spinlock_lock(&threads_lock, true);
    930 
    931         thread_t *thread = thread_find_by_id(thread_id);
     888        thread_t *thread = thread_try_ref(thread_find_by_id(thread_id));
     889        irq_spinlock_unlock(&threads_lock, true);
     890
    932891        if (thread == NULL) {
    933892                printf("No such thread.\n");
    934                 irq_spinlock_unlock(&threads_lock, true);
    935893                return;
    936894        }
    937 
    938         irq_spinlock_lock(&thread->lock, false);
    939895
    940896        /*
     
    950906         */
    951907
    952         bool sleeping = false;
    953         istate_t *istate = thread->udebug.uspace_state;
    954         if (istate != NULL) {
    955                 printf("Scheduling thread stack trace.\n");
    956                 thread->btrace = true;
    957                 if (thread->state == Sleeping)
    958                         sleeping = true;
    959         } else
    960                 printf("Thread interrupt state not available.\n");
    961 
    962         irq_spinlock_unlock(&thread->lock, false);
    963 
    964         if (sleeping)
    965                 waitq_interrupt_sleep(thread);
    966 
    967         irq_spinlock_unlock(&threads_lock, true);
     908        printf("Scheduling thread stack trace.\n");
     909        atomic_set_unordered(&thread->btrace, true);
     910
     911        thread_wakeup(thread);
     912        thread_put(thread);
    968913}
    969914
     
    998943
    999944/** Process syscall to create new thread.
    1000  *
    1001  */
    1002 sys_errno_t sys_thread_create(uspace_ptr_uspace_arg_t uspace_uarg, uspace_ptr_char uspace_name,
    1003     size_t name_len, uspace_ptr_thread_id_t uspace_thread_id)
     945 * The started thread will have initial pc and sp set to the exact values passed
     946 * to the syscall. The kernel will not touch any stack data below the stack
     947 * pointer, but some architectures may require some space to be available
     948 * for use above it. See userspace() in kernel, and <libarch/thread.h> in libc.
     949 *
     950 */
     951sys_errno_t sys_thread_create(sysarg_t pc, sysarg_t sp,
     952    uspace_ptr_char uspace_name, size_t name_len)
    1004953{
    1005954        if (name_len > THREAD_NAME_BUFLEN - 1)
     
    1017966         * In case of success, kernel_uarg will be freed in uinit().
    1018967         */
    1019         uspace_arg_t *kernel_uarg =
    1020             (uspace_arg_t *) malloc(sizeof(uspace_arg_t));
     968        uinit_arg_t *kernel_uarg = malloc(sizeof(uinit_arg_t));
    1021969        if (!kernel_uarg)
    1022970                return (sys_errno_t) ENOMEM;
    1023971
    1024         rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
    1025         if (rc != EOK) {
    1026                 free(kernel_uarg);
    1027                 return (sys_errno_t) rc;
    1028         }
     972        kernel_uarg->pc = pc;
     973        kernel_uarg->sp = sp;
     974
     975        // TODO: fix some unnecessary inconsistencies between architectures
    1029976
    1030977        thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
    1031978            THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf);
    1032         if (thread) {
    1033                 if (uspace_thread_id) {
    1034                         rc = copy_to_uspace(uspace_thread_id, &thread->tid,
    1035                             sizeof(thread->tid));
    1036                         if (rc != EOK) {
    1037                                 /*
    1038                                  * We have encountered a failure, but the thread
    1039                                  * has already been created. We need to undo its
    1040                                  * creation now.
    1041                                  */
    1042 
    1043                                 /*
    1044                                  * The new thread structure is initialized, but
    1045                                  * is still not visible to the system.
    1046                                  * We can safely deallocate it.
    1047                                  */
    1048                                 slab_free(thread_cache, thread);
    1049                                 free(kernel_uarg);
    1050 
    1051                                 return (sys_errno_t) rc;
    1052                         }
    1053                 }
     979        if (!thread) {
     980                free(kernel_uarg);
     981                return (sys_errno_t) ENOMEM;
     982        }
    1054983
    1055984#ifdef CONFIG_UDEBUG
    1056                 /*
    1057                 * Generate udebug THREAD_B event and attach the thread.
    1058                 * This must be done atomically (with the debug locks held),
    1059                 * otherwise we would either miss some thread or receive
    1060                 * THREAD_B events for threads that already existed
    1061                 * and could be detected with THREAD_READ before.
    1062                 */
    1063                 udebug_thread_b_event_attach(thread, TASK);
     985        /*
      986         * Generate udebug THREAD_B event and attach the thread.
      987         * This must be done atomically (with the debug locks held),
      988         * otherwise we would either miss some thread or receive
      989         * THREAD_B events for threads that already existed
      990         * and could be detected with THREAD_READ before.
      991         */
     992        udebug_thread_b_event_attach(thread, TASK);
    1064993#else
    1065                 thread_attach(thread, TASK);
     994        thread_attach(thread, TASK);
    1066995#endif
    1067                 thread_ready(thread);
    1068 
    1069                 return 0;
    1070         } else
    1071                 free(kernel_uarg);
    1072 
    1073         return (sys_errno_t) ENOMEM;
     996        thread_start(thread);
     997        thread_put(thread);
     998
     999        return (sys_errno_t) EOK;
    10741000}
    10751001
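
With the reworked syscall taking pc and sp directly, a libc-side wrapper is expected to pass the entry point and a prepared top-of-stack address verbatim. A hedged sketch, assuming the existing __SYSCALL4()/SYS_THREAD_CREATE convention; the variable names are illustrative and the exact wrapper is not part of this changeset:

    /* Hypothetical libc-side invocation. 'entry_point' and 'stack_top' are
     * prepared by the caller; the kernel uses sp verbatim and only expects
     * the architecture-specific headroom above it (see <libarch/thread.h>). */
    errno_t rc = (errno_t) __SYSCALL4(SYS_THREAD_CREATE,
        (sysarg_t) entry_point, (sysarg_t) stack_top,
        (sysarg_t) name, (sysarg_t) str_size(name));
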