source: mainline/kernel/generic/src/proc/thread.c@ 5663872

Last change on this file since 5663872 was 5663872, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 18 months ago

Move stuff around for thread sleep

Only mark the thread as ready for wakeup after we switch to
another context. This way, soundness of the synchronization
does not depend on the thread lock being held across the context
switch, which gives us more freedom.

[f761f1eb]1/*
[7ed8530]2 * Copyright (c) 2010 Jakub Jermar
[ef1eab7]3 * Copyright (c) 2018 Jiri Svoboda
[f761f1eb]4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
[174156fd]30/** @addtogroup kernel_generic_proc
[b45c443]31 * @{
32 */
33
[9179d0a]34/**
[b45c443]35 * @file
[da1bafb]36 * @brief Thread management functions.
[9179d0a]37 */
38
[63e27ef]39#include <assert.h>
[f761f1eb]40#include <proc/scheduler.h>
41#include <proc/thread.h>
42#include <proc/task.h>
43#include <mm/frame.h>
44#include <mm/page.h>
45#include <arch/asm.h>
[cce6acf]46#include <arch/cycle.h>
[f761f1eb]47#include <arch.h>
48#include <synch/spinlock.h>
49#include <synch/waitq.h>
[d314571]50#include <synch/syswaitq.h>
[f761f1eb]51#include <cpu.h>
[e535eeb]52#include <str.h>
[f761f1eb]53#include <context.h>
[5c9a08b]54#include <adt/list.h>
[ef1eab7]55#include <adt/odict.h>
[f761f1eb]56#include <time/clock.h>
[b3f8fb7]57#include <time/timeout.h>
[8d6c1f1]58#include <time/delay.h>
[4ffa9e0]59#include <config.h>
60#include <arch/interrupt.h>
[26a8604f]61#include <smp/ipi.h>
[f2ffad4]62#include <arch/faddr.h>
[23684b7]63#include <atomic.h>
[b169619]64#include <memw.h>
[bab75df6]65#include <stdio.h>
[aafed15]66#include <stdlib.h>
[9f52563]67#include <main/uinit.h>
[e3c762cd]68#include <syscall/copy.h>
69#include <errno.h>
[aae365bc]70#include <debug.h>
[111b9b9]71#include <halt.h>
[52755f1]72
[fe19611]73/** Thread states */
[a000878c]74const char *thread_states[] = {
[fe19611]75 "Invalid",
76 "Running",
77 "Sleeping",
78 "Ready",
79 "Entering",
80 "Exiting",
[48d14222]81 "Lingering"
[e1b6742]82};
83
[ef1eab7]84/** Lock protecting the @c threads ordered dictionary.
[4e33b6b]85 *
86 * For locking rules, see declaration thereof.
87 */
[da1bafb]88IRQ_SPINLOCK_INITIALIZE(threads_lock);
[88169d9]89
[ef1eab7]90/** Ordered dictionary of all threads by their address (i.e. pointer to
91 * the thread_t structure).
[88169d9]92 *
[ef1eab7]93 * When a thread is found in the @c threads ordered dictionary, it is
94 * guaranteed to exist as long as the @c threads_lock is held.
[da1bafb]95 *
[ef1eab7]96 * Members are of type thread_t.
[1871118]97 *
 98 * This structure contains weak references. Any reference from it must not
 99 * leave the threads_lock critical section unless strengthened via thread_try_ref().
[88169d9]100 */
[ef1eab7]101odict_t threads;
[f761f1eb]102
[da1bafb]103IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
104static thread_id_t last_tid = 0;
[f761f1eb]105
[82d515e9]106static slab_cache_t *thread_cache;
[da1bafb]107
[ef1eab7]108static void *threads_getkey(odlink_t *);
109static int threads_cmp(void *, void *);
110
[4e33b6b]111/** Thread wrapper.
[70527f1]112 *
[4e33b6b]113 * This wrapper is provided to ensure that every thread makes a call to
114 * thread_exit() when its implementing function returns.
[f761f1eb]115 *
[22f7769]116 * interrupts_disable() is assumed.
[70527f1]117 *
[f761f1eb]118 */
[e16e036a]119static void cushion(void)
[f761f1eb]120{
[43114c5]121 void (*f)(void *) = THREAD->thread_code;
122 void *arg = THREAD->thread_arg;
[449dc1ed]123 THREAD->last_cycle = get_cycle();
[a35b458]124
[0313ff0]125 /* This is where each thread wakes up after its creation */
[da1bafb]126 irq_spinlock_unlock(&THREAD->lock, false);
[22f7769]127 interrupts_enable();
[a35b458]128
[f761f1eb]129 f(arg);
[a35b458]130
[f761f1eb]131 thread_exit();
[a35b458]132
[da1bafb]133 /* Not reached */
[f761f1eb]134}
135
[da1bafb]136/** Initialization and allocation for thread_t structure
137 *
138 */
[b7fd2a0]139static errno_t thr_constructor(void *obj, unsigned int kmflags)
[266294a9]140{
[da1bafb]141 thread_t *thread = (thread_t *) obj;
[a35b458]142
[da1bafb]143 irq_spinlock_initialize(&thread->lock, "thread_t_lock");
144 link_initialize(&thread->rq_link);
145 link_initialize(&thread->wq_link);
146 link_initialize(&thread->th_link);
[a35b458]147
[32fffef0]148 /* call the architecture-specific part of the constructor */
[da1bafb]149 thr_constructor_arch(thread);
[a35b458]150
[38ff925]151 /*
152 * Allocate the kernel stack from the low-memory to prevent an infinite
153 * nesting of TLB-misses when accessing the stack from the part of the
154 * TLB-miss handler written in C.
155 *
156 * Note that low-memory is safe to be used for the stack as it will be
157 * covered by the kernel identity mapping, which guarantees not to
158 * nest TLB-misses infinitely (either via some hardware mechanism or
[c477c80]159 * by the construction of the assembly-language part of the TLB-miss
[38ff925]160 * handler).
161 *
162 * This restriction can be lifted once each architecture provides
[c477c80]163 * a similar guarantee, for example, by locking the kernel stack
[38ff925]164 * in the TLB whenever it is allocated from the high-memory and the
165 * thread is being scheduled to run.
166 */
167 kmflags |= FRAME_LOWMEM;
168 kmflags &= ~FRAME_HIGHMEM;
[a35b458]169
[128359eb]170 /*
171 * NOTE: All kernel stacks must be aligned to STACK_SIZE,
172 * see CURRENT.
173 */
[d1da1ff2]174
[cd3b380]175 uintptr_t stack_phys =
176 frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);
[0366d09d]177 if (!stack_phys)
[7f11dc6]178 return ENOMEM;
[a35b458]179
[cd3b380]180 thread->kstack = (uint8_t *) PA2KA(stack_phys);
[a35b458]181
[9a1b20c]182#ifdef CONFIG_UDEBUG
[da1bafb]183 mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
[9a1b20c]184#endif
[a35b458]185
[7f11dc6]186 return EOK;
[266294a9]187}
188
189/** Destruction of thread_t object */
[da1bafb]190static size_t thr_destructor(void *obj)
[266294a9]191{
[da1bafb]192 thread_t *thread = (thread_t *) obj;
[a35b458]193
[32fffef0]194 /* call the architecture-specific part of the destructor */
[da1bafb]195 thr_destructor_arch(thread);
[a35b458]196
[5df1963]197 frame_free(KA2PA(thread->kstack), STACK_FRAMES);
[a35b458]198
[e7c4115d]199 return STACK_FRAMES; /* number of frames freed */
[266294a9]200}
[70527f1]201
202/** Initialize threads
203 *
204 * Initialize kernel threads support.
205 *
206 */
[f761f1eb]207void thread_init(void)
208{
[43114c5]209 THREAD = NULL;
[a35b458]210
[e3306d04]211 atomic_store(&nrdy, 0);
[0366d09d]212 thread_cache = slab_cache_create("thread_t", sizeof(thread_t), _Alignof(thread_t),
[6f4495f5]213 thr_constructor, thr_destructor, 0);
[a35b458]214
[ef1eab7]215 odict_initialize(&threads, threads_getkey, threads_cmp);
[016acbe]216}
[70527f1]217
[6eef3c4]218/** Wire thread to the given CPU
219 *
220 * @param cpu CPU to wire the thread to.
221 *
222 */
223void thread_wire(thread_t *thread, cpu_t *cpu)
224{
225 irq_spinlock_lock(&thread->lock, true);
226 thread->cpu = cpu;
[dd218ea]227 thread->nomigrate++;
[6eef3c4]228 irq_spinlock_unlock(&thread->lock, true);
229}
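/*
 * Illustrative sketch (not part of this file): a caller might pin a freshly
 * created thread to a particular CPU before readying it. The variable t and
 * the CPU index are hypothetical.
 *
 *	thread_wire(t, &cpus[0]);
 *	thread_ready(t);
 */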
230
[8a64e81e]231/** Invoked right before thread_ready() readies the thread. The thread is locked. */
232static void before_thread_is_ready(thread_t *thread)
233{
[63e27ef]234 assert(irq_spinlock_locked(&thread->lock));
[8a64e81e]235}
236
[70527f1]237/** Make thread ready
238 *
[1871118]239 * Switch thread to the ready state. Consumes reference passed by the caller.
[70527f1]240 *
[df58e44]241 * @param thread Thread to make ready.
[70527f1]242 *
243 */
[da1bafb]244void thread_ready(thread_t *thread)
[f761f1eb]245{
[da1bafb]246 irq_spinlock_lock(&thread->lock, true);
[a35b458]247
[63e27ef]248 assert(thread->state != Ready);
[518dd43]249
[8a64e81e]250 before_thread_is_ready(thread);
[a35b458]251
[6eef3c4]252 int i = (thread->priority < RQ_COUNT - 1) ?
253 ++thread->priority : thread->priority;
[518dd43]254
[fbaf6ac]255 /* Prefer the CPU on which the thread ran last */
256 cpu_t *cpu = thread->cpu ? thread->cpu : CPU;
[a35b458]257
[da1bafb]258 thread->state = Ready;
[a35b458]259
[da1bafb]260 irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
[a35b458]261
[70527f1]262 /*
[da1bafb]263 * Append thread to respective ready queue
264 * on respective processor.
[f761f1eb]265 */
[a35b458]266
[55b77d9]267 list_append(&thread->rq_link, &cpu->rq[i].rq);
[da1bafb]268 cpu->rq[i].n++;
269 irq_spinlock_unlock(&(cpu->rq[i].lock), true);
[a35b458]270
[59e07c91]271 atomic_inc(&nrdy);
[248fc1a]272 atomic_inc(&cpu->nrdy);
[f761f1eb]273}
274
[70527f1]275/** Create new thread
276 *
277 * Create a new thread.
278 *
[da1bafb]279 * @param func Thread's implementing function.
280 * @param arg Thread's implementing function argument.
281 * @param task Task to which the thread belongs. The caller must
282 * guarantee that the task won't cease to exist during the
283 * call. The task's lock may not be held.
284 * @param flags Thread flags.
285 * @param name Symbolic name (a copy is made).
[70527f1]286 *
[da1bafb]287 * @return New thread's structure on success, NULL on failure.
[70527f1]288 *
289 */
[3bacee1]290thread_t *thread_create(void (*func)(void *), void *arg, task_t *task,
[6eef3c4]291 thread_flags_t flags, const char *name)
[f761f1eb]292{
[abf6c01]293 thread_t *thread = (thread_t *) slab_alloc(thread_cache, FRAME_ATOMIC);
[da1bafb]294 if (!thread)
[2a46e10]295 return NULL;
[a35b458]296
[1871118]297 refcount_init(&thread->refcount);
298
[deacd722]299 if (thread_create_arch(thread, flags) != EOK) {
300 slab_free(thread_cache, thread);
301 return NULL;
302 }
303
[bb68433]304 /* Not needed, but good for debugging */
[26aafe8]305 memsetb(thread->kstack, STACK_SIZE, 0);
[a35b458]306
[da1bafb]307 irq_spinlock_lock(&tidlock, true);
308 thread->tid = ++last_tid;
309 irq_spinlock_unlock(&tidlock, true);
[a35b458]310
[edc64c0]311 memset(&thread->saved_context, 0, sizeof(thread->saved_context));
[da1bafb]312 context_set(&thread->saved_context, FADDR(cushion),
[26aafe8]313 (uintptr_t) thread->kstack, STACK_SIZE);
[a35b458]314
[a6e55886]315 current_initialize((current_t *) thread->kstack);
[a35b458]316
[da1bafb]317 ipl_t ipl = interrupts_disable();
[c030818]318 thread->saved_ipl = interrupts_read();
[bb68433]319 interrupts_restore(ipl);
[a35b458]320
[da1bafb]321 str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
[a35b458]322
[da1bafb]323 thread->thread_code = func;
324 thread->thread_arg = arg;
325 thread->ucycles = 0;
326 thread->kcycles = 0;
[6eef3c4]327 thread->uncounted =
328 ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
[da1bafb]329 thread->priority = -1; /* Start in rq[0] */
330 thread->cpu = NULL;
[6eef3c4]331 thread->stolen = false;
332 thread->uspace =
333 ((flags & THREAD_FLAG_USPACE) == THREAD_FLAG_USPACE);
[a35b458]334
[43ac0cc]335 thread->nomigrate = 0;
[da1bafb]336 thread->state = Entering;
[a35b458]337
[111b9b9]338 atomic_init(&thread->sleep_queue, NULL);
[a35b458]339
[da1bafb]340 thread->in_copy_from_uspace = false;
341 thread->in_copy_to_uspace = false;
[a35b458]342
[da1bafb]343 thread->interrupted = false;
[111b9b9]344 atomic_init(&thread->sleep_state, SLEEP_INITIAL);
345
[da1bafb]346 waitq_initialize(&thread->join_wq);
[a35b458]347
[da1bafb]348 thread->task = task;
[a35b458]349
[6eef3c4]350 thread->fpu_context_exists = false;
[a35b458]351
[ef1eab7]352 odlink_initialize(&thread->lthreads);
[a35b458]353
[9a1b20c]354#ifdef CONFIG_UDEBUG
[5b7a107]355 /* Initialize debugging stuff */
356 thread->btrace = false;
[da1bafb]357 udebug_thread_initialize(&thread->udebug);
[9a1b20c]358#endif
[a35b458]359
[6eef3c4]360 if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
[da1bafb]361 thread_attach(thread, task);
[a35b458]362
[da1bafb]363 return thread;
[d8431986]364}
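/*
 * Illustrative sketch (not part of this file): creating and readying a kernel
 * thread with this API. The function my_worker and its argument are
 * hypothetical; THREAD_FLAG_NONE is assumed to denote "no flags".
 *
 *	static void my_worker(void *arg)
 *	{
 *		// ... do the work ...
 *		// returning is fine; cushion() calls thread_exit() for us
 *	}
 *
 *	thread_t *t = thread_create(my_worker, NULL, TASK,
 *	    THREAD_FLAG_NONE, "my_worker");
 *	if (t != NULL)
 *		thread_ready(t);	// consumes the reference from thread_create()
 */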
365
366/** Destroy thread memory structure
367 *
 368 * Detach the thread from all queues, CPUs, etc., and destroy it.
[da1bafb]369 *
[11d2c983]370 * @param obj Thread to be destroyed.
[d8431986]371 *
372 */
[1871118]373static void thread_destroy(void *obj)
[d8431986]374{
[1871118]375 thread_t *thread = (thread_t *) obj;
376
[11d2c983]377 assert_link_not_used(&thread->rq_link);
378 assert_link_not_used(&thread->wq_link);
379
[63e27ef]380 assert(thread->task);
[11d2c983]381
382 ipl_t ipl = interrupts_disable();
383
384 /* Remove thread from global list. */
385 irq_spinlock_lock(&threads_lock, false);
386 odict_remove(&thread->lthreads);
387 irq_spinlock_unlock(&threads_lock, false);
388
[c7326f21]389 /* Remove thread from task's list and accumulate accounting. */
390 irq_spinlock_lock(&thread->task->lock, false);
391
392 list_remove(&thread->th_link);
393
394 /*
395 * No other CPU has access to this thread anymore, so we don't need
396 * thread->lock for accessing thread's fields after this point.
397 */
398
399 if (!thread->uncounted) {
400 thread->task->ucycles += thread->ucycles;
401 thread->task->kcycles += thread->kcycles;
402 }
403
404 irq_spinlock_unlock(&thread->task->lock, false);
[11d2c983]405
406 assert((thread->state == Exiting) || (thread->state == Lingering));
[a35b458]407
[c7326f21]408 /* Clear cpu->fpu_owner if set to this thread. */
[169815e]409#ifdef CONFIG_FPU_LAZY
410 if (thread->cpu) {
[f3dbe27]411 /*
412 * We need to lock for this because the old CPU can concurrently try
413 * to dump this thread's FPU state, in which case we need to wait for
414 * it to finish. An atomic compare-and-swap wouldn't be enough.
415 */
[169815e]416 irq_spinlock_lock(&thread->cpu->fpu_lock, false);
[f3dbe27]417
418 thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner,
419 memory_order_relaxed);
420
421 if (owner == thread) {
422 atomic_store_explicit(&thread->cpu->fpu_owner, NULL,
423 memory_order_relaxed);
424 }
425
[169815e]426 irq_spinlock_unlock(&thread->cpu->fpu_lock, false);
427 }
428#endif
[a35b458]429
[11d2c983]430 interrupts_restore(ipl);
[a35b458]431
[ea7890e7]432 /*
[7ed8530]433 * Drop the reference to the containing task.
[ea7890e7]434 */
[da1bafb]435 task_release(thread->task);
[11d2c983]436 thread->task = NULL;
437
[82d515e9]438 slab_free(thread_cache, thread);
[d8431986]439}
440
[1871118]441void thread_put(thread_t *thread)
442{
443 if (refcount_down(&thread->refcount)) {
444 thread_destroy(thread);
445 }
446}
447
[d8431986]448/** Make the thread visible to the system.
449 *
 450 * Attach the thread structure to the current task and make it visible in the
[5dcee525]451 * @c threads ordered dictionary.
[d8431986]452 *
[da1bafb]453 * @param thread Thread to be attached to the task.
454 * @param task Task to which the thread is to be attached.
455 *
[d8431986]456 */
[da1bafb]457void thread_attach(thread_t *thread, task_t *task)
[d8431986]458{
[1871118]459 ipl_t ipl = interrupts_disable();
460
[d8431986]461 /*
[9a1b20c]462 * Attach to the specified task.
[d8431986]463 */
[1871118]464 irq_spinlock_lock(&task->lock, false);
[a35b458]465
[7ed8530]466 /* Hold a reference to the task. */
467 task_hold(task);
[a35b458]468
[9a1b20c]469 /* Must not count kbox thread into lifecount */
[6eef3c4]470 if (thread->uspace)
[9a1b20c]471 atomic_inc(&task->lifecount);
[a35b458]472
[55b77d9]473 list_append(&thread->th_link, &task->threads);
[a35b458]474
[1871118]475 irq_spinlock_unlock(&task->lock, false);
[a35b458]476
[bb68433]477 /*
[ef1eab7]478 * Register this thread in the system-wide dictionary.
[bb68433]479 */
[1871118]480 irq_spinlock_lock(&threads_lock, false);
[ef1eab7]481 odict_insert(&thread->lthreads, &threads, NULL);
[1871118]482 irq_spinlock_unlock(&threads_lock, false);
483
484 interrupts_restore(ipl);
[f761f1eb]485}
486
[0182a665]487/** Terminate thread.
[70527f1]488 *
[da1bafb]489 * End current thread execution and switch it to the exiting state.
490 * All pending timeouts are executed.
491 *
[70527f1]492 */
[f761f1eb]493void thread_exit(void)
494{
[6eef3c4]495 if (THREAD->uspace) {
[9a1b20c]496#ifdef CONFIG_UDEBUG
497 /* Generate udebug THREAD_E event */
498 udebug_thread_e_event();
[a35b458]499
[0ac99db]500 /*
501 * This thread will not execute any code or system calls from
502 * now on.
503 */
504 udebug_stoppable_begin();
[9a1b20c]505#endif
506 if (atomic_predec(&TASK->lifecount) == 0) {
507 /*
508 * We are the last userspace thread in the task that
509 * still has not exited. With the exception of the
510 * moment the task was created, new userspace threads
511 * can only be created by threads of the same task.
512 * We are safe to perform cleanup.
[da1bafb]513 *
[9a1b20c]514 */
[ea7890e7]515 ipc_cleanup();
[d314571]516 sys_waitq_task_cleanup();
[3bacee1]517 LOG("Cleanup of task %" PRIu64 " completed.", TASK->taskid);
[ea7890e7]518 }
519 }
[a35b458]520
[da1bafb]521 irq_spinlock_lock(&THREAD->lock, true);
[43114c5]522 THREAD->state = Exiting;
[da1bafb]523 irq_spinlock_unlock(&THREAD->lock, true);
[a35b458]524
[f761f1eb]525 scheduler();
[a35b458]526
[661a5ac]527 panic("should never be reached");
[f761f1eb]528}
529
[518dd43]530/** Interrupts an existing thread so that it may exit as soon as possible.
[1b20da0]531 *
532 * Threads that are blocked waiting for a synchronization primitive
[897fd8f1]533 * are woken up with a return code of EINTR if the
[518dd43]534 * blocking call was interruptible. See waitq_sleep_timeout().
[1b20da0]535 *
[518dd43]536 * Interrupted threads automatically exit when returning to user space.
[1b20da0]537 *
[1871118]538 * @param thread A valid thread object.
[518dd43]539 */
[111b9b9]540void thread_interrupt(thread_t *thread)
[518dd43]541{
[63e27ef]542 assert(thread != NULL);
[111b9b9]543 thread->interrupted = true;
544 thread_wakeup(thread);
545}
[a35b458]546
[111b9b9]547/** Prepare for putting the thread to sleep.
548 *
 549 * @returns whether the thread is currently terminating. If THREAD_OK
 550 * is returned, the thread is guaranteed to be woken up instantly if it
 551 * is terminated at any time between this function's return and
 552 * thread_wait_finish(). If THREAD_TERMINATING is returned, the thread can still
 553 * go to sleep, but doing so will delay termination.
554 */
555thread_termination_state_t thread_wait_start(void)
556{
557 assert(THREAD != NULL);
[a35b458]558
[111b9b9]559 /*
560 * This is an exchange rather than a store so that we can use the acquire
561 * semantics, which is needed to ensure that code after this operation sees
 562 * memory ops made before thread_wakeup() in another thread, if that wakeup
563 * was reset by this operation.
564 *
565 * In particular, we need this to ensure we can't miss the thread being
566 * terminated concurrently with a synchronization primitive preparing to
567 * sleep.
568 */
569 (void) atomic_exchange_explicit(&THREAD->sleep_state, SLEEP_INITIAL,
570 memory_order_acquire);
[a35b458]571
[111b9b9]572 return THREAD->interrupted ? THREAD_TERMINATING : THREAD_OK;
573}
[a35b458]574
[111b9b9]575static void thread_wait_timeout_callback(void *arg)
576{
577 thread_wakeup(arg);
578}
579
580/**
581 * Suspends this thread's execution until thread_wakeup() is called on it,
 582 * or the deadline is reached.
583 *
 584 * The way this would normally be used is that the current thread calls
585 * thread_wait_start(), and if interruption has not been signaled, stores
586 * a reference to itself in a synchronized structure (such as waitq).
587 * After that, it releases any spinlocks it might hold and calls this function.
588 *
 589 * The thread doing the wakeup acquires the thread's reference from said
 590 * synchronized structure and calls thread_wakeup() on it.
591 *
592 * Notably, there can be more than one thread performing wakeup.
593 * The number of performed calls to thread_wakeup(), or their relative
594 * ordering with thread_wait_finish(), does not matter. However, calls to
595 * thread_wakeup() are expected to be synchronized with thread_wait_start()
596 * with which they are associated, otherwise wakeups may be missed.
 597 * That said, the operation of thread_wakeup() is defined at any time,
598 * synchronization notwithstanding (in the sense of C un/defined behavior),
599 * and is in fact used to interrupt waiting threads by external events.
 600 * The waiting thread must operate correctly in the face of spurious wakeups,
601 * and clean up its reference in the synchronization structure if necessary.
602 *
 603 * Returns THREAD_WAIT_TIMEOUT if the timeout fired, which is a necessary condition
 604 * for it to have been woken up by the timeout, but the caller must assume
 605 * that proper wakeups, timeouts and interrupts may occur concurrently, so
 606 * the fact that the timeout has been registered does not necessarily mean the
 607 * thread has not been woken up or interrupted.
608 */
609thread_wait_result_t thread_wait_finish(deadline_t deadline)
610{
611 assert(THREAD != NULL);
612
613 timeout_t timeout;
614
[5663872]615 /* Extra check to avoid going to the scheduler if we don't need to. */
616 if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
617 SLEEP_INITIAL)
618 return THREAD_WAIT_SUCCESS;
[111b9b9]619
[5663872]620 if (deadline != DEADLINE_NEVER) {
[111b9b9]621 timeout_initialize(&timeout);
622 timeout_register_deadline(&timeout, deadline,
623 thread_wait_timeout_callback, THREAD);
624 }
625
[5663872]626 ipl_t ipl = interrupts_disable();
627 irq_spinlock_lock(&THREAD->lock, false);
628 THREAD->state = Sleeping;
629 scheduler_locked(ipl);
[111b9b9]630
631 if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
632 return THREAD_WAIT_TIMEOUT;
633 } else {
634 return THREAD_WAIT_SUCCESS;
635 }
636}
637
638void thread_wakeup(thread_t *thread)
639{
640 assert(thread != NULL);
641
642 int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
[5663872]643 memory_order_acq_rel);
[111b9b9]644
645 if (state == SLEEP_ASLEEP) {
646 /*
647 * Only one thread gets to do this.
648 * The reference consumed here is the reference implicitly passed to
649 * the waking thread by the sleeper in thread_wait_finish().
650 */
651 thread_ready(thread);
652 }
[518dd43]653}
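/*
 * Illustrative sketch (not part of this file) of the sleep protocol described
 * above, as a synchronization primitive might use it. The queue, its lock,
 * the enqueue()/dequeue() helpers and the deadline are hypothetical placeholders.
 *
 *	// Sleeping side:
 *	if (thread_wait_start() == THREAD_TERMINATING) {
 *		// we are being terminated; bail out instead of sleeping
 *	}
 *	irq_spinlock_lock(&queue_lock, true);
 *	enqueue(THREAD);	// publish a reference to ourselves
 *	irq_spinlock_unlock(&queue_lock, true);
 *	thread_wait_result_t rc = thread_wait_finish(deadline);
 *	if (rc == THREAD_WAIT_TIMEOUT) {
 *		// remove our queue entry if still present (spurious wakeups possible)
 *	}
 *
 *	// Waking side:
 *	irq_spinlock_lock(&queue_lock, true);
 *	thread_t *t = dequeue();
 *	irq_spinlock_unlock(&queue_lock, true);
 *	if (t != NULL)
 *		thread_wakeup(t);
 */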
654
[43ac0cc]655/** Prevent the current thread from being migrated to another processor. */
656void thread_migration_disable(void)
657{
[63e27ef]658 assert(THREAD);
[a35b458]659
[43ac0cc]660 THREAD->nomigrate++;
661}
662
663/** Allow the current thread to be migrated to another processor. */
664void thread_migration_enable(void)
665{
[63e27ef]666 assert(THREAD);
667 assert(THREAD->nomigrate > 0);
[a35b458]668
[6eef3c4]669 if (THREAD->nomigrate > 0)
670 THREAD->nomigrate--;
[43ac0cc]671}
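/*
 * Illustrative sketch (not part of this file): bracketing a section that must
 * stay on the current CPU. The CPU-local work in between is hypothetical.
 *
 *	thread_migration_disable();
 *	// CPU-local state can be used here; the thread will not be migrated
 *	thread_migration_enable();
 */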
672
[70527f1]673/** Thread sleep
674 *
675 * Suspend execution of the current thread.
676 *
677 * @param sec Number of seconds to sleep.
678 *
679 */
[7f1c620]680void thread_sleep(uint32_t sec)
[f761f1eb]681{
[7c3fb9b]682 /*
683 * Sleep in 1000 second steps to support
684 * full argument range
685 */
[22e6802]686 while (sec > 0) {
687 uint32_t period = (sec > 1000) ? 1000 : sec;
[a35b458]688
[22e6802]689 thread_usleep(period * 1000000);
690 sec -= period;
691 }
[f761f1eb]692}
[70527f1]693
[5110d0a]694errno_t thread_join(thread_t *thread)
695{
696 return thread_join_timeout(thread, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
697}
698
[fe19611]699/** Wait for another thread to exit.
[1871118]700 * This function does not destroy the thread. Reference counting handles that.
[fe19611]701 *
[da1bafb]702 * @param thread Thread to join on exit.
703 * @param usec Timeout in microseconds.
704 * @param flags Mode of operation.
[fe19611]705 *
706 * @return An error code from errno.h or an error code from synch.h.
[da1bafb]707 *
[fe19611]708 */
[b7fd2a0]709errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
[fe19611]710{
[da1bafb]711 if (thread == THREAD)
[fe19611]712 return EINVAL;
[a35b458]713
[da1bafb]714 irq_spinlock_lock(&thread->lock, true);
[1871118]715 state_t state = thread->state;
[da1bafb]716 irq_spinlock_unlock(&thread->lock, true);
[a35b458]717
[1871118]718 if (state == Exiting) {
719 return EOK;
[fe19611]720 } else {
[111b9b9]721 return _waitq_sleep_timeout(&thread->join_wq, usec, flags);
[fe19611]722 }
723}
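/*
 * Illustrative sketch (not part of this file): waiting for a previously
 * created thread to exit and then dropping the reference held to it.
 * The variable worker is hypothetical.
 *
 *	errno_t rc = thread_join(worker);
 *	// ... inspect rc if needed ...
 *	thread_put(worker);	// release our strong reference
 */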
724
[70527f1]725/** Thread usleep
726 *
727 * Suspend execution of the current thread.
728 *
729 * @param usec Number of microseconds to sleep.
730 *
[1b20da0]731 */
[7f1c620]732void thread_usleep(uint32_t usec)
[f761f1eb]733{
734 waitq_t wq;
[a35b458]735
[f761f1eb]736 waitq_initialize(&wq);
[a35b458]737
[111b9b9]738 (void) waitq_sleep_timeout(&wq, usec);
[f761f1eb]739}
740
[ef1eab7]741static void thread_print(thread_t *thread, bool additional)
[5dcee525]742{
[1ba37fa]743 uint64_t ucycles, kcycles;
744 char usuffix, ksuffix;
[da1bafb]745 order_suffix(thread->ucycles, &ucycles, &usuffix);
746 order_suffix(thread->kcycles, &kcycles, &ksuffix);
[a35b458]747
[577f042a]748 char *name;
749 if (str_cmp(thread->name, "uinit") == 0)
750 name = thread->task->name;
751 else
752 name = thread->name;
[a35b458]753
[ef1eab7]754 if (additional)
[c1b073b7]755 printf("%-8" PRIu64 " %p %p %9" PRIu64 "%c %9" PRIu64 "%c ",
[577f042a]756 thread->tid, thread->thread_code, thread->kstack,
757 ucycles, usuffix, kcycles, ksuffix);
[48dcc69]758 else
[c1b073b7]759 printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
[577f042a]760 thread->tid, name, thread, thread_states[thread->state],
[26aafe8]761 thread->task, thread->task->container);
[a35b458]762
[ef1eab7]763 if (additional) {
[48dcc69]764 if (thread->cpu)
765 printf("%-5u", thread->cpu->id);
766 else
767 printf("none ");
[a35b458]768
[48dcc69]769 if (thread->state == Sleeping) {
[c1b073b7]770 printf(" %p", thread->sleep_queue);
[48dcc69]771 }
[a35b458]772
[48dcc69]773 printf("\n");
[43b1e86]774 }
[5dcee525]775}
776
[da1bafb]777/** Print list of threads debug info
[48dcc69]778 *
779 * @param additional Print additional information.
[da1bafb]780 *
781 */
[48dcc69]782void thread_print_list(bool additional)
[55ab0f1]783{
[ef1eab7]784 thread_t *thread;
785
[1871118]786 /* Accessing system-wide threads list through thread_first()/thread_next(). */
[da1bafb]787 irq_spinlock_lock(&threads_lock, true);
[a35b458]788
[c1b073b7]789 if (sizeof(void *) <= 4) {
790 if (additional)
791 printf("[id ] [code ] [stack ] [ucycles ] [kcycles ]"
792 " [cpu] [waitqueue]\n");
793 else
794 printf("[id ] [name ] [address ] [state ] [task ]"
795 " [ctn]\n");
796 } else {
797 if (additional) {
798 printf("[id ] [code ] [stack ] [ucycles ] [kcycles ]"
799 " [cpu] [waitqueue ]\n");
800 } else
801 printf("[id ] [name ] [address ] [state ]"
802 " [task ] [ctn]\n");
803 }
[a35b458]804
[aab5e46]805 thread = thread_first();
806 while (thread != NULL) {
[ef1eab7]807 thread_print(thread, additional);
[aab5e46]808 thread = thread_next(thread);
[ef1eab7]809 }
[a35b458]810
[da1bafb]811 irq_spinlock_unlock(&threads_lock, true);
[55ab0f1]812}
[9f52563]813
[1871118]814static bool thread_exists(thread_t *thread)
[016acbe]815{
[ef1eab7]816 odlink_t *odlink = odict_find_eq(&threads, thread, NULL);
817 return odlink != NULL;
[016acbe]818}
819
[1871118]820/** Check whether the thread exists, and if so, return a reference to it.
821 */
822thread_t *thread_try_get(thread_t *thread)
823{
824 irq_spinlock_lock(&threads_lock, true);
825
826 if (thread_exists(thread)) {
827 /* Try to strengthen the reference. */
828 thread = thread_try_ref(thread);
829 } else {
830 thread = NULL;
831 }
832
833 irq_spinlock_unlock(&threads_lock, true);
834
835 return thread;
836}
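/*
 * Illustrative sketch (not part of this file): looking up a thread by ID and
 * upgrading the weak reference to a strong one, mirroring what
 * thread_stack_trace() does below. The variable id is hypothetical.
 *
 *	irq_spinlock_lock(&threads_lock, true);
 *	thread_t *t = thread_try_ref(thread_find_by_id(id));
 *	irq_spinlock_unlock(&threads_lock, true);
 *
 *	if (t != NULL) {
 *		// t now outlives the threads_lock critical section
 *		thread_put(t);
 *	}
 */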
837
[cce6acf]838/** Update accounting of current thread.
839 *
 840 * Note that thread_lock on THREAD must already be held and
 841 * interrupts must already be disabled.
842 *
[da1bafb]843 * @param user True to update user accounting, false for kernel.
844 *
[cce6acf]845 */
[a2a00e8]846void thread_update_accounting(bool user)
[cce6acf]847{
848 uint64_t time = get_cycle();
[1d432f9]849
[63e27ef]850 assert(interrupts_disabled());
851 assert(irq_spinlock_locked(&THREAD->lock));
[a35b458]852
[da1bafb]853 if (user)
[a2a00e8]854 THREAD->ucycles += time - THREAD->last_cycle;
[da1bafb]855 else
[a2a00e8]856 THREAD->kcycles += time - THREAD->last_cycle;
[a35b458]857
[cce6acf]858 THREAD->last_cycle = time;
859}
860
[e1b6742]861/** Find thread structure corresponding to thread ID.
862 *
 863 * The threads_lock must already be held by the caller of this function and
864 * interrupts must be disabled.
865 *
[1871118]866 * The returned reference is weak.
867 * If the caller needs to keep it, thread_try_ref() must be used to upgrade
868 * to a strong reference _before_ threads_lock is released.
869 *
[e1b6742]870 * @param id Thread ID.
871 *
872 * @return Thread structure address or NULL if there is no such thread ID.
873 *
874 */
875thread_t *thread_find_by_id(thread_id_t thread_id)
876{
[ef1eab7]877 thread_t *thread;
878
[63e27ef]879 assert(interrupts_disabled());
880 assert(irq_spinlock_locked(&threads_lock));
[a35b458]881
[aab5e46]882 thread = thread_first();
883 while (thread != NULL) {
[ef1eab7]884 if (thread->tid == thread_id)
885 return thread;
[a35b458]886
[aab5e46]887 thread = thread_next(thread);
[ef1eab7]888 }
[a35b458]889
[ef1eab7]890 return NULL;
[e1b6742]891}
892
[aab5e46]893/** Get count of threads.
894 *
895 * @return Number of threads in the system
896 */
897size_t thread_count(void)
898{
899 assert(interrupts_disabled());
900 assert(irq_spinlock_locked(&threads_lock));
901
902 return odict_count(&threads);
903}
904
905/** Get first thread.
906 *
907 * @return Pointer to first thread or @c NULL if there are none.
908 */
909thread_t *thread_first(void)
910{
911 odlink_t *odlink;
912
913 assert(interrupts_disabled());
914 assert(irq_spinlock_locked(&threads_lock));
915
916 odlink = odict_first(&threads);
917 if (odlink == NULL)
918 return NULL;
919
920 return odict_get_instance(odlink, thread_t, lthreads);
921}
922
923/** Get next thread.
924 *
925 * @param cur Current thread
926 * @return Pointer to next thread or @c NULL if there are no more threads.
927 */
928thread_t *thread_next(thread_t *cur)
929{
930 odlink_t *odlink;
931
932 assert(interrupts_disabled());
933 assert(irq_spinlock_locked(&threads_lock));
934
935 odlink = odict_next(&cur->lthreads, &threads);
936 if (odlink == NULL)
937 return NULL;
938
939 return odict_get_instance(odlink, thread_t, lthreads);
940}
941
[5b7a107]942#ifdef CONFIG_UDEBUG
943
[df58e44]944void thread_stack_trace(thread_id_t thread_id)
945{
946 irq_spinlock_lock(&threads_lock, true);
[1871118]947 thread_t *thread = thread_try_ref(thread_find_by_id(thread_id));
948 irq_spinlock_unlock(&threads_lock, true);
[a35b458]949
[df58e44]950 if (thread == NULL) {
951 printf("No such thread.\n");
952 return;
953 }
[a35b458]954
[df58e44]955 /*
956 * Schedule a stack trace to be printed
957 * just before the thread is scheduled next.
958 *
959 * If the thread is sleeping then try to interrupt
 960 * the sleep. Any request for printing a uspace stack
 961 * trace from within the kernel should always be
 962 * considered a last-resort debugging means, therefore
 963 * forcing the thread's sleep to be interrupted
 964 * is probably justifiable.
965 */
[a35b458]966
[1871118]967 irq_spinlock_lock(&thread->lock, true);
968
[df58e44]969 bool sleeping = false;
970 istate_t *istate = thread->udebug.uspace_state;
971 if (istate != NULL) {
972 printf("Scheduling thread stack trace.\n");
973 thread->btrace = true;
974 if (thread->state == Sleeping)
975 sleeping = true;
976 } else
977 printf("Thread interrupt state not available.\n");
[a35b458]978
[1871118]979 irq_spinlock_unlock(&thread->lock, true);
[a35b458]980
[df58e44]981 if (sleeping)
[111b9b9]982 thread_wakeup(thread);
[a35b458]983
[1871118]984 thread_put(thread);
[df58e44]985}
[e1b6742]986
[5b7a107]987#endif /* CONFIG_UDEBUG */
[e1b6742]988
[ef1eab7]989/** Get key function for the @c threads ordered dictionary.
990 *
991 * @param odlink Link
992 * @return Pointer to thread structure cast as 'void *'
993 */
994static void *threads_getkey(odlink_t *odlink)
995{
996 thread_t *thread = odict_get_instance(odlink, thread_t, lthreads);
997 return (void *) thread;
998}
999
1000/** Key comparison function for the @c threads ordered dictionary.
1001 *
1002 * @param a Pointer to thread A
1003 * @param b Pointer to thread B
 1004 * @return -1, 0, 1 iff pointer to A is greater than, equal to, less than B
1005 */
1006static int threads_cmp(void *a, void *b)
1007{
1008 if (a > b)
1009 return -1;
1010 else if (a == b)
1011 return 0;
1012 else
1013 return +1;
1014}
1015
[9f52563]1016/** Process syscall to create new thread.
1017 *
1018 */
[5a5269d]1019sys_errno_t sys_thread_create(uspace_ptr_uspace_arg_t uspace_uarg, uspace_ptr_char uspace_name,
1020 size_t name_len, uspace_ptr_thread_id_t uspace_thread_id)
[9f52563]1021{
[24345a5]1022 if (name_len > THREAD_NAME_BUFLEN - 1)
[7faabb7]1023 name_len = THREAD_NAME_BUFLEN - 1;
[a35b458]1024
[da1bafb]1025 char namebuf[THREAD_NAME_BUFLEN];
[b7fd2a0]1026 errno_t rc = copy_from_uspace(namebuf, uspace_name, name_len);
[a53ed3a]1027 if (rc != EOK)
[b7fd2a0]1028 return (sys_errno_t) rc;
[a35b458]1029
[b60c582]1030 namebuf[name_len] = 0;
[a35b458]1031
[4680ef5]1032 /*
1033 * In case of failure, kernel_uarg will be deallocated in this function.
1034 * In case of success, kernel_uarg will be freed in uinit().
1035 */
[da1bafb]1036 uspace_arg_t *kernel_uarg =
[11b285d]1037 (uspace_arg_t *) malloc(sizeof(uspace_arg_t));
[7473807]1038 if (!kernel_uarg)
1039 return (sys_errno_t) ENOMEM;
[a35b458]1040
[e3c762cd]1041 rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
[a53ed3a]1042 if (rc != EOK) {
[e3c762cd]1043 free(kernel_uarg);
[b7fd2a0]1044 return (sys_errno_t) rc;
[e3c762cd]1045 }
[a35b458]1046
[da1bafb]1047 thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
[6eef3c4]1048 THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf);
[da1bafb]1049 if (thread) {
[5a5269d]1050 if (uspace_thread_id) {
[da1bafb]1051 rc = copy_to_uspace(uspace_thread_id, &thread->tid,
1052 sizeof(thread->tid));
[a53ed3a]1053 if (rc != EOK) {
[d8431986]1054 /*
1055 * We have encountered a failure, but the thread
1056 * has already been created. We need to undo its
1057 * creation now.
1058 */
[a35b458]1059
[d8431986]1060 /*
[ea7890e7]1061 * The new thread structure is initialized, but
1062 * is still not visible to the system.
[d8431986]1063 * We can safely deallocate it.
1064 */
[82d515e9]1065 slab_free(thread_cache, thread);
[da1bafb]1066 free(kernel_uarg);
[a35b458]1067
[b7fd2a0]1068 return (sys_errno_t) rc;
[3bacee1]1069 }
[d8431986]1070 }
[a35b458]1071
[9a1b20c]1072#ifdef CONFIG_UDEBUG
[13964ef]1073 /*
1074 * Generate udebug THREAD_B event and attach the thread.
1075 * This must be done atomically (with the debug locks held),
1076 * otherwise we would either miss some thread or receive
1077 * THREAD_B events for threads that already existed
1078 * and could be detected with THREAD_READ before.
1079 */
[da1bafb]1080 udebug_thread_b_event_attach(thread, TASK);
[13964ef]1081#else
[da1bafb]1082 thread_attach(thread, TASK);
[9a1b20c]1083#endif
[da1bafb]1084 thread_ready(thread);
[a35b458]1085
[d8431986]1086 return 0;
[201abde]1087 } else
[0f250f9]1088 free(kernel_uarg);
[a35b458]1089
[b7fd2a0]1090 return (sys_errno_t) ENOMEM;
[9f52563]1091}
1092
1093/** Process syscall to terminate thread.
1094 *
1095 */
[b7fd2a0]1096sys_errno_t sys_thread_exit(int uspace_status)
[9f52563]1097{
[68091bd]1098 thread_exit();
[9f52563]1099}
[b45c443]1100
[3ce7f082]1101/** Syscall for getting TID.
1102 *
[201abde]1103 * @param uspace_thread_id Userspace address of an 8-byte buffer in which to store
 1104 * the current thread ID.
1105 *
1106 * @return 0 on success or an error code from @ref errno.h.
[da1bafb]1107 *
[b45c443]1108 */
[5a5269d]1109sys_errno_t sys_thread_get_id(uspace_ptr_thread_id_t uspace_thread_id)
[3ce7f082]1110{
1111 /*
1112 * No need to acquire lock on THREAD because tid
1113 * remains constant for the lifespan of the thread.
[da1bafb]1114 *
[3ce7f082]1115 */
[b7fd2a0]1116 return (sys_errno_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
[201abde]1117 sizeof(THREAD->tid));
[3ce7f082]1118}
[6f4495f5]1119
[d9ece1cb]1120/** Syscall wrapper for sleeping. */
[b7fd2a0]1121sys_errno_t sys_thread_usleep(uint32_t usec)
[d9ece1cb]1122{
[22e6802]1123 thread_usleep(usec);
[d9ece1cb]1124 return 0;
1125}
1126
[b7fd2a0]1127sys_errno_t sys_thread_udelay(uint32_t usec)
[7e7b791]1128{
[8d6c1f1]1129 delay(usec);
[7e7b791]1130 return 0;
1131}
1132
[3ce7f082]1133/** @}
1134 */