source: mainline/kernel/generic/src/proc/thread.c @ 94e75cf

Last change on this file since 94e75cf was 5a5269d, checked in by GitHub <noreply@…>, 6 years ago

Change type of uspace pointers in kernel from pointer type to numeric (#170)

From the kernel's perspective, userspace addresses are not valid pointers
and can only be used in calls to copy_to/from_uspace().
Therefore, we change the type of those arguments and variables to
uspace_addr_t, which is an alias for sysarg_t.

This allows the compiler to catch accidental direct accesses to
userspace addresses.

Additionally, to avoid losing the type information in code,
a macro uspace_ptr(type) is used that translates to uspace_addr_t.
It makes no functional difference, but allows keeping the type information
in the code in case we implement some sort of static checking for it in the future.

However, ccheck doesn't like that, so instead of using uspace_ptr(char),
we use uspace_ptr_char, which is defined as
#define uspace_ptr_char uspace_ptr(char).
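
As a minimal sketch of the scheme (the exact definitions in the kernel
headers may differ, and sys_example() is a hypothetical handler):

    /* Userspace addresses are plain numbers to the kernel. */
    typedef sysarg_t uspace_addr_t;

    /* Keeps the pointee type visible in the source while still
       compiling to the numeric type. */
    #define uspace_ptr(type) uspace_addr_t
    #define uspace_ptr_char uspace_ptr(char)

    /* A handler receives an address, not a pointer: dereferencing it
       directly will not compile, so the memory can only be reached
       through copy_from_uspace()/copy_to_uspace(). */
    sys_errno_t sys_example(uspace_ptr_char uspace_name, size_t name_len)
    {
        char namebuf[THREAD_NAME_BUFLEN];
        errno_t rc = copy_from_uspace(namebuf, uspace_name, name_len);
        return (sys_errno_t) rc;
    }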

  • Property mode set to 100644
File size: 26.8 KB
/*
 * Copyright (c) 2010 Jakub Jermar
 * Copyright (c) 2018 Jiri Svoboda
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_generic_proc
 * @{
 */

/**
 * @file
 * @brief Thread management functions.
 */

#include <assert.h>
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch/cycle.h>
#include <arch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/syswaitq.h>
#include <cpu.h>
#include <str.h>
#include <context.h>
#include <adt/list.h>
#include <adt/odict.h>
#include <time/clock.h>
#include <time/timeout.h>
#include <time/delay.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <mem.h>
#include <stdio.h>
#include <stdlib.h>
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>
#include <debug.h>

/** Thread states */
const char *thread_states[] = {
	"Invalid",
	"Running",
	"Sleeping",
	"Ready",
	"Entering",
	"Exiting",
	"Lingering"
};

/** Lock protecting the @c threads ordered dictionary.
 *
 * For locking rules, see the declaration thereof.
 */
IRQ_SPINLOCK_INITIALIZE(threads_lock);

/** Ordered dictionary of all threads by their address (i.e. pointer to
 * the thread_t structure).
 *
 * When a thread is found in the @c threads ordered dictionary, it is
 * guaranteed to exist as long as the @c threads_lock is held.
 *
 * Members are of type thread_t.
 */
odict_t threads;

IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
static thread_id_t last_tid = 0;

static slab_cache_t *thread_cache;

#ifdef CONFIG_FPU
slab_cache_t *fpu_context_cache;
#endif

static void *threads_getkey(odlink_t *);
static int threads_cmp(void *, void *);

/** Thread wrapper.
 *
 * This wrapper is provided to ensure that every thread makes a call to
 * thread_exit() when its implementing function returns.
 *
 * interrupts_disable() is assumed.
 *
 */
static void cushion(void)
{
	void (*f)(void *) = THREAD->thread_code;
	void *arg = THREAD->thread_arg;
	THREAD->last_cycle = get_cycle();

	/* This is where each thread wakes up after its creation */
	irq_spinlock_unlock(&THREAD->lock, false);
	interrupts_enable();

	f(arg);

	/* Accumulate accounting to the task */
	irq_spinlock_lock(&THREAD->lock, true);
	if (!THREAD->uncounted) {
		thread_update_accounting(true);
		uint64_t ucycles = THREAD->ucycles;
		THREAD->ucycles = 0;
		uint64_t kcycles = THREAD->kcycles;
		THREAD->kcycles = 0;

		irq_spinlock_pass(&THREAD->lock, &TASK->lock);
		TASK->ucycles += ucycles;
		TASK->kcycles += kcycles;
		irq_spinlock_unlock(&TASK->lock, true);
	} else
		irq_spinlock_unlock(&THREAD->lock, true);

	thread_exit();

	/* Not reached */
}

/** Initialization and allocation for thread_t structure
 *
 */
static errno_t thr_constructor(void *obj, unsigned int kmflags)
{
	thread_t *thread = (thread_t *) obj;

	irq_spinlock_initialize(&thread->lock, "thread_t_lock");
	link_initialize(&thread->rq_link);
	link_initialize(&thread->wq_link);
	link_initialize(&thread->th_link);

	/* Call the architecture-specific part of the constructor */
	thr_constructor_arch(thread);

#ifdef CONFIG_FPU
	thread->saved_fpu_context = slab_alloc(fpu_context_cache,
	    FRAME_ATOMIC | kmflags);
	if (!thread->saved_fpu_context)
		return ENOMEM;
#endif /* CONFIG_FPU */

	/*
	 * Allocate the kernel stack from low memory to prevent an infinite
	 * nesting of TLB-misses when accessing the stack from the part of the
	 * TLB-miss handler written in C.
	 *
	 * Note that low memory is safe to be used for the stack as it will be
	 * covered by the kernel identity mapping, which guarantees not to
	 * nest TLB-misses infinitely (either via some hardware mechanism or
	 * by the construction of the assembly-language part of the TLB-miss
	 * handler).
	 *
	 * This restriction can be lifted once each architecture provides
	 * a similar guarantee, for example, by locking the kernel stack
	 * in the TLB whenever it is allocated from high memory and the
	 * thread is being scheduled to run.
	 */
	kmflags |= FRAME_LOWMEM;
	kmflags &= ~FRAME_HIGHMEM;

	// NOTE: All kernel stacks must be aligned to STACK_SIZE,
	// see get_stack_base().

	uintptr_t stack_phys =
	    frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);
	if (!stack_phys) {
#ifdef CONFIG_FPU
		assert(thread->saved_fpu_context);
		slab_free(fpu_context_cache, thread->saved_fpu_context);
#endif
		return ENOMEM;
	}

	thread->kstack = (uint8_t *) PA2KA(stack_phys);

#ifdef CONFIG_UDEBUG
	mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
#endif

	return EOK;
}

/** Destruction of thread_t object */
static size_t thr_destructor(void *obj)
{
	thread_t *thread = (thread_t *) obj;

	/* Call the architecture-specific part of the destructor */
	thr_destructor_arch(thread);

	frame_free(KA2PA(thread->kstack), STACK_FRAMES);

#ifdef CONFIG_FPU
	assert(thread->saved_fpu_context);
	slab_free(fpu_context_cache, thread->saved_fpu_context);
#endif

	return STACK_FRAMES; /* number of frames freed */
}

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
	THREAD = NULL;

	atomic_store(&nrdy, 0);
	thread_cache = slab_cache_create("thread_t", sizeof(thread_t), 0,
	    thr_constructor, thr_destructor, 0);

#ifdef CONFIG_FPU
	fpu_context_cache = slab_cache_create("fpu_context_t",
	    sizeof(fpu_context_t), FPU_CONTEXT_ALIGN, NULL, NULL, 0);
#endif

	odict_initialize(&threads, threads_getkey, threads_cmp);
}

/** Wire thread to the given CPU
 *
 * @param thread Thread to be wired.
 * @param cpu    CPU to wire the thread to.
 *
 */
void thread_wire(thread_t *thread, cpu_t *cpu)
{
	irq_spinlock_lock(&thread->lock, true);
	thread->cpu = cpu;
	thread->wired = true;
	irq_spinlock_unlock(&thread->lock, true);
}

/** Invoked right before thread_ready() readies the thread. Thread is locked. */
static void before_thread_is_ready(thread_t *thread)
{
	assert(irq_spinlock_locked(&thread->lock));
}

/** Make thread ready
 *
 * Switch thread to the ready state.
 *
 * @param thread Thread to make ready.
 *
 */
void thread_ready(thread_t *thread)
{
	irq_spinlock_lock(&thread->lock, true);

	assert(thread->state != Ready);

	before_thread_is_ready(thread);

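	/*
	 * Pick the run queue one priority level below the thread's previous
	 * one (a higher index means a lower priority), saturating at the
	 * lowest-priority queue, RQ_COUNT - 1.
	 */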
	int i = (thread->priority < RQ_COUNT - 1) ?
	    ++thread->priority : thread->priority;

	cpu_t *cpu;
	if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) {
		/* Cannot ready to another CPU */
		assert(thread->cpu != NULL);
		cpu = thread->cpu;
	} else if (thread->stolen) {
		/* Ready to the stealing CPU */
		cpu = CPU;
	} else if (thread->cpu) {
		/* Prefer the CPU on which the thread ran last */
		assert(thread->cpu != NULL);
		cpu = thread->cpu;
	} else {
		cpu = CPU;
	}

	thread->state = Ready;

	irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));

	/*
	 * Append thread to respective ready queue
	 * on respective processor.
	 */

	list_append(&thread->rq_link, &cpu->rq[i].rq);
	cpu->rq[i].n++;
	irq_spinlock_unlock(&(cpu->rq[i].lock), true);

	atomic_inc(&nrdy);
	atomic_inc(&cpu->nrdy);
}

/** Create new thread
 *
 * Create a new thread.
 *
 * @param func  Thread's implementing function.
 * @param arg   Thread's implementing function argument.
 * @param task  Task to which the thread belongs. The caller must
 *              guarantee that the task won't cease to exist during the
 *              call. The task's lock may not be held.
 * @param flags Thread flags.
 * @param name  Symbolic name (a copy is made).
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (*func)(void *), void *arg, task_t *task,
    thread_flags_t flags, const char *name)
{
	thread_t *thread = (thread_t *) slab_alloc(thread_cache, FRAME_ATOMIC);
	if (!thread)
		return NULL;

	if (thread_create_arch(thread, flags) != EOK) {
		slab_free(thread_cache, thread);
		return NULL;
	}

	/* Not needed, but good for debugging */
	memsetb(thread->kstack, STACK_SIZE, 0);

	irq_spinlock_lock(&tidlock, true);
	thread->tid = ++last_tid;
	irq_spinlock_unlock(&tidlock, true);

	memset(&thread->saved_context, 0, sizeof(thread->saved_context));
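	/*
	 * Set up the initial context: the thread begins executing in
	 * cushion() on its own kernel stack when it is first scheduled.
	 */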
	context_set(&thread->saved_context, FADDR(cushion),
	    (uintptr_t) thread->kstack, STACK_SIZE);

	current_initialize((current_t *) thread->kstack);

	ipl_t ipl = interrupts_disable();
	thread->saved_context.ipl = interrupts_read();
	interrupts_restore(ipl);

	str_cpy(thread->name, THREAD_NAME_BUFLEN, name);

	thread->thread_code = func;
	thread->thread_arg = arg;
	thread->ticks = -1;
	thread->ucycles = 0;
	thread->kcycles = 0;
	thread->uncounted =
	    ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
	thread->priority = -1; /* Start in rq[0] */
	thread->cpu = NULL;
	thread->wired = false;
	thread->stolen = false;
	thread->uspace =
	    ((flags & THREAD_FLAG_USPACE) == THREAD_FLAG_USPACE);

	thread->nomigrate = 0;
	thread->state = Entering;

	timeout_initialize(&thread->sleep_timeout);
	thread->sleep_interruptible = false;
	thread->sleep_composable = false;
	thread->sleep_queue = NULL;
	thread->timeout_pending = false;

	thread->in_copy_from_uspace = false;
	thread->in_copy_to_uspace = false;

	thread->interrupted = false;
	thread->detached = false;
	waitq_initialize(&thread->join_wq);

	thread->task = task;

	thread->fpu_context_exists = false;
	thread->fpu_context_engaged = false;

	odlink_initialize(&thread->lthreads);

#ifdef CONFIG_UDEBUG
	/* Initialize debugging stuff */
	thread->btrace = false;
	udebug_thread_initialize(&thread->udebug);
#endif

	if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
		thread_attach(thread, task);

	return thread;
}

/** Destroy thread memory structure
 *
 * Detach thread from all queues, CPUs, etc., and destroy it.
 *
 * @param thread  Thread to be destroyed.
 * @param irq_res Indicate whether it should unlock thread->lock
 *                in interrupts-restore mode.
 *
 */
void thread_destroy(thread_t *thread, bool irq_res)
{
	assert(irq_spinlock_locked(&thread->lock));
	assert((thread->state == Exiting) || (thread->state == Lingering));
	assert(thread->task);
	assert(thread->cpu);

	irq_spinlock_lock(&thread->cpu->lock, false);
	if (thread->cpu->fpu_owner == thread)
		thread->cpu->fpu_owner = NULL;
	irq_spinlock_unlock(&thread->cpu->lock, false);

	irq_spinlock_pass(&thread->lock, &threads_lock);

	odict_remove(&thread->lthreads);

	irq_spinlock_pass(&threads_lock, &thread->task->lock);

	/*
	 * Detach from the containing task.
	 */
	list_remove(&thread->th_link);
	irq_spinlock_unlock(&thread->task->lock, irq_res);

	/*
	 * Drop the reference to the containing task.
	 */
	task_release(thread->task);
	slab_free(thread_cache, thread);
}

/** Make the thread visible to the system.
 *
 * Attach the thread structure to the current task and make it visible in the
 * @c threads ordered dictionary.
 *
 * @param thread Thread to be attached to the task.
 * @param task   Task to which the thread is to be attached.
 *
 */
void thread_attach(thread_t *thread, task_t *task)
{
	/*
	 * Attach to the specified task.
	 */
	irq_spinlock_lock(&task->lock, true);

	/* Hold a reference to the task. */
	task_hold(task);

	/* Must not count kbox thread into lifecount */
	if (thread->uspace)
		atomic_inc(&task->lifecount);

	list_append(&thread->th_link, &task->threads);

	irq_spinlock_pass(&task->lock, &threads_lock);

	/*
	 * Register this thread in the system-wide dictionary.
	 */
	odict_insert(&thread->lthreads, &threads, NULL);
	irq_spinlock_unlock(&threads_lock, true);
}

/** Terminate thread.
 *
 * End current thread execution and switch it to the exiting state.
 * All pending timeouts are executed.
 *
 */
void thread_exit(void)
{
	if (THREAD->uspace) {
#ifdef CONFIG_UDEBUG
		/* Generate udebug THREAD_E event */
		udebug_thread_e_event();

		/*
		 * This thread will not execute any code or system calls from
		 * now on.
		 */
		udebug_stoppable_begin();
#endif
		if (atomic_predec(&TASK->lifecount) == 0) {
			/*
			 * We are the last userspace thread in the task that
			 * still has not exited. With the exception of the
			 * moment the task was created, new userspace threads
			 * can only be created by threads of the same task.
			 * We are safe to perform cleanup.
			 */
			ipc_cleanup();
			sys_waitq_task_cleanup();
			LOG("Cleanup of task %" PRIu64 " completed.", TASK->taskid);
		}
	}

restart:
	irq_spinlock_lock(&THREAD->lock, true);
	if (THREAD->timeout_pending) {
		/* Busy waiting for timeouts in progress */
		irq_spinlock_unlock(&THREAD->lock, true);
		goto restart;
	}

	THREAD->state = Exiting;
	irq_spinlock_unlock(&THREAD->lock, true);

	scheduler();

	/* Not reached */
	while (true)
		;
}

/** Interrupts an existing thread so that it may exit as soon as possible.
 *
 * Threads that are blocked waiting for a synchronization primitive
 * are woken up with a return code of EINTR if the
 * blocking call was interruptible. See waitq_sleep_timeout().
 *
 * The caller must guarantee the thread object is valid during the entire
 * function, e.g., by holding the threads_lock lock.
 *
 * Interrupted threads automatically exit when returning back to user space.
 *
 * @param thread A valid thread object. The caller must guarantee it
 *               will remain valid until thread_interrupt() exits.
 */
void thread_interrupt(thread_t *thread)
{
	assert(thread != NULL);

	irq_spinlock_lock(&thread->lock, true);

	thread->interrupted = true;
	bool sleeping = (thread->state == Sleeping);

	irq_spinlock_unlock(&thread->lock, true);

	if (sleeping)
		waitq_interrupt_sleep(thread);
}

/** Returns true if the thread was interrupted.
 *
 * @param thread A valid thread object. The caller must guarantee it will
 *               be alive during the entire call.
 * @return true if the thread was already interrupted via thread_interrupt().
 */
bool thread_interrupted(thread_t *thread)
{
	assert(thread != NULL);

	bool interrupted;

	irq_spinlock_lock(&thread->lock, true);
	interrupted = thread->interrupted;
	irq_spinlock_unlock(&thread->lock, true);

	return interrupted;
}

/** Prevent the current thread from being migrated to another processor. */
void thread_migration_disable(void)
{
	assert(THREAD);

	THREAD->nomigrate++;
}

/** Allow the current thread to be migrated to another processor. */
void thread_migration_enable(void)
{
	assert(THREAD);
	assert(THREAD->nomigrate > 0);

	if (THREAD->nomigrate > 0)
		THREAD->nomigrate--;
}

/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(uint32_t sec)
{
	/*
	 * Sleep in 1000-second steps to support
	 * the full argument range
	 */
	while (sec > 0) {
		uint32_t period = (sec > 1000) ? 1000 : sec;

		thread_usleep(period * 1000000);
		sec -= period;
	}
}

/** Wait for another thread to exit.
 *
 * @param thread Thread to join on exit.
 * @param usec   Timeout in microseconds.
 * @param flags  Mode of operation.
 *
 * @return An error code from errno.h or an error code from synch.h.
 *
 */
errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
{
	if (thread == THREAD)
		return EINVAL;

	/*
	 * Since thread join can only be called once on an undetached thread,
	 * the thread pointer is guaranteed to be still valid.
	 */

	irq_spinlock_lock(&thread->lock, true);
	assert(!thread->detached);
	irq_spinlock_unlock(&thread->lock, true);

	return waitq_sleep_timeout(&thread->join_wq, usec, flags, NULL);

	// FIXME: join should deallocate the thread.
	// Current code calls detach after join, that's contrary to how
	// join is used in other threading APIs.
}

/** Detach thread.
 *
 * Mark the thread as detached. If the thread is already
 * in the Lingering state, deallocate its resources.
 *
 * @param thread Thread to be detached.
 *
 */
void thread_detach(thread_t *thread)
{
	/*
	 * Since the thread is expected not to be already detached,
	 * pointer to it must be still valid.
	 */
	irq_spinlock_lock(&thread->lock, true);
	assert(!thread->detached);

	if (thread->state == Lingering) {
		/*
		 * Unlock &thread->lock and restore
		 * interrupts in thread_destroy().
		 */
		thread_destroy(thread, true);
		return;
	} else {
		thread->detached = true;
	}

	irq_spinlock_unlock(&thread->lock, true);
}

/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(uint32_t usec)
{
	waitq_t wq;

	waitq_initialize(&wq);

	(void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
}

static void thread_print(thread_t *thread, bool additional)
{
	uint64_t ucycles, kcycles;
	char usuffix, ksuffix;
	order_suffix(thread->ucycles, &ucycles, &usuffix);
	order_suffix(thread->kcycles, &kcycles, &ksuffix);

	char *name;
	if (str_cmp(thread->name, "uinit") == 0)
		name = thread->task->name;
	else
		name = thread->name;

#ifdef __32_BITS__
	if (additional)
		printf("%-8" PRIu64 " %10p %10p %9" PRIu64 "%c %9" PRIu64 "%c ",
		    thread->tid, thread->thread_code, thread->kstack,
		    ucycles, usuffix, kcycles, ksuffix);
	else
		printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 "\n",
		    thread->tid, name, thread, thread_states[thread->state],
		    thread->task, thread->task->container);
#endif

#ifdef __64_BITS__
	if (additional)
		printf("%-8" PRIu64 " %18p %18p\n"
		    " %9" PRIu64 "%c %9" PRIu64 "%c ",
		    thread->tid, thread->thread_code, thread->kstack,
		    ucycles, usuffix, kcycles, ksuffix);
	else
		printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n",
		    thread->tid, name, thread, thread_states[thread->state],
		    thread->task, thread->task->container);
#endif

	if (additional) {
		if (thread->cpu)
			printf("%-5u", thread->cpu->id);
		else
			printf("none ");

		if (thread->state == Sleeping) {
#ifdef __32_BITS__
			printf(" %10p", thread->sleep_queue);
#endif

#ifdef __64_BITS__
			printf(" %18p", thread->sleep_queue);
#endif
		}

		printf("\n");
	}
}

/** Print list of threads and their debug info
 *
 * @param additional Print additional information.
 *
 */
void thread_print_list(bool additional)
{
	thread_t *thread;

	/* Messing with thread structures, avoid deadlock */
	irq_spinlock_lock(&threads_lock, true);

#ifdef __32_BITS__
	if (additional)
		printf("[id ] [code ] [stack ] [ucycles ] [kcycles ]"
		    " [cpu] [waitqueue]\n");
	else
		printf("[id ] [name ] [address ] [state ] [task ]"
		    " [ctn]\n");
#endif

#ifdef __64_BITS__
	if (additional) {
		printf("[id ] [code ] [stack ]\n"
		    " [ucycles ] [kcycles ] [cpu] [waitqueue ]\n");
	} else
		printf("[id ] [name ] [address ] [state ]"
		    " [task ] [ctn]\n");
#endif

	thread = thread_first();
	while (thread != NULL) {
		thread_print(thread, additional);
		thread = thread_next(thread);
	}

	irq_spinlock_unlock(&threads_lock, true);
}

/** Check whether thread exists.
 *
 * Note that threads_lock must be already held and
 * interrupts must be already disabled.
 *
 * @param thread Pointer to thread.
 *
 * @return True if the thread is known to the system, false otherwise.
 *
 */
bool thread_exists(thread_t *thread)
{
	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

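	/* The thread's own address serves as its key in the dictionary. */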
	odlink_t *odlink = odict_find_eq(&threads, thread, NULL);
	return odlink != NULL;
}

/** Update accounting of current thread.
 *
 * Note that THREAD->lock must be already held and
 * interrupts must be already disabled.
 *
 * @param user True to update user accounting, false for kernel.
 *
 */
void thread_update_accounting(bool user)
{
	uint64_t time = get_cycle();

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&THREAD->lock));

	if (user)
		THREAD->ucycles += time - THREAD->last_cycle;
	else
		THREAD->kcycles += time - THREAD->last_cycle;

	THREAD->last_cycle = time;
}

/** Find thread structure corresponding to thread ID.
 *
 * The threads_lock must be already held by the caller of this function and
 * interrupts must be disabled.
 *
 * @param thread_id Thread ID.
 *
 * @return Thread structure address or NULL if there is no such thread ID.
 *
 */
thread_t *thread_find_by_id(thread_id_t thread_id)
{
	thread_t *thread;

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	thread = thread_first();
	while (thread != NULL) {
		if (thread->tid == thread_id)
			return thread;

		thread = thread_next(thread);
	}

	return NULL;
}

/** Get count of threads.
 *
 * @return Number of threads in the system.
 */
size_t thread_count(void)
{
	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	return odict_count(&threads);
}

/** Get first thread.
 *
 * @return Pointer to first thread or @c NULL if there are none.
 */
thread_t *thread_first(void)
{
	odlink_t *odlink;

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	odlink = odict_first(&threads);
	if (odlink == NULL)
		return NULL;

	return odict_get_instance(odlink, thread_t, lthreads);
}

/** Get next thread.
 *
 * @param cur Current thread
 * @return Pointer to next thread or @c NULL if there are no more threads.
 */
thread_t *thread_next(thread_t *cur)
{
	odlink_t *odlink;

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	odlink = odict_next(&cur->lthreads, &threads);
	if (odlink == NULL)
		return NULL;

	return odict_get_instance(odlink, thread_t, lthreads);
}

#ifdef CONFIG_UDEBUG

void thread_stack_trace(thread_id_t thread_id)
{
	irq_spinlock_lock(&threads_lock, true);

	thread_t *thread = thread_find_by_id(thread_id);
	if (thread == NULL) {
		printf("No such thread.\n");
		irq_spinlock_unlock(&threads_lock, true);
		return;
	}

	irq_spinlock_lock(&thread->lock, false);

	/*
	 * Schedule a stack trace to be printed
	 * just before the thread is scheduled next.
	 *
	 * If the thread is sleeping then try to interrupt
	 * the sleep. Any request for printing a uspace stack
	 * trace from within the kernel should always be
	 * considered a last-resort debugging means, therefore
	 * forcing the thread's sleep to be interrupted
	 * is probably justifiable.
	 */

	bool sleeping = false;
	istate_t *istate = thread->udebug.uspace_state;
	if (istate != NULL) {
		printf("Scheduling thread stack trace.\n");
		thread->btrace = true;
		if (thread->state == Sleeping)
			sleeping = true;
	} else
		printf("Thread interrupt state not available.\n");

	irq_spinlock_unlock(&thread->lock, false);

	if (sleeping)
		waitq_interrupt_sleep(thread);

	irq_spinlock_unlock(&threads_lock, true);
}

#endif /* CONFIG_UDEBUG */

/** Get key function for the @c threads ordered dictionary.
 *
 * @param odlink Link
 * @return Pointer to thread structure cast as 'void *'
 */
static void *threads_getkey(odlink_t *odlink)
{
	thread_t *thread = odict_get_instance(odlink, thread_t, lthreads);
	return (void *) thread;
}

/** Key comparison function for the @c threads ordered dictionary.
 *
 * @param a Pointer to thread A
 * @param b Pointer to thread B
 * @return -1, 0, 1 iff pointer to A is greater than, equal to, less than B
 */
static int threads_cmp(void *a, void *b)
{
	if (a > b)
		return -1;
	else if (a == b)
		return 0;
	else
		return +1;
}

/** Process syscall to create new thread.
 *
 */
sys_errno_t sys_thread_create(uspace_ptr_uspace_arg_t uspace_uarg, uspace_ptr_char uspace_name,
    size_t name_len, uspace_ptr_thread_id_t uspace_thread_id)
{
	if (name_len > THREAD_NAME_BUFLEN - 1)
		name_len = THREAD_NAME_BUFLEN - 1;

	char namebuf[THREAD_NAME_BUFLEN];
	errno_t rc = copy_from_uspace(namebuf, uspace_name, name_len);
	if (rc != EOK)
		return (sys_errno_t) rc;

	namebuf[name_len] = 0;

	/*
	 * In case of failure, kernel_uarg will be deallocated in this function.
	 * In case of success, kernel_uarg will be freed in uinit().
	 */
	uspace_arg_t *kernel_uarg =
	    (uspace_arg_t *) malloc(sizeof(uspace_arg_t));
	if (!kernel_uarg)
		return (sys_errno_t) ENOMEM;

	rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
	if (rc != EOK) {
		free(kernel_uarg);
		return (sys_errno_t) rc;
	}

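	/*
	 * Create the thread unattached (THREAD_FLAG_NOATTACH) so that its
	 * creation can still be undone if copying the TID back to userspace
	 * fails below.
	 */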
	thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
	    THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf);
	if (thread) {
		if (uspace_thread_id) {
			rc = copy_to_uspace(uspace_thread_id, &thread->tid,
			    sizeof(thread->tid));
			if (rc != EOK) {
				/*
				 * We have encountered a failure, but the thread
				 * has already been created. We need to undo its
				 * creation now.
				 */

				/*
				 * The new thread structure is initialized, but
				 * is still not visible to the system.
				 * We can safely deallocate it.
				 */
				slab_free(thread_cache, thread);
				free(kernel_uarg);

				return (sys_errno_t) rc;
			}
		}

#ifdef CONFIG_UDEBUG
		/*
		 * Generate udebug THREAD_B event and attach the thread.
		 * This must be done atomically (with the debug locks held),
		 * otherwise we would either miss some thread or receive
		 * THREAD_B events for threads that already existed
		 * and could be detected with THREAD_READ before.
		 */
		udebug_thread_b_event_attach(thread, TASK);
#else
		thread_attach(thread, TASK);
#endif
		thread_ready(thread);

		return 0;
	} else
		free(kernel_uarg);

	return (sys_errno_t) ENOMEM;
}

/** Process syscall to terminate thread.
 *
 */
sys_errno_t sys_thread_exit(int uspace_status)
{
	thread_exit();
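	/* Unreachable: thread_exit() never returns. */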
}

/** Syscall for getting TID.
 *
 * @param uspace_thread_id Userspace address of 8-byte buffer where to store
 *                         current thread ID.
 *
 * @return 0 on success or an error code from @ref errno.h.
 *
 */
sys_errno_t sys_thread_get_id(uspace_ptr_thread_id_t uspace_thread_id)
{
	/*
	 * No need to acquire lock on THREAD because tid
	 * remains constant for the lifespan of the thread.
	 */
	return (sys_errno_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
	    sizeof(THREAD->tid));
}

/** Syscall wrapper for sleeping. */
sys_errno_t sys_thread_usleep(uint32_t usec)
{
	thread_usleep(usec);
	return 0;
}

sys_errno_t sys_thread_udelay(uint32_t usec)
{
	delay(usec);
	return 0;
}

/** @}
 */