source: mainline/kernel/generic/src/proc/thread.c@c1b073b7

Last change on this file was c1b073b7, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 3 years ago

Remove some unnecessary #ifdefs

%p does not care about the specified number of digits,
so that distinction was unnecessary to begin with.
Splitting the line for 64-bit pointers is also more harmful
than helpful; the line is not that long.
For spacing, the #ifdef was replaced with a regular C if.
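
For illustration, the shape of the change (an editor's sketch reconstructed
from the description and from thread_print_list() below; the exact pre-change
lines are not shown on this page):

    /* Before (sketch): compile-time distinction between pointer widths */
    #ifdef __32_BITS__
    	printf("[id    ] ... [waitqueue]\n");
    #endif
    #ifdef __64_BITS__
    	printf("[id    ] ... [waitqueue        ]\n");
    #endif

    /* After: an ordinary C conditional; sizeof(void *) is a compile-time
       constant, so the compiler folds the test and the dead branch away. */
    if (sizeof(void *) <= 4)
    	printf("[id    ] ... [waitqueue]\n");
    else
    	printf("[id    ] ... [waitqueue        ]\n");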

/*
 * Copyright (c) 2010 Jakub Jermar
 * Copyright (c) 2018 Jiri Svoboda
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_generic_proc
 * @{
 */

/**
 * @file
 * @brief Thread management functions.
 */

#include <assert.h>
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch/cycle.h>
#include <arch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/syswaitq.h>
#include <cpu.h>
#include <str.h>
#include <context.h>
#include <adt/list.h>
#include <adt/odict.h>
#include <time/clock.h>
#include <time/timeout.h>
#include <time/delay.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <mem.h>
#include <stdio.h>
#include <stdlib.h>
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>
#include <debug.h>

/** Thread states */
const char *thread_states[] = {
	"Invalid",
	"Running",
	"Sleeping",
	"Ready",
	"Entering",
	"Exiting",
	"Lingering"
};

/** Lock protecting the @c threads ordered dictionary.
 *
 * For locking rules, see declaration thereof.
 */
IRQ_SPINLOCK_INITIALIZE(threads_lock);

/** Ordered dictionary of all threads by their address (i.e. pointer to
 * the thread_t structure).
 *
 * When a thread is found in the @c threads ordered dictionary, it is
 * guaranteed to exist as long as the @c threads_lock is held.
 *
 * Members are of type thread_t.
 */
odict_t threads;

IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
static thread_id_t last_tid = 0;

static slab_cache_t *thread_cache;

#ifdef CONFIG_FPU
slab_cache_t *fpu_context_cache;
#endif

static void *threads_getkey(odlink_t *);
static int threads_cmp(void *, void *);

/** Thread wrapper.
 *
 * This wrapper is provided to ensure that every thread makes a call to
 * thread_exit() when its implementing function returns.
 *
 * interrupts_disable() is assumed.
 *
 */
static void cushion(void)
{
	void (*f)(void *) = THREAD->thread_code;
	void *arg = THREAD->thread_arg;
	THREAD->last_cycle = get_cycle();

	/* This is where each thread wakes up after its creation */
	irq_spinlock_unlock(&THREAD->lock, false);
	interrupts_enable();

	f(arg);

	/* Accumulate accounting to the task */
	irq_spinlock_lock(&THREAD->lock, true);
	if (!THREAD->uncounted) {
		thread_update_accounting(true);
		uint64_t ucycles = THREAD->ucycles;
		THREAD->ucycles = 0;
		uint64_t kcycles = THREAD->kcycles;
		THREAD->kcycles = 0;

		irq_spinlock_pass(&THREAD->lock, &TASK->lock);
		TASK->ucycles += ucycles;
		TASK->kcycles += kcycles;
		irq_spinlock_unlock(&TASK->lock, true);
	} else
		irq_spinlock_unlock(&THREAD->lock, true);

	thread_exit();

	/* Not reached */
}

/** Initialization and allocation for thread_t structure
 *
 */
static errno_t thr_constructor(void *obj, unsigned int kmflags)
{
	thread_t *thread = (thread_t *) obj;

	irq_spinlock_initialize(&thread->lock, "thread_t_lock");
	link_initialize(&thread->rq_link);
	link_initialize(&thread->wq_link);
	link_initialize(&thread->th_link);

	/* call the architecture-specific part of the constructor */
	thr_constructor_arch(thread);

#ifdef CONFIG_FPU
	thread->saved_fpu_context = slab_alloc(fpu_context_cache,
	    FRAME_ATOMIC | kmflags);
	if (!thread->saved_fpu_context)
		return ENOMEM;
#endif /* CONFIG_FPU */

	/*
	 * Allocate the kernel stack from low memory to prevent infinite
	 * nesting of TLB misses when accessing the stack from the part of the
	 * TLB-miss handler written in C.
	 *
	 * Note that low memory is safe to use for the stack as it will be
	 * covered by the kernel identity mapping, which guarantees not to
	 * nest TLB misses infinitely (either via some hardware mechanism or
	 * by the construction of the assembly-language part of the TLB-miss
	 * handler).
	 *
	 * This restriction can be lifted once each architecture provides
	 * a similar guarantee, for example, by locking the kernel stack
	 * in the TLB whenever it is allocated from high memory and the
	 * thread is being scheduled to run.
	 */
	kmflags |= FRAME_LOWMEM;
	kmflags &= ~FRAME_HIGHMEM;

	/*
	 * NOTE: All kernel stacks must be aligned to STACK_SIZE,
	 * see CURRENT.
	 */

	uintptr_t stack_phys =
	    frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);
	if (!stack_phys) {
#ifdef CONFIG_FPU
		assert(thread->saved_fpu_context);
		slab_free(fpu_context_cache, thread->saved_fpu_context);
#endif
		return ENOMEM;
	}

	thread->kstack = (uint8_t *) PA2KA(stack_phys);

#ifdef CONFIG_UDEBUG
	mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
#endif

	return EOK;
}

/** Destruction of thread_t object */
static size_t thr_destructor(void *obj)
{
	thread_t *thread = (thread_t *) obj;

	/* call the architecture-specific part of the destructor */
	thr_destructor_arch(thread);

	frame_free(KA2PA(thread->kstack), STACK_FRAMES);

#ifdef CONFIG_FPU
	assert(thread->saved_fpu_context);
	slab_free(fpu_context_cache, thread->saved_fpu_context);
#endif

	return STACK_FRAMES; /* number of frames freed */
}

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
	THREAD = NULL;

	atomic_store(&nrdy, 0);
	thread_cache = slab_cache_create("thread_t", sizeof(thread_t), 0,
	    thr_constructor, thr_destructor, 0);

#ifdef CONFIG_FPU
	fpu_context_cache = slab_cache_create("fpu_context_t",
	    sizeof(fpu_context_t), FPU_CONTEXT_ALIGN, NULL, NULL, 0);
#endif

	odict_initialize(&threads, threads_getkey, threads_cmp);
}

/** Wire thread to the given CPU
 *
 * @param thread Thread to wire.
 * @param cpu    CPU to wire the thread to.
 *
 */
void thread_wire(thread_t *thread, cpu_t *cpu)
{
	irq_spinlock_lock(&thread->lock, true);
	thread->cpu = cpu;
	thread->wired = true;
	irq_spinlock_unlock(&thread->lock, true);
}

/** Invoked right before thread_ready() readies the thread. Thread is locked. */
static void before_thread_is_ready(thread_t *thread)
{
	assert(irq_spinlock_locked(&thread->lock));
}

/** Make thread ready
 *
 * Switch thread to the ready state.
 *
 * @param thread Thread to make ready.
 *
 */
void thread_ready(thread_t *thread)
{
	irq_spinlock_lock(&thread->lock, true);

	assert(thread->state != Ready);

	before_thread_is_ready(thread);

	int i = (thread->priority < RQ_COUNT - 1) ?
	    ++thread->priority : thread->priority;

	cpu_t *cpu;
	if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) {
		/* Cannot ready to another CPU */
		assert(thread->cpu != NULL);
		cpu = thread->cpu;
	} else if (thread->stolen) {
		/* Ready to the stealing CPU */
		cpu = CPU;
	} else if (thread->cpu) {
		/* Prefer the CPU on which the thread ran last */
		cpu = thread->cpu;
	} else {
		cpu = CPU;
	}

	thread->state = Ready;

	irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));

	/*
	 * Append thread to the respective ready queue
	 * on the respective processor.
	 */

	list_append(&thread->rq_link, &cpu->rq[i].rq);
	cpu->rq[i].n++;
	irq_spinlock_unlock(&(cpu->rq[i].lock), true);

	atomic_inc(&nrdy);
	atomic_inc(&cpu->nrdy);
}

/** Create new thread
 *
 * Create a new thread.
 *
 * @param func  Thread's implementing function.
 * @param arg   Thread's implementing function argument.
 * @param task  Task to which the thread belongs. The caller must
 *              guarantee that the task won't cease to exist during the
 *              call. The task's lock may not be held.
 * @param flags Thread flags.
 * @param name  Symbolic name (a copy is made).
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (*func)(void *), void *arg, task_t *task,
    thread_flags_t flags, const char *name)
{
	thread_t *thread = (thread_t *) slab_alloc(thread_cache, FRAME_ATOMIC);
	if (!thread)
		return NULL;

	if (thread_create_arch(thread, flags) != EOK) {
		slab_free(thread_cache, thread);
		return NULL;
	}

	/* Not needed, but good for debugging */
	memsetb(thread->kstack, STACK_SIZE, 0);

	irq_spinlock_lock(&tidlock, true);
	thread->tid = ++last_tid;
	irq_spinlock_unlock(&tidlock, true);

	memset(&thread->saved_context, 0, sizeof(thread->saved_context));
	context_set(&thread->saved_context, FADDR(cushion),
	    (uintptr_t) thread->kstack, STACK_SIZE);

	current_initialize((current_t *) thread->kstack);

	ipl_t ipl = interrupts_disable();
	thread->saved_context.ipl = interrupts_read();
	interrupts_restore(ipl);

	str_cpy(thread->name, THREAD_NAME_BUFLEN, name);

	thread->thread_code = func;
	thread->thread_arg = arg;
	thread->ucycles = 0;
	thread->kcycles = 0;
	thread->uncounted =
	    ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
	thread->priority = -1; /* Start in rq[0] */
	thread->cpu = NULL;
	thread->wired = false;
	thread->stolen = false;
	thread->uspace =
	    ((flags & THREAD_FLAG_USPACE) == THREAD_FLAG_USPACE);

	thread->nomigrate = 0;
	thread->state = Entering;

	timeout_initialize(&thread->sleep_timeout);
	thread->sleep_interruptible = false;
	thread->sleep_composable = false;
	thread->sleep_queue = NULL;
	thread->timeout_pending = false;

	thread->in_copy_from_uspace = false;
	thread->in_copy_to_uspace = false;

	thread->interrupted = false;
	thread->detached = false;
	waitq_initialize(&thread->join_wq);

	thread->task = task;

	thread->fpu_context_exists = false;
	thread->fpu_context_engaged = false;

	odlink_initialize(&thread->lthreads);

#ifdef CONFIG_UDEBUG
	/* Initialize debugging stuff */
	thread->btrace = false;
	udebug_thread_initialize(&thread->udebug);
#endif

	if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
		thread_attach(thread, task);

	return thread;
}
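
/*
 * Example (editor's sketch, not part of the original code): a typical
 * kernel-side use of thread_create() followed by thread_ready(). The
 * worker function and thread name are hypothetical; THREAD_FLAG_NONE is
 * assumed to be the empty flag value from proc/thread.h.
 *
 *	static void worker(void *arg)
 *	{
 *		do_work(arg);
 *		// returning ends the thread: cushion() calls thread_exit()
 *	}
 *
 *	thread_t *t = thread_create(worker, NULL, TASK,
 *	    THREAD_FLAG_NONE, "worker");
 *	if (t != NULL)
 *		thread_ready(t);
 */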

/** Destroy thread memory structure
 *
 * Detach the thread from all queues, CPUs, etc., and destroy it.
 *
 * @param thread  Thread to be destroyed.
 * @param irq_res Indicate whether it should unlock thread->lock
 *                in interrupts-restore mode.
 *
 */
void thread_destroy(thread_t *thread, bool irq_res)
{
	assert(irq_spinlock_locked(&thread->lock));
	assert((thread->state == Exiting) || (thread->state == Lingering));
	assert(thread->task);
	assert(thread->cpu);

	irq_spinlock_lock(&thread->cpu->lock, false);
	if (thread->cpu->fpu_owner == thread)
		thread->cpu->fpu_owner = NULL;
	irq_spinlock_unlock(&thread->cpu->lock, false);

	irq_spinlock_pass(&thread->lock, &threads_lock);

	odict_remove(&thread->lthreads);

	irq_spinlock_pass(&threads_lock, &thread->task->lock);

	/*
	 * Detach from the containing task.
	 */
	list_remove(&thread->th_link);
	irq_spinlock_unlock(&thread->task->lock, irq_res);

	/*
	 * Drop the reference to the containing task.
	 */
	task_release(thread->task);
	slab_free(thread_cache, thread);
}

/** Make the thread visible to the system.
 *
 * Attach the thread structure to the current task and make it visible in the
 * @c threads ordered dictionary.
 *
 * @param thread Thread to be attached to the task.
 * @param task   Task to which the thread is to be attached.
 *
 */
void thread_attach(thread_t *thread, task_t *task)
{
	/*
	 * Attach to the specified task.
	 */
	irq_spinlock_lock(&task->lock, true);

	/* Hold a reference to the task. */
	task_hold(task);

	/* Must not count kbox thread into lifecount */
	if (thread->uspace)
		atomic_inc(&task->lifecount);

	list_append(&thread->th_link, &task->threads);

	irq_spinlock_pass(&task->lock, &threads_lock);

	/*
	 * Register this thread in the system-wide dictionary.
	 */
	odict_insert(&thread->lthreads, &threads, NULL);
	irq_spinlock_unlock(&threads_lock, true);
}

/** Terminate thread.
 *
 * End current thread execution and switch it to the exiting state.
 * All pending timeouts are executed.
 *
 */
void thread_exit(void)
{
	if (THREAD->uspace) {
#ifdef CONFIG_UDEBUG
		/* Generate udebug THREAD_E event */
		udebug_thread_e_event();

		/*
		 * This thread will not execute any code or system calls from
		 * now on.
		 */
		udebug_stoppable_begin();
#endif
		if (atomic_predec(&TASK->lifecount) == 0) {
			/*
			 * We are the last userspace thread in the task that
			 * still has not exited. With the exception of the
			 * moment the task was created, new userspace threads
			 * can only be created by threads of the same task.
			 * We are safe to perform cleanup.
			 */
			ipc_cleanup();
			sys_waitq_task_cleanup();
			LOG("Cleanup of task %" PRIu64 " completed.", TASK->taskid);
		}
	}

restart:
	irq_spinlock_lock(&THREAD->lock, true);
	if (THREAD->timeout_pending) {
		/* Busy waiting for timeouts in progress */
		irq_spinlock_unlock(&THREAD->lock, true);
		goto restart;
	}

	THREAD->state = Exiting;
	irq_spinlock_unlock(&THREAD->lock, true);

	scheduler();

	panic("should never be reached");
}

/** Interrupts an existing thread so that it may exit as soon as possible.
 *
 * Threads that are blocked waiting for a synchronization primitive
 * are woken up with a return code of EINTR if the
 * blocking call was interruptible. See waitq_sleep_timeout().
 *
 * The caller must guarantee the thread object is valid during the entire
 * function, e.g. by holding the threads_lock lock.
 *
 * Interrupted threads automatically exit when returning back to user space.
 *
 * @param thread A valid thread object. The caller must guarantee it
 *               will remain valid until thread_interrupt() exits.
 */
void thread_interrupt(thread_t *thread)
{
	assert(thread != NULL);

	irq_spinlock_lock(&thread->lock, true);

	thread->interrupted = true;
	bool sleeping = (thread->state == Sleeping);

	irq_spinlock_unlock(&thread->lock, true);

	if (sleeping)
		waitq_interrupt_sleep(thread);
}

/** Returns true if the thread was interrupted.
 *
 * @param thread A valid thread object. The caller must guarantee it will
 *               be alive during the entire call.
 * @return true if the thread was already interrupted via thread_interrupt().
 */
bool thread_interrupted(thread_t *thread)
{
	assert(thread != NULL);

	bool interrupted;

	irq_spinlock_lock(&thread->lock, true);
	interrupted = thread->interrupted;
	irq_spinlock_unlock(&thread->lock, true);

	return interrupted;
}

/** Prevent the current thread from being migrated to another processor. */
void thread_migration_disable(void)
{
	assert(THREAD);

	THREAD->nomigrate++;
}

/** Allow the current thread to be migrated to another processor. */
void thread_migration_enable(void)
{
	assert(THREAD);
	assert(THREAD->nomigrate > 0);

	if (THREAD->nomigrate > 0)
		THREAD->nomigrate--;
}
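
/*
 * Example (editor's sketch): thread_migration_disable() and
 * thread_migration_enable() nest, so code that must stay on one CPU
 * simply brackets the critical section:
 *
 *	thread_migration_disable();
 *	// ... code that relies on staying on the current CPU ...
 *	thread_migration_enable();
 *
 * Each disable increments THREAD->nomigrate and each enable decrements it;
 * thread_ready() above keeps the thread on its CPU while the count is
 * nonzero.
 */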

/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(uint32_t sec)
{
	/*
	 * Sleep in 1000-second steps to support the full argument range:
	 * thread_usleep() takes microseconds in a uint32_t, and
	 * 1000 s = 10^9 us still fits, while larger steps could overflow.
	 */
	while (sec > 0) {
		uint32_t period = (sec > 1000) ? 1000 : sec;

		thread_usleep(period * 1000000);
		sec -= period;
	}
}

/** Wait for another thread to exit.
 *
 * @param thread Thread to join on exit.
 * @param usec   Timeout in microseconds.
 * @param flags  Mode of operation.
 *
 * @return An error code from errno.h or an error code from synch.h.
 *
 */
errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
{
	if (thread == THREAD)
		return EINVAL;

	/*
	 * Since thread join can only be called once on an undetached thread,
	 * the thread pointer is guaranteed to be still valid.
	 */

	irq_spinlock_lock(&thread->lock, true);
	assert(!thread->detached);
	irq_spinlock_unlock(&thread->lock, true);

	return waitq_sleep_timeout(&thread->join_wq, usec, flags, NULL);

	// FIXME: join should deallocate the thread.
	// Current code calls detach after join, that's contrary to how
	// join is used in other threading APIs.
}

/** Detach thread.
 *
 * Mark the thread as detached. If the thread is already
 * in the Lingering state, deallocate its resources.
 *
 * @param thread Thread to be detached.
 *
 */
void thread_detach(thread_t *thread)
{
	/*
	 * Since the thread is expected not to be already detached,
	 * the pointer to it must still be valid.
	 */
	irq_spinlock_lock(&thread->lock, true);
	assert(!thread->detached);

	if (thread->state == Lingering) {
		/*
		 * Unlock &thread->lock and restore
		 * interrupts in thread_destroy().
		 */
		thread_destroy(thread, true);
		return;
	} else {
		thread->detached = true;
	}

	irq_spinlock_unlock(&thread->lock, true);
}
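
/*
 * Example (editor's sketch): the join-then-detach sequence that the FIXME
 * in thread_join_timeout() refers to. SYNCH_NO_TIMEOUT and
 * SYNCH_FLAGS_NONE are assumed from the synch headers; 't' is a thread
 * created without THREAD_FLAG_NOATTACH.
 *
 *	errno_t rc = thread_join_timeout(t, SYNCH_NO_TIMEOUT,
 *	    SYNCH_FLAGS_NONE);
 *	if (rc == EOK)
 *		thread_detach(t);  // frees t once it reaches Lingering
 */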

/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(uint32_t usec)
{
	waitq_t wq;

	waitq_initialize(&wq);

	(void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
}

static void thread_print(thread_t *thread, bool additional)
{
	uint64_t ucycles, kcycles;
	char usuffix, ksuffix;
	order_suffix(thread->ucycles, &ucycles, &usuffix);
	order_suffix(thread->kcycles, &kcycles, &ksuffix);

	char *name;
	if (str_cmp(thread->name, "uinit") == 0)
		name = thread->task->name;
	else
		name = thread->name;

	if (additional)
		printf("%-8" PRIu64 " %p %p %9" PRIu64 "%c %9" PRIu64 "%c ",
		    thread->tid, thread->thread_code, thread->kstack,
		    ucycles, usuffix, kcycles, ksuffix);
	else
		printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
		    thread->tid, name, thread, thread_states[thread->state],
		    thread->task, thread->task->container);

	if (additional) {
		if (thread->cpu)
			printf("%-5u", thread->cpu->id);
		else
			printf("none ");

		if (thread->state == Sleeping) {
			printf(" %p", thread->sleep_queue);
		}

		printf("\n");
	}
}

/** Print list of threads debug info
 *
 * @param additional Print additional information.
 *
 */
void thread_print_list(bool additional)
{
	thread_t *thread;

	/* Messing with thread structures, avoid deadlock */
	irq_spinlock_lock(&threads_lock, true);

	if (sizeof(void *) <= 4) {
		if (additional)
			printf("[id ] [code ] [stack ] [ucycles ] [kcycles ]"
			    " [cpu] [waitqueue]\n");
		else
			printf("[id ] [name ] [address ] [state ] [task ]"
			    " [ctn]\n");
	} else {
		if (additional) {
			printf("[id ] [code ] [stack ] [ucycles ] [kcycles ]"
			    " [cpu] [waitqueue ]\n");
		} else
			printf("[id ] [name ] [address ] [state ]"
			    " [task ] [ctn]\n");
	}

	thread = thread_first();
	while (thread != NULL) {
		thread_print(thread, additional);
		thread = thread_next(thread);
	}

	irq_spinlock_unlock(&threads_lock, true);
}

/** Check whether thread exists.
 *
 * Note that threads_lock must be already held and
 * interrupts must be already disabled.
 *
 * @param thread Pointer to thread.
 *
 * @return True if the thread is known to the system, false otherwise.
 *
 */
bool thread_exists(thread_t *thread)
{
	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	odlink_t *odlink = odict_find_eq(&threads, thread, NULL);
	return odlink != NULL;
}

/** Update accounting of current thread.
 *
 * Note that thread_lock on THREAD must be already held and
 * interrupts must be already disabled.
 *
 * @param user True to update user accounting, false for kernel.
 *
 */
void thread_update_accounting(bool user)
{
	uint64_t time = get_cycle();

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&THREAD->lock));

	if (user)
		THREAD->ucycles += time - THREAD->last_cycle;
	else
		THREAD->kcycles += time - THREAD->last_cycle;

	THREAD->last_cycle = time;
}

/** Find thread structure corresponding to thread ID.
 *
 * The threads_lock must be already held by the caller of this function and
 * interrupts must be disabled.
 *
 * @param thread_id Thread ID.
 *
 * @return Thread structure address or NULL if there is no such thread ID.
 *
 */
thread_t *thread_find_by_id(thread_id_t thread_id)
{
	thread_t *thread;

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	thread = thread_first();
	while (thread != NULL) {
		if (thread->tid == thread_id)
			return thread;

		thread = thread_next(thread);
	}

	return NULL;
}

/** Get count of threads.
 *
 * @return Number of threads in the system
 */
size_t thread_count(void)
{
	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	return odict_count(&threads);
}

/** Get first thread.
 *
 * @return Pointer to first thread or @c NULL if there are none.
 */
thread_t *thread_first(void)
{
	odlink_t *odlink;

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	odlink = odict_first(&threads);
	if (odlink == NULL)
		return NULL;

	return odict_get_instance(odlink, thread_t, lthreads);
}

/** Get next thread.
 *
 * @param cur Current thread
 * @return Pointer to next thread or @c NULL if there are no more threads.
 */
thread_t *thread_next(thread_t *cur)
{
	odlink_t *odlink;

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	odlink = odict_next(&cur->lthreads, &threads);
	if (odlink == NULL)
		return NULL;

	return odict_get_instance(odlink, thread_t, lthreads);
}
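
/*
 * Example (editor's sketch): walking all threads with thread_first() and
 * thread_next(). The whole walk must happen under threads_lock with
 * interrupts disabled, exactly as thread_print_list() and
 * thread_find_by_id() do:
 *
 *	irq_spinlock_lock(&threads_lock, true);
 *
 *	for (thread_t *t = thread_first(); t != NULL; t = thread_next(t)) {
 *		// inspect t; it cannot disappear while the lock is held
 *	}
 *
 *	irq_spinlock_unlock(&threads_lock, true);
 */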

#ifdef CONFIG_UDEBUG

void thread_stack_trace(thread_id_t thread_id)
{
	irq_spinlock_lock(&threads_lock, true);

	thread_t *thread = thread_find_by_id(thread_id);
	if (thread == NULL) {
		printf("No such thread.\n");
		irq_spinlock_unlock(&threads_lock, true);
		return;
	}

	irq_spinlock_lock(&thread->lock, false);

	/*
	 * Schedule a stack trace to be printed
	 * just before the thread is scheduled next.
	 *
	 * If the thread is sleeping then try to interrupt
	 * the sleep. Any request for printing a uspace stack
	 * trace from within the kernel should always be
	 * considered a last-resort debugging means, therefore
	 * forcing the thread's sleep to be interrupted
	 * is probably justifiable.
	 */

	bool sleeping = false;
	istate_t *istate = thread->udebug.uspace_state;
	if (istate != NULL) {
		printf("Scheduling thread stack trace.\n");
		thread->btrace = true;
		if (thread->state == Sleeping)
			sleeping = true;
	} else
		printf("Thread interrupt state not available.\n");

	irq_spinlock_unlock(&thread->lock, false);

	if (sleeping)
		waitq_interrupt_sleep(thread);

	irq_spinlock_unlock(&threads_lock, true);
}

#endif /* CONFIG_UDEBUG */

/** Get key function for the @c threads ordered dictionary.
 *
 * @param odlink Link
 * @return Pointer to thread structure cast as 'void *'
 */
static void *threads_getkey(odlink_t *odlink)
{
	thread_t *thread = odict_get_instance(odlink, thread_t, lthreads);
	return (void *) thread;
}

/** Key comparison function for the @c threads ordered dictionary.
 *
 * @param a Pointer to thread A
 * @param b Pointer to thread B
 * @return -1, 0, 1 iff pointer to A is less than, equal to, greater than B
 */
static int threads_cmp(void *a, void *b)
{
	if (a < b)
		return -1;
	else if (a == b)
		return 0;
	else
		return +1;
}

/** Process syscall to create new thread.
 *
 */
sys_errno_t sys_thread_create(uspace_ptr_uspace_arg_t uspace_uarg, uspace_ptr_char uspace_name,
    size_t name_len, uspace_ptr_thread_id_t uspace_thread_id)
{
	if (name_len > THREAD_NAME_BUFLEN - 1)
		name_len = THREAD_NAME_BUFLEN - 1;

	char namebuf[THREAD_NAME_BUFLEN];
	errno_t rc = copy_from_uspace(namebuf, uspace_name, name_len);
	if (rc != EOK)
		return (sys_errno_t) rc;

	namebuf[name_len] = 0;

	/*
	 * In case of failure, kernel_uarg will be deallocated in this function.
	 * In case of success, kernel_uarg will be freed in uinit().
	 */
	uspace_arg_t *kernel_uarg =
	    (uspace_arg_t *) malloc(sizeof(uspace_arg_t));
	if (!kernel_uarg)
		return (sys_errno_t) ENOMEM;

	rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
	if (rc != EOK) {
		free(kernel_uarg);
		return (sys_errno_t) rc;
	}

	thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
	    THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf);
	if (thread) {
		if (uspace_thread_id) {
			rc = copy_to_uspace(uspace_thread_id, &thread->tid,
			    sizeof(thread->tid));
			if (rc != EOK) {
				/*
				 * We have encountered a failure, but the thread
				 * has already been created. We need to undo its
				 * creation now.
				 */

				/*
				 * The new thread structure is initialized, but
				 * is still not visible to the system.
				 * We can safely deallocate it.
				 */
				slab_free(thread_cache, thread);
				free(kernel_uarg);

				return (sys_errno_t) rc;
			}
		}

#ifdef CONFIG_UDEBUG
		/*
		 * Generate udebug THREAD_B event and attach the thread.
		 * This must be done atomically (with the debug locks held),
		 * otherwise we would either miss some thread or receive
		 * THREAD_B events for threads that already existed
		 * and could be detected with THREAD_READ before.
		 */
		udebug_thread_b_event_attach(thread, TASK);
#else
		thread_attach(thread, TASK);
#endif
		thread_ready(thread);

		return 0;
	} else
		free(kernel_uarg);

	return (sys_errno_t) ENOMEM;
}

/** Process syscall to terminate thread.
 *
 */
sys_errno_t sys_thread_exit(int uspace_status)
{
	thread_exit();
}

/** Syscall for getting TID.
 *
 * @param uspace_thread_id Userspace address of 8-byte buffer where to store
 *                         current thread ID.
 *
 * @return 0 on success or an error code from @ref errno.h.
 *
 */
sys_errno_t sys_thread_get_id(uspace_ptr_thread_id_t uspace_thread_id)
{
	/*
	 * No need to acquire lock on THREAD because tid
	 * remains constant for the lifespan of the thread.
	 */
	return (sys_errno_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
	    sizeof(THREAD->tid));
}

/** Syscall wrapper for sleeping. */
sys_errno_t sys_thread_usleep(uint32_t usec)
{
	thread_usleep(usec);
	return 0;
}

sys_errno_t sys_thread_udelay(uint32_t usec)
{
	delay(usec);
	return 0;
}

/** @}
 */