source: mainline/kernel/generic/src/proc/thread.c @ 036e97c

Last change on this file since 036e97c was edc64c0, checked in by Jakub Jermar <jakub@…>, 7 years ago:

Zero out new thread's register context

This removes the information leak in which the new thread inherited some
register values from the thread which created it. Also, now each thread
begins execution with a well-defined register state.
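
The fix in thread_create() zeroes the saved context before the initial
program counter and stack are set, so no stale caller registers survive
into the new thread (both lines appear in the listing below):

    memset(&thread->saved_context, 0, sizeof(thread->saved_context));
    context_set(&thread->saved_context, FADDR(cushion),
        (uintptr_t) thread->kstack, STACK_SIZE);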

/*
 * Copyright (c) 2010 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Thread management functions.
 */

#include <assert.h>
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch/cycle.h>
#include <arch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/workqueue.h>
#include <synch/rcu.h>
#include <cpu.h>
#include <str.h>
#include <context.h>
#include <adt/avl.h>
#include <adt/list.h>
#include <time/clock.h>
#include <time/timeout.h>
#include <time/delay.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <mem.h>
#include <print.h>
#include <mm/slab.h>
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>

/** Thread states */
const char *thread_states[] = {
        "Invalid",
        "Running",
        "Sleeping",
        "Ready",
        "Entering",
        "Exiting",
        "Lingering"
};

typedef struct {
        thread_id_t thread_id;
        thread_t *thread;
} thread_iterator_t;

/** Lock protecting the threads_tree AVL tree.
 *
 * For locking rules, see the declaration thereof.
 *
 */
IRQ_SPINLOCK_INITIALIZE(threads_lock);

/** AVL tree of all threads.
 *
 * When a thread is found in the threads_tree AVL tree, it is guaranteed to
 * exist as long as the threads_lock is held.
 *
 */
avltree_t threads_tree;

IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
static thread_id_t last_tid = 0;

static slab_cache_t *thread_cache;

#ifdef CONFIG_FPU
slab_cache_t *fpu_context_cache;
#endif

/** Thread wrapper.
 *
 * This wrapper is provided to ensure that every thread makes a call to
 * thread_exit() when its implementing function returns.
 *
 * interrupts_disable() is assumed.
 *
 */
static void cushion(void)
{
        void (*f)(void *) = THREAD->thread_code;
        void *arg = THREAD->thread_arg;
        THREAD->last_cycle = get_cycle();

        /* This is where each thread wakes up after its creation */
        irq_spinlock_unlock(&THREAD->lock, false);
        interrupts_enable();

        f(arg);

        /* Accumulate accounting to the task */
        irq_spinlock_lock(&THREAD->lock, true);
        if (!THREAD->uncounted) {
                thread_update_accounting(true);
                uint64_t ucycles = THREAD->ucycles;
                THREAD->ucycles = 0;
                uint64_t kcycles = THREAD->kcycles;
                THREAD->kcycles = 0;

                irq_spinlock_pass(&THREAD->lock, &TASK->lock);
                TASK->ucycles += ucycles;
                TASK->kcycles += kcycles;
                irq_spinlock_unlock(&TASK->lock, true);
        } else
                irq_spinlock_unlock(&THREAD->lock, true);

        thread_exit();

        /* Not reached */
}
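
/*
 * Illustration (not part of the original file): because every kernel
 * thread starts in cushion(), an implementing function may simply
 * return and thread_exit() is still called on its behalf. A minimal
 * sketch of such a function:
 *
 *        static void hello(void *arg)
 *        {
 *                printf("hello from %s\n", THREAD->name);
 *                // Returning lands back in cushion(), which accounts
 *                // the thread's cycles and calls thread_exit().
 *        }
 */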

/** Initialization and allocation for the thread_t structure.
 *
 */
static errno_t thr_constructor(void *obj, unsigned int kmflags)
{
        thread_t *thread = (thread_t *) obj;

        irq_spinlock_initialize(&thread->lock, "thread_t_lock");
        link_initialize(&thread->rq_link);
        link_initialize(&thread->wq_link);
        link_initialize(&thread->th_link);

        /* Call the architecture-specific part of the constructor */
        thr_constructor_arch(thread);

#ifdef CONFIG_FPU
#ifdef CONFIG_FPU_LAZY
        thread->saved_fpu_context = NULL;
#else /* CONFIG_FPU_LAZY */
        thread->saved_fpu_context = slab_alloc(fpu_context_cache, kmflags);
        if (!thread->saved_fpu_context)
                return ENOMEM;
#endif /* CONFIG_FPU_LAZY */
#endif /* CONFIG_FPU */

        /*
         * Allocate the kernel stack from low memory to prevent an infinite
         * nesting of TLB-misses when accessing the stack from the part of the
         * TLB-miss handler written in C.
         *
         * Note that low memory is safe to be used for the stack as it will be
         * covered by the kernel identity mapping, which guarantees not to
         * nest TLB-misses infinitely (either via some hardware mechanism or
         * by the construction of the assembly-language part of the TLB-miss
         * handler).
         *
         * This restriction can be lifted once each architecture provides
         * a similar guarantee, for example by locking the kernel stack
         * in the TLB whenever it is allocated from high memory and the
         * thread is being scheduled to run.
         */
        kmflags |= FRAME_LOWMEM;
        kmflags &= ~FRAME_HIGHMEM;

        uintptr_t stack_phys =
            frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);
        if (!stack_phys) {
#ifdef CONFIG_FPU
                if (thread->saved_fpu_context)
                        slab_free(fpu_context_cache, thread->saved_fpu_context);
#endif
                return ENOMEM;
        }

        thread->kstack = (uint8_t *) PA2KA(stack_phys);

#ifdef CONFIG_UDEBUG
        mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
#endif

        return EOK;
}

/** Destruction of thread_t object */
static size_t thr_destructor(void *obj)
{
        thread_t *thread = (thread_t *) obj;

        /* Call the architecture-specific part of the destructor */
        thr_destructor_arch(thread);

        frame_free(KA2PA(thread->kstack), STACK_FRAMES);

#ifdef CONFIG_FPU
        if (thread->saved_fpu_context)
                slab_free(fpu_context_cache, thread->saved_fpu_context);
#endif

        return STACK_FRAMES;  /* number of frames freed */
}

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
        THREAD = NULL;

        atomic_set(&nrdy, 0);
        thread_cache = slab_cache_create("thread_t", sizeof(thread_t), 0,
            thr_constructor, thr_destructor, 0);

#ifdef CONFIG_FPU
        fpu_context_cache = slab_cache_create("fpu_context_t",
            sizeof(fpu_context_t), FPU_CONTEXT_ALIGN, NULL, NULL, 0);
#endif

        avltree_create(&threads_tree);
}

/** Wire thread to the given CPU
 *
 * @param thread Thread to be wired.
 * @param cpu    CPU to wire the thread to.
 *
 */
void thread_wire(thread_t *thread, cpu_t *cpu)
{
        irq_spinlock_lock(&thread->lock, true);
        thread->cpu = cpu;
        thread->wired = true;
        irq_spinlock_unlock(&thread->lock, true);
}

/** Invoked right before thread_ready() readies the thread. The thread is locked. */
static void before_thread_is_ready(thread_t *thread)
{
        assert(irq_spinlock_locked(&thread->lock));
        workq_before_thread_is_ready(thread);
}

/** Make thread ready
 *
 * Switch thread to the ready state.
 *
 * @param thread Thread to make ready.
 *
 */
void thread_ready(thread_t *thread)
{
        irq_spinlock_lock(&thread->lock, true);

        assert(thread->state != Ready);

        before_thread_is_ready(thread);

        int i = (thread->priority < RQ_COUNT - 1) ?
            ++thread->priority : thread->priority;

        cpu_t *cpu;
        if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) {
                /* Cannot ready to another CPU */
                assert(thread->cpu != NULL);
                cpu = thread->cpu;
        } else if (thread->stolen) {
                /* Ready to the stealing CPU */
                cpu = CPU;
        } else if (thread->cpu) {
                /* Prefer the CPU on which the thread ran last */
                cpu = thread->cpu;
        } else {
                cpu = CPU;
        }

        thread->state = Ready;

        irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));

        /*
         * Append thread to the respective ready queue
         * on the respective processor.
         */

        list_append(&thread->rq_link, &cpu->rq[i].rq);
        cpu->rq[i].n++;
        irq_spinlock_unlock(&(cpu->rq[i].lock), true);

        atomic_inc(&nrdy);
        atomic_inc(&cpu->nrdy);
}

/** Create new thread
 *
 * Create a new thread.
 *
 * @param func  Thread's implementing function.
 * @param arg   Thread's implementing function argument.
 * @param task  Task to which the thread belongs. The caller must
 *              guarantee that the task won't cease to exist during the
 *              call. The task's lock may not be held.
 * @param flags Thread flags.
 * @param name  Symbolic name (a copy is made).
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (*func)(void *), void *arg, task_t *task,
    thread_flags_t flags, const char *name)
{
        thread_t *thread = (thread_t *) slab_alloc(thread_cache, 0);
        if (!thread)
                return NULL;

        /* Not needed, but good for debugging */
        memsetb(thread->kstack, STACK_SIZE, 0);

        irq_spinlock_lock(&tidlock, true);
        thread->tid = ++last_tid;
        irq_spinlock_unlock(&tidlock, true);

        memset(&thread->saved_context, 0, sizeof(thread->saved_context));
        context_set(&thread->saved_context, FADDR(cushion),
            (uintptr_t) thread->kstack, STACK_SIZE);

        the_initialize((the_t *) thread->kstack);

        ipl_t ipl = interrupts_disable();
        thread->saved_context.ipl = interrupts_read();
        interrupts_restore(ipl);

        str_cpy(thread->name, THREAD_NAME_BUFLEN, name);

        thread->thread_code = func;
        thread->thread_arg = arg;
        thread->ticks = -1;
        thread->ucycles = 0;
        thread->kcycles = 0;
        thread->uncounted =
            ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
        thread->priority = -1;  /* Start in rq[0] */
        thread->cpu = NULL;
        thread->wired = false;
        thread->stolen = false;
        thread->uspace =
            ((flags & THREAD_FLAG_USPACE) == THREAD_FLAG_USPACE);

        thread->nomigrate = 0;
        thread->state = Entering;

        timeout_initialize(&thread->sleep_timeout);
        thread->sleep_interruptible = false;
        thread->sleep_composable = false;
        thread->sleep_queue = NULL;
        thread->timeout_pending = false;

        thread->in_copy_from_uspace = false;
        thread->in_copy_to_uspace = false;

        thread->interrupted = false;
        thread->detached = false;
        waitq_initialize(&thread->join_wq);

        thread->task = task;

        thread->workq = NULL;

        thread->fpu_context_exists = false;
        thread->fpu_context_engaged = false;

        avltree_node_initialize(&thread->threads_tree_node);
        thread->threads_tree_node.key = (uintptr_t) thread;

#ifdef CONFIG_UDEBUG
        /* Initialize debugging stuff */
        thread->btrace = false;
        udebug_thread_initialize(&thread->udebug);
#endif

        /* Might depend on previous initialization */
        thread_create_arch(thread);

        rcu_thread_init(thread);

        if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
                thread_attach(thread, task);

        return thread;
}
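
/*
 * Illustration (not part of the original file): a minimal sketch of
 * spawning a kernel thread, assuming THREAD_FLAG_NONE (the empty flag
 * set from <proc/thread.h>) and a hypothetical worker() function.
 * Without THREAD_FLAG_NOATTACH the thread is attached to the task by
 * thread_create() itself, so only readying remains:
 *
 *        thread_t *t = thread_create(worker, NULL, TASK,
 *            THREAD_FLAG_NONE, "worker");
 *        if (t != NULL)
 *                thread_ready(t);
 */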

/** Destroy thread memory structure
 *
 * Detach thread from all queues, CPUs etc. and destroy it.
 *
 * @param thread  Thread to be destroyed.
 * @param irq_res Indicate whether it should unlock thread->lock
 *                in interrupts-restore mode.
 *
 */
void thread_destroy(thread_t *thread, bool irq_res)
{
        assert(irq_spinlock_locked(&thread->lock));
        assert((thread->state == Exiting) || (thread->state == Lingering));
        assert(thread->task);
        assert(thread->cpu);

        irq_spinlock_lock(&thread->cpu->lock, false);
        if (thread->cpu->fpu_owner == thread)
                thread->cpu->fpu_owner = NULL;
        irq_spinlock_unlock(&thread->cpu->lock, false);

        irq_spinlock_pass(&thread->lock, &threads_lock);

        avltree_delete(&threads_tree, &thread->threads_tree_node);

        irq_spinlock_pass(&threads_lock, &thread->task->lock);

        /*
         * Detach from the containing task.
         */
        list_remove(&thread->th_link);
        irq_spinlock_unlock(&thread->task->lock, irq_res);

        /*
         * Drop the reference to the containing task.
         */
        task_release(thread->task);
        slab_free(thread_cache, thread);
}

/** Make the thread visible to the system.
 *
 * Attach the thread structure to the current task and make it visible in the
 * threads_tree.
 *
 * @param thread Thread to be attached to the task.
 * @param task   Task to which the thread is to be attached.
 *
 */
void thread_attach(thread_t *thread, task_t *task)
{
        /*
         * Attach to the specified task.
         */
        irq_spinlock_lock(&task->lock, true);

        /* Hold a reference to the task. */
        task_hold(task);

        /* Must not count kbox thread into lifecount */
        if (thread->uspace)
                atomic_inc(&task->lifecount);

        list_append(&thread->th_link, &task->threads);

        irq_spinlock_pass(&task->lock, &threads_lock);

        /*
         * Register this thread in the system-wide list.
         */
        avltree_insert(&threads_tree, &thread->threads_tree_node);
        irq_spinlock_unlock(&threads_lock, true);
}

/** Terminate thread.
 *
 * End current thread execution and switch it to the exiting state.
 * All pending timeouts are executed.
 *
 */
void thread_exit(void)
{
        if (THREAD->uspace) {
#ifdef CONFIG_UDEBUG
                /* Generate udebug THREAD_E event */
                udebug_thread_e_event();

                /*
                 * This thread will not execute any code or system calls from
                 * now on.
                 */
                udebug_stoppable_begin();
#endif
                if (atomic_predec(&TASK->lifecount) == 0) {
                        /*
                         * We are the last userspace thread in the task that
                         * still has not exited. With the exception of the
                         * moment the task was created, new userspace threads
                         * can only be created by threads of the same task.
                         * We are safe to perform cleanup.
                         */
                        ipc_cleanup();
                        futex_task_cleanup();
                        LOG("Cleanup of task %" PRIu64 " completed.", TASK->taskid);
                }
        }

restart:
        irq_spinlock_lock(&THREAD->lock, true);
        if (THREAD->timeout_pending) {
                /* Busy waiting for timeouts in progress */
                irq_spinlock_unlock(&THREAD->lock, true);
                goto restart;
        }

        THREAD->state = Exiting;
        irq_spinlock_unlock(&THREAD->lock, true);

        scheduler();

        /* Not reached */
        while (true)
                ;
}

/** Interrupt an existing thread so that it may exit as soon as possible.
 *
 * Threads that are blocked waiting for a synchronization primitive
 * are woken up with a return code of EINTR if the
 * blocking call was interruptible. See waitq_sleep_timeout().
 *
 * The caller must guarantee the thread object is valid during the entire
 * function, e.g., by holding the threads_lock lock.
 *
 * Interrupted threads automatically exit when returning back to user space.
 *
 * @param thread A valid thread object. The caller must guarantee it
 *               will remain valid until thread_interrupt() exits.
 */
void thread_interrupt(thread_t *thread)
{
        assert(thread != NULL);

        irq_spinlock_lock(&thread->lock, true);

        thread->interrupted = true;
        bool sleeping = (thread->state == Sleeping);

        irq_spinlock_unlock(&thread->lock, true);

        if (sleeping)
                waitq_interrupt_sleep(thread);
}

/** Return true if the thread was interrupted.
 *
 * @param thread A valid thread object. The caller must guarantee it will
 *               be alive during the entire call.
 * @return true if the thread was already interrupted via thread_interrupt().
 */
bool thread_interrupted(thread_t *thread)
{
        assert(thread != NULL);

        bool interrupted;

        irq_spinlock_lock(&thread->lock, true);
        interrupted = thread->interrupted;
        irq_spinlock_unlock(&thread->lock, true);

        return interrupted;
}
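
/*
 * Illustration (not part of the original file): a long-running kernel
 * thread can poll thread_interrupted() so that thread_interrupt() makes
 * it wind down cleanly; do_work() is a hypothetical work unit.
 *
 *        static void worker(void *arg)
 *        {
 *                while (!thread_interrupted(THREAD)) {
 *                        do_work();
 *                        thread_usleep(10000);
 *                }
 *        }
 */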

/** Prevent the current thread from being migrated to another processor. */
void thread_migration_disable(void)
{
        assert(THREAD);

        THREAD->nomigrate++;
}

/** Allow the current thread to be migrated to another processor. */
void thread_migration_enable(void)
{
        assert(THREAD);
        assert(THREAD->nomigrate > 0);

        if (THREAD->nomigrate > 0)
                THREAD->nomigrate--;
}
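
/*
 * Illustration (not part of the original file): the calls nest, so each
 * disable must be paired with exactly one enable. While the nomigrate
 * counter is nonzero, thread_ready() keeps the thread on its current
 * CPU (see above), making it safe to work with CPU-local data;
 * cpu_local_work() is hypothetical.
 *
 *        thread_migration_disable();
 *        cpu_local_work(CPU);
 *        thread_migration_enable();
 */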

/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(uint32_t sec)
{
        /*
         * Sleep in 1000 second steps to support the full argument range;
         * thread_usleep() takes a uint32_t microsecond count, which caps
         * a single sleep at roughly 4294 seconds.
         */
        while (sec > 0) {
                uint32_t period = (sec > 1000) ? 1000 : sec;

                thread_usleep(period * 1000000);
                sec -= period;
        }
}

/** Wait for another thread to exit.
 *
 * @param thread Thread to join on exit.
 * @param usec   Timeout in microseconds.
 * @param flags  Mode of operation.
 *
 * @return An error code from errno.h or an error code from synch.h.
 *
 */
errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
{
        if (thread == THREAD)
                return EINVAL;

        /*
         * Since thread join can only be called once on an undetached thread,
         * the thread pointer is guaranteed to be still valid.
         */

        irq_spinlock_lock(&thread->lock, true);
        assert(!thread->detached);
        irq_spinlock_unlock(&thread->lock, true);

        return waitq_sleep_timeout(&thread->join_wq, usec, flags, NULL);
}

/** Detach thread.
 *
 * Mark the thread as detached. If the thread is already
 * in the Lingering state, deallocate its resources.
 *
 * @param thread Thread to be detached.
 *
 */
void thread_detach(thread_t *thread)
{
        /*
         * Since the thread is expected not to be already detached,
         * the pointer to it must be still valid.
         */
        irq_spinlock_lock(&thread->lock, true);
        assert(!thread->detached);

        if (thread->state == Lingering) {
                /*
                 * Unlock &thread->lock and restore
                 * interrupts in thread_destroy().
                 */
                thread_destroy(thread, true);
                return;
        } else {
                thread->detached = true;
        }

        irq_spinlock_unlock(&thread->lock, true);
}
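
/*
 * Illustration (not part of the original file): given a joinable
 * thread t created as above, exactly one of two endings applies.
 * SYNCH_NO_TIMEOUT and SYNCH_FLAGS_NONE are assumed to come from
 * <synch/synch.h>.
 *
 *        // Either reap the thread explicitly: wait for it to exit,
 *        // then detach it so that its structure is deallocated...
 *        thread_join_timeout(t, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
 *        thread_detach(t);
 *
 *        // ...or detach it right after creation and let it clean up
 *        // after itself when it exits:
 *        thread_detach(t);
 */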

/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(uint32_t usec)
{
        waitq_t wq;

        waitq_initialize(&wq);

        (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
}

static bool thread_walker(avltree_node_t *node, void *arg)
{
        bool *additional = (bool *) arg;
        thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);

        uint64_t ucycles, kcycles;
        char usuffix, ksuffix;
        order_suffix(thread->ucycles, &ucycles, &usuffix);
        order_suffix(thread->kcycles, &kcycles, &ksuffix);

        char *name;
        if (str_cmp(thread->name, "uinit") == 0)
                name = thread->task->name;
        else
                name = thread->name;

#ifdef __32_BITS__
        if (*additional)
                printf("%-8" PRIu64 " %10p %10p %9" PRIu64 "%c %9" PRIu64 "%c ",
                    thread->tid, thread->thread_code, thread->kstack,
                    ucycles, usuffix, kcycles, ksuffix);
        else
                printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 "\n",
                    thread->tid, name, thread, thread_states[thread->state],
                    thread->task, thread->task->container);
#endif

#ifdef __64_BITS__
        if (*additional)
                printf("%-8" PRIu64 " %18p %18p\n"
                    " %9" PRIu64 "%c %9" PRIu64 "%c ",
                    thread->tid, thread->thread_code, thread->kstack,
                    ucycles, usuffix, kcycles, ksuffix);
        else
                printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n",
                    thread->tid, name, thread, thread_states[thread->state],
                    thread->task, thread->task->container);
#endif

        if (*additional) {
                if (thread->cpu)
                        printf("%-5u", thread->cpu->id);
                else
                        printf("none ");

                if (thread->state == Sleeping) {
#ifdef __32_BITS__
                        printf(" %10p", thread->sleep_queue);
#endif

#ifdef __64_BITS__
                        printf(" %18p", thread->sleep_queue);
#endif
                }

                printf("\n");
        }

        return true;
}

/** Print list of threads debug info
 *
 * @param additional Print additional information.
 *
 */
void thread_print_list(bool additional)
{
        /* Messing with thread structures, avoid deadlock */
        irq_spinlock_lock(&threads_lock, true);

#ifdef __32_BITS__
        if (additional)
                printf("[id ] [code ] [stack ] [ucycles ] [kcycles ]"
                    " [cpu] [waitqueue]\n");
        else
                printf("[id ] [name ] [address ] [state ] [task ]"
                    " [ctn]\n");
#endif

#ifdef __64_BITS__
        if (additional) {
                printf("[id ] [code ] [stack ]\n"
                    " [ucycles ] [kcycles ] [cpu] [waitqueue ]\n");
        } else
                printf("[id ] [name ] [address ] [state ]"
                    " [task ] [ctn]\n");
#endif

        avltree_walk(&threads_tree, thread_walker, &additional);

        irq_spinlock_unlock(&threads_lock, true);
}

/** Check whether a thread exists.
 *
 * Note that threads_lock must be already held and
 * interrupts must be already disabled.
 *
 * @param thread Pointer to the thread.
 *
 * @return True if the thread is known to the system, false otherwise.
 *
 */
bool thread_exists(thread_t *thread)
{
        assert(interrupts_disabled());
        assert(irq_spinlock_locked(&threads_lock));

        avltree_node_t *node =
            avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) thread));

        return node != NULL;
}

/** Update accounting of current thread.
 *
 * Note that thread_lock on THREAD must be already held and
 * interrupts must be already disabled.
 *
 * @param user True to update user accounting, false for kernel.
 *
 */
void thread_update_accounting(bool user)
{
        uint64_t time = get_cycle();

        assert(interrupts_disabled());
        assert(irq_spinlock_locked(&THREAD->lock));

        if (user)
                THREAD->ucycles += time - THREAD->last_cycle;
        else
                THREAD->kcycles += time - THREAD->last_cycle;

        THREAD->last_cycle = time;
}

static bool thread_search_walker(avltree_node_t *node, void *arg)
{
        thread_t *thread =
            (thread_t *) avltree_get_instance(node, thread_t, threads_tree_node);
        thread_iterator_t *iterator = (thread_iterator_t *) arg;

        if (thread->tid == iterator->thread_id) {
                iterator->thread = thread;
                return false;
        }

        return true;
}

/** Find thread structure corresponding to thread ID.
 *
 * The threads_lock must be already held by the caller of this function and
 * interrupts must be disabled.
 *
 * @param thread_id Thread ID.
 *
 * @return Thread structure address or NULL if there is no such thread ID.
 *
 */
thread_t *thread_find_by_id(thread_id_t thread_id)
{
        assert(interrupts_disabled());
        assert(irq_spinlock_locked(&threads_lock));

        thread_iterator_t iterator;

        iterator.thread_id = thread_id;
        iterator.thread = NULL;

        avltree_walk(&threads_tree, thread_search_walker, (void *) &iterator);

        return iterator.thread;
}
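
/*
 * Illustration (not part of the original file): looking up a thread by
 * ID. The threads_lock must be held with interrupts disabled around
 * both the lookup and any use of the result, exactly as
 * thread_stack_trace() below does:
 *
 *        irq_spinlock_lock(&threads_lock, true);
 *        thread_t *t = thread_find_by_id(tid);
 *        if (t != NULL)
 *                printf("%" PRIu64 ": %s\n", t->tid, t->name);
 *        irq_spinlock_unlock(&threads_lock, true);
 */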

#ifdef CONFIG_UDEBUG

void thread_stack_trace(thread_id_t thread_id)
{
        irq_spinlock_lock(&threads_lock, true);

        thread_t *thread = thread_find_by_id(thread_id);
        if (thread == NULL) {
                printf("No such thread.\n");
                irq_spinlock_unlock(&threads_lock, true);
                return;
        }

        irq_spinlock_lock(&thread->lock, false);

        /*
         * Schedule a stack trace to be printed
         * just before the thread is scheduled next.
         *
         * If the thread is sleeping then try to interrupt
         * the sleep. Any request for printing a uspace stack
         * trace from within the kernel should always be
         * considered a last-resort debugging means, therefore
         * forcing the thread's sleep to be interrupted
         * is probably justifiable.
         */

        bool sleeping = false;
        istate_t *istate = thread->udebug.uspace_state;
        if (istate != NULL) {
                printf("Scheduling thread stack trace.\n");
                thread->btrace = true;
                if (thread->state == Sleeping)
                        sleeping = true;
        } else
                printf("Thread interrupt state not available.\n");

        irq_spinlock_unlock(&thread->lock, false);

        if (sleeping)
                waitq_interrupt_sleep(thread);

        irq_spinlock_unlock(&threads_lock, true);
}

#endif /* CONFIG_UDEBUG */

/** Process syscall to create new thread.
 *
 */
sys_errno_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name,
    size_t name_len, thread_id_t *uspace_thread_id)
{
        if (name_len > THREAD_NAME_BUFLEN - 1)
                name_len = THREAD_NAME_BUFLEN - 1;

        char namebuf[THREAD_NAME_BUFLEN];
        errno_t rc = copy_from_uspace(namebuf, uspace_name, name_len);
        if (rc != EOK)
                return (sys_errno_t) rc;

        namebuf[name_len] = 0;

        /*
         * In case of failure, kernel_uarg will be deallocated in this function.
         * In case of success, kernel_uarg will be freed in uinit().
         */
        uspace_arg_t *kernel_uarg =
            (uspace_arg_t *) malloc(sizeof(uspace_arg_t));
        if (!kernel_uarg)
                return (sys_errno_t) ENOMEM;

        rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
        if (rc != EOK) {
                free(kernel_uarg);
                return (sys_errno_t) rc;
        }

        thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
            THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf);
        if (thread) {
                if (uspace_thread_id != NULL) {
                        rc = copy_to_uspace(uspace_thread_id, &thread->tid,
                            sizeof(thread->tid));
                        if (rc != EOK) {
                                /*
                                 * We have encountered a failure, but the thread
                                 * has already been created. We need to undo its
                                 * creation now.
                                 */

                                /*
                                 * The new thread structure is initialized, but
                                 * is still not visible to the system.
                                 * We can safely deallocate it.
                                 */
                                slab_free(thread_cache, thread);
                                free(kernel_uarg);

                                return (sys_errno_t) rc;
                        }
                }

#ifdef CONFIG_UDEBUG
                /*
                 * Generate udebug THREAD_B event and attach the thread.
                 * This must be done atomically (with the debug locks held),
                 * otherwise we would either miss some thread or receive
                 * THREAD_B events for threads that already existed
                 * and could be detected with THREAD_READ before.
                 */
                udebug_thread_b_event_attach(thread, TASK);
#else
                thread_attach(thread, TASK);
#endif
                thread_ready(thread);

                return 0;
        } else
                free(kernel_uarg);

        return (sys_errno_t) ENOMEM;
}

/** Process syscall to terminate thread.
 *
 */
sys_errno_t sys_thread_exit(int uspace_status)
{
        thread_exit();
}

/** Syscall for getting TID.
 *
 * @param uspace_thread_id Userspace address of 8-byte buffer where to store
 *                         current thread ID.
 *
 * @return 0 on success or an error code from @ref errno.h.
 *
 */
sys_errno_t sys_thread_get_id(thread_id_t *uspace_thread_id)
{
        /*
         * No need to acquire lock on THREAD because tid
         * remains constant for the lifespan of the thread.
         */
        return (sys_errno_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
            sizeof(THREAD->tid));
}

/** Syscall wrapper for sleeping. */
sys_errno_t sys_thread_usleep(uint32_t usec)
{
        thread_usleep(usec);
        return 0;
}

sys_errno_t sys_thread_udelay(uint32_t usec)
{
        delay(usec);
        return 0;
}

/** @}
 */