source: mainline/kernel/generic/src/proc/thread.c@ 2902e1bb

Last change on this file since 2902e1bb was 2902e1bb, checked in by Martin Decky <martin@…>, 13 years ago

add support for variable uspace stack size
create individual address space areas for stacks of additional threads (instead of allocating the stack from heap)
avoid memory leaks in program_create()

[f761f1eb]1/*
[7ed8530]2 * Copyright (c) 2010 Jakub Jermar
[f761f1eb]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[cc73a8a1]29/** @addtogroup genericproc
[b45c443]30 * @{
31 */
32
[9179d0a]33/**
[b45c443]34 * @file
[da1bafb]35 * @brief Thread management functions.
[9179d0a]36 */
37
[f761f1eb]38#include <proc/scheduler.h>
39#include <proc/thread.h>
40#include <proc/task.h>
41#include <mm/frame.h>
42#include <mm/page.h>
43#include <arch/asm.h>
[cce6acf]44#include <arch/cycle.h>
[f761f1eb]45#include <arch.h>
46#include <synch/spinlock.h>
47#include <synch/waitq.h>
48#include <cpu.h>
[e535eeb]49#include <str.h>
[f761f1eb]50#include <context.h>
[5dcee525]51#include <adt/avl.h>
[5c9a08b]52#include <adt/list.h>
[f761f1eb]53#include <time/clock.h>
[b3f8fb7]54#include <time/timeout.h>
[8d6c1f1]55#include <time/delay.h>
[4ffa9e0]56#include <config.h>
57#include <arch/interrupt.h>
[26a8604f]58#include <smp/ipi.h>
[f2ffad4]59#include <arch/faddr.h>
[23684b7]60#include <atomic.h>
[9c0a9b3]61#include <memstr.h>
[55ab0f1]62#include <print.h>
[266294a9]63#include <mm/slab.h>
64#include <debug.h>
[9f52563]65#include <main/uinit.h>
[e3c762cd]66#include <syscall/copy.h>
67#include <errno.h>
[52755f1]68
[fe19611]69/** Thread states */
[a000878c]70const char *thread_states[] = {
[fe19611]71 "Invalid",
72 "Running",
73 "Sleeping",
74 "Ready",
75 "Entering",
76 "Exiting",
[48d14222]77 "Lingering"
[e1b6742]78};
79
80typedef struct {
81 thread_id_t thread_id;
82 thread_t *thread;
83} thread_iterator_t;
[f761f1eb]84
[5dcee525]85/** Lock protecting the threads_tree AVL tree.
[4e33b6b]86 *
87 * For locking rules, see declaration thereof.
[da1bafb]88 *
[4e33b6b]89 */
[da1bafb]90IRQ_SPINLOCK_INITIALIZE(threads_lock);
[88169d9]91
[81c0171e]92/** AVL tree of all threads.
[88169d9]93 *
[5dcee525]94 * When a thread is found in the threads_tree AVL tree, it is guaranteed to
[4e33b6b]95 * exist as long as the threads_lock is held.
[da1bafb]96 *
[88169d9]97 */
[da1bafb]98avltree_t threads_tree;
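/*
 * A minimal sketch of the intended lookup pattern (cf. thread_stack_trace()
 * below), assuming the caller takes threads_lock with interrupts disabled:
 *
 *     irq_spinlock_lock(&threads_lock, true);
 *     thread_t *thread = thread_find_by_id(id);
 *     if (thread != NULL) {
 *         // thread cannot disappear while threads_lock is held
 *     }
 *     irq_spinlock_unlock(&threads_lock, true);
 */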
[f761f1eb]99
[da1bafb]100IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
101static thread_id_t last_tid = 0;
[f761f1eb]102
[266294a9]103static slab_cache_t *thread_slab;
[da1bafb]104
[0f81ceb7]105#ifdef CONFIG_FPU
[f76fed4]106slab_cache_t *fpu_context_slab;
107#endif
[266294a9]108
[4e33b6b]109/** Thread wrapper.
[70527f1]110 *
[4e33b6b]111 * This wrapper is provided to ensure that every thread makes a call to
112 * thread_exit() when its implementing function returns.
[f761f1eb]113 *
[22f7769]114 * interrupts_disable() is assumed.
[70527f1]115 *
[f761f1eb]116 */
[e16e036a]117static void cushion(void)
[f761f1eb]118{
[43114c5]119 void (*f)(void *) = THREAD->thread_code;
120 void *arg = THREAD->thread_arg;
[449dc1ed]121 THREAD->last_cycle = get_cycle();
[da1bafb]122
[0313ff0]123 /* This is where each thread wakes up after its creation */
[da1bafb]124 irq_spinlock_unlock(&THREAD->lock, false);
[22f7769]125 interrupts_enable();
[da1bafb]126
[f761f1eb]127 f(arg);
[0313ff0]128
129 /* Accumulate accounting to the task */
[da1bafb]130 irq_spinlock_lock(&THREAD->lock, true);
[62b6d17]131 if (!THREAD->uncounted) {
[a2a00e8]132 thread_update_accounting(true);
133 uint64_t ucycles = THREAD->ucycles;
134 THREAD->ucycles = 0;
135 uint64_t kcycles = THREAD->kcycles;
136 THREAD->kcycles = 0;
[62b6d17]137
[da1bafb]138 irq_spinlock_pass(&THREAD->lock, &TASK->lock);
[a2a00e8]139 TASK->ucycles += ucycles;
140 TASK->kcycles += kcycles;
[da1bafb]141 irq_spinlock_unlock(&TASK->lock, true);
[62b6d17]142 } else
[da1bafb]143 irq_spinlock_unlock(&THREAD->lock, true);
[0313ff0]144
[f761f1eb]145 thread_exit();
[da1bafb]146
147 /* Not reached */
[f761f1eb]148}
149
[da1bafb]150/** Initialization and allocation for thread_t structure
151 *
152 */
153static int thr_constructor(void *obj, unsigned int kmflags)
[266294a9]154{
[da1bafb]155 thread_t *thread = (thread_t *) obj;
156
157 irq_spinlock_initialize(&thread->lock, "thread_t_lock");
158 link_initialize(&thread->rq_link);
159 link_initialize(&thread->wq_link);
160 link_initialize(&thread->th_link);
161
[32fffef0]162 /* call the architecture-specific part of the constructor */
[da1bafb]163 thr_constructor_arch(thread);
[266294a9]164
[0f81ceb7]165#ifdef CONFIG_FPU
[d8431986]166#ifdef CONFIG_FPU_LAZY
[da1bafb]167 thread->saved_fpu_context = NULL;
168#else /* CONFIG_FPU_LAZY */
169 thread->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
170 if (!thread->saved_fpu_context)
[f76fed4]171 return -1;
[da1bafb]172#endif /* CONFIG_FPU_LAZY */
173#endif /* CONFIG_FPU */
174
[38ff925]175 /*
 176 * Allocate the kernel stack from low memory to prevent infinite
 177 * nesting of TLB-misses when accessing the stack from the part of the
 178 * TLB-miss handler written in C.
 179 *
 180 * Note that low memory is safe to use for the stack as it will be
 181 * covered by the kernel identity mapping, which guarantees not to
 182 * nest TLB-misses infinitely (either via some hardware mechanism or
 183 * by the construction of the assembly-language part of the TLB-miss
 184 * handler).
185 *
186 * This restriction can be lifted once each architecture provides
187 * a similar guarantee, for example by locking the kernel stack
188 * in the TLB whenever it is allocated from the high-memory and the
189 * thread is being scheduled to run.
190 */
191 kmflags |= FRAME_LOWMEM;
192 kmflags &= ~FRAME_HIGHMEM;
193
[da1bafb]194 thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
195 if (!thread->kstack) {
[0f81ceb7]196#ifdef CONFIG_FPU
[da1bafb]197 if (thread->saved_fpu_context)
198 slab_free(fpu_context_slab, thread->saved_fpu_context);
[f76fed4]199#endif
[266294a9]200 return -1;
[f76fed4]201 }
[da1bafb]202
[9a1b20c]203#ifdef CONFIG_UDEBUG
[da1bafb]204 mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
[9a1b20c]205#endif
[da1bafb]206
[266294a9]207 return 0;
208}
209
210/** Destruction of thread_t object */
[da1bafb]211static size_t thr_destructor(void *obj)
[266294a9]212{
[da1bafb]213 thread_t *thread = (thread_t *) obj;
214
[32fffef0]215 /* call the architecture-specific part of the destructor */
[da1bafb]216 thr_destructor_arch(thread);
217
218 frame_free(KA2PA(thread->kstack));
219
[0f81ceb7]220#ifdef CONFIG_FPU
[da1bafb]221 if (thread->saved_fpu_context)
222 slab_free(fpu_context_slab, thread->saved_fpu_context);
[f76fed4]223#endif
[da1bafb]224
225 return 1; /* One page freed */
[266294a9]226}
[70527f1]227
228/** Initialize threads
229 *
230 * Initialize kernel threads support.
231 *
232 */
[f761f1eb]233void thread_init(void)
234{
[43114c5]235 THREAD = NULL;
[da1bafb]236
[7217199]237 atomic_set(&nrdy, 0);
[4e33b6b]238 thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
[6f4495f5]239 thr_constructor, thr_destructor, 0);
[da1bafb]240
[0f81ceb7]241#ifdef CONFIG_FPU
[4e33b6b]242 fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
[6f4495f5]243 FPU_CONTEXT_ALIGN, NULL, NULL, 0);
[f76fed4]244#endif
[da1bafb]245
[5dcee525]246 avltree_create(&threads_tree);
[016acbe]247}
[70527f1]248
249/** Make thread ready
250 *
[da1bafb]251 * Switch thread to the ready state.
[70527f1]252 *
[df58e44]253 * @param thread Thread to make ready.
[70527f1]254 *
255 */
[da1bafb]256void thread_ready(thread_t *thread)
[f761f1eb]257{
[da1bafb]258 irq_spinlock_lock(&thread->lock, true);
[f761f1eb]259
[df58e44]260 ASSERT(thread->state != Ready);
[da1bafb]261
262 int i = (thread->priority < RQ_COUNT - 1)
263 ? ++thread->priority : thread->priority;
264
265 cpu_t *cpu = CPU;
266 if (thread->flags & THREAD_FLAG_WIRED) {
267 ASSERT(thread->cpu != NULL);
268 cpu = thread->cpu;
[f761f1eb]269 }
[da1bafb]270 thread->state = Ready;
271
272 irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
[f761f1eb]273
[70527f1]274 /*
[da1bafb]275 * Append thread to respective ready queue
276 * on respective processor.
[f761f1eb]277 */
[da1bafb]278
[55b77d9]279 list_append(&thread->rq_link, &cpu->rq[i].rq);
[da1bafb]280 cpu->rq[i].n++;
281 irq_spinlock_unlock(&(cpu->rq[i].lock), true);
282
[59e07c91]283 atomic_inc(&nrdy);
[da1bafb]284 // FIXME: Why is the avg value not used
285 // avg = atomic_get(&nrdy) / config.cpu_active;
[248fc1a]286 atomic_inc(&cpu->nrdy);
[f761f1eb]287}
288
[70527f1]289/** Create new thread
290 *
291 * Create a new thread.
292 *
[da1bafb]293 * @param func Thread's implementing function.
294 * @param arg Thread's implementing function argument.
295 * @param task Task to which the thread belongs. The caller must
296 * guarantee that the task won't cease to exist during the
297 * call. The task's lock may not be held.
298 * @param flags Thread flags.
299 * @param name Symbolic name (a copy is made).
300 * @param uncounted Thread's accounting doesn't affect accumulated task
301 * accounting.
[70527f1]302 *
[da1bafb]303 * @return New thread's structure on success, NULL on failure.
[70527f1]304 *
305 */
[4e33b6b]306thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
[da1bafb]307 unsigned int flags, const char *name, bool uncounted)
[f761f1eb]308{
[da1bafb]309 thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0);
310 if (!thread)
[2a46e10]311 return NULL;
[aa8d0f7]312
[bb68433]313 /* Not needed, but good for debugging */
[26aafe8]314 memsetb(thread->kstack, STACK_SIZE, 0);
[bb68433]315
[da1bafb]316 irq_spinlock_lock(&tidlock, true);
317 thread->tid = ++last_tid;
318 irq_spinlock_unlock(&tidlock, true);
[bb68433]319
[da1bafb]320 context_save(&thread->saved_context);
321 context_set(&thread->saved_context, FADDR(cushion),
[26aafe8]322 (uintptr_t) thread->kstack, STACK_SIZE);
[bb68433]323
[da1bafb]324 the_initialize((the_t *) thread->kstack);
[bb68433]325
[da1bafb]326 ipl_t ipl = interrupts_disable();
327 thread->saved_context.ipl = interrupts_read();
[bb68433]328 interrupts_restore(ipl);
329
[da1bafb]330 str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
331
332 thread->thread_code = func;
333 thread->thread_arg = arg;
334 thread->ticks = -1;
335 thread->ucycles = 0;
336 thread->kcycles = 0;
337 thread->uncounted = uncounted;
338 thread->priority = -1; /* Start in rq[0] */
339 thread->cpu = NULL;
340 thread->flags = flags;
[43ac0cc]341 thread->nomigrate = 0;
[da1bafb]342 thread->state = Entering;
343
344 timeout_initialize(&thread->sleep_timeout);
345 thread->sleep_interruptible = false;
346 thread->sleep_queue = NULL;
347 thread->timeout_pending = false;
348
349 thread->in_copy_from_uspace = false;
350 thread->in_copy_to_uspace = false;
351
352 thread->interrupted = false;
353 thread->detached = false;
354 waitq_initialize(&thread->join_wq);
355
356 thread->task = task;
357
358 thread->fpu_context_exists = 0;
359 thread->fpu_context_engaged = 0;
360
361 avltree_node_initialize(&thread->threads_tree_node);
362 thread->threads_tree_node.key = (uintptr_t) thread;
[bb68433]363
[9a1b20c]364#ifdef CONFIG_UDEBUG
[5b7a107]365 /* Initialize debugging stuff */
366 thread->btrace = false;
[da1bafb]367 udebug_thread_initialize(&thread->udebug);
[9a1b20c]368#endif
[da1bafb]369
370 /* Might depend on previous initialization */
371 thread_create_arch(thread);
372
[d8431986]373 if (!(flags & THREAD_FLAG_NOATTACH))
[da1bafb]374 thread_attach(thread, task);
375
376 return thread;
[d8431986]377}
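/*
 * A minimal usage sketch, assuming a worker() routine defined by the caller;
 * a thread created with default flags is attached automatically and only
 * needs to be made ready:
 *
 *     thread_t *thread = thread_create(worker, NULL, TASK, 0, "worker", false);
 *     if (thread != NULL)
 *         thread_ready(thread);
 */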
378
379/** Destroy thread memory structure
380 *
381 * Detach thread from all queues, cpus etc. and destroy it.
[da1bafb]382 *
383 * @param thread Thread to be destroyed.
 384 * @param irq_res Indicates whether thread->lock should be unlocked
 385 * with interrupts restored.
[d8431986]386 *
387 */
[da1bafb]388void thread_destroy(thread_t *thread, bool irq_res)
[d8431986]389{
[2d3ddad]390 ASSERT(irq_spinlock_locked(&thread->lock));
[da1bafb]391 ASSERT((thread->state == Exiting) || (thread->state == Lingering));
392 ASSERT(thread->task);
393 ASSERT(thread->cpu);
394
395 irq_spinlock_lock(&thread->cpu->lock, false);
396 if (thread->cpu->fpu_owner == thread)
397 thread->cpu->fpu_owner = NULL;
398 irq_spinlock_unlock(&thread->cpu->lock, false);
399
400 irq_spinlock_pass(&thread->lock, &threads_lock);
401
402 avltree_delete(&threads_tree, &thread->threads_tree_node);
403
404 irq_spinlock_pass(&threads_lock, &thread->task->lock);
405
[d8431986]406 /*
407 * Detach from the containing task.
408 */
[da1bafb]409 list_remove(&thread->th_link);
410 irq_spinlock_unlock(&thread->task->lock, irq_res);
411
[ea7890e7]412 /*
[7ed8530]413 * Drop the reference to the containing task.
[ea7890e7]414 */
[da1bafb]415 task_release(thread->task);
416 slab_free(thread_slab, thread);
[d8431986]417}
418
419/** Make the thread visible to the system.
420 *
421 * Attach the thread structure to the current task and make it visible in the
[5dcee525]422 * threads_tree.
[d8431986]423 *
[da1bafb]424 * @param thread Thread to be attached to the task.
425 * @param task Task to which the thread is to be attached.
426 *
[d8431986]427 */
[da1bafb]428void thread_attach(thread_t *thread, task_t *task)
[d8431986]429{
430 /*
[9a1b20c]431 * Attach to the specified task.
[d8431986]432 */
[da1bafb]433 irq_spinlock_lock(&task->lock, true);
434
[7ed8530]435 /* Hold a reference to the task. */
436 task_hold(task);
[da1bafb]437
[9a1b20c]438 /* Must not count kbox thread into lifecount */
[da1bafb]439 if (thread->flags & THREAD_FLAG_USPACE)
[9a1b20c]440 atomic_inc(&task->lifecount);
[da1bafb]441
[55b77d9]442 list_append(&thread->th_link, &task->threads);
[da1bafb]443
444 irq_spinlock_pass(&task->lock, &threads_lock);
445
[bb68433]446 /*
447 * Register this thread in the system-wide list.
448 */
[da1bafb]449 avltree_insert(&threads_tree, &thread->threads_tree_node);
450 irq_spinlock_unlock(&threads_lock, true);
[f761f1eb]451}
452
[0182a665]453/** Terminate thread.
[70527f1]454 *
[da1bafb]455 * End current thread execution and switch it to the exiting state.
456 * All pending timeouts are executed.
457 *
[70527f1]458 */
[f761f1eb]459void thread_exit(void)
460{
[9a1b20c]461 if (THREAD->flags & THREAD_FLAG_USPACE) {
462#ifdef CONFIG_UDEBUG
463 /* Generate udebug THREAD_E event */
464 udebug_thread_e_event();
[0ac99db]465
466 /*
467 * This thread will not execute any code or system calls from
468 * now on.
469 */
470 udebug_stoppable_begin();
[9a1b20c]471#endif
472 if (atomic_predec(&TASK->lifecount) == 0) {
473 /*
474 * We are the last userspace thread in the task that
475 * still has not exited. With the exception of the
476 * moment the task was created, new userspace threads
477 * can only be created by threads of the same task.
478 * We are safe to perform cleanup.
[da1bafb]479 *
[9a1b20c]480 */
[ea7890e7]481 ipc_cleanup();
[52755f1]482 futex_cleanup();
483 LOG("Cleanup of task %" PRIu64" completed.", TASK->taskid);
[ea7890e7]484 }
485 }
[da1bafb]486
[f761f1eb]487restart:
[da1bafb]488 irq_spinlock_lock(&THREAD->lock, true);
489 if (THREAD->timeout_pending) {
490 /* Busy waiting for timeouts in progress */
491 irq_spinlock_unlock(&THREAD->lock, true);
[f761f1eb]492 goto restart;
493 }
[ea7890e7]494
[43114c5]495 THREAD->state = Exiting;
[da1bafb]496 irq_spinlock_unlock(&THREAD->lock, true);
497
[f761f1eb]498 scheduler();
[da1bafb]499
[874621f]500 /* Not reached */
[da1bafb]501 while (true);
[f761f1eb]502}
503
[43ac0cc]504/** Prevent the current thread from being migrated to another processor. */
505void thread_migration_disable(void)
506{
507 ASSERT(THREAD);
508
509 THREAD->nomigrate++;
510}
511
512/** Allow the current thread to be migrated to another processor. */
513void thread_migration_enable(void)
514{
515 ASSERT(THREAD);
516 ASSERT(THREAD->nomigrate > 0);
517
518 THREAD->nomigrate--;
519}
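/*
 * These calls are meant to bracket code that must not be migrated to
 * another processor; a minimal sketch:
 *
 *     thread_migration_disable();
 *     // ... work that has to stay on the current CPU ...
 *     thread_migration_enable();
 */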
520
[70527f1]521/** Thread sleep
522 *
523 * Suspend execution of the current thread.
524 *
525 * @param sec Number of seconds to sleep.
526 *
527 */
[7f1c620]528void thread_sleep(uint32_t sec)
[f761f1eb]529{
[22e6802]530 /* Sleep in steps of at most 1000 seconds so that the microsecond
 531 count passed to thread_usleep() always fits in uint32_t */
532 while (sec > 0) {
533 uint32_t period = (sec > 1000) ? 1000 : sec;
[da1bafb]534
[22e6802]535 thread_usleep(period * 1000000);
536 sec -= period;
537 }
[f761f1eb]538}
[70527f1]539
[fe19611]540/** Wait for another thread to exit.
541 *
[da1bafb]542 * @param thread Thread to join on exit.
543 * @param usec Timeout in microseconds.
544 * @param flags Mode of operation.
[fe19611]545 *
546 * @return An error code from errno.h or an error code from synch.h.
[da1bafb]547 *
[fe19611]548 */
[da1bafb]549int thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
[fe19611]550{
[da1bafb]551 if (thread == THREAD)
[fe19611]552 return EINVAL;
[da1bafb]553
[fe19611]554 /*
555 * Since thread join can only be called once on an undetached thread,
556 * the thread pointer is guaranteed to be still valid.
557 */
558
[da1bafb]559 irq_spinlock_lock(&thread->lock, true);
560 ASSERT(!thread->detached);
561 irq_spinlock_unlock(&thread->lock, true);
[0182a665]562
[da1bafb]563 return waitq_sleep_timeout(&thread->join_wq, usec, flags);
[fe19611]564}
565
566/** Detach thread.
567 *
[df58e44]568 * Mark the thread as detached. If the thread is already
569 * in the Lingering state, deallocate its resources.
[fe19611]570 *
[da1bafb]571 * @param thread Thread to be detached.
572 *
[fe19611]573 */
[da1bafb]574void thread_detach(thread_t *thread)
[fe19611]575{
576 /*
[31d8e10]577 * Since the thread is expected not to be already detached,
[fe19611]578 * the pointer to it must still be valid.
579 */
[da1bafb]580 irq_spinlock_lock(&thread->lock, true);
581 ASSERT(!thread->detached);
582
583 if (thread->state == Lingering) {
584 /*
585 * Unlock &thread->lock and restore
586 * interrupts in thread_destroy().
587 */
588 thread_destroy(thread, true);
[fe19611]589 return;
590 } else {
[da1bafb]591 thread->detached = true;
[fe19611]592 }
[da1bafb]593
594 irq_spinlock_unlock(&thread->lock, true);
[fe19611]595}
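/*
 * A sketch of the join/detach protocol for an undetached thread, assuming
 * the SYNCH_NO_TIMEOUT and SYNCH_FLAGS_NONE constants from synch.h and a
 * thread created as in the thread_create() sketch above:
 *
 *     (void) thread_join_timeout(thread, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
 *     thread_detach(thread);  // allow its resources to be reclaimed
 */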
596
[70527f1]597/** Thread usleep
598 *
599 * Suspend execution of the current thread.
600 *
601 * @param usec Number of microseconds to sleep.
602 *
603 */
[7f1c620]604void thread_usleep(uint32_t usec)
[f761f1eb]605{
606 waitq_t wq;
[22e6802]607
[f761f1eb]608 waitq_initialize(&wq);
[22e6802]609
[116d1ef4]610 (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
[f761f1eb]611}
612
[b76a2217]613static bool thread_walker(avltree_node_t *node, void *arg)
[5dcee525]614{
[48dcc69]615 bool *additional = (bool *) arg;
[da1bafb]616 thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);
[52755f1]617
[1ba37fa]618 uint64_t ucycles, kcycles;
619 char usuffix, ksuffix;
[da1bafb]620 order_suffix(thread->ucycles, &ucycles, &usuffix);
621 order_suffix(thread->kcycles, &kcycles, &ksuffix);
622
[577f042a]623 char *name;
624 if (str_cmp(thread->name, "uinit") == 0)
625 name = thread->task->name;
626 else
627 name = thread->name;
628
[52755f1]629#ifdef __32_BITS__
[48dcc69]630 if (*additional)
[ae0300b5]631 printf("%-8" PRIu64 " %10p %10p %9" PRIu64 "%c %9" PRIu64 "%c ",
[577f042a]632 thread->tid, thread->thread_code, thread->kstack,
633 ucycles, usuffix, kcycles, ksuffix);
[48dcc69]634 else
[ae0300b5]635 printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 "\n",
[577f042a]636 thread->tid, name, thread, thread_states[thread->state],
[26aafe8]637 thread->task, thread->task->container);
[52755f1]638#endif
[da1bafb]639
[52755f1]640#ifdef __64_BITS__
[48dcc69]641 if (*additional)
[ae0300b5]642 printf("%-8" PRIu64 " %18p %18p\n"
[48dcc69]643 " %9" PRIu64 "%c %9" PRIu64 "%c ",
644 thread->tid, thread->thread_code, thread->kstack,
645 ucycles, usuffix, kcycles, ksuffix);
[5dcee525]646 else
[ae0300b5]647 printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n",
[577f042a]648 thread->tid, name, thread, thread_states[thread->state],
[26aafe8]649 thread->task, thread->task->container);
[48dcc69]650#endif
[da1bafb]651
[48dcc69]652 if (*additional) {
653 if (thread->cpu)
654 printf("%-5u", thread->cpu->id);
655 else
656 printf("none ");
657
658 if (thread->state == Sleeping) {
[52755f1]659#ifdef __32_BITS__
[48dcc69]660 printf(" %10p", thread->sleep_queue);
[52755f1]661#endif
[48dcc69]662
[52755f1]663#ifdef __64_BITS__
[48dcc69]664 printf(" %18p", thread->sleep_queue);
[52755f1]665#endif
[48dcc69]666 }
667
668 printf("\n");
[43b1e86]669 }
[da1bafb]670
[b76a2217]671 return true;
[5dcee525]672}
673
[da1bafb]674/** Print the list of threads and their debug info
[48dcc69]675 *
676 * @param additional Print additional information.
[da1bafb]677 *
678 */
[48dcc69]679void thread_print_list(bool additional)
[55ab0f1]680{
681 /* Messing with thread structures, avoid deadlock */
[da1bafb]682 irq_spinlock_lock(&threads_lock, true);
683
684#ifdef __32_BITS__
[48dcc69]685 if (additional)
[577f042a]686 printf("[id ] [code ] [stack ] [ucycles ] [kcycles ]"
687 " [cpu] [waitqueue]\n");
[48dcc69]688 else
689 printf("[id ] [name ] [address ] [state ] [task ]"
[26aafe8]690 " [ctn]\n");
[52755f1]691#endif
[da1bafb]692
[52755f1]693#ifdef __64_BITS__
[48dcc69]694 if (additional) {
695 printf("[id ] [code ] [stack ]\n"
696 " [ucycles ] [kcycles ] [cpu] [waitqueue ]\n");
697 } else
698 printf("[id ] [name ] [address ] [state ]"
[26aafe8]699 " [task ] [ctn]\n");
[52755f1]700#endif
[da1bafb]701
[48dcc69]702 avltree_walk(&threads_tree, thread_walker, &additional);
[da1bafb]703
704 irq_spinlock_unlock(&threads_lock, true);
[55ab0f1]705}
[9f52563]706
[016acbe]707/** Check whether thread exists.
708 *
709 * Note that threads_lock must be already held and
710 * interrupts must be already disabled.
711 *
[da1bafb]712 * @param thread Pointer to thread.
[016acbe]713 *
 714 * @return True if the thread is known to the system, false otherwise.
[da1bafb]715 *
[016acbe]716 */
[da1bafb]717bool thread_exists(thread_t *thread)
[016acbe]718{
[1d432f9]719 ASSERT(interrupts_disabled());
720 ASSERT(irq_spinlock_locked(&threads_lock));
721
[da1bafb]722 avltree_node_t *node =
723 avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) thread));
[016acbe]724
[5dcee525]725 return node != NULL;
[016acbe]726}
727
[cce6acf]728/** Update accounting of current thread.
729 *
 730 * Note that THREAD->lock must already be held and
 731 * interrupts must already be disabled.
732 *
[da1bafb]733 * @param user True to update user accounting, false for kernel.
734 *
[cce6acf]735 */
[a2a00e8]736void thread_update_accounting(bool user)
[cce6acf]737{
738 uint64_t time = get_cycle();
[1d432f9]739
740 ASSERT(interrupts_disabled());
741 ASSERT(irq_spinlock_locked(&THREAD->lock));
[da1bafb]742
743 if (user)
[a2a00e8]744 THREAD->ucycles += time - THREAD->last_cycle;
[da1bafb]745 else
[a2a00e8]746 THREAD->kcycles += time - THREAD->last_cycle;
[da1bafb]747
[cce6acf]748 THREAD->last_cycle = time;
749}
750
[e1b6742]751static bool thread_search_walker(avltree_node_t *node, void *arg)
752{
753 thread_t *thread =
754 (thread_t *) avltree_get_instance(node, thread_t, threads_tree_node);
755 thread_iterator_t *iterator = (thread_iterator_t *) arg;
756
757 if (thread->tid == iterator->thread_id) {
758 iterator->thread = thread;
759 return false;
760 }
761
762 return true;
763}
764
765/** Find thread structure corresponding to thread ID.
766 *
767 * The threads_lock must be already held by the caller of this function and
768 * interrupts must be disabled.
769 *
770 * @param id Thread ID.
771 *
772 * @return Thread structure address or NULL if there is no such thread ID.
773 *
774 */
775thread_t *thread_find_by_id(thread_id_t thread_id)
776{
[1d432f9]777 ASSERT(interrupts_disabled());
778 ASSERT(irq_spinlock_locked(&threads_lock));
[df58e44]779
[e1b6742]780 thread_iterator_t iterator;
781
782 iterator.thread_id = thread_id;
783 iterator.thread = NULL;
784
785 avltree_walk(&threads_tree, thread_search_walker, (void *) &iterator);
786
787 return iterator.thread;
788}
789
[5b7a107]790#ifdef CONFIG_UDEBUG
791
[df58e44]792void thread_stack_trace(thread_id_t thread_id)
793{
794 irq_spinlock_lock(&threads_lock, true);
795
796 thread_t *thread = thread_find_by_id(thread_id);
797 if (thread == NULL) {
798 printf("No such thread.\n");
799 irq_spinlock_unlock(&threads_lock, true);
800 return;
801 }
802
803 irq_spinlock_lock(&thread->lock, false);
804
805 /*
806 * Schedule a stack trace to be printed
807 * just before the thread is scheduled next.
808 *
 809 * If the thread is sleeping, try to interrupt
 810 * the sleep. Any request for printing a uspace stack
 811 * trace from within the kernel should always be
 812 * considered a last-resort debugging means, so
 813 * forcing the thread's sleep to be interrupted
 814 * is probably justifiable.
815 */
816
817 bool sleeping = false;
818 istate_t *istate = thread->udebug.uspace_state;
819 if (istate != NULL) {
820 printf("Scheduling thread stack trace.\n");
821 thread->btrace = true;
822 if (thread->state == Sleeping)
823 sleeping = true;
824 } else
825 printf("Thread interrupt state not available.\n");
826
827 irq_spinlock_unlock(&thread->lock, false);
828
829 if (sleeping)
830 waitq_interrupt_sleep(thread);
831
832 irq_spinlock_unlock(&threads_lock, true);
833}
[e1b6742]834
[5b7a107]835#endif /* CONFIG_UDEBUG */
[e1b6742]836
[9f52563]837/** Process syscall to create new thread.
838 *
839 */
[96b02eb9]840sysarg_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name,
[7faabb7]841 size_t name_len, thread_id_t *uspace_thread_id)
[9f52563]842{
[24345a5]843 if (name_len > THREAD_NAME_BUFLEN - 1)
[7faabb7]844 name_len = THREAD_NAME_BUFLEN - 1;
[da1bafb]845
846 char namebuf[THREAD_NAME_BUFLEN];
847 int rc = copy_from_uspace(namebuf, uspace_name, name_len);
[e3c762cd]848 if (rc != 0)
[96b02eb9]849 return (sysarg_t) rc;
[da1bafb]850
[b60c582]851 namebuf[name_len] = 0;
[da1bafb]852
[4680ef5]853 /*
854 * In case of failure, kernel_uarg will be deallocated in this function.
855 * In case of success, kernel_uarg will be freed in uinit().
856 */
[da1bafb]857 uspace_arg_t *kernel_uarg =
858 (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
[4680ef5]859
[e3c762cd]860 rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
861 if (rc != 0) {
862 free(kernel_uarg);
[96b02eb9]863 return (sysarg_t) rc;
[e3c762cd]864 }
[da1bafb]865
866 thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
[d8431986]867 THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
[da1bafb]868 if (thread) {
[d8431986]869 if (uspace_thread_id != NULL) {
[da1bafb]870 rc = copy_to_uspace(uspace_thread_id, &thread->tid,
871 sizeof(thread->tid));
[d8431986]872 if (rc != 0) {
873 /*
874 * We have encountered a failure, but the thread
875 * has already been created. We need to undo its
876 * creation now.
877 */
[da1bafb]878
[d8431986]879 /*
[ea7890e7]880 * The new thread structure is initialized, but
881 * is still not visible to the system.
[d8431986]882 * We can safely deallocate it.
883 */
[da1bafb]884 slab_free(thread_slab, thread);
885 free(kernel_uarg);
886
[96b02eb9]887 return (sysarg_t) rc;
[d8431986]888 }
889 }
[da1bafb]890
[9a1b20c]891#ifdef CONFIG_UDEBUG
[13964ef]892 /*
893 * Generate udebug THREAD_B event and attach the thread.
894 * This must be done atomically (with the debug locks held),
895 * otherwise we would either miss some thread or receive
896 * THREAD_B events for threads that already existed
897 * and could be detected with THREAD_READ before.
898 */
[da1bafb]899 udebug_thread_b_event_attach(thread, TASK);
[13964ef]900#else
[da1bafb]901 thread_attach(thread, TASK);
[9a1b20c]902#endif
[da1bafb]903 thread_ready(thread);
904
[d8431986]905 return 0;
[201abde]906 } else
[0f250f9]907 free(kernel_uarg);
[da1bafb]908
[96b02eb9]909 return (sysarg_t) ENOMEM;
[9f52563]910}
911
912/** Process syscall to terminate thread.
913 *
914 */
[96b02eb9]915sysarg_t sys_thread_exit(int uspace_status)
[9f52563]916{
[68091bd]917 thread_exit();
[da1bafb]918
[68091bd]919 /* Unreachable */
920 return 0;
[9f52563]921}
[b45c443]922
[3ce7f082]923/** Syscall for getting TID.
924 *
[201abde]925 * @param uspace_thread_id Userspace address of 8-byte buffer where to store
926 * current thread ID.
927 *
928 * @return 0 on success or an error code from @ref errno.h.
[da1bafb]929 *
[b45c443]930 */
[96b02eb9]931sysarg_t sys_thread_get_id(thread_id_t *uspace_thread_id)
[3ce7f082]932{
933 /*
934 * No need to acquire lock on THREAD because tid
935 * remains constant for the lifespan of the thread.
[da1bafb]936 *
[3ce7f082]937 */
[96b02eb9]938 return (sysarg_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
[201abde]939 sizeof(THREAD->tid));
[3ce7f082]940}
[6f4495f5]941
[d9ece1cb]942/** Syscall wrapper for sleeping. */
[96b02eb9]943sysarg_t sys_thread_usleep(uint32_t usec)
[d9ece1cb]944{
[22e6802]945 thread_usleep(usec);
[d9ece1cb]946 return 0;
947}
948
[7e7b791]949sysarg_t sys_thread_udelay(uint32_t usec)
950{
[8d6c1f1]951 delay(usec);
[7e7b791]952 return 0;
953}
954
[3ce7f082]955/** @}
956 */