source: mainline/kernel/generic/src/proc/thread.c@ da1bafb

Last change on this file since da1bafb was da1bafb, checked in by Martin Decky <martin@…>, 15 years ago

major code revision

  • replace spinlocks taken with interrupts disabled with irq_spinlocks (see the sketch after this header)
  • change spacing (not indentation) to be tab-size independent
  • use unsigned integer types where appropriate (especially bit flags)
  • visual separation
  • remove argument names in function prototypes
  • string changes
  • correct some formatting directives
  • replace various cryptic single-character variables (t, a, m, c, b, etc.) with proper identifiers (thread, task, timeout, as, itm, itc, etc.)
  • unify some assembler constructs
  • unused page table levels are now optimized out at compile time
  • replace several ints (with boolean semantics) with bools
  • use specifically sized types instead of generic types where appropriate (size_t, uint32_t, btree_key_t)
  • improve comments
  • split asserts with conjunction into multiple independent asserts
  • Property mode set to 100644
File size: 20.9 KB
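A minimal before/after sketch of the irq_spinlock conversion named in the first bullet above (some_lock is a hypothetical lock, not code from this file). The old idiom paired interrupts_disable()/interrupts_restore() with a plain spinlock by hand; the irq_spinlock variant takes a second argument telling it whether to disable and restore interrupts itself:

/* Before: spinlock taken with interrupts disabled manually. */
ipl_t ipl = interrupts_disable();
spinlock_lock(&some_lock);
/* ... critical section ... */
spinlock_unlock(&some_lock);
interrupts_restore(ipl);

/* After: the lock tracks the saved interrupt state itself. */
irq_spinlock_lock(&some_lock, true);
/* ... critical section ... */
irq_spinlock_unlock(&some_lock, true);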
[f761f1eb]1/*
[7ed8530]2 * Copyright (c) 2010 Jakub Jermar
[f761f1eb]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[cc73a8a1]29/** @addtogroup genericproc
[b45c443]30 * @{
31 */
32
[9179d0a]33/**
[b45c443]34 * @file
[da1bafb]35 * @brief Thread management functions.
[9179d0a]36 */
37
[f761f1eb]38#include <proc/scheduler.h>
39#include <proc/thread.h>
40#include <proc/task.h>
[0f250f9]41#include <proc/uarg.h>
[f761f1eb]42#include <mm/frame.h>
43#include <mm/page.h>
44#include <arch/asm.h>
[cce6acf]45#include <arch/cycle.h>
[f761f1eb]46#include <arch.h>
47#include <synch/synch.h>
48#include <synch/spinlock.h>
49#include <synch/waitq.h>
50#include <synch/rwlock.h>
51#include <cpu.h>
[e535eeb]52#include <str.h>
[f761f1eb]53#include <context.h>
[5dcee525]54#include <adt/avl.h>
[5c9a08b]55#include <adt/list.h>
[f761f1eb]56#include <time/clock.h>
[b3f8fb7]57#include <time/timeout.h>
[4ffa9e0]58#include <config.h>
59#include <arch/interrupt.h>
[26a8604f]60#include <smp/ipi.h>
[f2ffad4]61#include <arch/faddr.h>
[23684b7]62#include <atomic.h>
[9c0a9b3]63#include <memstr.h>
[55ab0f1]64#include <print.h>
[266294a9]65#include <mm/slab.h>
66#include <debug.h>
[9f52563]67#include <main/uinit.h>
[e3c762cd]68#include <syscall/copy.h>
69#include <errno.h>
[52755f1]70
71
72#ifndef LOADED_PROG_STACK_PAGES_NO
73#define LOADED_PROG_STACK_PAGES_NO 1
74#endif
[f761f1eb]75
[fe19611]76
77/** Thread states */
[a000878c]78const char *thread_states[] = {
[fe19611]79 "Invalid",
80 "Running",
81 "Sleeping",
82 "Ready",
83 "Entering",
84 "Exiting",
[48d14222]85 "Lingering"
[e1b6742]86};
87
88typedef struct {
89 thread_id_t thread_id;
90 thread_t *thread;
91} thread_iterator_t;
[f761f1eb]92
[5dcee525]93/** Lock protecting the threads_tree AVL tree.
[4e33b6b]94 *
 95 * For locking rules, see the declaration thereof.
[da1bafb]96 *
[4e33b6b]97 */
[da1bafb]98IRQ_SPINLOCK_INITIALIZE(threads_lock);
[88169d9]99
[81c0171e]100/** AVL tree of all threads.
[88169d9]101 *
[5dcee525]102 * When a thread is found in the threads_tree AVL tree, it is guaranteed to
[4e33b6b]103 * exist as long as the threads_lock is held.
[da1bafb]104 *
[88169d9]105 */
[da1bafb]106avltree_t threads_tree;
[f761f1eb]107
[da1bafb]108IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
109static thread_id_t last_tid = 0;
[f761f1eb]110
[266294a9]111static slab_cache_t *thread_slab;
[da1bafb]112
[0f81ceb7]113#ifdef CONFIG_FPU
[f76fed4]114slab_cache_t *fpu_context_slab;
115#endif
[266294a9]116
[4e33b6b]117/** Thread wrapper.
[70527f1]118 *
[4e33b6b]119 * This wrapper is provided to ensure that every thread makes a call to
120 * thread_exit() when its implementing function returns.
[f761f1eb]121 *
[22f7769]122 * interrupts_disable() is assumed.
[70527f1]123 *
[f761f1eb]124 */
[e16e036a]125static void cushion(void)
[f761f1eb]126{
[43114c5]127 void (*f)(void *) = THREAD->thread_code;
128 void *arg = THREAD->thread_arg;
[449dc1ed]129 THREAD->last_cycle = get_cycle();
[da1bafb]130
[0313ff0]131 /* This is where each thread wakes up after its creation */
[da1bafb]132 irq_spinlock_unlock(&THREAD->lock, false);
[22f7769]133 interrupts_enable();
[da1bafb]134
[f761f1eb]135 f(arg);
[0313ff0]136
137 /* Accumulate accounting to the task */
[da1bafb]138 irq_spinlock_lock(&THREAD->lock, true);
[62b6d17]139 if (!THREAD->uncounted) {
[a2a00e8]140 thread_update_accounting(true);
141 uint64_t ucycles = THREAD->ucycles;
142 THREAD->ucycles = 0;
143 uint64_t kcycles = THREAD->kcycles;
144 THREAD->kcycles = 0;
[62b6d17]145
[da1bafb]146 irq_spinlock_pass(&THREAD->lock, &TASK->lock);
[a2a00e8]147 TASK->ucycles += ucycles;
148 TASK->kcycles += kcycles;
[da1bafb]149 irq_spinlock_unlock(&TASK->lock, true);
[62b6d17]150 } else
[da1bafb]151 irq_spinlock_unlock(&THREAD->lock, true);
[0313ff0]152
[f761f1eb]153 thread_exit();
[da1bafb]154
155 /* Not reached */
[f761f1eb]156}
157
[da1bafb]158/** Initialization and allocation for thread_t structure
159 *
160 */
161static int thr_constructor(void *obj, unsigned int kmflags)
[266294a9]162{
[da1bafb]163 thread_t *thread = (thread_t *) obj;
164
165 irq_spinlock_initialize(&thread->lock, "thread_t_lock");
166 link_initialize(&thread->rq_link);
167 link_initialize(&thread->wq_link);
168 link_initialize(&thread->th_link);
169
[32fffef0]170 /* call the architecture-specific part of the constructor */
[da1bafb]171 thr_constructor_arch(thread);
[266294a9]172
[0f81ceb7]173#ifdef CONFIG_FPU
[d8431986]174#ifdef CONFIG_FPU_LAZY
[da1bafb]175 thread->saved_fpu_context = NULL;
176#else /* CONFIG_FPU_LAZY */
177 thread->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
178 if (!thread->saved_fpu_context)
[f76fed4]179 return -1;
[da1bafb]180#endif /* CONFIG_FPU_LAZY */
181#endif /* CONFIG_FPU */
182
183 thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
184 if (!thread->kstack) {
[0f81ceb7]185#ifdef CONFIG_FPU
[da1bafb]186 if (thread->saved_fpu_context)
187 slab_free(fpu_context_slab, thread->saved_fpu_context);
[f76fed4]188#endif
[266294a9]189 return -1;
[f76fed4]190 }
[da1bafb]191
[9a1b20c]192#ifdef CONFIG_UDEBUG
[da1bafb]193 mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
[9a1b20c]194#endif
[da1bafb]195
[266294a9]196 return 0;
197}
198
199/** Destruction of thread_t object */
[da1bafb]200static size_t thr_destructor(void *obj)
[266294a9]201{
[da1bafb]202 thread_t *thread = (thread_t *) obj;
203
[32fffef0]204 /* call the architecture-specific part of the destructor */
[da1bafb]205 thr_destructor_arch(thread);
206
207 frame_free(KA2PA(thread->kstack));
208
[0f81ceb7]209#ifdef CONFIG_FPU
[da1bafb]210 if (thread->saved_fpu_context)
211 slab_free(fpu_context_slab, thread->saved_fpu_context);
[f76fed4]212#endif
[da1bafb]213
214 return 1; /* One page freed */
[266294a9]215}
[70527f1]216
217/** Initialize threads
218 *
219 * Initialize kernel threads support.
220 *
221 */
[f761f1eb]222void thread_init(void)
223{
[43114c5]224 THREAD = NULL;
[da1bafb]225
[7217199]226 atomic_set(&nrdy, 0);
[4e33b6b]227 thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
[6f4495f5]228 thr_constructor, thr_destructor, 0);
[da1bafb]229
[0f81ceb7]230#ifdef CONFIG_FPU
[4e33b6b]231 fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
[6f4495f5]232 FPU_CONTEXT_ALIGN, NULL, NULL, 0);
[f76fed4]233#endif
[da1bafb]234
[5dcee525]235 avltree_create(&threads_tree);
[016acbe]236}
[70527f1]237
238/** Make thread ready
239 *
[da1bafb]240 * Switch thread to the ready state.
[70527f1]241 *
 242 * @param thread Thread to make ready.
243 *
244 */
[da1bafb]245void thread_ready(thread_t *thread)
[f761f1eb]246{
[da1bafb]247 irq_spinlock_lock(&thread->lock, true);
[f761f1eb]248
[da1bafb]249 ASSERT(!(thread->state == Ready));
250
251 int i = (thread->priority < RQ_COUNT - 1)
252 ? ++thread->priority : thread->priority;
253
254 cpu_t *cpu = CPU;
255 if (thread->flags & THREAD_FLAG_WIRED) {
256 ASSERT(thread->cpu != NULL);
257 cpu = thread->cpu;
[f761f1eb]258 }
[da1bafb]259 thread->state = Ready;
260
261 irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
[f761f1eb]262
[70527f1]263 /*
[da1bafb]264 * Append thread to respective ready queue
265 * on respective processor.
[f761f1eb]266 */
[da1bafb]267
268 list_append(&thread->rq_link, &cpu->rq[i].rq_head);
269 cpu->rq[i].n++;
270 irq_spinlock_unlock(&(cpu->rq[i].lock), true);
271
[59e07c91]272 atomic_inc(&nrdy);
[da1bafb]273 // FIXME: Why is the avg value not used
274 // avg = atomic_get(&nrdy) / config.cpu_active;
[248fc1a]275 atomic_inc(&cpu->nrdy);
[f761f1eb]276}
277
[70527f1]278/** Create new thread
279 *
280 * Create a new thread.
281 *
[da1bafb]282 * @param func Thread's implementing function.
283 * @param arg Thread's implementing function argument.
284 * @param task Task to which the thread belongs. The caller must
285 * guarantee that the task won't cease to exist during the
286 * call. The task's lock may not be held.
287 * @param flags Thread flags.
288 * @param name Symbolic name (a copy is made).
289 * @param uncounted Thread's accounting doesn't affect accumulated task
290 * accounting.
[70527f1]291 *
[da1bafb]292 * @return New thread's structure on success, NULL on failure.
[70527f1]293 *
294 */
[4e33b6b]295thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
[da1bafb]296 unsigned int flags, const char *name, bool uncounted)
[f761f1eb]297{
[da1bafb]298 thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0);
299 if (!thread)
[2a46e10]300 return NULL;
[aa8d0f7]301
[bb68433]302 /* Not needed, but good for debugging */
[da1bafb]303 memsetb(thread->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
[bb68433]304
[da1bafb]305 irq_spinlock_lock(&tidlock, true);
306 thread->tid = ++last_tid;
307 irq_spinlock_unlock(&tidlock, true);
[bb68433]308
[da1bafb]309 context_save(&thread->saved_context);
310 context_set(&thread->saved_context, FADDR(cushion),
311 (uintptr_t) thread->kstack, THREAD_STACK_SIZE);
[bb68433]312
[da1bafb]313 the_initialize((the_t *) thread->kstack);
[bb68433]314
[da1bafb]315 ipl_t ipl = interrupts_disable();
316 thread->saved_context.ipl = interrupts_read();
[bb68433]317 interrupts_restore(ipl);
318
[da1bafb]319 str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
320
321 thread->thread_code = func;
322 thread->thread_arg = arg;
323 thread->ticks = -1;
324 thread->ucycles = 0;
325 thread->kcycles = 0;
326 thread->uncounted = uncounted;
327 thread->priority = -1; /* Start in rq[0] */
328 thread->cpu = NULL;
329 thread->flags = flags;
330 thread->state = Entering;
331 thread->call_me = NULL;
332 thread->call_me_with = NULL;
333
334 timeout_initialize(&thread->sleep_timeout);
335 thread->sleep_interruptible = false;
336 thread->sleep_queue = NULL;
337 thread->timeout_pending = false;
338
339 thread->in_copy_from_uspace = false;
340 thread->in_copy_to_uspace = false;
341
342 thread->interrupted = false;
343 thread->detached = false;
344 waitq_initialize(&thread->join_wq);
345
346 thread->rwlock_holder_type = RWLOCK_NONE;
347
348 thread->task = task;
349
350 thread->fpu_context_exists = 0;
351 thread->fpu_context_engaged = 0;
352
353 avltree_node_initialize(&thread->threads_tree_node);
354 thread->threads_tree_node.key = (uintptr_t) thread;
[bb68433]355
[9a1b20c]356#ifdef CONFIG_UDEBUG
357 /* Init debugging stuff */
[da1bafb]358 udebug_thread_initialize(&thread->udebug);
[9a1b20c]359#endif
[da1bafb]360
361 /* Might depend on previous initialization */
362 thread_create_arch(thread);
363
[d8431986]364 if (!(flags & THREAD_FLAG_NOATTACH))
[da1bafb]365 thread_attach(thread, task);
366
367 return thread;
[d8431986]368}
369
370/** Destroy thread memory structure
371 *
372 * Detach thread from all queues, cpus etc. and destroy it.
[da1bafb]373 * Assume thread->lock is held!
374 *
375 * @param thread Thread to be destroyed.
 376 * @param irq_res Indicates whether it should unlock thread->lock
377 * in interrupts-restore mode.
[d8431986]378 *
379 */
[da1bafb]380void thread_destroy(thread_t *thread, bool irq_res)
[d8431986]381{
[da1bafb]382 ASSERT((thread->state == Exiting) || (thread->state == Lingering));
383 ASSERT(thread->task);
384 ASSERT(thread->cpu);
385
386 irq_spinlock_lock(&thread->cpu->lock, false);
387 if (thread->cpu->fpu_owner == thread)
388 thread->cpu->fpu_owner = NULL;
389 irq_spinlock_unlock(&thread->cpu->lock, false);
390
391 irq_spinlock_pass(&thread->lock, &threads_lock);
392
393 avltree_delete(&threads_tree, &thread->threads_tree_node);
394
395 irq_spinlock_pass(&threads_lock, &thread->task->lock);
396
[d8431986]397 /*
398 * Detach from the containing task.
399 */
[da1bafb]400 list_remove(&thread->th_link);
401 irq_spinlock_unlock(&thread->task->lock, irq_res);
402
[ea7890e7]403 /*
[7ed8530]404 * Drop the reference to the containing task.
[ea7890e7]405 */
[da1bafb]406 task_release(thread->task);
407 slab_free(thread_slab, thread);
[d8431986]408}
409
410/** Make the thread visible to the system.
411 *
 412 * Attach the thread structure to the specified task and make it visible in the
[5dcee525]413 * threads_tree.
[d8431986]414 *
[da1bafb]415 * @param thread Thread to be attached to the task.
416 * @param task Task to which the thread is to be attached.
417 *
[d8431986]418 */
[da1bafb]419void thread_attach(thread_t *thread, task_t *task)
[d8431986]420{
421 /*
[9a1b20c]422 * Attach to the specified task.
[d8431986]423 */
[da1bafb]424 irq_spinlock_lock(&task->lock, true);
425
[7ed8530]426 /* Hold a reference to the task. */
427 task_hold(task);
[da1bafb]428
[9a1b20c]429 /* Must not count kbox thread into lifecount */
[da1bafb]430 if (thread->flags & THREAD_FLAG_USPACE)
[9a1b20c]431 atomic_inc(&task->lifecount);
[da1bafb]432
433 list_append(&thread->th_link, &task->th_head);
434
435 irq_spinlock_pass(&task->lock, &threads_lock);
436
[bb68433]437 /*
438 * Register this thread in the system-wide list.
439 */
[da1bafb]440 avltree_insert(&threads_tree, &thread->threads_tree_node);
441 irq_spinlock_unlock(&threads_lock, true);
[f761f1eb]442}
443
[0182a665]444/** Terminate thread.
[70527f1]445 *
[da1bafb]446 * End current thread execution and switch it to the exiting state.
447 * All pending timeouts are executed.
448 *
[70527f1]449 */
[f761f1eb]450void thread_exit(void)
451{
[9a1b20c]452 if (THREAD->flags & THREAD_FLAG_USPACE) {
453#ifdef CONFIG_UDEBUG
454 /* Generate udebug THREAD_E event */
455 udebug_thread_e_event();
456#endif
457 if (atomic_predec(&TASK->lifecount) == 0) {
458 /*
459 * We are the last userspace thread in the task that
460 * still has not exited. With the exception of the
461 * moment the task was created, new userspace threads
462 * can only be created by threads of the same task.
463 * We are safe to perform cleanup.
[da1bafb]464 *
[9a1b20c]465 */
[ea7890e7]466 ipc_cleanup();
[52755f1]467 futex_cleanup();
468 LOG("Cleanup of task %" PRIu64" completed.", TASK->taskid);
[ea7890e7]469 }
470 }
[da1bafb]471
[f761f1eb]472restart:
[da1bafb]473 irq_spinlock_lock(&THREAD->lock, true);
474 if (THREAD->timeout_pending) {
475 /* Busy waiting for timeouts in progress */
476 irq_spinlock_unlock(&THREAD->lock, true);
[f761f1eb]477 goto restart;
478 }
[ea7890e7]479
[43114c5]480 THREAD->state = Exiting;
[da1bafb]481 irq_spinlock_unlock(&THREAD->lock, true);
482
[f761f1eb]483 scheduler();
[da1bafb]484
[874621f]485 /* Not reached */
[da1bafb]486 while (true);
[f761f1eb]487}
488
[70527f1]489/** Thread sleep
490 *
491 * Suspend execution of the current thread.
492 *
493 * @param sec Number of seconds to sleep.
494 *
495 */
[7f1c620]496void thread_sleep(uint32_t sec)
[f761f1eb]497{
[22e6802]498 /* Sleep in 1000-second steps to support the full argument
 499 range; sec * 1000000 would overflow the 32-bit usec argument */
500 while (sec > 0) {
501 uint32_t period = (sec > 1000) ? 1000 : sec;
[da1bafb]502
[22e6802]503 thread_usleep(period * 1000000);
504 sec -= period;
505 }
[f761f1eb]506}
[70527f1]507
[fe19611]508/** Wait for another thread to exit.
509 *
[da1bafb]510 * @param thread Thread to join on exit.
511 * @param usec Timeout in microseconds.
512 * @param flags Mode of operation.
[fe19611]513 *
514 * @return An error code from errno.h or an error code from synch.h.
[da1bafb]515 *
[fe19611]516 */
[da1bafb]517int thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
[fe19611]518{
[da1bafb]519 if (thread == THREAD)
[fe19611]520 return EINVAL;
[da1bafb]521
[fe19611]522 /*
523 * Since thread join can only be called once on an undetached thread,
524 * the thread pointer is guaranteed to be still valid.
525 */
526
[da1bafb]527 irq_spinlock_lock(&thread->lock, true);
528 ASSERT(!thread->detached);
529 irq_spinlock_unlock(&thread->lock, true);
[0182a665]530
[da1bafb]531 return waitq_sleep_timeout(&thread->join_wq, usec, flags);
[fe19611]532}
533
534/** Detach thread.
535 *
[48d14222]536 * Mark the thread as detached. If the thread is already in the Lingering
 537 * state, deallocate its resources.
[fe19611]538 *
[da1bafb]539 * @param thread Thread to be detached.
540 *
[fe19611]541 */
[da1bafb]542void thread_detach(thread_t *thread)
[fe19611]543{
544 /*
[31d8e10]545 * Since the thread is expected not to be already detached,
[fe19611]546 * the pointer to it must still be valid.
547 */
[da1bafb]548 irq_spinlock_lock(&thread->lock, true);
549 ASSERT(!thread->detached);
550
551 if (thread->state == Lingering) {
552 /*
553 * Unlock &thread->lock and restore
554 * interrupts in thread_destroy().
555 */
556 thread_destroy(thread, true);
[fe19611]557 return;
558 } else {
[da1bafb]559 thread->detached = true;
[fe19611]560 }
[da1bafb]561
562 irq_spinlock_unlock(&thread->lock, true);
[fe19611]563}
564
[70527f1]565/** Thread usleep
566 *
567 * Suspend execution of the current thread.
568 *
569 * @param usec Number of microseconds to sleep.
570 *
571 */
[7f1c620]572void thread_usleep(uint32_t usec)
[f761f1eb]573{
574 waitq_t wq;
[22e6802]575
[f761f1eb]576 waitq_initialize(&wq);
[22e6802]577
[116d1ef4]578 (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
[f761f1eb]579}
580
[70527f1]581/** Register thread out-of-context invocation
582 *
583 * Register a function and its argument to be executed
[da1bafb]584 * on the next context switch to the current thread. Must
 585 * be called with interrupts disabled.
[70527f1]586 *
587 * @param call_me Out-of-context function.
588 * @param call_me_with Out-of-context function argument.
589 *
590 */
[f761f1eb]591void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
592{
[da1bafb]593 irq_spinlock_lock(&THREAD->lock, false);
[43114c5]594 THREAD->call_me = call_me;
595 THREAD->call_me_with = call_me_with;
[da1bafb]596 irq_spinlock_unlock(&THREAD->lock, false);
[f761f1eb]597}
[55ab0f1]598
[b76a2217]599static bool thread_walker(avltree_node_t *node, void *arg)
[5dcee525]600{
[da1bafb]601 thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);
[52755f1]602
[1ba37fa]603 uint64_t ucycles, kcycles;
604 char usuffix, ksuffix;
[da1bafb]605 order_suffix(thread->ucycles, &ucycles, &usuffix);
606 order_suffix(thread->kcycles, &kcycles, &ksuffix);
607
[52755f1]608#ifdef __32_BITS__
[07640dfd]609 printf("%-6" PRIu64" %-10s %10p %-8s %10p %-3" PRIu32 " %10p %10p %9"
[da1bafb]610 PRIu64 "%c %9" PRIu64 "%c ", thread->tid, thread->name, thread,
611 thread_states[thread->state], thread->task, thread->task->context,
612 thread->thread_code, thread->kstack, ucycles, usuffix, kcycles, ksuffix);
[52755f1]613#endif
[da1bafb]614
[52755f1]615#ifdef __64_BITS__
[07640dfd]616 printf("%-6" PRIu64" %-10s %18p %-8s %18p %-3" PRIu32 " %18p %18p %9"
[da1bafb]617 PRIu64 "%c %9" PRIu64 "%c ", thread->tid, thread->name, thread,
618 thread_states[thread->state], thread->task, thread->task->context,
619 thread->thread_code, thread->kstack, ucycles, usuffix, kcycles, ksuffix);
[52755f1]620#endif
[da1bafb]621
622 if (thread->cpu)
623 printf("%-4u", thread->cpu->id);
[5dcee525]624 else
625 printf("none");
[da1bafb]626
627 if (thread->state == Sleeping) {
[52755f1]628#ifdef __32_BITS__
[da1bafb]629 printf(" %10p", thread->sleep_queue);
[52755f1]630#endif
[da1bafb]631
[52755f1]632#ifdef __64_BITS__
[da1bafb]633 printf(" %18p", thread->sleep_queue);
[52755f1]634#endif
[43b1e86]635 }
[da1bafb]636
[5dcee525]637 printf("\n");
[da1bafb]638
[b76a2217]639 return true;
[5dcee525]640}
641
[da1bafb]642/** Print list of threads debug info
643 *
644 */
[55ab0f1]645void thread_print_list(void)
646{
647 /* Messing with thread structures, avoid deadlock */
[da1bafb]648 irq_spinlock_lock(&threads_lock, true);
649
650#ifdef __32_BITS__
[52755f1]651 printf("tid name address state task "
[07640dfd]652 "ctx code stack ucycles kcycles cpu "
[52755f1]653 "waitqueue\n");
654 printf("------ ---------- ---------- -------- ---------- "
[07640dfd]655 "--- ---------- ---------- ---------- ---------- ---- "
[52755f1]656 "----------\n");
657#endif
[da1bafb]658
[52755f1]659#ifdef __64_BITS__
660 printf("tid name address state task "
[07640dfd]661 "ctx code stack ucycles kcycles cpu "
[52755f1]662 "waitqueue\n");
663 printf("------ ---------- ------------------ -------- ------------------ "
[07640dfd]664 "--- ------------------ ------------------ ---------- ---------- ---- "
[52755f1]665 "------------------\n");
666#endif
[da1bafb]667
[b76a2217]668 avltree_walk(&threads_tree, thread_walker, NULL);
[da1bafb]669
670 irq_spinlock_unlock(&threads_lock, true);
[55ab0f1]671}
[9f52563]672
[016acbe]673/** Check whether thread exists.
674 *
675 * Note that threads_lock must be already held and
676 * interrupts must be already disabled.
677 *
[da1bafb]678 * @param thread Pointer to thread.
[016acbe]679 *
 680 * @return True if the thread is known to the system, false otherwise.
[da1bafb]681 *
[016acbe]682 */
[da1bafb]683bool thread_exists(thread_t *thread)
[016acbe]684{
[da1bafb]685 avltree_node_t *node =
686 avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) thread));
[016acbe]687
[5dcee525]688 return node != NULL;
[016acbe]689}
690
[cce6acf]691/** Update accounting of current thread.
692 *
693 * Note that thread_lock on THREAD must be already held and
694 * interrupts must be already disabled.
695 *
[da1bafb]696 * @param user True to update user accounting, false for kernel.
697 *
[cce6acf]698 */
[a2a00e8]699void thread_update_accounting(bool user)
[cce6acf]700{
701 uint64_t time = get_cycle();
[da1bafb]702
703 if (user)
[a2a00e8]704 THREAD->ucycles += time - THREAD->last_cycle;
[da1bafb]705 else
[a2a00e8]706 THREAD->kcycles += time - THREAD->last_cycle;
[da1bafb]707
[cce6acf]708 THREAD->last_cycle = time;
709}
710
[e1b6742]711static bool thread_search_walker(avltree_node_t *node, void *arg)
712{
713 thread_t *thread =
714 (thread_t *) avltree_get_instance(node, thread_t, threads_tree_node);
715 thread_iterator_t *iterator = (thread_iterator_t *) arg;
716
717 if (thread->tid == iterator->thread_id) {
718 iterator->thread = thread;
719 return false;
720 }
721
722 return true;
723}
724
725/** Find thread structure corresponding to thread ID.
726 *
727 * The threads_lock must be already held by the caller of this function and
728 * interrupts must be disabled.
729 *
730 * @param id Thread ID.
731 *
732 * @return Thread structure address or NULL if there is no such thread ID.
733 *
734 */
735thread_t *thread_find_by_id(thread_id_t thread_id)
736{
737 thread_iterator_t iterator;
738
739 iterator.thread_id = thread_id;
740 iterator.thread = NULL;
741
742 avltree_walk(&threads_tree, thread_search_walker, (void *) &iterator);
743
744 return iterator.thread;
745}
746
747
[9f52563]748/** Process syscall to create new thread.
749 *
750 */
[f6d2c81]751unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name,
[7faabb7]752 size_t name_len, thread_id_t *uspace_thread_id)
[9f52563]753{
[24345a5]754 if (name_len > THREAD_NAME_BUFLEN - 1)
[7faabb7]755 name_len = THREAD_NAME_BUFLEN - 1;
[da1bafb]756
757 char namebuf[THREAD_NAME_BUFLEN];
758 int rc = copy_from_uspace(namebuf, uspace_name, name_len);
[e3c762cd]759 if (rc != 0)
[7f1c620]760 return (unative_t) rc;
[da1bafb]761
[b60c582]762 namebuf[name_len] = 0;
[da1bafb]763
[4680ef5]764 /*
765 * In case of failure, kernel_uarg will be deallocated in this function.
766 * In case of success, kernel_uarg will be freed in uinit().
[da1bafb]767 *
[4680ef5]768 */
[da1bafb]769 uspace_arg_t *kernel_uarg =
770 (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
[4680ef5]771
[e3c762cd]772 rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
773 if (rc != 0) {
774 free(kernel_uarg);
[7f1c620]775 return (unative_t) rc;
[e3c762cd]776 }
[da1bafb]777
778 thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
[d8431986]779 THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
[da1bafb]780 if (thread) {
[d8431986]781 if (uspace_thread_id != NULL) {
[da1bafb]782 rc = copy_to_uspace(uspace_thread_id, &thread->tid,
783 sizeof(thread->tid));
[d8431986]784 if (rc != 0) {
785 /*
786 * We have encountered a failure, but the thread
787 * has already been created. We need to undo its
788 * creation now.
[da1bafb]789 *
[d8431986]790 */
[da1bafb]791
[d8431986]792 /*
[ea7890e7]793 * The new thread structure is initialized, but
794 * is still not visible to the system.
[d8431986]795 * We can safely deallocate it.
796 */
[da1bafb]797 slab_free(thread_slab, thread);
798 free(kernel_uarg);
799
[d8431986]800 return (unative_t) rc;
801 }
802 }
[da1bafb]803
[9a1b20c]804#ifdef CONFIG_UDEBUG
[13964ef]805 /*
806 * Generate udebug THREAD_B event and attach the thread.
807 * This must be done atomically (with the debug locks held),
808 * otherwise we would either miss some thread or receive
809 * THREAD_B events for threads that already existed
810 * and could be detected with THREAD_READ before.
[da1bafb]811 *
[13964ef]812 */
[da1bafb]813 udebug_thread_b_event_attach(thread, TASK);
[13964ef]814#else
[da1bafb]815 thread_attach(thread, TASK);
[9a1b20c]816#endif
[da1bafb]817 thread_ready(thread);
818
[d8431986]819 return 0;
[201abde]820 } else
[0f250f9]821 free(kernel_uarg);
[da1bafb]822
[7f1c620]823 return (unative_t) ENOMEM;
[9f52563]824}
825
826/** Process syscall to terminate thread.
827 *
828 */
[7f1c620]829unative_t sys_thread_exit(int uspace_status)
[9f52563]830{
[68091bd]831 thread_exit();
[da1bafb]832
[68091bd]833 /* Unreachable */
834 return 0;
[9f52563]835}
[b45c443]836
[3ce7f082]837/** Syscall for getting TID.
838 *
[201abde]839 * @param uspace_thread_id Userspace address of 8-byte buffer where to store
840 * current thread ID.
841 *
842 * @return 0 on success or an error code from @ref errno.h.
[da1bafb]843 *
[b45c443]844 */
[201abde]845unative_t sys_thread_get_id(thread_id_t *uspace_thread_id)
[3ce7f082]846{
847 /*
848 * No need to acquire lock on THREAD because tid
849 * remains constant for the lifespan of the thread.
[da1bafb]850 *
[3ce7f082]851 */
[201abde]852 return (unative_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
853 sizeof(THREAD->tid));
[3ce7f082]854}
[6f4495f5]855
[d9ece1cb]856/** Syscall wrapper for sleeping. */
857unative_t sys_thread_usleep(uint32_t usec)
858{
[22e6802]859 thread_usleep(usec);
[d9ece1cb]860 return 0;
861}
862
[3ce7f082]863/** @}
864 */