/*
 * Copyright (c) 2010 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Thread management functions.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch/cycle.h>
#include <arch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <cpu.h>
#include <str.h>
#include <context.h>
#include <adt/avl.h>
#include <adt/list.h>
#include <time/clock.h>
#include <time/timeout.h>
#include <time/delay.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <memstr.h>
#include <print.h>
#include <mm/slab.h>
#include <debug.h>
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>

/** Thread states */
const char *thread_states[] = {
	"Invalid",
	"Running",
	"Sleeping",
	"Ready",
	"Entering",
	"Exiting",
	"Lingering"
};

typedef struct {
	thread_id_t thread_id;
	thread_t *thread;
} thread_iterator_t;

/** Lock protecting the threads_tree AVL tree.
 *
 * For locking rules, see the declaration thereof.
 *
 */
IRQ_SPINLOCK_INITIALIZE(threads_lock);

/** AVL tree of all threads.
 *
 * When a thread is found in the threads_tree AVL tree, it is guaranteed to
 * exist as long as the threads_lock is held.
 *
 */
avltree_t threads_tree;

IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
static thread_id_t last_tid = 0;

static slab_cache_t *thread_slab;

#ifdef CONFIG_FPU
slab_cache_t *fpu_context_slab;
#endif

/** Thread wrapper.
 *
 * This wrapper is provided to ensure that every thread makes a call to
 * thread_exit() when its implementing function returns.
 *
 * interrupts_disable() is assumed.
 *
 */
static void cushion(void)
{
	void (*f)(void *) = THREAD->thread_code;
	void *arg = THREAD->thread_arg;
	THREAD->last_cycle = get_cycle();

	/* This is where each thread wakes up after its creation */
	irq_spinlock_unlock(&THREAD->lock, false);
	interrupts_enable();

	f(arg);

	/* Accumulate accounting to the task */
	irq_spinlock_lock(&THREAD->lock, true);
	if (!THREAD->uncounted) {
		thread_update_accounting(true);
		uint64_t ucycles = THREAD->ucycles;
		THREAD->ucycles = 0;
		uint64_t kcycles = THREAD->kcycles;
		THREAD->kcycles = 0;

		irq_spinlock_pass(&THREAD->lock, &TASK->lock);
		TASK->ucycles += ucycles;
		TASK->kcycles += kcycles;
		irq_spinlock_unlock(&TASK->lock, true);
	} else
		irq_spinlock_unlock(&THREAD->lock, true);

	thread_exit();

	/* Not reached */
}

/** Initialization and allocation for the thread_t structure
 *
 */
static int thr_constructor(void *obj, unsigned int kmflags)
{
	thread_t *thread = (thread_t *) obj;

	irq_spinlock_initialize(&thread->lock, "thread_t_lock");
	link_initialize(&thread->rq_link);
	link_initialize(&thread->wq_link);
	link_initialize(&thread->th_link);

	/* Call the architecture-specific part of the constructor */
	thr_constructor_arch(thread);

#ifdef CONFIG_FPU
#ifdef CONFIG_FPU_LAZY
	thread->saved_fpu_context = NULL;
#else /* CONFIG_FPU_LAZY */
	thread->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
	if (!thread->saved_fpu_context)
		return -1;
#endif /* CONFIG_FPU_LAZY */
#endif /* CONFIG_FPU */

	/*
	 * Allocate the kernel stack from low memory to prevent an infinite
	 * nesting of TLB-misses when accessing the stack from the part of the
	 * TLB-miss handler written in C.
	 *
	 * Note that low memory is safe to be used for the stack as it will be
	 * covered by the kernel identity mapping, which guarantees not to
	 * nest TLB-misses infinitely (either via some hardware mechanism or
	 * by the construction of the assembly-language part of the TLB-miss
	 * handler).
	 *
	 * This restriction can be lifted once each architecture provides
	 * a similar guarantee, for example, by locking the kernel stack
	 * in the TLB whenever it is allocated from high memory and the
	 * thread is being scheduled to run.
	 */
	kmflags |= FRAME_LOWMEM;
	kmflags &= ~FRAME_HIGHMEM;

	thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
	if (!thread->kstack) {
#ifdef CONFIG_FPU
		if (thread->saved_fpu_context)
			slab_free(fpu_context_slab, thread->saved_fpu_context);
#endif
		return -1;
	}

#ifdef CONFIG_UDEBUG
	mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
#endif

	return 0;
}

/** Destruction of thread_t object */
static size_t thr_destructor(void *obj)
{
	thread_t *thread = (thread_t *) obj;

	/* Call the architecture-specific part of the destructor */
	thr_destructor_arch(thread);

	frame_free(KA2PA(thread->kstack));

#ifdef CONFIG_FPU
	if (thread->saved_fpu_context)
		slab_free(fpu_context_slab, thread->saved_fpu_context);
#endif

	return 1;  /* One page freed */
}

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
	THREAD = NULL;

	atomic_set(&nrdy, 0);
	thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
	    thr_constructor, thr_destructor, 0);

#ifdef CONFIG_FPU
	fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
	    FPU_CONTEXT_ALIGN, NULL, NULL, 0);
#endif

	avltree_create(&threads_tree);
}

/** Make thread ready
 *
 * Switch thread to the ready state.
 *
 * @param thread Thread to make ready.
 *
 */
void thread_ready(thread_t *thread)
{
	irq_spinlock_lock(&thread->lock, true);

	ASSERT(thread->state != Ready);

	int i = (thread->priority < RQ_COUNT - 1)
	    ? ++thread->priority : thread->priority;

	cpu_t *cpu = CPU;
	if (thread->flags & THREAD_FLAG_WIRED) {
		ASSERT(thread->cpu != NULL);
		cpu = thread->cpu;
	}
	thread->state = Ready;

	irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));

	/*
	 * Append thread to the respective ready queue
	 * on the respective processor.
	 */
	list_append(&thread->rq_link, &cpu->rq[i].rq);
	cpu->rq[i].n++;
	irq_spinlock_unlock(&(cpu->rq[i].lock), true);

	atomic_inc(&nrdy);
	// FIXME: Why is the avg value not used?
	// avg = atomic_get(&nrdy) / config.cpu_active;
	atomic_inc(&cpu->nrdy);
}

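/*
 * Editorial note (an observation, not from the original source): since
 * thread_ready() increments thread->priority (up to RQ_COUNT - 1) before
 * enqueueing, a thread that is repeatedly preempted and re-readied drifts
 * toward the lower-priority run queues. A freshly created thread starts
 * with priority -1 (see thread_create() below), so its first
 * thread_ready() lands it in rq[0].
 */
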
/** Create new thread
 *
 * Create a new thread.
 *
 * @param func      Thread's implementing function.
 * @param arg       Thread's implementing function argument.
 * @param task      Task to which the thread belongs. The caller must
 *                  guarantee that the task won't cease to exist during the
 *                  call. The task's lock may not be held.
 * @param flags     Thread flags.
 * @param name      Symbolic name (a copy is made).
 * @param uncounted Thread's accounting doesn't affect accumulated task
 *                  accounting.
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
    unsigned int flags, const char *name, bool uncounted)
{
	thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0);
	if (!thread)
		return NULL;

	/* Not needed, but good for debugging */
	memsetb(thread->kstack, STACK_SIZE, 0);

	irq_spinlock_lock(&tidlock, true);
	thread->tid = ++last_tid;
	irq_spinlock_unlock(&tidlock, true);

	context_save(&thread->saved_context);
	context_set(&thread->saved_context, FADDR(cushion),
	    (uintptr_t) thread->kstack, STACK_SIZE);

	the_initialize((the_t *) thread->kstack);

	ipl_t ipl = interrupts_disable();
	thread->saved_context.ipl = interrupts_read();
	interrupts_restore(ipl);

	str_cpy(thread->name, THREAD_NAME_BUFLEN, name);

	thread->thread_code = func;
	thread->thread_arg = arg;
	thread->ticks = -1;
	thread->ucycles = 0;
	thread->kcycles = 0;
	thread->uncounted = uncounted;
	thread->priority = -1;  /* Start in rq[0] */
	thread->cpu = NULL;
	thread->flags = flags;
	thread->nomigrate = 0;
	thread->state = Entering;

	timeout_initialize(&thread->sleep_timeout);
	thread->sleep_interruptible = false;
	thread->sleep_queue = NULL;
	thread->timeout_pending = false;

	thread->in_copy_from_uspace = false;
	thread->in_copy_to_uspace = false;

	thread->interrupted = false;
	thread->detached = false;
	waitq_initialize(&thread->join_wq);

	thread->task = task;

	thread->fpu_context_exists = 0;
	thread->fpu_context_engaged = 0;

	avltree_node_initialize(&thread->threads_tree_node);
	thread->threads_tree_node.key = (uintptr_t) thread;

#ifdef CONFIG_UDEBUG
	/* Initialize debugging stuff */
	thread->btrace = false;
	udebug_thread_initialize(&thread->udebug);
#endif

	/* Might depend on previous initialization */
	thread_create_arch(thread);

	if (!(flags & THREAD_FLAG_NOATTACH))
		thread_attach(thread, task);

	return thread;
}

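/*
 * Editorial usage sketch (not from the original source; `worker` is a
 * hypothetical kernel thread body):
 *
 *	static void worker(void *arg)
 *	{
 *		printf("worker running as tid %" PRIu64 "\n", THREAD->tid);
 *	}
 *
 *	thread_t *t = thread_create(worker, NULL, TASK, 0, "worker", false);
 *	if (t != NULL)
 *		thread_ready(t);
 *
 * Note that thread_create() only attaches the thread (unless
 * THREAD_FLAG_NOATTACH is given); it is thread_ready() that actually
 * enqueues it for scheduling.
 */
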
/** Destroy thread memory structure
 *
 * Detach thread from all queues, CPUs, etc., and destroy it.
 *
 * @param thread  Thread to be destroyed.
 * @param irq_res Indicate whether it should unlock thread->lock
 *                in interrupts-restore mode.
 *
 */
void thread_destroy(thread_t *thread, bool irq_res)
{
	ASSERT(irq_spinlock_locked(&thread->lock));
	ASSERT((thread->state == Exiting) || (thread->state == Lingering));
	ASSERT(thread->task);
	ASSERT(thread->cpu);

	irq_spinlock_lock(&thread->cpu->lock, false);
	if (thread->cpu->fpu_owner == thread)
		thread->cpu->fpu_owner = NULL;
	irq_spinlock_unlock(&thread->cpu->lock, false);

	irq_spinlock_pass(&thread->lock, &threads_lock);

	avltree_delete(&threads_tree, &thread->threads_tree_node);

	irq_spinlock_pass(&threads_lock, &thread->task->lock);

	/*
	 * Detach from the containing task.
	 */
	list_remove(&thread->th_link);
	irq_spinlock_unlock(&thread->task->lock, irq_res);

	/*
	 * Drop the reference to the containing task.
	 */
	task_release(thread->task);
	slab_free(thread_slab, thread);
}

/** Make the thread visible to the system.
 *
 * Attach the thread structure to the current task and make it visible in the
 * threads_tree.
 *
 * @param thread Thread to be attached to the task.
 * @param task   Task to which the thread is to be attached.
 *
 */
void thread_attach(thread_t *thread, task_t *task)
{
	/*
	 * Attach to the specified task.
	 */
	irq_spinlock_lock(&task->lock, true);

	/* Hold a reference to the task. */
	task_hold(task);

	/* Must not count kbox thread into lifecount */
	if (thread->flags & THREAD_FLAG_USPACE)
		atomic_inc(&task->lifecount);

	list_append(&thread->th_link, &task->threads);

	irq_spinlock_pass(&task->lock, &threads_lock);

	/*
	 * Register this thread in the system-wide list.
	 */
	avltree_insert(&threads_tree, &thread->threads_tree_node);
	irq_spinlock_unlock(&threads_lock, true);
}

/** Terminate thread.
 *
 * End current thread execution and switch it to the exiting state.
 * All pending timeouts are executed.
 *
 */
void thread_exit(void)
{
	if (THREAD->flags & THREAD_FLAG_USPACE) {
#ifdef CONFIG_UDEBUG
		/* Generate udebug THREAD_E event */
		udebug_thread_e_event();

		/*
		 * This thread will not execute any code or system calls from
		 * now on.
		 */
		udebug_stoppable_begin();
#endif
		if (atomic_predec(&TASK->lifecount) == 0) {
			/*
			 * We are the last userspace thread in the task that
			 * still has not exited. With the exception of the
			 * moment the task was created, new userspace threads
			 * can only be created by threads of the same task.
			 * We are safe to perform cleanup.
			 */
			ipc_cleanup();
			futex_cleanup();
			LOG("Cleanup of task %" PRIu64 " completed.", TASK->taskid);
		}
	}

restart:
	irq_spinlock_lock(&THREAD->lock, true);
	if (THREAD->timeout_pending) {
		/* Busy waiting for timeouts in progress */
		irq_spinlock_unlock(&THREAD->lock, true);
		goto restart;
	}

	THREAD->state = Exiting;
	irq_spinlock_unlock(&THREAD->lock, true);

	scheduler();

	/* Not reached */
	while (true);
}

/** Prevent the current thread from being migrated to another processor. */
void thread_migration_disable(void)
{
	ASSERT(THREAD);

	THREAD->nomigrate++;
}

/** Allow the current thread to be migrated to another processor. */
void thread_migration_enable(void)
{
	ASSERT(THREAD);
	ASSERT(THREAD->nomigrate > 0);

	THREAD->nomigrate--;
}

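/*
 * Editorial usage sketch (not from the original source): nomigrate is a
 * counter, so the fences nest and each disable must be balanced by exactly
 * one enable:
 *
 *	thread_migration_disable();
 *	cpu_t *cpu = CPU;	(stable: the thread cannot migrate off this CPU)
 *	...
 *	thread_migration_enable();
 */
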
/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(uint32_t sec)
{
	/*
	 * Sleep in 1000-second steps to support the full argument range.
	 * (1000 * 1000000 microseconds still fits comfortably in uint32_t.)
	 */
	while (sec > 0) {
		uint32_t period = (sec > 1000) ? 1000 : sec;

		thread_usleep(period * 1000000);
		sec -= period;
	}
}

/** Wait for another thread to exit.
 *
 * @param thread Thread to join on exit.
 * @param usec   Timeout in microseconds.
 * @param flags  Mode of operation.
 *
 * @return An error code from errno.h or an error code from synch.h.
 *
 */
int thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
{
	if (thread == THREAD)
		return EINVAL;

	/*
	 * Since thread join can only be called once on an undetached thread,
	 * the thread pointer is still guaranteed to be valid.
	 */

	irq_spinlock_lock(&thread->lock, true);
	ASSERT(!thread->detached);
	irq_spinlock_unlock(&thread->lock, true);

	return waitq_sleep_timeout(&thread->join_wq, usec, flags);
}

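/*
 * Editorial usage sketch (not from the original source; SYNCH_NO_TIMEOUT
 * and SYNCH_FLAGS_NONE are assumed to be available from synch.h):
 *
 *	thread_t *t = thread_create(worker, NULL, TASK, 0, "worker", false);
 *	if (t != NULL) {
 *		thread_ready(t);
 *		(void) thread_join_timeout(t, SYNCH_NO_TIMEOUT,
 *		    SYNCH_FLAGS_NONE);
 *		thread_detach(t);
 *	}
 *
 * Joining first and detaching afterwards lets thread_detach() reap the
 * lingering thread (see thread_detach() below).
 */
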
/** Detach thread.
 *
 * Mark the thread as detached. If the thread is already
 * in the Lingering state, deallocate its resources.
 *
 * @param thread Thread to be detached.
 *
 */
void thread_detach(thread_t *thread)
{
	/*
	 * Since the thread is expected not to be detached yet,
	 * the pointer to it must still be valid.
	 */
	irq_spinlock_lock(&thread->lock, true);
	ASSERT(!thread->detached);

	if (thread->state == Lingering) {
		/*
		 * Unlock &thread->lock and restore
		 * interrupts in thread_destroy().
		 */
		thread_destroy(thread, true);
		return;
	} else {
		thread->detached = true;
	}

	irq_spinlock_unlock(&thread->lock, true);
}

/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(uint32_t usec)
{
	waitq_t wq;

	waitq_initialize(&wq);

	(void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
}

static bool thread_walker(avltree_node_t *node, void *arg)
{
	bool *additional = (bool *) arg;
	thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);

	uint64_t ucycles, kcycles;
	char usuffix, ksuffix;
	order_suffix(thread->ucycles, &ucycles, &usuffix);
	order_suffix(thread->kcycles, &kcycles, &ksuffix);

	char *name;
	if (str_cmp(thread->name, "uinit") == 0)
		name = thread->task->name;
	else
		name = thread->name;

#ifdef __32_BITS__
	if (*additional)
		printf("%-8" PRIu64 " %10p %10p %9" PRIu64 "%c %9" PRIu64 "%c ",
		    thread->tid, thread->thread_code, thread->kstack,
		    ucycles, usuffix, kcycles, ksuffix);
	else
		printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 "\n",
		    thread->tid, name, thread, thread_states[thread->state],
		    thread->task, thread->task->container);
#endif

#ifdef __64_BITS__
	if (*additional)
		printf("%-8" PRIu64 " %18p %18p\n"
		    "         %9" PRIu64 "%c %9" PRIu64 "%c ",
		    thread->tid, thread->thread_code, thread->kstack,
		    ucycles, usuffix, kcycles, ksuffix);
	else
		printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n",
		    thread->tid, name, thread, thread_states[thread->state],
		    thread->task, thread->task->container);
#endif

	if (*additional) {
		if (thread->cpu)
			printf("%-5u", thread->cpu->id);
		else
			printf("none ");

		if (thread->state == Sleeping) {
#ifdef __32_BITS__
			printf(" %10p", thread->sleep_queue);
#endif

#ifdef __64_BITS__
			printf(" %18p", thread->sleep_queue);
#endif
		}

		printf("\n");
	}

	return true;
}

/** Print list of threads debug info
 *
 * @param additional Print additional information.
 *
 */
void thread_print_list(bool additional)
{
	/* Messing with thread structures, avoid deadlock */
	irq_spinlock_lock(&threads_lock, true);

#ifdef __32_BITS__
	if (additional)
		printf("[id    ] [code    ] [stack    ] [ucycles ] [kcycles ]"
		    " [cpu] [waitqueue]\n");
	else
		printf("[id    ] [name        ] [address ] [state ] [task    ]"
		    " [ctn]\n");
#endif

#ifdef __64_BITS__
	if (additional) {
		printf("[id    ] [code            ] [stack           ]\n"
		    "         [ucycles ] [kcycles ] [cpu] [waitqueue       ]\n");
	} else
		printf("[id    ] [name        ] [address         ] [state  ]"
		    " [task            ] [ctn]\n");
#endif

	avltree_walk(&threads_tree, thread_walker, &additional);

	irq_spinlock_unlock(&threads_lock, true);
}

/** Check whether thread exists.
 *
 * Note that threads_lock must be already held and
 * interrupts must be already disabled.
 *
 * @param thread Pointer to thread.
 *
 * @return True if the thread is known to the system, false otherwise.
 *
 */
bool thread_exists(thread_t *thread)
{
	ASSERT(interrupts_disabled());
	ASSERT(irq_spinlock_locked(&threads_lock));

	avltree_node_t *node =
	    avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) thread));

	return node != NULL;
}

/** Update accounting of current thread.
 *
 * Note that thread_lock on THREAD must be already held and
 * interrupts must be already disabled.
 *
 * @param user True to update user accounting, false for kernel.
 *
 */
void thread_update_accounting(bool user)
{
	uint64_t time = get_cycle();

	ASSERT(interrupts_disabled());
	ASSERT(irq_spinlock_locked(&THREAD->lock));

	if (user)
		THREAD->ucycles += time - THREAD->last_cycle;
	else
		THREAD->kcycles += time - THREAD->last_cycle;

	THREAD->last_cycle = time;
}

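/*
 * Editorial worked example (not from the original source): suppose
 * last_cycle == 1000 and get_cycle() now returns 1600. Then
 * thread_update_accounting(false) adds 600 cycles to kcycles and sets
 * last_cycle = 1600, so the next call accounts only the cycles elapsed
 * since this one. Calls therefore bracket each user/kernel transition.
 */
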
static bool thread_search_walker(avltree_node_t *node, void *arg)
{
	thread_t *thread =
	    (thread_t *) avltree_get_instance(node, thread_t, threads_tree_node);
	thread_iterator_t *iterator = (thread_iterator_t *) arg;

	if (thread->tid == iterator->thread_id) {
		iterator->thread = thread;
		return false;
	}

	return true;
}

/** Find thread structure corresponding to thread ID.
 *
 * The threads_lock must be already held by the caller of this function and
 * interrupts must be disabled.
 *
 * @param thread_id Thread ID.
 *
 * @return Thread structure address or NULL if there is no such thread ID.
 *
 */
thread_t *thread_find_by_id(thread_id_t thread_id)
{
	ASSERT(interrupts_disabled());
	ASSERT(irq_spinlock_locked(&threads_lock));

	thread_iterator_t iterator;

	iterator.thread_id = thread_id;
	iterator.thread = NULL;

	avltree_walk(&threads_tree, thread_search_walker, (void *) &iterator);

	return iterator.thread;
}

#ifdef CONFIG_UDEBUG

void thread_stack_trace(thread_id_t thread_id)
{
	irq_spinlock_lock(&threads_lock, true);

	thread_t *thread = thread_find_by_id(thread_id);
	if (thread == NULL) {
		printf("No such thread.\n");
		irq_spinlock_unlock(&threads_lock, true);
		return;
	}

	irq_spinlock_lock(&thread->lock, false);

	/*
	 * Schedule a stack trace to be printed
	 * just before the thread is scheduled next.
	 *
	 * If the thread is sleeping then try to interrupt
	 * the sleep. Any request for printing a uspace stack
	 * trace from within the kernel should always be
	 * considered a last-resort debugging means, therefore
	 * forcing the thread's sleep to be interrupted
	 * is probably justifiable.
	 */

	bool sleeping = false;
	istate_t *istate = thread->udebug.uspace_state;
	if (istate != NULL) {
		printf("Scheduling thread stack trace.\n");
		thread->btrace = true;
		if (thread->state == Sleeping)
			sleeping = true;
	} else
		printf("Thread interrupt state not available.\n");

	irq_spinlock_unlock(&thread->lock, false);

	if (sleeping)
		waitq_interrupt_sleep(thread);

	irq_spinlock_unlock(&threads_lock, true);
}

#endif /* CONFIG_UDEBUG */

/** Process syscall to create new thread.
 *
 */
sysarg_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name,
    size_t name_len, thread_id_t *uspace_thread_id)
{
	if (name_len > THREAD_NAME_BUFLEN - 1)
		name_len = THREAD_NAME_BUFLEN - 1;

	char namebuf[THREAD_NAME_BUFLEN];
	int rc = copy_from_uspace(namebuf, uspace_name, name_len);
	if (rc != 0)
		return (sysarg_t) rc;

	namebuf[name_len] = 0;

	/*
	 * In case of failure, kernel_uarg will be deallocated in this
	 * function. In case of success, kernel_uarg will be freed in uinit().
	 */
	uspace_arg_t *kernel_uarg =
	    (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);

	rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
	if (rc != 0) {
		free(kernel_uarg);
		return (sysarg_t) rc;
	}

	thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
	    THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
	if (thread) {
		if (uspace_thread_id != NULL) {
			rc = copy_to_uspace(uspace_thread_id, &thread->tid,
			    sizeof(thread->tid));
			if (rc != 0) {
				/*
				 * We have encountered a failure, but the
				 * thread has already been created. We need
				 * to undo its creation now.
				 */

				/*
				 * The new thread structure is initialized,
				 * but is still not visible to the system.
				 * We can safely deallocate it.
				 */
				slab_free(thread_slab, thread);
				free(kernel_uarg);

				return (sysarg_t) rc;
			}
		}

#ifdef CONFIG_UDEBUG
		/*
		 * Generate udebug THREAD_B event and attach the thread.
		 * This must be done atomically (with the debug locks held),
		 * otherwise we would either miss some thread or receive
		 * THREAD_B events for threads that already existed
		 * and could be detected with THREAD_READ before.
		 */
		udebug_thread_b_event_attach(thread, TASK);
#else
		thread_attach(thread, TASK);
#endif
		thread_ready(thread);

		return 0;
	} else
		free(kernel_uarg);

	return (sysarg_t) ENOMEM;
}

/** Process syscall to terminate thread.
 *
 */
sysarg_t sys_thread_exit(int uspace_status)
{
	thread_exit();

	/* Unreachable */
	return 0;
}

/** Syscall for getting TID.
 *
 * @param uspace_thread_id Userspace address of 8-byte buffer where to store
 *                         current thread ID.
 *
 * @return 0 on success or an error code from @ref errno.h.
 *
 */
sysarg_t sys_thread_get_id(thread_id_t *uspace_thread_id)
{
	/*
	 * No need to acquire lock on THREAD because tid
	 * remains constant for the lifespan of the thread.
	 */
	return (sysarg_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
	    sizeof(THREAD->tid));
}

/** Syscall wrapper for sleeping. */
sysarg_t sys_thread_usleep(uint32_t usec)
{
	thread_usleep(usec);
	return 0;
}

sysarg_t sys_thread_udelay(uint32_t usec)
{
	delay(usec);
	return 0;
}

/** @}
 */