source: mainline/kernel/generic/src/proc/thread.c@f3dbe27

Last change on this file was f3dbe27, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 2 years ago

Reduce locking further with lazy FPU

It turns out we only need a lock to synchronize between the trap
handler and thread destructor. The atomic operations introduced
are just plain reads and writes, written in an ugly fashion to
appease C11 undefined behavior gods.

In principle we could get rid of that if we made cpu_t::fpu_owner
a strong reference, but that would mean a thread structure could
be held in limbo indefinitely if a new thread is not being
scheduled or doesn't use FPU.
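
In practice the pattern is small: the thread destructor and the lazy-FPU trap handler serialize on cpu_t::fpu_lock, while cpu_t::fpu_owner itself is only ever accessed with relaxed atomic loads and stores (the "plain reads and writes" mentioned above). A minimal sketch of the destructor side, mirroring the CONFIG_FPU_LAZY block in thread_destroy() further down in this file (here cpu stands for thread->cpu; the rest of the destructor is omitted):

#ifdef CONFIG_FPU_LAZY
	/* Wait for a trap handler that may still be saving this thread's FPU state. */
	irq_spinlock_lock(&cpu->fpu_lock, false);

	/* Relaxed atomics: effectively plain reads and writes, but UB-free under C11. */
	if (atomic_load_explicit(&cpu->fpu_owner, memory_order_relaxed) == thread)
		atomic_store_explicit(&cpu->fpu_owner, NULL, memory_order_relaxed);

	irq_spinlock_unlock(&cpu->fpu_lock, false);
#endif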

1/*
2 * Copyright (c) 2010 Jakub Jermar
3 * Copyright (c) 2018 Jiri Svoboda
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup kernel_generic_proc
31 * @{
32 */
33
34/**
35 * @file
36 * @brief Thread management functions.
37 */
38
39#include <assert.h>
40#include <proc/scheduler.h>
41#include <proc/thread.h>
42#include <proc/task.h>
43#include <mm/frame.h>
44#include <mm/page.h>
45#include <arch/asm.h>
46#include <arch/cycle.h>
47#include <arch.h>
48#include <synch/spinlock.h>
49#include <synch/waitq.h>
50#include <synch/syswaitq.h>
51#include <cpu.h>
52#include <str.h>
53#include <context.h>
54#include <adt/list.h>
55#include <adt/odict.h>
56#include <time/clock.h>
57#include <time/timeout.h>
58#include <time/delay.h>
59#include <config.h>
60#include <arch/interrupt.h>
61#include <smp/ipi.h>
62#include <arch/faddr.h>
63#include <atomic.h>
64#include <mem.h>
65#include <stdio.h>
66#include <stdlib.h>
67#include <main/uinit.h>
68#include <syscall/copy.h>
69#include <errno.h>
70#include <debug.h>
71#include <halt.h>
72
73/** Thread states */
74const char *thread_states[] = {
75 "Invalid",
76 "Running",
77 "Sleeping",
78 "Ready",
79 "Entering",
80 "Exiting",
81 "Lingering"
82};
83
84enum sleep_state {
85 SLEEP_INITIAL,
86 SLEEP_ASLEEP,
87 SLEEP_WOKE,
88};
89
90/** Lock protecting the @c threads ordered dictionary.
91 *
92 * For locking rules, see the declaration of @c threads.
93 */
94IRQ_SPINLOCK_INITIALIZE(threads_lock);
95
96/** Ordered dictionary of all threads by their address (i.e. pointer to
97 * the thread_t structure).
98 *
99 * When a thread is found in the @c threads ordered dictionary, it is
100 * guaranteed to exist as long as the @c threads_lock is held.
101 *
102 * Members are of type thread_t.
103 *
104 * This structure contains weak references. Any reference from it must not
105 * leave the threads_lock critical section unless strengthened via thread_try_ref().
106 */
107odict_t threads;
108
109IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
110static thread_id_t last_tid = 0;
111
112static slab_cache_t *thread_cache;
113
114static void *threads_getkey(odlink_t *);
115static int threads_cmp(void *, void *);
116
117/** Thread wrapper.
118 *
119 * This wrapper is provided to ensure that every thread makes a call to
120 * thread_exit() when its implementing function returns.
121 *
122 * interrupts_disable() is assumed.
123 *
124 */
125static void cushion(void)
126{
127 void (*f)(void *) = THREAD->thread_code;
128 void *arg = THREAD->thread_arg;
129 THREAD->last_cycle = get_cycle();
130
131 /* This is where each thread wakes up after its creation */
132 irq_spinlock_unlock(&THREAD->lock, false);
133 interrupts_enable();
134
135 f(arg);
136
137 thread_exit();
138
139 /* Not reached */
140}
141
142/** Initialization and allocation for thread_t structure
143 *
144 */
145static errno_t thr_constructor(void *obj, unsigned int kmflags)
146{
147 thread_t *thread = (thread_t *) obj;
148
149 irq_spinlock_initialize(&thread->lock, "thread_t_lock");
150 link_initialize(&thread->rq_link);
151 link_initialize(&thread->wq_link);
152 link_initialize(&thread->th_link);
153
154 /* call the architecture-specific part of the constructor */
155 thr_constructor_arch(thread);
156
157 /*
158 * Allocate the kernel stack from low memory to prevent an infinite
159 * nesting of TLB-misses when accessing the stack from the part of the
160 * TLB-miss handler written in C.
161 *
162 * Note that low memory is safe to use for the stack as it will be
163 * covered by the kernel identity mapping, which guarantees not to
164 * nest TLB-misses infinitely (either via some hardware mechanism or
165 * by the construction of the assembly-language part of the TLB-miss
166 * handler).
167 *
168 * This restriction can be lifted once each architecture provides
169 * a similar guarantee, for example, by locking the kernel stack
170 * in the TLB whenever it is allocated from high memory and the
171 * thread is being scheduled to run.
172 */
173 kmflags |= FRAME_LOWMEM;
174 kmflags &= ~FRAME_HIGHMEM;
175
176 /*
177 * NOTE: All kernel stacks must be aligned to STACK_SIZE,
178 * see CURRENT.
179 */
180
181 uintptr_t stack_phys =
182 frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);
183 if (!stack_phys)
184 return ENOMEM;
185
186 thread->kstack = (uint8_t *) PA2KA(stack_phys);
187
188#ifdef CONFIG_UDEBUG
189 mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
190#endif
191
192 return EOK;
193}
194
195/** Destruction of thread_t object */
196static size_t thr_destructor(void *obj)
197{
198 thread_t *thread = (thread_t *) obj;
199
200 /* call the architecture-specific part of the destructor */
201 thr_destructor_arch(thread);
202
203 frame_free(KA2PA(thread->kstack), STACK_FRAMES);
204
205 return STACK_FRAMES; /* number of frames freed */
206}
207
208/** Initialize threads
209 *
210 * Initialize kernel threads support.
211 *
212 */
213void thread_init(void)
214{
215 THREAD = NULL;
216
217 atomic_store(&nrdy, 0);
218 thread_cache = slab_cache_create("thread_t", sizeof(thread_t), _Alignof(thread_t),
219 thr_constructor, thr_destructor, 0);
220
221 odict_initialize(&threads, threads_getkey, threads_cmp);
222}
223
224/** Wire thread to the given CPU
225 *
226 * @param cpu CPU to wire the thread to.
227 *
228 */
229void thread_wire(thread_t *thread, cpu_t *cpu)
230{
231 irq_spinlock_lock(&thread->lock, true);
232 thread->cpu = cpu;
233 thread->nomigrate++;
234 irq_spinlock_unlock(&thread->lock, true);
235}
236
237/** Invoked right before thread_ready() readies the thread. The thread is locked. */
238static void before_thread_is_ready(thread_t *thread)
239{
240 assert(irq_spinlock_locked(&thread->lock));
241}
242
243/** Make thread ready
244 *
245 * Switch thread to the ready state. Consumes reference passed by the caller.
246 *
247 * @param thread Thread to make ready.
248 *
249 */
250void thread_ready(thread_t *thread)
251{
252 irq_spinlock_lock(&thread->lock, true);
253
254 assert(thread->state != Ready);
255
256 before_thread_is_ready(thread);
257
258 int i = (thread->priority < RQ_COUNT - 1) ?
259 ++thread->priority : thread->priority;
260
261 /* Prefer the CPU on which the thread ran last */
262 cpu_t *cpu = thread->cpu ? thread->cpu : CPU;
263
264 thread->state = Ready;
265
266 irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
267
268 /*
269 * Append the thread to the ready queue
270 * of the chosen processor.
271 */
272
273 list_append(&thread->rq_link, &cpu->rq[i].rq);
274 cpu->rq[i].n++;
275 irq_spinlock_unlock(&(cpu->rq[i].lock), true);
276
277 atomic_inc(&nrdy);
278 atomic_inc(&cpu->nrdy);
279}
280
281/** Create new thread
282 *
283 * Create a new thread.
284 *
285 * @param func Thread's implementing function.
286 * @param arg Thread's implementing function argument.
287 * @param task Task to which the thread belongs. The caller must
288 * guarantee that the task won't cease to exist during the
289 * call. The task's lock may not be held.
290 * @param flags Thread flags.
291 * @param name Symbolic name (a copy is made).
292 *
293 * @return New thread's structure on success, NULL on failure.
294 *
295 */
296thread_t *thread_create(void (*func)(void *), void *arg, task_t *task,
297 thread_flags_t flags, const char *name)
298{
299 thread_t *thread = (thread_t *) slab_alloc(thread_cache, FRAME_ATOMIC);
300 if (!thread)
301 return NULL;
302
303 refcount_init(&thread->refcount);
304
305 if (thread_create_arch(thread, flags) != EOK) {
306 slab_free(thread_cache, thread);
307 return NULL;
308 }
309
310 /* Not needed, but good for debugging */
311 memsetb(thread->kstack, STACK_SIZE, 0);
312
313 irq_spinlock_lock(&tidlock, true);
314 thread->tid = ++last_tid;
315 irq_spinlock_unlock(&tidlock, true);
316
317 memset(&thread->saved_context, 0, sizeof(thread->saved_context));
318 context_set(&thread->saved_context, FADDR(cushion),
319 (uintptr_t) thread->kstack, STACK_SIZE);
320
321 current_initialize((current_t *) thread->kstack);
322
323 ipl_t ipl = interrupts_disable();
324 thread->saved_ipl = interrupts_read();
325 interrupts_restore(ipl);
326
327 str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
328
329 thread->thread_code = func;
330 thread->thread_arg = arg;
331 thread->ucycles = 0;
332 thread->kcycles = 0;
333 thread->uncounted =
334 ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
335 thread->priority = -1; /* Start in rq[0] */
336 thread->cpu = NULL;
337 thread->stolen = false;
338 thread->uspace =
339 ((flags & THREAD_FLAG_USPACE) == THREAD_FLAG_USPACE);
340
341 thread->nomigrate = 0;
342 thread->state = Entering;
343
344 atomic_init(&thread->sleep_queue, NULL);
345
346 thread->in_copy_from_uspace = false;
347 thread->in_copy_to_uspace = false;
348
349 thread->interrupted = false;
350 atomic_init(&thread->sleep_state, SLEEP_INITIAL);
351
352 waitq_initialize(&thread->join_wq);
353
354 thread->task = task;
355
356 thread->fpu_context_exists = false;
357
358 odlink_initialize(&thread->lthreads);
359
360#ifdef CONFIG_UDEBUG
361 /* Initialize debugging stuff */
362 thread->btrace = false;
363 udebug_thread_initialize(&thread->udebug);
364#endif
365
366 if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
367 thread_attach(thread, task);
368
369 return thread;
370}
371
372/** Destroy thread memory structure
373 *
374 * Detach thread from all queues, cpus etc. and destroy it.
375 *
376 * @param obj Thread to be destroyed.
377 *
378 */
379static void thread_destroy(void *obj)
380{
381 thread_t *thread = (thread_t *) obj;
382
383 assert_link_not_used(&thread->rq_link);
384 assert_link_not_used(&thread->wq_link);
385
386 assert(thread->task);
387
388 ipl_t ipl = interrupts_disable();
389
390 /* Remove thread from global list. */
391 irq_spinlock_lock(&threads_lock, false);
392 odict_remove(&thread->lthreads);
393 irq_spinlock_unlock(&threads_lock, false);
394
395 /* Remove thread from task's list and accumulate accounting. */
396 irq_spinlock_lock(&thread->task->lock, false);
397
398 list_remove(&thread->th_link);
399
400 /*
401 * No other CPU has access to this thread anymore, so we don't need
402 * thread->lock for accessing thread's fields after this point.
403 */
404
405 if (!thread->uncounted) {
406 thread->task->ucycles += thread->ucycles;
407 thread->task->kcycles += thread->kcycles;
408 }
409
410 irq_spinlock_unlock(&thread->task->lock, false);
411
412 assert((thread->state == Exiting) || (thread->state == Lingering));
413
414 /* Clear cpu->fpu_owner if set to this thread. */
415#ifdef CONFIG_FPU_LAZY
416 if (thread->cpu) {
417 /*
418 * We need to lock for this because the old CPU can concurrently try
419 * to dump this thread's FPU state, in which case we need to wait for
420 * it to finish. An atomic compare-and-swap wouldn't be enough.
421 */
422 irq_spinlock_lock(&thread->cpu->fpu_lock, false);
423
424 thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner,
425 memory_order_relaxed);
426
427 if (owner == thread) {
428 atomic_store_explicit(&thread->cpu->fpu_owner, NULL,
429 memory_order_relaxed);
430 }
431
432 irq_spinlock_unlock(&thread->cpu->fpu_lock, false);
433 }
434#endif
435
436 interrupts_restore(ipl);
437
438 /*
439 * Drop the reference to the containing task.
440 */
441 task_release(thread->task);
442 thread->task = NULL;
443
444 slab_free(thread_cache, thread);
445}
446
447void thread_put(thread_t *thread)
448{
449 if (refcount_down(&thread->refcount)) {
450 thread_destroy(thread);
451 }
452}
453
454/** Make the thread visible to the system.
455 *
456 * Attach the thread structure to the specified task and make it visible in the
457 * @c threads ordered dictionary.
458 *
459 * @param thread Thread to be attached to the task.
460 * @param task Task to which the thread is to be attached.
461 *
462 */
463void thread_attach(thread_t *thread, task_t *task)
464{
465 ipl_t ipl = interrupts_disable();
466
467 /*
468 * Attach to the specified task.
469 */
470 irq_spinlock_lock(&task->lock, false);
471
472 /* Hold a reference to the task. */
473 task_hold(task);
474
475 /* Must not count kbox thread into lifecount */
476 if (thread->uspace)
477 atomic_inc(&task->lifecount);
478
479 list_append(&thread->th_link, &task->threads);
480
481 irq_spinlock_unlock(&task->lock, false);
482
483 /*
484 * Register this thread in the system-wide dictionary.
485 */
486 irq_spinlock_lock(&threads_lock, false);
487 odict_insert(&thread->lthreads, &threads, NULL);
488 irq_spinlock_unlock(&threads_lock, false);
489
490 interrupts_restore(ipl);
491}
492
493/** Terminate thread.
494 *
495 * End current thread execution and switch it to the exiting state.
496 * All pending timeouts are executed.
497 *
498 */
499void thread_exit(void)
500{
501 if (THREAD->uspace) {
502#ifdef CONFIG_UDEBUG
503 /* Generate udebug THREAD_E event */
504 udebug_thread_e_event();
505
506 /*
507 * This thread will not execute any code or system calls from
508 * now on.
509 */
510 udebug_stoppable_begin();
511#endif
512 if (atomic_predec(&TASK->lifecount) == 0) {
513 /*
514 * We are the last userspace thread in the task that
515 * still has not exited. With the exception of the
516 * moment the task was created, new userspace threads
517 * can only be created by threads of the same task.
518 * We are safe to perform cleanup.
519 *
520 */
521 ipc_cleanup();
522 sys_waitq_task_cleanup();
523 LOG("Cleanup of task %" PRIu64 " completed.", TASK->taskid);
524 }
525 }
526
527 irq_spinlock_lock(&THREAD->lock, true);
528 THREAD->state = Exiting;
529 irq_spinlock_unlock(&THREAD->lock, true);
530
531 scheduler();
532
533 panic("should never be reached");
534}
535
536/** Interrupts an existing thread so that it may exit as soon as possible.
537 *
538 * Threads that are blocked waiting for a synchronization primitive
539 * are woken up with a return code of EINTR if the
540 * blocking call was interruptible. See waitq_sleep_timeout().
541 *
542 * Interrupted threads automatically exit when returning back to user space.
543 *
544 * @param thread A valid thread object.
545 */
546void thread_interrupt(thread_t *thread)
547{
548 assert(thread != NULL);
549 thread->interrupted = true;
550 thread_wakeup(thread);
551}
552
553/** Prepare for putting the thread to sleep.
554 *
555 * @returns whether the thread is currently terminating. If THREAD_OK
556 * is returned, the thread is guaranteed to be woken up instantly if the thread
557 * is terminated at any time between this function's return and
558 * thread_wait_finish(). If THREAD_TERMINATING is returned, the thread can still
559 * go to sleep, but doing so will delay termination.
560 */
561thread_termination_state_t thread_wait_start(void)
562{
563 assert(THREAD != NULL);
564
565 /*
566 * This is an exchange rather than a store so that we can use the acquire
567 * semantics, which is needed to ensure that code after this operation sees
568 * memory ops made before thread_wakeup() in another thread, if that wakeup
569 * was reset by this operation.
570 *
571 * In particular, we need this to ensure we can't miss the thread being
572 * terminated concurrently with a synchronization primitive preparing to
573 * sleep.
574 */
575 (void) atomic_exchange_explicit(&THREAD->sleep_state, SLEEP_INITIAL,
576 memory_order_acquire);
577
578 return THREAD->interrupted ? THREAD_TERMINATING : THREAD_OK;
579}
580
581static void thread_wait_internal(void)
582{
583 assert(THREAD != NULL);
584
585 ipl_t ipl = interrupts_disable();
586
587 if (atomic_load(&haltstate))
588 halt();
589
590 /*
591 * Lock here to prevent a race between entering the scheduler and another
592 * thread rescheduling this thread.
593 */
594 irq_spinlock_lock(&THREAD->lock, false);
595
596 int expected = SLEEP_INITIAL;
597
598 /* Only set SLEEP_ASLEEP in sleep_state if it's still in the initial state */
599 if (atomic_compare_exchange_strong_explicit(&THREAD->sleep_state, &expected,
600 SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire)) {
601 THREAD->state = Sleeping;
602 scheduler_locked(ipl);
603 } else {
604 assert(expected == SLEEP_WOKE);
605 /* Return immediately. */
606 irq_spinlock_unlock(&THREAD->lock, false);
607 interrupts_restore(ipl);
608 }
609}
610
611static void thread_wait_timeout_callback(void *arg)
612{
613 thread_wakeup(arg);
614}
615
616/**
617 * Suspends this thread's execution until thread_wakeup() is called on it,
618 * or deadline is reached.
619 *
620 * The way this would normally be used is that the current thread calls
621 * thread_wait_start(), and if interruption has not been signaled, stores
622 * a reference to itself in a synchronized structure (such as waitq).
623 * After that, it releases any spinlocks it might hold and calls this function.
624 *
625 * The thread doing the wakeup will acquire the thread's reference from said
626 * synchronized structure and call thread_wakeup() on it.
627 *
628 * Notably, there can be more than one thread performing wakeup.
629 * The number of performed calls to thread_wakeup(), or their relative
630 * ordering with thread_wait_finish(), does not matter. However, calls to
631 * thread_wakeup() are expected to be synchronized with thread_wait_start()
632 * with which they are associated, otherwise wakeups may be missed.
633 * However, the operation of thread_wakeup() is defined at any time,
634 * synchronization notwithstanding (in the sense of C un/defined behavior),
635 * and is in fact used to interrupt waiting threads by external events.
636 * The waiting thread must operate correctly in the face of spurious wakeups,
637 * and clean up its reference in the synchronization structure if necessary.
638 *
639 * Returns THREAD_WAIT_TIMEOUT if the timeout fired, which is a necessary
640 * condition for the thread to have been woken up by the timeout. The caller
641 * must assume that proper wakeups, timeouts and interrupts may occur
642 * concurrently, so the fact that the timeout fired does not necessarily mean
643 * the thread has not also been woken up or interrupted.
644 */
645thread_wait_result_t thread_wait_finish(deadline_t deadline)
646{
647 assert(THREAD != NULL);
648
649 timeout_t timeout;
650
651 if (deadline != DEADLINE_NEVER) {
652 /* Extra check to avoid setting up a deadline if we don't need to. */
653 if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
654 SLEEP_INITIAL)
655 return THREAD_WAIT_SUCCESS;
656
657 timeout_initialize(&timeout);
658 timeout_register_deadline(&timeout, deadline,
659 thread_wait_timeout_callback, THREAD);
660 }
661
662 thread_wait_internal();
663
664 if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
665 return THREAD_WAIT_TIMEOUT;
666 } else {
667 return THREAD_WAIT_SUCCESS;
668 }
669}
670
671void thread_wakeup(thread_t *thread)
672{
673 assert(thread != NULL);
674
675 int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
676 memory_order_release);
677
678 if (state == SLEEP_ASLEEP) {
679 /*
680 * Only one thread gets to do this.
681 * The reference consumed here is the reference implicitly passed to
682 * the waking thread by the sleeper in thread_wait_finish().
683 */
684 thread_ready(thread);
685 }
686}
687
688/** Prevent the current thread from being migrated to another processor. */
689void thread_migration_disable(void)
690{
691 assert(THREAD);
692
693 THREAD->nomigrate++;
694}
695
696/** Allow the current thread to be migrated to another processor. */
697void thread_migration_enable(void)
698{
699 assert(THREAD);
700 assert(THREAD->nomigrate > 0);
701
702 if (THREAD->nomigrate > 0)
703 THREAD->nomigrate--;
704}
705
706/** Thread sleep
707 *
708 * Suspend execution of the current thread.
709 *
710 * @param sec Number of seconds to sleep.
711 *
712 */
713void thread_sleep(uint32_t sec)
714{
715 /*
716 * Sleep in 1000 second steps to support
717 * full argument range
718 */
719 while (sec > 0) {
720 uint32_t period = (sec > 1000) ? 1000 : sec;
721
722 thread_usleep(period * 1000000);
723 sec -= period;
724 }
725}
726
727errno_t thread_join(thread_t *thread)
728{
729 return thread_join_timeout(thread, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
730}
731
732/** Wait for another thread to exit.
733 * This function does not destroy the thread. Reference counting handles that.
734 *
735 * @param thread Thread to join on exit.
736 * @param usec Timeout in microseconds.
737 * @param flags Mode of operation.
738 *
739 * @return An error code from errno.h or an error code from synch.h.
740 *
741 */
742errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
743{
744 if (thread == THREAD)
745 return EINVAL;
746
747 irq_spinlock_lock(&thread->lock, true);
748 state_t state = thread->state;
749 irq_spinlock_unlock(&thread->lock, true);
750
751 if (state == Exiting) {
752 return EOK;
753 } else {
754 return _waitq_sleep_timeout(&thread->join_wq, usec, flags);
755 }
756}
757
758/** Thread usleep
759 *
760 * Suspend execution of the current thread.
761 *
762 * @param usec Number of microseconds to sleep.
763 *
764 */
765void thread_usleep(uint32_t usec)
766{
767 waitq_t wq;
768
769 waitq_initialize(&wq);
770
771 (void) waitq_sleep_timeout(&wq, usec);
772}
773
774static void thread_print(thread_t *thread, bool additional)
775{
776 uint64_t ucycles, kcycles;
777 char usuffix, ksuffix;
778 order_suffix(thread->ucycles, &ucycles, &usuffix);
779 order_suffix(thread->kcycles, &kcycles, &ksuffix);
780
781 char *name;
782 if (str_cmp(thread->name, "uinit") == 0)
783 name = thread->task->name;
784 else
785 name = thread->name;
786
787 if (additional)
788 printf("%-8" PRIu64 " %p %p %9" PRIu64 "%c %9" PRIu64 "%c ",
789 thread->tid, thread->thread_code, thread->kstack,
790 ucycles, usuffix, kcycles, ksuffix);
791 else
792 printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
793 thread->tid, name, thread, thread_states[thread->state],
794 thread->task, thread->task->container);
795
796 if (additional) {
797 if (thread->cpu)
798 printf("%-5u", thread->cpu->id);
799 else
800 printf("none ");
801
802 if (thread->state == Sleeping) {
803 printf(" %p", thread->sleep_queue);
804 }
805
806 printf("\n");
807 }
808}
809
810/** Print a list of threads with debugging info
811 *
812 * @param additional Print additional information.
813 *
814 */
815void thread_print_list(bool additional)
816{
817 thread_t *thread;
818
819 /* Accessing system-wide threads list through thread_first()/thread_next(). */
820 irq_spinlock_lock(&threads_lock, true);
821
822 if (sizeof(void *) <= 4) {
823 if (additional)
824 printf("[id ] [code ] [stack ] [ucycles ] [kcycles ]"
825 " [cpu] [waitqueue]\n");
826 else
827 printf("[id ] [name ] [address ] [state ] [task ]"
828 " [ctn]\n");
829 } else {
830 if (additional) {
831 printf("[id ] [code ] [stack ] [ucycles ] [kcycles ]"
832 " [cpu] [waitqueue ]\n");
833 } else
834 printf("[id ] [name ] [address ] [state ]"
835 " [task ] [ctn]\n");
836 }
837
838 thread = thread_first();
839 while (thread != NULL) {
840 thread_print(thread, additional);
841 thread = thread_next(thread);
842 }
843
844 irq_spinlock_unlock(&threads_lock, true);
845}
846
847static bool thread_exists(thread_t *thread)
848{
849 odlink_t *odlink = odict_find_eq(&threads, thread, NULL);
850 return odlink != NULL;
851}
852
853/** Check whether the thread exists, and if so, return a reference to it.
854 */
855thread_t *thread_try_get(thread_t *thread)
856{
857 irq_spinlock_lock(&threads_lock, true);
858
859 if (thread_exists(thread)) {
860 /* Try to strengthen the reference. */
861 thread = thread_try_ref(thread);
862 } else {
863 thread = NULL;
864 }
865
866 irq_spinlock_unlock(&threads_lock, true);
867
868 return thread;
869}
870
871/** Update accounting of current thread.
872 *
873 * Note that THREAD->lock must already be held and
874 * interrupts must already be disabled.
875 *
876 * @param user True to update user accounting, false for kernel.
877 *
878 */
879void thread_update_accounting(bool user)
880{
881 uint64_t time = get_cycle();
882
883 assert(interrupts_disabled());
884 assert(irq_spinlock_locked(&THREAD->lock));
885
886 if (user)
887 THREAD->ucycles += time - THREAD->last_cycle;
888 else
889 THREAD->kcycles += time - THREAD->last_cycle;
890
891 THREAD->last_cycle = time;
892}
893
894/** Find thread structure corresponding to thread ID.
895 *
896 * The threads_lock must already be held by the caller of this function and
897 * interrupts must be disabled.
898 *
899 * The returned reference is weak.
900 * If the caller needs to keep it, thread_try_ref() must be used to upgrade
901 * to a strong reference _before_ threads_lock is released.
902 *
903 * @param id Thread ID.
904 *
905 * @return Thread structure address or NULL if there is no such thread ID.
906 *
907 */
908thread_t *thread_find_by_id(thread_id_t thread_id)
909{
910 thread_t *thread;
911
912 assert(interrupts_disabled());
913 assert(irq_spinlock_locked(&threads_lock));
914
915 thread = thread_first();
916 while (thread != NULL) {
917 if (thread->tid == thread_id)
918 return thread;
919
920 thread = thread_next(thread);
921 }
922
923 return NULL;
924}
925
926/** Get count of threads.
927 *
928 * @return Number of threads in the system
929 */
930size_t thread_count(void)
931{
932 assert(interrupts_disabled());
933 assert(irq_spinlock_locked(&threads_lock));
934
935 return odict_count(&threads);
936}
937
938/** Get first thread.
939 *
940 * @return Pointer to first thread or @c NULL if there are none.
941 */
942thread_t *thread_first(void)
943{
944 odlink_t *odlink;
945
946 assert(interrupts_disabled());
947 assert(irq_spinlock_locked(&threads_lock));
948
949 odlink = odict_first(&threads);
950 if (odlink == NULL)
951 return NULL;
952
953 return odict_get_instance(odlink, thread_t, lthreads);
954}
955
956/** Get next thread.
957 *
958 * @param cur Current thread
959 * @return Pointer to next thread or @c NULL if there are no more threads.
960 */
961thread_t *thread_next(thread_t *cur)
962{
963 odlink_t *odlink;
964
965 assert(interrupts_disabled());
966 assert(irq_spinlock_locked(&threads_lock));
967
968 odlink = odict_next(&cur->lthreads, &threads);
969 if (odlink == NULL)
970 return NULL;
971
972 return odict_get_instance(odlink, thread_t, lthreads);
973}
974
975#ifdef CONFIG_UDEBUG
976
977void thread_stack_trace(thread_id_t thread_id)
978{
979 irq_spinlock_lock(&threads_lock, true);
980 thread_t *thread = thread_try_ref(thread_find_by_id(thread_id));
981 irq_spinlock_unlock(&threads_lock, true);
982
983 if (thread == NULL) {
984 printf("No such thread.\n");
985 return;
986 }
987
988 /*
989 * Schedule a stack trace to be printed
990 * just before the thread is scheduled next.
991 *
992 * If the thread is sleeping then try to interrupt
993 * the sleep. Any request for printing a uspace stack
994 * trace from within the kernel should always be
995 * considered a last-resort debugging means, therefore
996 * forcing the thread's sleep to be interrupted
997 * is probably justifiable.
998 */
999
1000 irq_spinlock_lock(&thread->lock, true);
1001
1002 bool sleeping = false;
1003 istate_t *istate = thread->udebug.uspace_state;
1004 if (istate != NULL) {
1005 printf("Scheduling thread stack trace.\n");
1006 thread->btrace = true;
1007 if (thread->state == Sleeping)
1008 sleeping = true;
1009 } else
1010 printf("Thread interrupt state not available.\n");
1011
1012 irq_spinlock_unlock(&thread->lock, true);
1013
1014 if (sleeping)
1015 thread_wakeup(thread);
1016
1017 thread_put(thread);
1018}
1019
1020#endif /* CONFIG_UDEBUG */
1021
1022/** Get key function for the @c threads ordered dictionary.
1023 *
1024 * @param odlink Link
1025 * @return Pointer to thread structure cast as 'void *'
1026 */
1027static void *threads_getkey(odlink_t *odlink)
1028{
1029 thread_t *thread = odict_get_instance(odlink, thread_t, lthreads);
1030 return (void *) thread;
1031}
1032
1033/** Key comparison function for the @c threads ordered dictionary.
1034 *
1035 * @param a Pointer to thread A
1036 * @param b Pointer to thread B
1037 * @return -1, 0, 1 iff pointer A is greater than, equal to, less than pointer B
1038 */
1039static int threads_cmp(void *a, void *b)
1040{
1041 if (a > b)
1042 return -1;
1043 else if (a == b)
1044 return 0;
1045 else
1046 return +1;
1047}
1048
1049/** Process syscall to create new thread.
1050 *
1051 */
1052sys_errno_t sys_thread_create(uspace_ptr_uspace_arg_t uspace_uarg, uspace_ptr_char uspace_name,
1053 size_t name_len, uspace_ptr_thread_id_t uspace_thread_id)
1054{
1055 if (name_len > THREAD_NAME_BUFLEN - 1)
1056 name_len = THREAD_NAME_BUFLEN - 1;
1057
1058 char namebuf[THREAD_NAME_BUFLEN];
1059 errno_t rc = copy_from_uspace(namebuf, uspace_name, name_len);
1060 if (rc != EOK)
1061 return (sys_errno_t) rc;
1062
1063 namebuf[name_len] = 0;
1064
1065 /*
1066 * In case of failure, kernel_uarg will be deallocated in this function.
1067 * In case of success, kernel_uarg will be freed in uinit().
1068 */
1069 uspace_arg_t *kernel_uarg =
1070 (uspace_arg_t *) malloc(sizeof(uspace_arg_t));
1071 if (!kernel_uarg)
1072 return (sys_errno_t) ENOMEM;
1073
1074 rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
1075 if (rc != EOK) {
1076 free(kernel_uarg);
1077 return (sys_errno_t) rc;
1078 }
1079
1080 thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
1081 THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf);
1082 if (thread) {
1083 if (uspace_thread_id) {
1084 rc = copy_to_uspace(uspace_thread_id, &thread->tid,
1085 sizeof(thread->tid));
1086 if (rc != EOK) {
1087 /*
1088 * We have encountered a failure, but the thread
1089 * has already been created. We need to undo its
1090 * creation now.
1091 */
1092
1093 /*
1094 * The new thread structure is initialized, but
1095 * is still not visible to the system.
1096 * We can safely deallocate it.
1097 */
1098 slab_free(thread_cache, thread);
1099 free(kernel_uarg);
1100
1101 return (sys_errno_t) rc;
1102 }
1103 }
1104
1105#ifdef CONFIG_UDEBUG
1106 /*
1107 * Generate udebug THREAD_B event and attach the thread.
1108 * This must be done atomically (with the debug locks held),
1109 * otherwise we would either miss some thread or receive
1110 * THREAD_B events for threads that already existed
1111 * and could be detected with THREAD_READ before.
1112 */
1113 udebug_thread_b_event_attach(thread, TASK);
1114#else
1115 thread_attach(thread, TASK);
1116#endif
1117 thread_ready(thread);
1118
1119 return 0;
1120 } else
1121 free(kernel_uarg);
1122
1123 return (sys_errno_t) ENOMEM;
1124}
1125
1126/** Process syscall to terminate thread.
1127 *
1128 */
1129sys_errno_t sys_thread_exit(int uspace_status)
1130{
1131 thread_exit();
1132}
1133
1134/** Syscall for getting TID.
1135 *
1136 * @param uspace_thread_id Userspace address of 8-byte buffer where to store
1137 * current thread ID.
1138 *
1139 * @return 0 on success or an error code from @ref errno.h.
1140 *
1141 */
1142sys_errno_t sys_thread_get_id(uspace_ptr_thread_id_t uspace_thread_id)
1143{
1144 /*
1145 * No need to acquire lock on THREAD because tid
1146 * remains constant for the lifespan of the thread.
1147 *
1148 */
1149 return (sys_errno_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
1150 sizeof(THREAD->tid));
1151}
1152
1153/** Syscall wrapper for sleeping. */
1154sys_errno_t sys_thread_usleep(uint32_t usec)
1155{
1156 thread_usleep(usec);
1157 return 0;
1158}
1159
1160sys_errno_t sys_thread_udelay(uint32_t usec)
1161{
1162 delay(usec);
1163 return 0;
1164}
1165
1166/** @}
1167 */
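
For orientation, the sleep/wakeup protocol documented above thread_wait_finish() is used by synchronization primitives roughly as sketched below. The example_queue_t type, its lock and its sleepers list are hypothetical placeholders, not part of this file; thread reference counting and cleanup of a stale queue entry after a timeout or spurious wakeup are omitted:

/* Sleeper side (runs as the current thread). */
static errno_t example_sleep(example_queue_t *q, deadline_t deadline)
{
	if (thread_wait_start() == THREAD_TERMINATING)
		return EINTR;

	/* Publish ourselves so a waker can find us, then drop the lock. */
	irq_spinlock_lock(&q->lock, true);
	list_append(&THREAD->wq_link, &q->sleepers);
	irq_spinlock_unlock(&q->lock, true);

	if (thread_wait_finish(deadline) == THREAD_WAIT_TIMEOUT)
		return ETIMEOUT;

	return EOK;
}

/* Waker side: consume one sleeper's entry and wake it. */
static void example_wake_one(example_queue_t *q)
{
	irq_spinlock_lock(&q->lock, true);

	if (list_empty(&q->sleepers)) {
		irq_spinlock_unlock(&q->lock, true);
		return;
	}

	thread_t *t = list_get_instance(list_first(&q->sleepers), thread_t, wq_link);
	list_remove(&t->wq_link);
	irq_spinlock_unlock(&q->lock, true);

	thread_wakeup(t);
}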