source: mainline/kernel/generic/src/proc/thread.c@ 0f4f1b2

Last change on this file since 0f4f1b2 was 0f4f1b2, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 2 years ago

Add (and use) functions thread_start() and thread_detach()

Mostly cosmetic, with thread_start() replacing calls to thread_ready(),
but not consuming the passed reference, and thread_detach() being a
synonym for thread_put(). This makes the code's function more obvious.

Also modify some threaded tests to use thread_join() for waiting,
instead of counting threads with atomics or semaphores.

/*
 * Copyright (c) 2010 Jakub Jermar
 * Copyright (c) 2018 Jiri Svoboda
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_generic_proc
 * @{
 */

/**
 * @file
 * @brief Thread management functions.
 */

#include <assert.h>
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch/cycle.h>
#include <arch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/syswaitq.h>
#include <cpu.h>
#include <str.h>
#include <context.h>
#include <adt/list.h>
#include <adt/odict.h>
#include <time/clock.h>
#include <time/timeout.h>
#include <time/delay.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <memw.h>
#include <stdio.h>
#include <stdlib.h>
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>
#include <debug.h>
#include <halt.h>

/** Thread states */
const char *thread_states[] = {
	"Invalid",
	"Running",
	"Sleeping",
	"Ready",
	"Entering",
	"Exiting",
	"Lingering"
};

/** Lock protecting the @c threads ordered dictionary.
 *
 * For locking rules, see the declaration thereof.
 */
IRQ_SPINLOCK_INITIALIZE(threads_lock);

/** Ordered dictionary of all threads by their address (i.e. pointer to
 * the thread_t structure).
 *
 * When a thread is found in the @c threads ordered dictionary, it is
 * guaranteed to exist as long as the @c threads_lock is held.
 *
 * Members are of type thread_t.
 *
 * This structure contains weak references. Any reference from it must not
 * leave the threads_lock critical section unless strengthened via
 * thread_try_ref().
 */
odict_t threads;

IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
static thread_id_t last_tid = 0;

static slab_cache_t *thread_cache;

static void *threads_getkey(odlink_t *);
static int threads_cmp(void *, void *);

/** Thread wrapper.
 *
 * This wrapper is provided to ensure that every thread makes a call to
 * thread_exit() when its implementing function returns.
 *
 * interrupts_disable() is assumed.
 *
 */
static void cushion(void)
{
	void (*f)(void *) = THREAD->thread_code;
	void *arg = THREAD->thread_arg;

	/* This is where each thread wakes up after its creation */
	irq_spinlock_unlock(&THREAD->lock, false);
	interrupts_enable();

	f(arg);

	thread_exit();

	/* Not reached */
}

/** Initialization and allocation for thread_t structure
 *
 */
static errno_t thr_constructor(void *obj, unsigned int kmflags)
{
	thread_t *thread = (thread_t *) obj;

	irq_spinlock_initialize(&thread->lock, "thread_t_lock");
	link_initialize(&thread->rq_link);
	link_initialize(&thread->wq_link);
	link_initialize(&thread->th_link);

	/* call the architecture-specific part of the constructor */
	thr_constructor_arch(thread);

	/*
	 * Allocate the kernel stack from low memory to prevent an infinite
	 * nesting of TLB-misses when accessing the stack from the part of the
	 * TLB-miss handler written in C.
	 *
	 * Note that low memory is safe to use for the stack as it will be
	 * covered by the kernel identity mapping, which guarantees not to
	 * nest TLB-misses infinitely (either via some hardware mechanism or
	 * by the construction of the assembly-language part of the TLB-miss
	 * handler).
	 *
	 * This restriction can be lifted once each architecture provides
	 * a similar guarantee, for example, by locking the kernel stack
	 * in the TLB whenever it is allocated from high memory and the
	 * thread is being scheduled to run.
	 */
	kmflags |= FRAME_LOWMEM;
	kmflags &= ~FRAME_HIGHMEM;

	/*
	 * NOTE: All kernel stacks must be aligned to STACK_SIZE,
	 * see CURRENT.
	 */

	uintptr_t stack_phys =
	    frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);
	if (!stack_phys)
		return ENOMEM;

	thread->kstack = (uint8_t *) PA2KA(stack_phys);

#ifdef CONFIG_UDEBUG
	mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
#endif

	return EOK;
}

/** Destruction of thread_t object */
static size_t thr_destructor(void *obj)
{
	thread_t *thread = (thread_t *) obj;

	/* call the architecture-specific part of the destructor */
	thr_destructor_arch(thread);

	frame_free(KA2PA(thread->kstack), STACK_FRAMES);

	return STACK_FRAMES; /* number of frames freed */
}

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
	THREAD = NULL;

	atomic_store(&nrdy, 0);
	thread_cache = slab_cache_create("thread_t", sizeof(thread_t), _Alignof(thread_t),
	    thr_constructor, thr_destructor, 0);

	odict_initialize(&threads, threads_getkey, threads_cmp);
}

/** Wire thread to the given CPU
 *
 * @param thread Thread to wire.
 * @param cpu    CPU to wire the thread to.
 *
 */
void thread_wire(thread_t *thread, cpu_t *cpu)
{
	irq_spinlock_lock(&thread->lock, true);
	thread->cpu = cpu;
	thread->nomigrate++;
	irq_spinlock_unlock(&thread->lock, true);
}

/** Invoked right before thread_ready() readies the thread. The thread is locked. */
static void before_thread_is_ready(thread_t *thread)
{
	assert(irq_spinlock_locked(&thread->lock));
}

/** Start a thread that has not been started yet.
 *
 * @param thread A reference to the newly created thread.
 */
void thread_start(thread_t *thread)
{
	assert(thread->state == Entering);
	thread_ready(thread_ref(thread));
}

/** Make thread ready
 *
 * Switch thread to the ready state. Consumes the reference passed by the
 * caller.
 *
 * @param thread Thread to make ready.
 *
 */
void thread_ready(thread_t *thread)
{
	irq_spinlock_lock(&thread->lock, true);

	assert(thread->state != Ready);

	before_thread_is_ready(thread);

	int i = (thread->priority < RQ_COUNT - 1) ?
	    ++thread->priority : thread->priority;

	/* Prefer the CPU on which the thread ran last */
	cpu_t *cpu = thread->cpu ? thread->cpu : CPU;

	thread->state = Ready;

	irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));

	/*
	 * Append the thread to the ready queue of the respective processor.
	 */

	list_append(&thread->rq_link, &cpu->rq[i].rq);
	cpu->rq[i].n++;
	irq_spinlock_unlock(&(cpu->rq[i].lock), true);

	atomic_inc(&nrdy);
	atomic_inc(&cpu->nrdy);
}

/** Create new thread
 *
 * Create a new thread.
 *
 * @param func  Thread's implementing function.
 * @param arg   Thread's implementing function argument.
 * @param task  Task to which the thread belongs. The caller must
 *              guarantee that the task won't cease to exist during the
 *              call. The task's lock may not be held.
 * @param flags Thread flags.
 * @param name  Symbolic name (a copy is made).
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (*func)(void *), void *arg, task_t *task,
    thread_flags_t flags, const char *name)
{
	thread_t *thread = (thread_t *) slab_alloc(thread_cache, FRAME_ATOMIC);
	if (!thread)
		return NULL;

	refcount_init(&thread->refcount);

	if (thread_create_arch(thread, flags) != EOK) {
		slab_free(thread_cache, thread);
		return NULL;
	}

	/* Not needed, but good for debugging */
	memsetb(thread->kstack, STACK_SIZE, 0);

	irq_spinlock_lock(&tidlock, true);
	thread->tid = ++last_tid;
	irq_spinlock_unlock(&tidlock, true);

	memset(&thread->saved_context, 0, sizeof(thread->saved_context));
	context_set(&thread->saved_context, FADDR(cushion),
	    (uintptr_t) thread->kstack, STACK_SIZE);

	current_initialize((current_t *) thread->kstack);

	str_cpy(thread->name, THREAD_NAME_BUFLEN, name);

	thread->thread_code = func;
	thread->thread_arg = arg;
	thread->ucycles = 0;
	thread->kcycles = 0;
	thread->uncounted =
	    ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
	thread->priority = -1; /* Start in rq[0] */
	thread->cpu = NULL;
	thread->stolen = false;
	thread->uspace =
	    ((flags & THREAD_FLAG_USPACE) == THREAD_FLAG_USPACE);

	thread->nomigrate = 0;
	thread->state = Entering;

	atomic_init(&thread->sleep_queue, NULL);

	thread->in_copy_from_uspace = false;
	thread->in_copy_to_uspace = false;

	thread->interrupted = false;
	atomic_init(&thread->sleep_state, SLEEP_INITIAL);

	waitq_initialize(&thread->join_wq);

	thread->task = task;

	thread->fpu_context_exists = false;

	odlink_initialize(&thread->lthreads);

#ifdef CONFIG_UDEBUG
	/* Initialize debugging stuff */
	thread->btrace = false;
	udebug_thread_initialize(&thread->udebug);
#endif

	if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
		thread_attach(thread, task);

	return thread;
}
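
/*
 * A minimal usage sketch of the API above, compiled out. It assumes
 * THREAD_FLAG_NONE as the empty flag set and a hypothetical worker
 * function; see thread_start(), thread_join() and thread_detach().
 */
#if 0
static void example_worker(void *arg)
{
	/* Do some work, then return; cushion() then calls thread_exit(). */
}

static void example_spawn_and_join(void)
{
	thread_t *thread = thread_create(example_worker, NULL, TASK,
	    THREAD_FLAG_NONE, "example");
	if (!thread)
		return;

	/* Ready the thread; thread_start() does not consume our reference. */
	thread_start(thread);

	/*
	 * Wait for the thread to exit; on success, thread_join() also
	 * destroys the reference. A caller that does not wish to wait
	 * would call thread_detach(thread) instead.
	 */
	(void) thread_join(thread);
}
#endif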

/** Destroy thread memory structure
 *
 * Detach thread from all queues, CPUs, etc. and destroy it.
 *
 * @param obj Thread to be destroyed.
 *
 */
static void thread_destroy(void *obj)
{
	thread_t *thread = (thread_t *) obj;

	assert_link_not_used(&thread->rq_link);
	assert_link_not_used(&thread->wq_link);

	assert(thread->task);

	ipl_t ipl = interrupts_disable();

	/* Remove thread from global list. */
	irq_spinlock_lock(&threads_lock, false);
	odict_remove(&thread->lthreads);
	irq_spinlock_unlock(&threads_lock, false);

	/* Remove thread from task's list and accumulate accounting. */
	irq_spinlock_lock(&thread->task->lock, false);

	list_remove(&thread->th_link);

	/*
	 * No other CPU has access to this thread anymore, so we don't need
	 * thread->lock for accessing thread's fields after this point.
	 */

	if (!thread->uncounted) {
		thread->task->ucycles += thread->ucycles;
		thread->task->kcycles += thread->kcycles;
	}

	irq_spinlock_unlock(&thread->task->lock, false);

	assert((thread->state == Exiting) || (thread->state == Lingering));

	/* Clear cpu->fpu_owner if set to this thread. */
#ifdef CONFIG_FPU_LAZY
	if (thread->cpu) {
		/*
		 * We need to lock for this because the old CPU can concurrently try
		 * to dump this thread's FPU state, in which case we need to wait for
		 * it to finish. An atomic compare-and-swap wouldn't be enough.
		 */
		irq_spinlock_lock(&thread->cpu->fpu_lock, false);

		thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner,
		    memory_order_relaxed);

		if (owner == thread) {
			atomic_store_explicit(&thread->cpu->fpu_owner, NULL,
			    memory_order_relaxed);
		}

		irq_spinlock_unlock(&thread->cpu->fpu_lock, false);
	}
#endif

	interrupts_restore(ipl);

	/*
	 * Drop the reference to the containing task.
	 */
	task_release(thread->task);
	thread->task = NULL;

	slab_free(thread_cache, thread);
}

void thread_put(thread_t *thread)
{
	if (refcount_down(&thread->refcount)) {
		thread_destroy(thread);
	}
}

/** Make the thread visible to the system.
 *
 * Attach the thread structure to the current task and make it visible in the
 * @c threads ordered dictionary.
 *
 * @param thread Thread to be attached to the task.
 * @param task   Task to which the thread is to be attached.
 *
 */
void thread_attach(thread_t *thread, task_t *task)
{
	ipl_t ipl = interrupts_disable();

	/*
	 * Attach to the specified task.
	 */
	irq_spinlock_lock(&task->lock, false);

	/* Hold a reference to the task. */
	task_hold(task);

	/* Must not count kbox thread into lifecount */
	if (thread->uspace)
		atomic_inc(&task->lifecount);

	list_append(&thread->th_link, &task->threads);

	irq_spinlock_unlock(&task->lock, false);

	/*
	 * Register this thread in the system-wide dictionary.
	 */
	irq_spinlock_lock(&threads_lock, false);
	odict_insert(&thread->lthreads, &threads, NULL);
	irq_spinlock_unlock(&threads_lock, false);

	interrupts_restore(ipl);
}

/** Terminate thread.
 *
 * End current thread execution and switch it to the exiting state.
 * All pending timeouts are executed.
 *
 */
void thread_exit(void)
{
	if (THREAD->uspace) {
#ifdef CONFIG_UDEBUG
		/* Generate udebug THREAD_E event */
		udebug_thread_e_event();

		/*
		 * This thread will not execute any code or system calls from
		 * now on.
		 */
		udebug_stoppable_begin();
#endif
		if (atomic_predec(&TASK->lifecount) == 0) {
			/*
			 * We are the last userspace thread in the task that
			 * still has not exited. With the exception of the
			 * moment the task was created, new userspace threads
			 * can only be created by threads of the same task.
			 * We are safe to perform cleanup.
			 *
			 */
			ipc_cleanup();
			sys_waitq_task_cleanup();
			LOG("Cleanup of task %" PRIu64 " completed.", TASK->taskid);
		}
	}

	scheduler_enter(Exiting);
	unreachable();
}

/** Interrupts an existing thread so that it may exit as soon as possible.
 *
 * Threads that are blocked waiting for a synchronization primitive
 * are woken up with a return code of EINTR if the
 * blocking call was interruptible. See waitq_sleep_timeout().
 *
 * Interrupted threads automatically exit when returning back to user space.
 *
 * @param thread A valid thread object.
 */
void thread_interrupt(thread_t *thread)
{
	assert(thread != NULL);
	thread->interrupted = true;
	thread_wakeup(thread);
}

/** Prepare for putting the thread to sleep.
 *
 * @returns whether the thread is currently terminating. If THREAD_OK
 * is returned, the thread is guaranteed to be woken up instantly if it is
 * terminated at any time between this function's return and
 * thread_wait_finish(). If THREAD_TERMINATING is returned, the thread can
 * still go to sleep, but doing so will delay termination.
 */
thread_termination_state_t thread_wait_start(void)
{
	assert(THREAD != NULL);

	/*
	 * This is an exchange rather than a store so that we can use the acquire
	 * semantics, which is needed to ensure that code after this operation sees
	 * memory ops made before thread_wakeup() in another thread, if that wakeup
	 * was reset by this operation.
	 *
	 * In particular, we need this to ensure we can't miss the thread being
	 * terminated concurrently with a synchronization primitive preparing to
	 * sleep.
	 */
	(void) atomic_exchange_explicit(&THREAD->sleep_state, SLEEP_INITIAL,
	    memory_order_acquire);

	return THREAD->interrupted ? THREAD_TERMINATING : THREAD_OK;
}

static void thread_wait_timeout_callback(void *arg)
{
	thread_wakeup(arg);
}

/**
 * Suspends this thread's execution until thread_wakeup() is called on it,
 * or the deadline is reached.
 *
 * The way this would normally be used is that the current thread calls
 * thread_wait_start(), and if interruption has not been signaled, stores
 * a reference to itself in a synchronized structure (such as waitq).
 * After that, it releases any spinlocks it might hold and calls this function.
 *
 * The thread doing the wakeup will acquire the thread's reference from said
 * synchronized structure and call thread_wakeup() on it.
 *
 * Notably, there can be more than one thread performing wakeup.
 * The number of performed calls to thread_wakeup(), or their relative
 * ordering with thread_wait_finish(), does not matter. However, calls to
 * thread_wakeup() are expected to be synchronized with thread_wait_start()
 * with which they are associated, otherwise wakeups may be missed.
 * Nevertheless, the operation of thread_wakeup() is defined at any time,
 * synchronization notwithstanding (in the sense of C un/defined behavior),
 * and is in fact used to interrupt waiting threads by external events.
 * The waiting thread must operate correctly in face of spurious wakeups,
 * and clean up its reference in the synchronization structure if necessary.
 *
 * Returns THREAD_WAIT_TIMEOUT if the timeout fired, which is a necessary
 * condition for the thread to have been woken up by the timeout, but the
 * caller must assume that proper wakeups, timeouts and interrupts may occur
 * concurrently, so the fact that the timeout fired does not necessarily mean
 * the thread has not been woken up or interrupted.
 */
thread_wait_result_t thread_wait_finish(deadline_t deadline)
{
	assert(THREAD != NULL);

	timeout_t timeout;

	/* Extra check to avoid going to scheduler if we don't need to. */
	if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
	    SLEEP_INITIAL)
		return THREAD_WAIT_SUCCESS;

	if (deadline != DEADLINE_NEVER) {
		timeout_initialize(&timeout);
		timeout_register_deadline(&timeout, deadline,
		    thread_wait_timeout_callback, THREAD);
	}

	scheduler_enter(Sleeping);

	if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
		return THREAD_WAIT_TIMEOUT;
	} else {
		return THREAD_WAIT_SUCCESS;
	}
}
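
/*
 * A minimal sketch of the protocol described above, compiled out. The
 * slot variable, its lock and the reference choreography are hypothetical
 * simplifications; real users keep the reference in a synchronization
 * primitive's own structure (see waitq).
 */
#if 0
IRQ_SPINLOCK_STATIC_INITIALIZE(example_lock);
static thread_t *example_sleeper = NULL;

static void example_wait(void)
{
	if (thread_wait_start() == THREAD_TERMINATING)
		return; /* Terminating; don't go to sleep. */

	/* Publish a reference that a waker may later consume. */
	irq_spinlock_lock(&example_lock, true);
	example_sleeper = thread_ref(THREAD);
	irq_spinlock_unlock(&example_lock, true);

	/* Sleep until woken up; spurious wakeups are possible. */
	(void) thread_wait_finish(DEADLINE_NEVER);

	/* On a spurious wakeup, our reference may still sit in the slot. */
	irq_spinlock_lock(&example_lock, true);
	thread_t *leftover = example_sleeper;
	example_sleeper = NULL;
	irq_spinlock_unlock(&example_lock, true);

	if (leftover)
		thread_put(leftover);
}

static void example_wake(void)
{
	irq_spinlock_lock(&example_lock, true);
	thread_t *thread = example_sleeper;
	example_sleeper = NULL;
	irq_spinlock_unlock(&example_lock, true);

	if (thread) {
		thread_wakeup(thread);
		thread_put(thread);
	}
}
#endif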

void thread_wakeup(thread_t *thread)
{
	assert(thread != NULL);

	int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
	    memory_order_acq_rel);

	if (state == SLEEP_ASLEEP) {
		/*
		 * Only one thread gets to do this.
		 * The reference consumed here is the reference implicitly passed to
		 * the waking thread by the sleeper in thread_wait_finish().
		 */
		thread_ready(thread);
	}
}

/** Prevent the current thread from being migrated to another processor. */
void thread_migration_disable(void)
{
	assert(THREAD);

	THREAD->nomigrate++;
}

/** Allow the current thread to be migrated to another processor. */
void thread_migration_enable(void)
{
	assert(THREAD);
	assert(THREAD->nomigrate > 0);

	if (THREAD->nomigrate > 0)
		THREAD->nomigrate--;
}

/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(uint32_t sec)
{
	/*
	 * Sleep in 1000-second steps to support
	 * the full argument range
	 */
	while (sec > 0) {
		uint32_t period = (sec > 1000) ? 1000 : sec;

		thread_usleep(period * 1000000);
		sec -= period;
	}
}

errno_t thread_join(thread_t *thread)
{
	return thread_join_timeout(thread, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
}

/** Wait for another thread to exit.
 * After a successful wait, the thread reference is destroyed.
 *
 * @param thread Thread to join on exit.
 * @param usec Timeout in microseconds.
 * @param flags Mode of operation.
 *
 * @return An error code from errno.h or an error code from synch.h.
 *
 */
errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
{
	assert(thread != NULL);

	if (thread == THREAD)
		return EINVAL;

	irq_spinlock_lock(&thread->lock, true);
	state_t state = thread->state;
	irq_spinlock_unlock(&thread->lock, true);

	errno_t rc = EOK;

	if (state != Exiting)
		rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);

	if (rc == EOK)
		thread_put(thread);

	return rc;
}

/** Detach a thread that will never be joined.
 *
 * Drops the caller's reference to the thread. A synonym for thread_put();
 * using it makes the intent at the call site explicit.
 */
void thread_detach(thread_t *thread)
{
	thread_put(thread);
}

/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(uint32_t usec)
{
	waitq_t wq;

	waitq_initialize(&wq);

	(void) waitq_sleep_timeout(&wq, usec);
}

/** Allow other threads to run. */
void thread_yield(void)
{
	assert(THREAD != NULL);
	scheduler_enter(Running);
}

static void thread_print(thread_t *thread, bool additional)
{
	uint64_t ucycles, kcycles;
	char usuffix, ksuffix;
	order_suffix(thread->ucycles, &ucycles, &usuffix);
	order_suffix(thread->kcycles, &kcycles, &ksuffix);

	char *name;
	if (str_cmp(thread->name, "uinit") == 0)
		name = thread->task->name;
	else
		name = thread->name;

	if (additional)
		printf("%-8" PRIu64 " %p %p %9" PRIu64 "%c %9" PRIu64 "%c ",
		    thread->tid, thread->thread_code, thread->kstack,
		    ucycles, usuffix, kcycles, ksuffix);
	else
		printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
		    thread->tid, name, thread, thread_states[thread->state],
		    thread->task, thread->task->container);

	if (additional) {
		if (thread->cpu)
			printf("%-5u", thread->cpu->id);
		else
			printf("none ");

		if (thread->state == Sleeping) {
			printf(" %p", thread->sleep_queue);
		}

		printf("\n");
	}
}

/** Print list of threads debug info
 *
 * @param additional Print additional information.
 *
 */
void thread_print_list(bool additional)
{
	thread_t *thread;

	/* Accessing system-wide threads list through thread_first()/thread_next(). */
	irq_spinlock_lock(&threads_lock, true);

	if (sizeof(void *) <= 4) {
		if (additional)
			printf("[id    ] [code    ] [stack   ] [ucycles ] [kcycles ]"
			    " [cpu] [waitqueue]\n");
		else
			printf("[id    ] [name        ] [address ] [state ] [task    ]"
			    " [ctn]\n");
	} else {
		if (additional) {
			printf("[id    ] [code            ] [stack           ] [ucycles ] [kcycles ]"
			    " [cpu] [waitqueue       ]\n");
		} else
			printf("[id    ] [name        ] [address         ] [state ]"
			    " [task            ] [ctn]\n");
	}

	thread = thread_first();
	while (thread != NULL) {
		thread_print(thread, additional);
		thread = thread_next(thread);
	}

	irq_spinlock_unlock(&threads_lock, true);
}

static bool thread_exists(thread_t *thread)
{
	odlink_t *odlink = odict_find_eq(&threads, thread, NULL);
	return odlink != NULL;
}

/** Check whether the thread exists, and if so, return a reference to it.
 */
thread_t *thread_try_get(thread_t *thread)
{
	irq_spinlock_lock(&threads_lock, true);

	if (thread_exists(thread)) {
		/* Try to strengthen the reference. */
		thread = thread_try_ref(thread);
	} else {
		thread = NULL;
	}

	irq_spinlock_unlock(&threads_lock, true);

	return thread;
}

/** Update accounting of current thread.
 *
 * Note that THREAD->lock must already be held and
 * interrupts must already be disabled.
 *
 * @param user True to update user accounting, false for kernel.
 *
 */
void thread_update_accounting(bool user)
{
	uint64_t time = get_cycle();

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&THREAD->lock));

	if (user)
		THREAD->ucycles += time - THREAD->last_cycle;
	else
		THREAD->kcycles += time - THREAD->last_cycle;

	THREAD->last_cycle = time;
}

/** Find thread structure corresponding to thread ID.
 *
 * The threads_lock must already be held by the caller of this function and
 * interrupts must be disabled.
 *
 * The returned reference is weak.
 * If the caller needs to keep it, thread_try_ref() must be used to upgrade
 * to a strong reference _before_ threads_lock is released.
 *
 * @param thread_id Thread ID.
 *
 * @return Thread structure address or NULL if there is no such thread ID.
 *
 */
thread_t *thread_find_by_id(thread_id_t thread_id)
{
	thread_t *thread;

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	thread = thread_first();
	while (thread != NULL) {
		if (thread->tid == thread_id)
			return thread;

		thread = thread_next(thread);
	}

	return NULL;
}
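
/*
 * A minimal sketch, compiled out, of the weak-to-strong upgrade rule
 * described above: the weak reference returned by thread_find_by_id()
 * is strengthened with thread_try_ref() before threads_lock is released,
 * as thread_stack_trace() below also does.
 */
#if 0
static thread_t *example_get_by_id(thread_id_t id)
{
	irq_spinlock_lock(&threads_lock, true);

	thread_t *thread = thread_find_by_id(id);
	if (thread) {
		/* May fail if the thread is concurrently being destroyed. */
		thread = thread_try_ref(thread);
	}

	irq_spinlock_unlock(&threads_lock, true);

	/* Strong reference or NULL; release with thread_put() when done. */
	return thread;
}
#endif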

/** Get count of threads.
 *
 * @return Number of threads in the system
 */
size_t thread_count(void)
{
	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	return odict_count(&threads);
}

/** Get first thread.
 *
 * @return Pointer to first thread or @c NULL if there are none.
 */
thread_t *thread_first(void)
{
	odlink_t *odlink;

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	odlink = odict_first(&threads);
	if (odlink == NULL)
		return NULL;

	return odict_get_instance(odlink, thread_t, lthreads);
}

/** Get next thread.
 *
 * @param cur Current thread
 * @return Pointer to next thread or @c NULL if there are no more threads.
 */
thread_t *thread_next(thread_t *cur)
{
	odlink_t *odlink;

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	odlink = odict_next(&cur->lthreads, &threads);
	if (odlink == NULL)
		return NULL;

	return odict_get_instance(odlink, thread_t, lthreads);
}

#ifdef CONFIG_UDEBUG

void thread_stack_trace(thread_id_t thread_id)
{
	irq_spinlock_lock(&threads_lock, true);
	thread_t *thread = thread_try_ref(thread_find_by_id(thread_id));
	irq_spinlock_unlock(&threads_lock, true);

	if (thread == NULL) {
		printf("No such thread.\n");
		return;
	}

	/*
	 * Schedule a stack trace to be printed
	 * just before the thread is scheduled next.
	 *
	 * If the thread is sleeping then try to interrupt
	 * the sleep. Any request for printing a uspace stack
	 * trace from within the kernel should always be
	 * considered a last-resort debugging means, therefore
	 * forcing the thread's sleep to be interrupted
	 * is probably justifiable.
	 */

	irq_spinlock_lock(&thread->lock, true);

	bool sleeping = false;
	istate_t *istate = thread->udebug.uspace_state;
	if (istate != NULL) {
		printf("Scheduling thread stack trace.\n");
		thread->btrace = true;
		if (thread->state == Sleeping)
			sleeping = true;
	} else
		printf("Thread interrupt state not available.\n");

	irq_spinlock_unlock(&thread->lock, true);

	if (sleeping)
		thread_wakeup(thread);

	thread_put(thread);
}

#endif /* CONFIG_UDEBUG */

/** Get key function for the @c threads ordered dictionary.
 *
 * @param odlink Link
 * @return Pointer to thread structure cast as 'void *'
 */
static void *threads_getkey(odlink_t *odlink)
{
	thread_t *thread = odict_get_instance(odlink, thread_t, lthreads);
	return (void *) thread;
}

/** Key comparison function for the @c threads ordered dictionary.
 *
 * @param a Pointer to thread A
 * @param b Pointer to thread B
 * @return -1, 0, 1 iff pointer to A is greater than, equal to, less than B
 */
static int threads_cmp(void *a, void *b)
{
	if (a > b)
		return -1;
	else if (a == b)
		return 0;
	else
		return +1;
}

/** Process syscall to create new thread.
 *
 */
sys_errno_t sys_thread_create(uspace_ptr_uspace_arg_t uspace_uarg, uspace_ptr_char uspace_name,
    size_t name_len, uspace_ptr_thread_id_t uspace_thread_id)
{
	if (name_len > THREAD_NAME_BUFLEN - 1)
		name_len = THREAD_NAME_BUFLEN - 1;

	char namebuf[THREAD_NAME_BUFLEN];
	errno_t rc = copy_from_uspace(namebuf, uspace_name, name_len);
	if (rc != EOK)
		return (sys_errno_t) rc;

	namebuf[name_len] = 0;

	/*
	 * In case of failure, kernel_uarg will be deallocated in this function.
	 * In case of success, kernel_uarg will be freed in uinit().
	 */
	uspace_arg_t *kernel_uarg =
	    (uspace_arg_t *) malloc(sizeof(uspace_arg_t));
	if (!kernel_uarg)
		return (sys_errno_t) ENOMEM;

	rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
	if (rc != EOK) {
		free(kernel_uarg);
		return (sys_errno_t) rc;
	}

	thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
	    THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf);
	if (thread) {
		if (uspace_thread_id) {
			rc = copy_to_uspace(uspace_thread_id, &thread->tid,
			    sizeof(thread->tid));
			if (rc != EOK) {
				/*
				 * We have encountered a failure, but the thread
				 * has already been created. We need to undo its
				 * creation now.
				 */

				/*
				 * The new thread structure is initialized, but
				 * is still not visible to the system.
				 * We can safely deallocate it.
				 */
				slab_free(thread_cache, thread);
				free(kernel_uarg);

				return (sys_errno_t) rc;
			}
		}

#ifdef CONFIG_UDEBUG
		/*
		 * Generate udebug THREAD_B event and attach the thread.
		 * This must be done atomically (with the debug locks held),
		 * otherwise we would either miss some thread or receive
		 * THREAD_B events for threads that already existed
		 * and could be detected with THREAD_READ before.
		 */
		udebug_thread_b_event_attach(thread, TASK);
#else
		thread_attach(thread, TASK);
#endif
		thread_ready(thread);

		return 0;
	} else
		free(kernel_uarg);

	return (sys_errno_t) ENOMEM;
}

/** Process syscall to terminate thread.
 *
 */
sys_errno_t sys_thread_exit(int uspace_status)
{
	thread_exit();
}

/** Syscall for getting TID.
 *
 * @param uspace_thread_id Userspace address of 8-byte buffer where to store
 *                         current thread ID.
 *
 * @return 0 on success or an error code from @ref errno.h.
 *
 */
sys_errno_t sys_thread_get_id(uspace_ptr_thread_id_t uspace_thread_id)
{
	/*
	 * No need to acquire lock on THREAD because tid
	 * remains constant for the lifespan of the thread.
	 *
	 */
	return (sys_errno_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
	    sizeof(THREAD->tid));
}

/** Syscall wrapper for sleeping. */
sys_errno_t sys_thread_usleep(uint32_t usec)
{
	thread_usleep(usec);
	return 0;
}

sys_errno_t sys_thread_udelay(uint32_t usec)
{
	delay(usec);
	return 0;
}

/** @}
 */