source: mainline/kernel/generic/src/proc/thread.c@3fcea34

Last change on this file since 3fcea34 was 3fcea34, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 9 months ago

Simplify the SYS_THREAD_CREATE syscall interface

Removed the beefy uarg structure. Instead, the syscall gets two
parameters: %pc (program counter) and %sp (stack pointer). It starts
a thread with those values in corresponding registers, with no other
fuss whatsoever.

libc initializes threads by storing any other needed arguments on
the stack and retrieving them in thread_entry. Importantly, this
includes the address of the thread_main function, which is now
called indirectly to fix dynamic linking issues on some archs.

There's a bit of weirdness on SPARC and IA-64, because of their
stacked register handling. The current solution is that we require
some space *above* the stack pointer to be available for those
architectures. I think for SPARC, it can be made more normal.

For the remaining ones, we can (probably) just set the initial
%sp to the top edge of the stack. There are some lingering offsets
on some archs just because I didn't want to accidentally break
anything. The initial thread bringup should be functionally
unchanged from the previous state, and no binaries are currently
multithreaded except the thread1 test, so there should be minimal
risk of breakage. Naturally, I tested all available emulator
builds, save for msim.
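
As an illustration of the libc-side scheme described above, here is a minimal
sketch in C of starting a thread with nothing but %pc and %sp. This is not the
actual HelenOS libc code: the wrapper name sys_thread_create_wrapper, the
thread_boot_t layout and its 16-byte alignment, and the downward-growing stack
are assumptions, and it glosses over the extra space SPARC and IA-64 need
above %sp. Only thread_entry and thread_main come from the description above;
the real thread_entry is a per-architecture assembly stub.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Data the new thread finds at its initial stack pointer. */
typedef struct {
        void (*thread_main)(void *);  /* called indirectly by thread_entry */
        void *arg;
} thread_boot_t;

/* Assumed wrapper around the SYS_THREAD_CREATE syscall (pc, sp, name). */
extern int sys_thread_create_wrapper(uintptr_t pc, uintptr_t sp,
    const char *name, size_t name_len);

/* Per-architecture entry stub: reads the thread_boot_t located at the initial
   %sp and calls boot->thread_main(boot->arg) before building any stack frame. */
extern void thread_entry(void);

static int start_thread(void *stack, size_t stack_size,
    void (*main)(void *), void *arg, const char *name)
{
        /* Store the indirect-call target and its argument at the top edge of
           the stack; the kernel does not touch data below the initial %sp. */
        uintptr_t top = (uintptr_t) stack + stack_size;
        uintptr_t boot_addr = (top - sizeof(thread_boot_t)) & ~(uintptr_t) 0xf;
        thread_boot_t *boot = (thread_boot_t *) boot_addr;
        boot->thread_main = main;
        boot->arg = arg;

        /* The new thread starts executing thread_entry with %sp == boot_addr. */
        return sys_thread_create_wrapper((uintptr_t) thread_entry, boot_addr,
            name, strlen(name));
}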

  • Property mode set to 100644
File size: 26.5 KB
1/*
2 * Copyright (c) 2010 Jakub Jermar
3 * Copyright (c) 2018 Jiri Svoboda
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup kernel_generic_proc
31 * @{
32 */
33
34/**
35 * @file
36 * @brief Thread management functions.
37 */
38
39#include <assert.h>
40#include <proc/scheduler.h>
41#include <proc/thread.h>
42#include <proc/task.h>
43#include <mm/frame.h>
44#include <mm/page.h>
45#include <arch/asm.h>
46#include <arch/cycle.h>
47#include <arch.h>
48#include <synch/spinlock.h>
49#include <synch/waitq.h>
50#include <synch/syswaitq.h>
51#include <cpu.h>
52#include <str.h>
53#include <context.h>
54#include <adt/list.h>
55#include <adt/odict.h>
56#include <time/clock.h>
57#include <time/timeout.h>
58#include <time/delay.h>
59#include <config.h>
60#include <arch/interrupt.h>
61#include <smp/ipi.h>
62#include <atomic.h>
63#include <memw.h>
64#include <stdio.h>
65#include <stdlib.h>
66#include <main/uinit.h>
67#include <syscall/copy.h>
68#include <errno.h>
69#include <debug.h>
70#include <halt.h>
71
72/** Thread states */
73const char *thread_states[] = {
74 "Invalid",
75 "Running",
76 "Sleeping",
77 "Ready",
78 "Entering",
79 "Exiting",
80 "Lingering"
81};
82
83/** Lock protecting the @c threads ordered dictionary.
84 *
85 * For locking rules, see declaration thereof.
86 */
87IRQ_SPINLOCK_INITIALIZE(threads_lock);
88
89/** Ordered dictionary of all threads by their address (i.e. pointer to
90 * the thread_t structure).
91 *
92 * When a thread is found in the @c threads ordered dictionary, it is
93 * guaranteed to exist as long as the @c threads_lock is held.
94 *
95 * Members are of type thread_t.
96 *
97 * This structure contains weak references. Any reference from it must not leave
98 * the threads_lock critical section unless strengthened via thread_try_ref().
99 */
100odict_t threads;
101
102IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
103static thread_id_t last_tid = 0;
104
105static slab_cache_t *thread_cache;
106
107static void *threads_getkey(odlink_t *);
108static int threads_cmp(void *, void *);
109
110/** Initialization and allocation for thread_t structure
111 *
112 */
113static errno_t thr_constructor(void *obj, unsigned int kmflags)
114{
115 thread_t *thread = (thread_t *) obj;
116
117 link_initialize(&thread->rq_link);
118 link_initialize(&thread->wq_link);
119 link_initialize(&thread->th_link);
120
121 /* call the architecture-specific part of the constructor */
122 thr_constructor_arch(thread);
123
124 /*
125 * Allocate the kernel stack from the low-memory to prevent an infinite
126 * nesting of TLB-misses when accessing the stack from the part of the
127 * TLB-miss handler written in C.
128 *
129 * Note that low-memory is safe to be used for the stack as it will be
130 * covered by the kernel identity mapping, which guarantees not to
131 * nest TLB-misses infinitely (either via some hardware mechanism or
132 * by the construction of the assembly-language part of the TLB-miss
133 * handler).
134 *
135 * This restriction can be lifted once each architecture provides
136 * a similar guarantee, for example, by locking the kernel stack
137 * in the TLB whenever it is allocated from the high-memory and the
138 * thread is being scheduled to run.
139 */
140 kmflags |= FRAME_LOWMEM;
141 kmflags &= ~FRAME_HIGHMEM;
142
143 /*
144 * NOTE: All kernel stacks must be aligned to STACK_SIZE,
145 * see CURRENT.
146 */
147
148 uintptr_t stack_phys =
149 frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);
150 if (!stack_phys)
151 return ENOMEM;
152
153 thread->kstack = (uint8_t *) PA2KA(stack_phys);
154
155#ifdef CONFIG_UDEBUG
156 mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
157#endif
158
159 return EOK;
160}
161
162/** Destruction of thread_t object */
163static size_t thr_destructor(void *obj)
164{
165 thread_t *thread = (thread_t *) obj;
166
167 /* call the architecture-specific part of the destructor */
168 thr_destructor_arch(thread);
169
170 frame_free(KA2PA(thread->kstack), STACK_FRAMES);
171
172 return STACK_FRAMES; /* number of frames freed */
173}
174
175/** Initialize threads
176 *
177 * Initialize kernel threads support.
178 *
179 */
180void thread_init(void)
181{
182 THREAD = NULL;
183
184 atomic_store(&nrdy, 0);
185 thread_cache = slab_cache_create("thread_t", sizeof(thread_t), _Alignof(thread_t),
186 thr_constructor, thr_destructor, 0);
187
188 odict_initialize(&threads, threads_getkey, threads_cmp);
189}
190
191/** Wire thread to the given CPU
192 *
193 * @param cpu CPU to wire the thread to.
194 *
195 */
196void thread_wire(thread_t *thread, cpu_t *cpu)
197{
198 ipl_t ipl = interrupts_disable();
199 atomic_set_unordered(&thread->cpu, cpu);
200 thread->nomigrate++;
201 interrupts_restore(ipl);
202}
203
204/** Start a thread that has not yet been started.
205 *
206 * @param thread A reference to the newly created thread.
207 */
208void thread_start(thread_t *thread)
209{
210 assert(atomic_get_unordered(&thread->state) == Entering);
211 thread_requeue_sleeping(thread_ref(thread));
212}
213
214/** Create new thread
215 *
216 * Create a new thread.
217 *
218 * @param func Thread's implementing function.
219 * @param arg Thread's implementing function argument.
220 * @param task Task to which the thread belongs. The caller must
221 * guarantee that the task won't cease to exist during the
222 * call. The task's lock may not be held.
223 * @param flags Thread flags.
224 * @param name Symbolic name (a copy is made).
225 *
226 * @return New thread's structure on success, NULL on failure.
227 *
228 */
229thread_t *thread_create(void (*func)(void *), void *arg, task_t *task,
230 thread_flags_t flags, const char *name)
231{
232 thread_t *thread = (thread_t *) slab_alloc(thread_cache, FRAME_ATOMIC);
233 if (!thread)
234 return NULL;
235
236 refcount_init(&thread->refcount);
237
238 if (thread_create_arch(thread, flags) != EOK) {
239 slab_free(thread_cache, thread);
240 return NULL;
241 }
242
243 /* Not needed, but good for debugging */
244 memsetb(thread->kstack, STACK_SIZE, 0);
245
246 irq_spinlock_lock(&tidlock, true);
247 thread->tid = ++last_tid;
248 irq_spinlock_unlock(&tidlock, true);
249
250 context_create(&thread->saved_context, thread_main_func,
251 thread->kstack, STACK_SIZE);
252
253 current_initialize((current_t *) thread->kstack);
254
255 str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
256
257 thread->thread_code = func;
258 thread->thread_arg = arg;
259 thread->ucycles = ATOMIC_TIME_INITIALIZER();
260 thread->kcycles = ATOMIC_TIME_INITIALIZER();
261 thread->uncounted =
262 ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
263 atomic_init(&thread->priority, 0);
264 atomic_init(&thread->cpu, NULL);
265 thread->stolen = false;
266 thread->uspace =
267 ((flags & THREAD_FLAG_USPACE) == THREAD_FLAG_USPACE);
268
269 thread->nomigrate = 0;
270 atomic_init(&thread->state, Entering);
271
272 atomic_init(&thread->sleep_queue, NULL);
273
274 thread->in_copy_from_uspace = false;
275 thread->in_copy_to_uspace = false;
276
277 thread->interrupted = false;
278 atomic_init(&thread->sleep_state, SLEEP_INITIAL);
279
280 waitq_initialize(&thread->join_wq);
281
282 thread->task = task;
283
284 thread->fpu_context_exists = false;
285
286 odlink_initialize(&thread->lthreads);
287
288#ifdef CONFIG_UDEBUG
289 /* Initialize debugging stuff */
290 atomic_init(&thread->btrace, false);
291 udebug_thread_initialize(&thread->udebug);
292#endif
293
294 if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
295 thread_attach(thread, task);
296
297 return thread;
298}
299
300/** Destroy thread memory structure
301 *
302 * Detach thread from all queues, cpus etc. and destroy it.
303 *
304 * @param obj Thread to be destroyed.
305 *
306 */
307static void thread_destroy(void *obj)
308{
309 thread_t *thread = (thread_t *) obj;
310
311 assert_link_not_used(&thread->rq_link);
312 assert_link_not_used(&thread->wq_link);
313
314 assert(thread->task);
315
316 ipl_t ipl = interrupts_disable();
317
318 /* Remove thread from global list. */
319 irq_spinlock_lock(&threads_lock, false);
320 odict_remove(&thread->lthreads);
321 irq_spinlock_unlock(&threads_lock, false);
322
323 /* Remove thread from task's list and accumulate accounting. */
324 irq_spinlock_lock(&thread->task->lock, false);
325
326 list_remove(&thread->th_link);
327
328 /*
329 * No other CPU has access to this thread anymore, so we don't need
330 * thread->lock for accessing thread's fields after this point.
331 */
332
333 if (!thread->uncounted) {
334 thread->task->ucycles += atomic_time_read(&thread->ucycles);
335 thread->task->kcycles += atomic_time_read(&thread->kcycles);
336 }
337
338 irq_spinlock_unlock(&thread->task->lock, false);
339
340 assert((atomic_get_unordered(&thread->state) == Exiting) || (atomic_get_unordered(&thread->state) == Lingering));
341
342 /* Clear cpu->fpu_owner if set to this thread. */
343#ifdef CONFIG_FPU_LAZY
344 cpu_t *cpu = atomic_get_unordered(&thread->cpu);
345 if (cpu) {
346 /*
347 * We need to lock for this because the old CPU can concurrently try
348 * to dump this thread's FPU state, in which case we need to wait for
349 * it to finish. An atomic compare-and-swap wouldn't be enough.
350 */
351 irq_spinlock_lock(&cpu->fpu_lock, false);
352
353 if (atomic_get_unordered(&cpu->fpu_owner) == thread)
354 atomic_set_unordered(&cpu->fpu_owner, NULL);
355
356 irq_spinlock_unlock(&cpu->fpu_lock, false);
357 }
358#endif
359
360 interrupts_restore(ipl);
361
362 /*
363 * Drop the reference to the containing task.
364 */
365 task_release(thread->task);
366 thread->task = NULL;
367
368 slab_free(thread_cache, thread);
369}
370
371void thread_put(thread_t *thread)
372{
373 if (refcount_down(&thread->refcount)) {
374 thread_destroy(thread);
375 }
376}
377
378/** Make the thread visible to the system.
379 *
380 * Attach the thread structure to the current task and make it visible in the
381 * threads_tree.
382 *
383 * @param t Thread to be attached to the task.
384 * @param task Task to which the thread is to be attached.
385 *
386 */
387void thread_attach(thread_t *thread, task_t *task)
388{
389 ipl_t ipl = interrupts_disable();
390
391 /*
392 * Attach to the specified task.
393 */
394 irq_spinlock_lock(&task->lock, false);
395
396 /* Hold a reference to the task. */
397 task_hold(task);
398
399 /* Must not count kbox thread into lifecount */
400 if (thread->uspace)
401 atomic_inc(&task->lifecount);
402
403 list_append(&thread->th_link, &task->threads);
404
405 irq_spinlock_unlock(&task->lock, false);
406
407 /*
408 * Register this thread in the system-wide dictionary.
409 */
410 irq_spinlock_lock(&threads_lock, false);
411 odict_insert(&thread->lthreads, &threads, NULL);
412 irq_spinlock_unlock(&threads_lock, false);
413
414 interrupts_restore(ipl);
415}
416
417/** Terminate thread.
418 *
419 * End current thread execution and switch it to the exiting state.
420 * All pending timeouts are executed.
421 *
422 */
423void thread_exit(void)
424{
425 if (THREAD->uspace) {
426#ifdef CONFIG_UDEBUG
427 /* Generate udebug THREAD_E event */
428 udebug_thread_e_event();
429
430 /*
431 * This thread will not execute any code or system calls from
432 * now on.
433 */
434 udebug_stoppable_begin();
435#endif
436 if (atomic_predec(&TASK->lifecount) == 0) {
437 /*
438 * We are the last userspace thread in the task that
439 * still has not exited. With the exception of the
440 * moment the task was created, new userspace threads
441 * can only be created by threads of the same task.
442 * We are safe to perform cleanup.
443 *
444 */
445 ipc_cleanup();
446 sys_waitq_task_cleanup();
447 LOG("Cleanup of task %" PRIu64 " completed.", TASK->taskid);
448 }
449 }
450
451 scheduler_enter(Exiting);
452 unreachable();
453}
454
455/** Interrupts an existing thread so that it may exit as soon as possible.
456 *
457 * Threads that are blocked waiting for a synchronization primitive
458 * are woken up with a return code of EINTR if the
459 * blocking call was interruptible. See waitq_sleep_timeout().
460 *
461 * Interrupted threads automatically exit when returning back to user space.
462 *
463 * @param thread A valid thread object.
464 */
465void thread_interrupt(thread_t *thread)
466{
467 assert(thread != NULL);
468 thread->interrupted = true;
469 thread_wakeup(thread);
470}
471
472/** Prepare for putting the thread to sleep.
473 *
474 * @returns whether the thread is currently terminating. If THREAD_OK
475 * is returned, the thread is guaranteed to be woken up instantly if the thread
476 * is terminated at any time between this function's return and
477 * thread_wait_finish(). If THREAD_TERMINATING is returned, the thread can still
478 * go to sleep, but doing so will delay termination.
479 */
480thread_termination_state_t thread_wait_start(void)
481{
482 assert(THREAD != NULL);
483
484 /*
485 * This is an exchange rather than a store so that we can use the acquire
486 * semantics, which is needed to ensure that code after this operation sees
487 * memory ops made before thread_wakeup() in another thread, if that wakeup
488 * was reset by this operation.
489 *
490 * In particular, we need this to ensure we can't miss the thread being
491 * terminated concurrently with a synchronization primitive preparing to
492 * sleep.
493 */
494 (void) atomic_exchange_explicit(&THREAD->sleep_state, SLEEP_INITIAL,
495 memory_order_acquire);
496
497 return THREAD->interrupted ? THREAD_TERMINATING : THREAD_OK;
498}
499
500static void thread_wait_timeout_callback(void *arg)
501{
502 thread_wakeup(arg);
503}
504
505/**
506 * Suspends this thread's execution until thread_wakeup() is called on it,
507 * or deadline is reached.
508 *
509 * The way this would normally be used is that the current thread calls
510 * thread_wait_start(), and if interruption has not been signaled, stores
511 * a reference to itself in a synchronized structure (such as a waitq).
512 * After that, it releases any spinlocks it might hold and calls this function.
513 *
514 * The thread doing the wakeup acquires the thread's reference from said
515 * synchronized structure and calls thread_wakeup() on it.
516 *
517 * Notably, there can be more than one thread performing wakeup.
518 * The number of performed calls to thread_wakeup(), or their relative
519 * ordering with thread_wait_finish(), does not matter. However, calls to
520 * thread_wakeup() are expected to be synchronized with thread_wait_start()
521 * with which they are associated, otherwise wakeups may be missed.
522 * However, the operation of thread_wakeup() is defined at any time,
523 * synchronization notwithstanding (in the sense of C un/defined behavior),
524 * and is in fact used to interrupt waiting threads by external events.
525 * The waiting thread must operate correctly in the face of spurious wakeups,
526 * and clean up its reference in the synchronization structure if necessary.
527 *
528 * Returns THREAD_WAIT_TIMEOUT if the timeout fired, which is a necessary condition
529 * for the thread to have been woken up by the timeout, but the caller must assume
530 * that proper wakeups, timeouts and interrupts may occur concurrently, so
531 * the fact that the timeout fired does not necessarily mean the thread
532 * has not been woken up or interrupted.
533 */
534thread_wait_result_t thread_wait_finish(deadline_t deadline)
535{
536 assert(THREAD != NULL);
537
538 timeout_t timeout;
539
540 /* Extra check to avoid going to scheduler if we don't need to. */
541 if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
542 SLEEP_INITIAL)
543 return THREAD_WAIT_SUCCESS;
544
545 if (deadline != DEADLINE_NEVER) {
546 timeout_initialize(&timeout);
547 timeout_register_deadline(&timeout, deadline,
548 thread_wait_timeout_callback, THREAD);
549 }
550
551 scheduler_enter(Sleeping);
552
553 if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
554 return THREAD_WAIT_TIMEOUT;
555 } else {
556 return THREAD_WAIT_SUCCESS;
557 }
558}
559
560void thread_wakeup(thread_t *thread)
561{
562 assert(thread != NULL);
563
564 int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
565 memory_order_acq_rel);
566
567 if (state == SLEEP_ASLEEP) {
568 /*
569 * Only one thread gets to do this.
570 * The reference consumed here is the reference implicitly passed to
571 * the waking thread by the sleeper in thread_wait_finish().
572 */
573 thread_requeue_sleeping(thread);
574 }
575}
576
577/** Prevent the current thread from being migrated to another processor. */
578void thread_migration_disable(void)
579{
580 ipl_t ipl = interrupts_disable();
581
582 assert(THREAD);
583 THREAD->nomigrate++;
584
585 interrupts_restore(ipl);
586}
587
588/** Allow the current thread to be migrated to another processor. */
589void thread_migration_enable(void)
590{
591 ipl_t ipl = interrupts_disable();
592
593 assert(THREAD);
594 assert(THREAD->nomigrate > 0);
595
596 if (THREAD->nomigrate > 0)
597 THREAD->nomigrate--;
598
599 interrupts_restore(ipl);
600}
601
602/** Thread sleep
603 *
604 * Suspend execution of the current thread.
605 *
606 * @param sec Number of seconds to sleep.
607 *
608 */
609void thread_sleep(uint32_t sec)
610{
611 /*
612 * Sleep in 1000 second steps to support
613 * full argument range
614 */
615 while (sec > 0) {
616 uint32_t period = (sec > 1000) ? 1000 : sec;
617
618 thread_usleep(period * 1000000);
619 sec -= period;
620 }
621}
622
623errno_t thread_join(thread_t *thread)
624{
625 return thread_join_timeout(thread, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
626}
627
628/** Wait for another thread to exit.
629 * After successful wait, the thread reference is destroyed.
630 *
631 * @param thread Thread to join on exit.
632 * @param usec Timeout in microseconds.
633 * @param flags Mode of operation.
634 *
635 * @return An error code from errno.h or an error code from synch.h.
636 *
637 */
638errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
639{
640 assert(thread != NULL);
641
642 if (thread == THREAD)
643 return EINVAL;
644
645 errno_t rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
646
647 if (rc == EOK)
648 thread_put(thread);
649
650 return rc;
651}
652
653void thread_detach(thread_t *thread)
654{
655 thread_put(thread);
656}
657
658/** Thread usleep
659 *
660 * Suspend execution of the current thread.
661 *
662 * @param usec Number of microseconds to sleep.
663 *
664 */
665void thread_usleep(uint32_t usec)
666{
667 waitq_t wq;
668
669 waitq_initialize(&wq);
670
671 (void) waitq_sleep_timeout(&wq, usec);
672}
673
674/** Allow other threads to run. */
675void thread_yield(void)
676{
677 assert(THREAD != NULL);
678 scheduler_enter(Running);
679}
680
681static void thread_print(thread_t *thread, bool additional)
682{
683 uint64_t ucycles, kcycles;
684 char usuffix, ksuffix;
685 order_suffix(atomic_time_read(&thread->ucycles), &ucycles, &usuffix);
686 order_suffix(atomic_time_read(&thread->kcycles), &kcycles, &ksuffix);
687
688 state_t state = atomic_get_unordered(&thread->state);
689
690 char *name;
691 if (str_cmp(thread->name, "uinit") == 0)
692 name = thread->task->name;
693 else
694 name = thread->name;
695
696 if (additional)
697 printf("%-8" PRIu64 " %p %p %9" PRIu64 "%c %9" PRIu64 "%c ",
698 thread->tid, thread->thread_code, thread->kstack,
699 ucycles, usuffix, kcycles, ksuffix);
700 else
701 printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
702 thread->tid, name, thread, thread_states[state],
703 thread->task, thread->task->container);
704
705 if (additional) {
706 cpu_t *cpu = atomic_get_unordered(&thread->cpu);
707 if (cpu)
708 printf("%-5u", cpu->id);
709 else
710 printf("none ");
711
712 if (state == Sleeping) {
713 printf(" %p", thread->sleep_queue);
714 }
715
716 printf("\n");
717 }
718}
719
720/** Print a list of threads and their debug info
721 *
722 * @param additional Print additional information.
723 *
724 */
725void thread_print_list(bool additional)
726{
727 thread_t *thread;
728
729 /* Accessing system-wide threads list through thread_first()/thread_next(). */
730 irq_spinlock_lock(&threads_lock, true);
731
732 if (sizeof(void *) <= 4) {
733 if (additional)
734 printf("[id ] [code ] [stack ] [ucycles ] [kcycles ]"
735 " [cpu] [waitqueue]\n");
736 else
737 printf("[id ] [name ] [address ] [state ] [task ]"
738 " [ctn]\n");
739 } else {
740 if (additional) {
741 printf("[id ] [code ] [stack ] [ucycles ] [kcycles ]"
742 " [cpu] [waitqueue ]\n");
743 } else
744 printf("[id ] [name ] [address ] [state ]"
745 " [task ] [ctn]\n");
746 }
747
748 thread = thread_first();
749 while (thread != NULL) {
750 thread_print(thread, additional);
751 thread = thread_next(thread);
752 }
753
754 irq_spinlock_unlock(&threads_lock, true);
755}
756
757static bool thread_exists(thread_t *thread)
758{
759 odlink_t *odlink = odict_find_eq(&threads, thread, NULL);
760 return odlink != NULL;
761}
762
763/** Check whether the thread exists, and if so, return a reference to it.
764 */
765thread_t *thread_try_get(thread_t *thread)
766{
767 irq_spinlock_lock(&threads_lock, true);
768
769 if (thread_exists(thread)) {
770 /* Try to strengthen the reference. */
771 thread = thread_try_ref(thread);
772 } else {
773 thread = NULL;
774 }
775
776 irq_spinlock_unlock(&threads_lock, true);
777
778 return thread;
779}
780
781/** Update accounting of current thread.
782 *
783 * Note that thread_lock on THREAD must be already held and
784 * interrupts must be already disabled.
785 *
786 * @param user True to update user accounting, false for kernel.
787 *
788 */
789void thread_update_accounting(bool user)
790{
791 assert(interrupts_disabled());
792
793 uint64_t time = get_cycle();
794
795 if (user)
796 atomic_time_increment(&THREAD->ucycles, time - THREAD->last_cycle);
797 else
798 atomic_time_increment(&THREAD->kcycles, time - THREAD->last_cycle);
799
800 THREAD->last_cycle = time;
801}
802
803/** Find thread structure corresponding to thread ID.
804 *
805 * The threads_lock must be already held by the caller of this function and
806 * interrupts must be disabled.
807 *
808 * The returned reference is weak.
809 * If the caller needs to keep it, thread_try_ref() must be used to upgrade
810 * to a strong reference _before_ threads_lock is released.
811 *
812 * @param id Thread ID.
813 *
814 * @return Thread structure address or NULL if there is no such thread ID.
815 *
816 */
817thread_t *thread_find_by_id(thread_id_t thread_id)
818{
819 thread_t *thread;
820
821 assert(interrupts_disabled());
822 assert(irq_spinlock_locked(&threads_lock));
823
824 thread = thread_first();
825 while (thread != NULL) {
826 if (thread->tid == thread_id)
827 return thread;
828
829 thread = thread_next(thread);
830 }
831
832 return NULL;
833}
834
835/** Get count of threads.
836 *
837 * @return Number of threads in the system
838 */
839size_t thread_count(void)
840{
841 assert(interrupts_disabled());
842 assert(irq_spinlock_locked(&threads_lock));
843
844 return odict_count(&threads);
845}
846
847/** Get first thread.
848 *
849 * @return Pointer to first thread or @c NULL if there are none.
850 */
851thread_t *thread_first(void)
852{
853 odlink_t *odlink;
854
855 assert(interrupts_disabled());
856 assert(irq_spinlock_locked(&threads_lock));
857
858 odlink = odict_first(&threads);
859 if (odlink == NULL)
860 return NULL;
861
862 return odict_get_instance(odlink, thread_t, lthreads);
863}
864
865/** Get next thread.
866 *
867 * @param cur Current thread
868 * @return Pointer to next thread or @c NULL if there are no more threads.
869 */
870thread_t *thread_next(thread_t *cur)
871{
872 odlink_t *odlink;
873
874 assert(interrupts_disabled());
875 assert(irq_spinlock_locked(&threads_lock));
876
877 odlink = odict_next(&cur->lthreads, &threads);
878 if (odlink == NULL)
879 return NULL;
880
881 return odict_get_instance(odlink, thread_t, lthreads);
882}
883
884#ifdef CONFIG_UDEBUG
885
886void thread_stack_trace(thread_id_t thread_id)
887{
888 irq_spinlock_lock(&threads_lock, true);
889 thread_t *thread = thread_try_ref(thread_find_by_id(thread_id));
890 irq_spinlock_unlock(&threads_lock, true);
891
892 if (thread == NULL) {
893 printf("No such thread.\n");
894 return;
895 }
896
897 /*
898 * Schedule a stack trace to be printed
899 * just before the thread is scheduled next.
900 *
901 * If the thread is sleeping then try to interrupt
902 * the sleep. Any request for printing a uspace stack
903 * trace from within the kernel should always be
904 * considered a last-resort debugging means; therefore,
905 * forcing the thread's sleep to be interrupted
906 * is probably justifiable.
907 */
908
909 printf("Scheduling thread stack trace.\n");
910 atomic_set_unordered(&thread->btrace, true);
911
912 thread_wakeup(thread);
913 thread_put(thread);
914}
915
916#endif /* CONFIG_UDEBUG */
917
918/** Get key function for the @c threads ordered dictionary.
919 *
920 * @param odlink Link
921 * @return Pointer to thread structure cast as 'void *'
922 */
923static void *threads_getkey(odlink_t *odlink)
924{
925 thread_t *thread = odict_get_instance(odlink, thread_t, lthreads);
926 return (void *) thread;
927}
928
929/** Key comparison function for the @c threads ordered dictionary.
930 *
931 * @param a Pointer to thread A
932 * @param b Pointer to thread B
933 * @return -1, 0, 1 iff pointer to A is less than, equal to, greater than B
934 */
935static int threads_cmp(void *a, void *b)
936{
937 if (a > b)
938 return -1;
939 else if (a == b)
940 return 0;
941 else
942 return +1;
943}
944
945/** Process syscall to create new thread.
946 * The started thread will have its initial pc and sp set to the exact values passed
947 * to the syscall. The kernel will not touch any stack data below the stack
948 * pointer, but some architectures may require some space to be available
949 * for use above it. See userspace() in kernel, and <libarch/thread.h> in libc.
950 *
951 */
952sys_errno_t sys_thread_create(sysarg_t pc, sysarg_t sp,
953 uspace_ptr_char uspace_name, size_t name_len)
954{
955 if (name_len > THREAD_NAME_BUFLEN - 1)
956 name_len = THREAD_NAME_BUFLEN - 1;
957
958 char namebuf[THREAD_NAME_BUFLEN];
959 errno_t rc = copy_from_uspace(namebuf, uspace_name, name_len);
960 if (rc != EOK)
961 return (sys_errno_t) rc;
962
963 namebuf[name_len] = 0;
964
965 /*
966 * In case of failure, kernel_uarg will be deallocated in this function.
967 * In case of success, kernel_uarg will be freed in uinit().
968 */
969 uinit_arg_t *kernel_uarg = malloc(sizeof(uinit_arg_t));
970 if (!kernel_uarg)
971 return (sys_errno_t) ENOMEM;
972
973 kernel_uarg->pc = pc;
974 kernel_uarg->sp = sp;
975
976 // TODO: fix some unnecessary inconsistencies between architectures
977
978 thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
979 THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf);
980 if (!thread) {
981 free(kernel_uarg);
982 return (sys_errno_t) ENOMEM;
983 }
984
985#ifdef CONFIG_UDEBUG
986 /*
987 * Generate udebug THREAD_B event and attach the thread.
988 * This must be done atomically (with the debug locks held),
989 * otherwise we would either miss some thread or receive
990 * THREAD_B events for threads that already existed
991 * and could be detected with THREAD_READ before.
992 */
993 udebug_thread_b_event_attach(thread, TASK);
994#else
995 thread_attach(thread, TASK);
996#endif
997 thread_start(thread);
998 thread_put(thread);
999
1000 return (sys_errno_t) EOK;
1001}
1002
1003/** Process syscall to terminate thread.
1004 *
1005 */
1006sys_errno_t sys_thread_exit(int uspace_status)
1007{
1008 thread_exit();
1009}
1010
1011/** Syscall for getting TID.
1012 *
1013 * @param uspace_thread_id Userspace address of 8-byte buffer where to store
1014 * current thread ID.
1015 *
1016 * @return 0 on success or an error code from @ref errno.h.
1017 *
1018 */
1019sys_errno_t sys_thread_get_id(uspace_ptr_thread_id_t uspace_thread_id)
1020{
1021 /*
1022 * No need to acquire lock on THREAD because tid
1023 * remains constant for the lifespan of the thread.
1024 *
1025 */
1026 return (sys_errno_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
1027 sizeof(THREAD->tid));
1028}
1029
1030/** Syscall wrapper for sleeping. */
1031sys_errno_t sys_thread_usleep(uint32_t usec)
1032{
1033 thread_usleep(usec);
1034 return 0;
1035}
1036
1037sys_errno_t sys_thread_udelay(uint32_t usec)
1038{
1039 delay(usec);
1040 return 0;
1041}
1042
1043/** @}
1044 */