source: mainline/kernel/generic/src/proc/thread.c@5663872

Last change on this file since 5663872 was 5663872, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 18 months ago

Move stuff around for thread sleep

Only mark the thread as ready for wakeup after we switch to
another context. This way, soundness of the synchronization
does not depend on thread lock being held across the context
switch, which gives us more freedom.

  • Property mode set to 100644
File size: 28.3 KB
1/*
2 * Copyright (c) 2010 Jakub Jermar
3 * Copyright (c) 2018 Jiri Svoboda
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup kernel_generic_proc
31 * @{
32 */
33
34/**
35 * @file
36 * @brief Thread management functions.
37 */
38
39#include <assert.h>
40#include <proc/scheduler.h>
41#include <proc/thread.h>
42#include <proc/task.h>
43#include <mm/frame.h>
44#include <mm/page.h>
45#include <arch/asm.h>
46#include <arch/cycle.h>
47#include <arch.h>
48#include <synch/spinlock.h>
49#include <synch/waitq.h>
50#include <synch/syswaitq.h>
51#include <cpu.h>
52#include <str.h>
53#include <context.h>
54#include <adt/list.h>
55#include <adt/odict.h>
56#include <time/clock.h>
57#include <time/timeout.h>
58#include <time/delay.h>
59#include <config.h>
60#include <arch/interrupt.h>
61#include <smp/ipi.h>
62#include <arch/faddr.h>
63#include <atomic.h>
64#include <memw.h>
65#include <stdio.h>
66#include <stdlib.h>
67#include <main/uinit.h>
68#include <syscall/copy.h>
69#include <errno.h>
70#include <debug.h>
71#include <halt.h>
72
73/** Thread states */
74const char *thread_states[] = {
75 "Invalid",
76 "Running",
77 "Sleeping",
78 "Ready",
79 "Entering",
80 "Exiting",
81 "Lingering"
82};
83
84/** Lock protecting the @c threads ordered dictionary.
85 *
86 * For locking rules, see declaration thereof.
87 */
88IRQ_SPINLOCK_INITIALIZE(threads_lock);
89
90/** Ordered dictionary of all threads by their address (i.e. pointer to
91 * the thread_t structure).
92 *
93 * When a thread is found in the @c threads ordered dictionary, it is
94 * guaranteed to exist as long as the @c threads_lock is held.
95 *
96 * Members are of type thread_t.
97 *
98 * This structure contains weak references. Any reference taken from it must not
99 * leave the threads_lock critical section unless strengthened via thread_try_ref().
100 */
101odict_t threads;
102
103IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
104static thread_id_t last_tid = 0;
105
106static slab_cache_t *thread_cache;
107
108static void *threads_getkey(odlink_t *);
109static int threads_cmp(void *, void *);
110
111/** Thread wrapper.
112 *
113 * This wrapper is provided to ensure that every thread makes a call to
114 * thread_exit() when its implementing function returns.
115 *
116 * Assumes interrupts are disabled, i.e. that interrupts_disable() has been called.
117 *
118 */
119static void cushion(void)
120{
121 void (*f)(void *) = THREAD->thread_code;
122 void *arg = THREAD->thread_arg;
123 THREAD->last_cycle = get_cycle();
124
125 /* This is where each thread wakes up after its creation */
126 irq_spinlock_unlock(&THREAD->lock, false);
127 interrupts_enable();
128
129 f(arg);
130
131 thread_exit();
132
133 /* Not reached */
134}
135
136/** Initialization and allocation for thread_t structure
137 *
138 */
139static errno_t thr_constructor(void *obj, unsigned int kmflags)
140{
141 thread_t *thread = (thread_t *) obj;
142
143 irq_spinlock_initialize(&thread->lock, "thread_t_lock");
144 link_initialize(&thread->rq_link);
145 link_initialize(&thread->wq_link);
146 link_initialize(&thread->th_link);
147
148 /* call the architecture-specific part of the constructor */
149 thr_constructor_arch(thread);
150
151 /*
152 * Allocate the kernel stack from low memory to prevent an infinite
153 * nesting of TLB-misses when accessing the stack from the part of the
154 * TLB-miss handler written in C.
155 *
156 * Note that low memory is safe to use for the stack as it will be
157 * covered by the kernel identity mapping, which guarantees not to
158 * nest TLB-misses infinitely (either via some hardware mechanism or
159 * by the construction of the assembly-language part of the TLB-miss
160 * handler).
161 *
162 * This restriction can be lifted once each architecture provides
163 * a similar guarantee, for example, by locking the kernel stack
164 * in the TLB whenever it is allocated from the high-memory and the
165 * thread is being scheduled to run.
166 */
167 kmflags |= FRAME_LOWMEM;
168 kmflags &= ~FRAME_HIGHMEM;
169
170 /*
171 * NOTE: All kernel stacks must be aligned to STACK_SIZE,
172 * see CURRENT.
173 */
174
175 uintptr_t stack_phys =
176 frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);
177 if (!stack_phys)
178 return ENOMEM;
179
180 thread->kstack = (uint8_t *) PA2KA(stack_phys);
181
182#ifdef CONFIG_UDEBUG
183 mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
184#endif
185
186 return EOK;
187}
188
189/** Destruction of thread_t object */
190static size_t thr_destructor(void *obj)
191{
192 thread_t *thread = (thread_t *) obj;
193
194 /* call the architecture-specific part of the destructor */
195 thr_destructor_arch(thread);
196
197 frame_free(KA2PA(thread->kstack), STACK_FRAMES);
198
199 return STACK_FRAMES; /* number of frames freed */
200}
201
202/** Initialize threads
203 *
204 * Initialize kernel threads support.
205 *
206 */
207void thread_init(void)
208{
209 THREAD = NULL;
210
211 atomic_store(&nrdy, 0);
212 thread_cache = slab_cache_create("thread_t", sizeof(thread_t), _Alignof(thread_t),
213 thr_constructor, thr_destructor, 0);
214
215 odict_initialize(&threads, threads_getkey, threads_cmp);
216}
217
218/** Wire thread to the given CPU
219 *
220 * @param cpu CPU to wire the thread to.
221 *
222 */
223void thread_wire(thread_t *thread, cpu_t *cpu)
224{
225 irq_spinlock_lock(&thread->lock, true);
226 thread->cpu = cpu;
227 thread->nomigrate++;
228 irq_spinlock_unlock(&thread->lock, true);
229}
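
A hedged sketch of the usual pairing with thread_ready(): pin a freshly created, not-yet-readied thread to a particular CPU before handing it to the scheduler (t and cpu are assumed to be supplied by the caller; this is not code from this file):

	/* Pin t to cpu; thread_wire() sets t->cpu and bumps nomigrate. */
	thread_wire(t, cpu);
	/* thread_ready() will enqueue t on the t->cpu set above. */
	thread_ready(t);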
230
231/** Invoked right before thread_ready() readies the thread. The thread is locked. */
232static void before_thread_is_ready(thread_t *thread)
233{
234 assert(irq_spinlock_locked(&thread->lock));
235}
236
237/** Make thread ready
238 *
239 * Switch thread to the ready state. Consumes reference passed by the caller.
240 *
241 * @param thread Thread to make ready.
242 *
243 */
244void thread_ready(thread_t *thread)
245{
246 irq_spinlock_lock(&thread->lock, true);
247
248 assert(thread->state != Ready);
249
250 before_thread_is_ready(thread);
251
252 int i = (thread->priority < RQ_COUNT - 1) ?
253 ++thread->priority : thread->priority;
254
255 /* Prefer the CPU on which the thread ran last */
256 cpu_t *cpu = thread->cpu ? thread->cpu : CPU;
257
258 thread->state = Ready;
259
260 irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
261
262 /*
263 * Append thread to respective ready queue
264 * on respective processor.
265 */
266
267 list_append(&thread->rq_link, &cpu->rq[i].rq);
268 cpu->rq[i].n++;
269 irq_spinlock_unlock(&(cpu->rq[i].lock), true);
270
271 atomic_inc(&nrdy);
272 atomic_inc(&cpu->nrdy);
273}
274
275/** Create new thread
276 *
277 * Create a new thread.
278 *
279 * @param func Thread's implementing function.
280 * @param arg Thread's implementing function argument.
281 * @param task Task to which the thread belongs. The caller must
282 * guarantee that the task won't cease to exist during the
283 * call. The task's lock may not be held.
284 * @param flags Thread flags.
285 * @param name Symbolic name (a copy is made).
286 *
287 * @return New thread's structure on success, NULL on failure.
288 *
289 */
290thread_t *thread_create(void (*func)(void *), void *arg, task_t *task,
291 thread_flags_t flags, const char *name)
292{
293 thread_t *thread = (thread_t *) slab_alloc(thread_cache, FRAME_ATOMIC);
294 if (!thread)
295 return NULL;
296
297 refcount_init(&thread->refcount);
298
299 if (thread_create_arch(thread, flags) != EOK) {
300 slab_free(thread_cache, thread);
301 return NULL;
302 }
303
304 /* Not needed, but good for debugging */
305 memsetb(thread->kstack, STACK_SIZE, 0);
306
307 irq_spinlock_lock(&tidlock, true);
308 thread->tid = ++last_tid;
309 irq_spinlock_unlock(&tidlock, true);
310
311 memset(&thread->saved_context, 0, sizeof(thread->saved_context));
312 context_set(&thread->saved_context, FADDR(cushion),
313 (uintptr_t) thread->kstack, STACK_SIZE);
314
315 current_initialize((current_t *) thread->kstack);
316
317 ipl_t ipl = interrupts_disable();
318 thread->saved_ipl = interrupts_read();
319 interrupts_restore(ipl);
320
321 str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
322
323 thread->thread_code = func;
324 thread->thread_arg = arg;
325 thread->ucycles = 0;
326 thread->kcycles = 0;
327 thread->uncounted =
328 ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
329 thread->priority = -1; /* Start in rq[0] */
330 thread->cpu = NULL;
331 thread->stolen = false;
332 thread->uspace =
333 ((flags & THREAD_FLAG_USPACE) == THREAD_FLAG_USPACE);
334
335 thread->nomigrate = 0;
336 thread->state = Entering;
337
338 atomic_init(&thread->sleep_queue, NULL);
339
340 thread->in_copy_from_uspace = false;
341 thread->in_copy_to_uspace = false;
342
343 thread->interrupted = false;
344 atomic_init(&thread->sleep_state, SLEEP_INITIAL);
345
346 waitq_initialize(&thread->join_wq);
347
348 thread->task = task;
349
350 thread->fpu_context_exists = false;
351
352 odlink_initialize(&thread->lthreads);
353
354#ifdef CONFIG_UDEBUG
355 /* Initialize debugging stuff */
356 thread->btrace = false;
357 udebug_thread_initialize(&thread->udebug);
358#endif
359
360 if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
361 thread_attach(thread, task);
362
363 return thread;
364}
365
366/** Destroy thread memory structure
367 *
368 * Detach thread from all queues, cpus etc. and destroy it.
369 *
370 * @param obj Thread to be destroyed.
371 *
372 */
373static void thread_destroy(void *obj)
374{
375 thread_t *thread = (thread_t *) obj;
376
377 assert_link_not_used(&thread->rq_link);
378 assert_link_not_used(&thread->wq_link);
379
380 assert(thread->task);
381
382 ipl_t ipl = interrupts_disable();
383
384 /* Remove thread from global list. */
385 irq_spinlock_lock(&threads_lock, false);
386 odict_remove(&thread->lthreads);
387 irq_spinlock_unlock(&threads_lock, false);
388
389 /* Remove thread from task's list and accumulate accounting. */
390 irq_spinlock_lock(&thread->task->lock, false);
391
392 list_remove(&thread->th_link);
393
394 /*
395 * No other CPU has access to this thread anymore, so we don't need
396 * thread->lock for accessing thread's fields after this point.
397 */
398
399 if (!thread->uncounted) {
400 thread->task->ucycles += thread->ucycles;
401 thread->task->kcycles += thread->kcycles;
402 }
403
404 irq_spinlock_unlock(&thread->task->lock, false);
405
406 assert((thread->state == Exiting) || (thread->state == Lingering));
407
408 /* Clear cpu->fpu_owner if set to this thread. */
409#ifdef CONFIG_FPU_LAZY
410 if (thread->cpu) {
411 /*
412 * We need to lock for this because the old CPU can concurrently try
413 * to dump this thread's FPU state, in which case we need to wait for
414 * it to finish. An atomic compare-and-swap wouldn't be enough.
415 */
416 irq_spinlock_lock(&thread->cpu->fpu_lock, false);
417
418 thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner,
419 memory_order_relaxed);
420
421 if (owner == thread) {
422 atomic_store_explicit(&thread->cpu->fpu_owner, NULL,
423 memory_order_relaxed);
424 }
425
426 irq_spinlock_unlock(&thread->cpu->fpu_lock, false);
427 }
428#endif
429
430 interrupts_restore(ipl);
431
432 /*
433 * Drop the reference to the containing task.
434 */
435 task_release(thread->task);
436 thread->task = NULL;
437
438 slab_free(thread_cache, thread);
439}
440
441void thread_put(thread_t *thread)
442{
443 if (refcount_down(&thread->refcount)) {
444 thread_destroy(thread);
445 }
446}
447
448/** Make the thread visible to the system.
449 *
450 * Attach the thread structure to the current task and make it visible in the
451 * threads_tree.
452 *
453 * @param t Thread to be attached to the task.
454 * @param task Task to which the thread is to be attached.
455 *
456 */
457void thread_attach(thread_t *thread, task_t *task)
458{
459 ipl_t ipl = interrupts_disable();
460
461 /*
462 * Attach to the specified task.
463 */
464 irq_spinlock_lock(&task->lock, false);
465
466 /* Hold a reference to the task. */
467 task_hold(task);
468
469 /* Must not count kbox thread into lifecount */
470 if (thread->uspace)
471 atomic_inc(&task->lifecount);
472
473 list_append(&thread->th_link, &task->threads);
474
475 irq_spinlock_unlock(&task->lock, false);
476
477 /*
478 * Register this thread in the system-wide dictionary.
479 */
480 irq_spinlock_lock(&threads_lock, false);
481 odict_insert(&thread->lthreads, &threads, NULL);
482 irq_spinlock_unlock(&threads_lock, false);
483
484 interrupts_restore(ipl);
485}
486
487/** Terminate thread.
488 *
489 * End current thread execution and switch it to the exiting state.
490 * All pending timeouts are executed.
491 *
492 */
493void thread_exit(void)
494{
495 if (THREAD->uspace) {
496#ifdef CONFIG_UDEBUG
497 /* Generate udebug THREAD_E event */
498 udebug_thread_e_event();
499
500 /*
501 * This thread will not execute any code or system calls from
502 * now on.
503 */
504 udebug_stoppable_begin();
505#endif
506 if (atomic_predec(&TASK->lifecount) == 0) {
507 /*
508 * We are the last userspace thread in the task that
509 * still has not exited. With the exception of the
510 * moment the task was created, new userspace threads
511 * can only be created by threads of the same task.
512 * We are safe to perform cleanup.
513 *
514 */
515 ipc_cleanup();
516 sys_waitq_task_cleanup();
517 LOG("Cleanup of task %" PRIu64 " completed.", TASK->taskid);
518 }
519 }
520
521 irq_spinlock_lock(&THREAD->lock, true);
522 THREAD->state = Exiting;
523 irq_spinlock_unlock(&THREAD->lock, true);
524
525 scheduler();
526
527 panic("should never be reached");
528}
529
530/** Interrupts an existing thread so that it may exit as soon as possible.
531 *
532 * Threads that are blocked waiting for a synchronization primitive
533 * are woken up with a return code of EINTR if the
534 * blocking call was interruptible. See waitq_sleep_timeout().
535 *
536 * Interrupted threads automatically exit when returning back to user space.
537 *
538 * @param thread A valid thread object.
539 */
540void thread_interrupt(thread_t *thread)
541{
542 assert(thread != NULL);
543 thread->interrupted = true;
544 thread_wakeup(thread);
545}
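
A hedged sketch of the other side of this mechanism: a kernel worker that blocks interruptibly and treats EINTR as a request to stop. The stoppable_worker name is hypothetical and SYNCH_FLAGS_INTERRUPTIBLE is assumed to be the flag that makes the sleep interruptible; it is not defined in this file.

	static void stoppable_worker(void *arg)
	{
		waitq_t *wq = (waitq_t *) arg;

		while (true) {
			/* Unbounded, interruptible sleep; thread_interrupt() makes it return EINTR. */
			errno_t rc = _waitq_sleep_timeout(wq, SYNCH_NO_TIMEOUT,
			    SYNCH_FLAGS_INTERRUPTIBLE);
			if (rc == EINTR)
				break;  /* returning lets cushion() call thread_exit() */

			/* ... handle the wakeup ... */
		}
	}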
546
547/** Prepare for putting the thread to sleep.
548 *
549 * @returns whether the thread is currently terminating. If THREAD_OK
550 * is returned, the thread is guaranteed to be woken up instantly if it
551 * is terminated at any time between this function's return and
552 * thread_wait_finish(). If THREAD_TERMINATING is returned, the thread can still
553 * go to sleep, but doing so will delay termination.
554 */
555thread_termination_state_t thread_wait_start(void)
556{
557 assert(THREAD != NULL);
558
559 /*
560 * This is an exchange rather than a store so that we can use the acquire
561 * semantics, which is needed to ensure that code after this operation sees
562 * memory ops made before thread_wakeup() in another thread, if that wakeup
563 * was reset by this operation.
564 *
565 * In particular, we need this to ensure we can't miss the thread being
566 * terminated concurrently with a synchronization primitive preparing to
567 * sleep.
568 */
569 (void) atomic_exchange_explicit(&THREAD->sleep_state, SLEEP_INITIAL,
570 memory_order_acquire);
571
572 return THREAD->interrupted ? THREAD_TERMINATING : THREAD_OK;
573}
574
575static void thread_wait_timeout_callback(void *arg)
576{
577 thread_wakeup(arg);
578}
579
580/**
581 * Suspends this thread's execution until thread_wakeup() is called on it,
582 * or deadline is reached.
583 *
584 * The way this would normally be used is that the current thread calls
585 * thread_wait_start(), and if interruption has not been signaled, stores
586 * a reference to itself in a synchronized structure (such as waitq).
587 * After that, it releases any spinlocks it might hold and calls this function.
588 *
589 * The thread doing the wakeup will acquire the thread's reference from said
590 * synchronized structure and call thread_wakeup() on it.
591 *
592 * Notably, there can be more than one thread performing wakeup.
593 * The number of performed calls to thread_wakeup(), or their relative
594 * ordering with thread_wait_finish(), does not matter. However, calls to
595 * thread_wakeup() are expected to be synchronized with thread_wait_start()
596 * with which they are associated, otherwise wakeups may be missed.
597 * That said, the operation of thread_wakeup() is defined at any time,
598 * synchronization notwithstanding (in the sense of C un/defined behavior),
599 * and is in fact used by external events to interrupt waiting threads.
600 * The waiting thread must operate correctly in the face of spurious wakeups,
601 * and clean up its reference in the synchronization structure if necessary.
602 *
603 * Returns THREAD_WAIT_TIMEOUT if the timeout fired, which is a necessary condition
604 * for the thread to have been woken up by the timeout, but the caller must assume
605 * that proper wakeups, timeouts and interrupts may occur concurrently, so
606 * the fact that the timeout fired does not necessarily mean the thread
607 * has not also been woken up or interrupted.
608 */
609thread_wait_result_t thread_wait_finish(deadline_t deadline)
610{
611 assert(THREAD != NULL);
612
613 timeout_t timeout;
614
615 /* Extra check to avoid going to scheduler if we don't need to. */
616 if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
617 SLEEP_INITIAL)
618 return THREAD_WAIT_SUCCESS;
619
620 if (deadline != DEADLINE_NEVER) {
621 timeout_initialize(&timeout);
622 timeout_register_deadline(&timeout, deadline,
623 thread_wait_timeout_callback, THREAD);
624 }
625
626 ipl_t ipl = interrupts_disable();
627 irq_spinlock_lock(&THREAD->lock, false);
628 THREAD->state = Sleeping;
629 scheduler_locked(ipl);
630
631 if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
632 return THREAD_WAIT_TIMEOUT;
633 } else {
634 return THREAD_WAIT_SUCCESS;
635 }
636}
637
638void thread_wakeup(thread_t *thread)
639{
640 assert(thread != NULL);
641
642 int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
643 memory_order_acq_rel);
644
645 if (state == SLEEP_ASLEEP) {
646 /*
647 * Only one thread gets to do this.
648 * The reference consumed here is the reference implicitly passed to
649 * the waking thread by the sleeper in thread_wait_finish().
650 */
651 thread_ready(thread);
652 }
653}
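
To make the protocol described above concrete, a hedged sketch of a sleeper/waker pair built from the primitives in this file; the shared "slot" structure and its spinlock are hypothetical stand-ins for whatever synchronized structure (e.g. a wait queue) the caller actually uses, and reference-count handover is elided (see the comment in thread_wakeup() above):

	/* Sleeper side. */
	if (thread_wait_start() == THREAD_TERMINATING) {
		/* We are being terminated: either skip the sleep entirely
		 * or accept that sleeping will delay termination. */
	}

	spinlock_lock(&slot->lock);
	slot->sleeper = THREAD;  /* publish ourselves for the waker */
	spinlock_unlock(&slot->lock);

	(void) thread_wait_finish(DEADLINE_NEVER);
	/* The wakeup may be spurious: re-check the condition and, if necessary,
	 * remove ourselves from slot->sleeper again. */

	/* Waker side: take the published thread out of the slot and wake it. */
	spinlock_lock(&slot->lock);
	thread_t *t = slot->sleeper;
	slot->sleeper = NULL;
	spinlock_unlock(&slot->lock);

	if (t != NULL)
		thread_wakeup(t);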
654
655/** Prevent the current thread from being migrated to another processor. */
656void thread_migration_disable(void)
657{
658 assert(THREAD);
659
660 THREAD->nomigrate++;
661}
662
663/** Allow the current thread to be migrated to another processor. */
664void thread_migration_enable(void)
665{
666 assert(THREAD);
667 assert(THREAD->nomigrate > 0);
668
669 if (THREAD->nomigrate > 0)
670 THREAD->nomigrate--;
671}
672
673/** Thread sleep
674 *
675 * Suspend execution of the current thread.
676 *
677 * @param sec Number of seconds to sleep.
678 *
679 */
680void thread_sleep(uint32_t sec)
681{
682 /*
683 * Sleep in 1000-second steps so the microsecond argument passed to
684 * thread_usleep() (at most 10^9) always fits in uint32_t
685 */
686 while (sec > 0) {
687 uint32_t period = (sec > 1000) ? 1000 : sec;
688
689 thread_usleep(period * 1000000);
690 sec -= period;
691 }
692}
693
694errno_t thread_join(thread_t *thread)
695{
696 return thread_join_timeout(thread, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
697}
698
699/** Wait for another thread to exit.
700 * This function does not destroy the thread. Reference counting handles that.
701 *
702 * @param thread Thread to join on exit.
703 * @param usec Timeout in microseconds.
704 * @param flags Mode of operation.
705 *
706 * @return An error code from errno.h or an error code from synch.h.
707 *
708 */
709errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
710{
711 if (thread == THREAD)
712 return EINVAL;
713
714 irq_spinlock_lock(&thread->lock, true);
715 state_t state = thread->state;
716 irq_spinlock_unlock(&thread->lock, true);
717
718 if (state == Exiting) {
719 return EOK;
720 } else {
721 return _waitq_sleep_timeout(&thread->join_wq, usec, flags);
722 }
723}
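
A hedged usage sketch: joining a thread to which the caller already holds a strong reference (e.g. one obtained via thread_try_get()), then dropping that reference:

	errno_t rc = thread_join(t);  /* waits for t to exit; EOK on success */
	thread_put(t);                /* drop our reference; the last put destroys t */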
724
725/** Thread usleep
726 *
727 * Suspend execution of the current thread.
728 *
729 * @param usec Number of microseconds to sleep.
730 *
731 */
732void thread_usleep(uint32_t usec)
733{
734 waitq_t wq;
735
736 waitq_initialize(&wq);
737
738 (void) waitq_sleep_timeout(&wq, usec);
739}
740
741static void thread_print(thread_t *thread, bool additional)
742{
743 uint64_t ucycles, kcycles;
744 char usuffix, ksuffix;
745 order_suffix(thread->ucycles, &ucycles, &usuffix);
746 order_suffix(thread->kcycles, &kcycles, &ksuffix);
747
748 char *name;
749 if (str_cmp(thread->name, "uinit") == 0)
750 name = thread->task->name;
751 else
752 name = thread->name;
753
754 if (additional)
755 printf("%-8" PRIu64 " %p %p %9" PRIu64 "%c %9" PRIu64 "%c ",
756 thread->tid, thread->thread_code, thread->kstack,
757 ucycles, usuffix, kcycles, ksuffix);
758 else
759 printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
760 thread->tid, name, thread, thread_states[thread->state],
761 thread->task, thread->task->container);
762
763 if (additional) {
764 if (thread->cpu)
765 printf("%-5u", thread->cpu->id);
766 else
767 printf("none ");
768
769 if (thread->state == Sleeping) {
770 printf(" %p", thread->sleep_queue);
771 }
772
773 printf("\n");
774 }
775}
776
777/** Print list of threads with debugging information
778 *
779 * @param additional Print additional information.
780 *
781 */
782void thread_print_list(bool additional)
783{
784 thread_t *thread;
785
786 /* Accessing system-wide threads list through thread_first()/thread_next(). */
787 irq_spinlock_lock(&threads_lock, true);
788
789 if (sizeof(void *) <= 4) {
790 if (additional)
791 printf("[id ] [code ] [stack ] [ucycles ] [kcycles ]"
792 " [cpu] [waitqueue]\n");
793 else
794 printf("[id ] [name ] [address ] [state ] [task ]"
795 " [ctn]\n");
796 } else {
797 if (additional) {
798 printf("[id ] [code ] [stack ] [ucycles ] [kcycles ]"
799 " [cpu] [waitqueue ]\n");
800 } else
801 printf("[id ] [name ] [address ] [state ]"
802 " [task ] [ctn]\n");
803 }
804
805 thread = thread_first();
806 while (thread != NULL) {
807 thread_print(thread, additional);
808 thread = thread_next(thread);
809 }
810
811 irq_spinlock_unlock(&threads_lock, true);
812}
813
814static bool thread_exists(thread_t *thread)
815{
816 odlink_t *odlink = odict_find_eq(&threads, thread, NULL);
817 return odlink != NULL;
818}
819
820/** Check whether the thread exists, and if so, return a reference to it.
821 */
822thread_t *thread_try_get(thread_t *thread)
823{
824 irq_spinlock_lock(&threads_lock, true);
825
826 if (thread_exists(thread)) {
827 /* Try to strengthen the reference. */
828 thread = thread_try_ref(thread);
829 } else {
830 thread = NULL;
831 }
832
833 irq_spinlock_unlock(&threads_lock, true);
834
835 return thread;
836}
837
838/** Update accounting of current thread.
839 *
840 * Note that THREAD->lock must already be held and
841 * interrupts must already be disabled.
842 *
843 * @param user True to update user accounting, false for kernel.
844 *
845 */
846void thread_update_accounting(bool user)
847{
848 uint64_t time = get_cycle();
849
850 assert(interrupts_disabled());
851 assert(irq_spinlock_locked(&THREAD->lock));
852
853 if (user)
854 THREAD->ucycles += time - THREAD->last_cycle;
855 else
856 THREAD->kcycles += time - THREAD->last_cycle;
857
858 THREAD->last_cycle = time;
859}
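
A hedged sketch of the expected calling convention (locking as per the note above), e.g. charging the cycles accumulated so far to user time when crossing from userspace into the kernel:

	irq_spinlock_lock(&THREAD->lock, true);  /* disables interrupts and takes the lock */
	thread_update_accounting(true);          /* cycles since last_cycle -> THREAD->ucycles */
	irq_spinlock_unlock(&THREAD->lock, true);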
860
861/** Find thread structure corresponding to thread ID.
862 *
863 * The threads_lock must already be held by the caller of this function and
864 * interrupts must be disabled.
865 *
866 * The returned reference is weak.
867 * If the caller needs to keep it, thread_try_ref() must be used to upgrade
868 * to a strong reference _before_ threads_lock is released.
869 *
870 * @param id Thread ID.
871 *
872 * @return Thread structure address or NULL if there is no such thread ID.
873 *
874 */
875thread_t *thread_find_by_id(thread_id_t thread_id)
876{
877 thread_t *thread;
878
879 assert(interrupts_disabled());
880 assert(irq_spinlock_locked(&threads_lock));
881
882 thread = thread_first();
883 while (thread != NULL) {
884 if (thread->tid == thread_id)
885 return thread;
886
887 thread = thread_next(thread);
888 }
889
890 return NULL;
891}
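
A hedged sketch combining the rules above: look up a thread by ID under threads_lock, upgrade the weak reference before dropping the lock, and release the strong reference when done (this mirrors thread_stack_trace() below; id is a caller-supplied thread_id_t):

	irq_spinlock_lock(&threads_lock, true);
	/* thread_try_ref() tolerates a NULL argument, as used in thread_stack_trace(). */
	thread_t *t = thread_try_ref(thread_find_by_id(id));
	irq_spinlock_unlock(&threads_lock, true);

	if (t != NULL) {
		/* ... inspect t; it cannot be destroyed while we hold the reference ... */
		thread_put(t);
	}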
892
893/** Get count of threads.
894 *
895 * @return Number of threads in the system
896 */
897size_t thread_count(void)
898{
899 assert(interrupts_disabled());
900 assert(irq_spinlock_locked(&threads_lock));
901
902 return odict_count(&threads);
903}
904
905/** Get first thread.
906 *
907 * @return Pointer to first thread or @c NULL if there are none.
908 */
909thread_t *thread_first(void)
910{
911 odlink_t *odlink;
912
913 assert(interrupts_disabled());
914 assert(irq_spinlock_locked(&threads_lock));
915
916 odlink = odict_first(&threads);
917 if (odlink == NULL)
918 return NULL;
919
920 return odict_get_instance(odlink, thread_t, lthreads);
921}
922
923/** Get next thread.
924 *
925 * @param cur Current thread
926 * @return Pointer to next thread or @c NULL if there are no more threads.
927 */
928thread_t *thread_next(thread_t *cur)
929{
930 odlink_t *odlink;
931
932 assert(interrupts_disabled());
933 assert(irq_spinlock_locked(&threads_lock));
934
935 odlink = odict_next(&cur->lthreads, &threads);
936 if (odlink == NULL)
937 return NULL;
938
939 return odict_get_instance(odlink, thread_t, lthreads);
940}
941
942#ifdef CONFIG_UDEBUG
943
944void thread_stack_trace(thread_id_t thread_id)
945{
946 irq_spinlock_lock(&threads_lock, true);
947 thread_t *thread = thread_try_ref(thread_find_by_id(thread_id));
948 irq_spinlock_unlock(&threads_lock, true);
949
950 if (thread == NULL) {
951 printf("No such thread.\n");
952 return;
953 }
954
955 /*
956 * Schedule a stack trace to be printed
957 * just before the thread is scheduled next.
958 *
959 * If the thread is sleeping then try to interrupt
960 * the sleep. Any request for printing a uspace stack
961 * trace from within the kernel should always be
962 * considered a last-resort debugging means; therefore,
963 * forcing the thread's sleep to be interrupted
964 * is probably justifiable.
965 */
966
967 irq_spinlock_lock(&thread->lock, true);
968
969 bool sleeping = false;
970 istate_t *istate = thread->udebug.uspace_state;
971 if (istate != NULL) {
972 printf("Scheduling thread stack trace.\n");
973 thread->btrace = true;
974 if (thread->state == Sleeping)
975 sleeping = true;
976 } else
977 printf("Thread interrupt state not available.\n");
978
979 irq_spinlock_unlock(&thread->lock, true);
980
981 if (sleeping)
982 thread_wakeup(thread);
983
984 thread_put(thread);
985}
986
987#endif /* CONFIG_UDEBUG */
988
989/** Get key function for the @c threads ordered dictionary.
990 *
991 * @param odlink Link
992 * @return Pointer to thread structure cast as 'void *'
993 */
994static void *threads_getkey(odlink_t *odlink)
995{
996 thread_t *thread = odict_get_instance(odlink, thread_t, lthreads);
997 return (void *) thread;
998}
999
1000/** Key comparison function for the @c threads ordered dictionary.
1001 *
1002 * @param a Pointer to thread A
1003 * @param b Pointer to thread B
1004 * @return -1, 0, 1 iff pointer to A is less than, equal to, greater than B
1005 */
1006static int threads_cmp(void *a, void *b)
1007{
1008 if (a < b)
1009 return -1;
1010 else if (a == b)
1011 return 0;
1012 else
1013 return +1;
1014}
1015
1016/** Process syscall to create new thread.
1017 *
1018 */
1019sys_errno_t sys_thread_create(uspace_ptr_uspace_arg_t uspace_uarg, uspace_ptr_char uspace_name,
1020 size_t name_len, uspace_ptr_thread_id_t uspace_thread_id)
1021{
1022 if (name_len > THREAD_NAME_BUFLEN - 1)
1023 name_len = THREAD_NAME_BUFLEN - 1;
1024
1025 char namebuf[THREAD_NAME_BUFLEN];
1026 errno_t rc = copy_from_uspace(namebuf, uspace_name, name_len);
1027 if (rc != EOK)
1028 return (sys_errno_t) rc;
1029
1030 namebuf[name_len] = 0;
1031
1032 /*
1033 * In case of failure, kernel_uarg will be deallocated in this function.
1034 * In case of success, kernel_uarg will be freed in uinit().
1035 */
1036 uspace_arg_t *kernel_uarg =
1037 (uspace_arg_t *) malloc(sizeof(uspace_arg_t));
1038 if (!kernel_uarg)
1039 return (sys_errno_t) ENOMEM;
1040
1041 rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
1042 if (rc != EOK) {
1043 free(kernel_uarg);
1044 return (sys_errno_t) rc;
1045 }
1046
1047 thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
1048 THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf);
1049 if (thread) {
1050 if (uspace_thread_id) {
1051 rc = copy_to_uspace(uspace_thread_id, &thread->tid,
1052 sizeof(thread->tid));
1053 if (rc != EOK) {
1054 /*
1055 * We have encountered a failure, but the thread
1056 * has already been created. We need to undo its
1057 * creation now.
1058 */
1059
1060 /*
1061 * The new thread structure is initialized, but
1062 * is still not visible to the system.
1063 * We can safely deallocate it.
1064 */
1065 slab_free(thread_cache, thread);
1066 free(kernel_uarg);
1067
1068 return (sys_errno_t) rc;
1069 }
1070 }
1071
1072#ifdef CONFIG_UDEBUG
1073 /*
1074 * Generate udebug THREAD_B event and attach the thread.
1075 * This must be done atomically (with the debug locks held),
1076 * otherwise we would either miss some thread or receive
1077 * THREAD_B events for threads that already existed
1078 * and could be detected with THREAD_READ before.
1079 */
1080 udebug_thread_b_event_attach(thread, TASK);
1081#else
1082 thread_attach(thread, TASK);
1083#endif
1084 thread_ready(thread);
1085
1086 return 0;
1087 } else
1088 free(kernel_uarg);
1089
1090 return (sys_errno_t) ENOMEM;
1091}
1092
1093/** Process syscall to terminate thread.
1094 *
1095 */
1096sys_errno_t sys_thread_exit(int uspace_status)
1097{
1098 thread_exit();
1099}
1100
1101/** Syscall for getting TID.
1102 *
1103 * @param uspace_thread_id Userspace address of 8-byte buffer where to store
1104 * current thread ID.
1105 *
1106 * @return 0 on success or an error code from @ref errno.h.
1107 *
1108 */
1109sys_errno_t sys_thread_get_id(uspace_ptr_thread_id_t uspace_thread_id)
1110{
1111 /*
1112 * No need to acquire lock on THREAD because tid
1113 * remains constant for the lifespan of the thread.
1114 *
1115 */
1116 return (sys_errno_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
1117 sizeof(THREAD->tid));
1118}
1119
1120/** Syscall wrapper for sleeping. */
1121sys_errno_t sys_thread_usleep(uint32_t usec)
1122{
1123 thread_usleep(usec);
1124 return 0;
1125}
1126
1127sys_errno_t sys_thread_udelay(uint32_t usec)
1128{
1129 delay(usec);
1130 return 0;
1131}
1132
1133/** @}
1134 */