source: mainline/kernel/generic/src/proc/thread.c@ d314571

Last change on this file was d314571, checked in by Jakub Jermar <jakub@…>, 7 years ago

Turn wait queue into a kobject usable by uspace

In order to provide an elegant synchronization mechanism for userspace,
this commit adds syscalls to create, sleep on, and wake up from a wait queue.

/*
 * Copyright (c) 2010 Jakub Jermar
 * Copyright (c) 2018 Jiri Svoboda
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_generic_proc
 * @{
 */

/**
 * @file
 * @brief Thread management functions.
 */

#include <assert.h>
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch/cycle.h>
#include <arch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/syswaitq.h>
#include <cpu.h>
#include <str.h>
#include <context.h>
#include <adt/list.h>
#include <adt/odict.h>
#include <time/clock.h>
#include <time/timeout.h>
#include <time/delay.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <mem.h>
#include <stdio.h>
#include <stdlib.h>
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>
#include <debug.h>

/** Thread states */
const char *thread_states[] = {
	"Invalid",
	"Running",
	"Sleeping",
	"Ready",
	"Entering",
	"Exiting",
	"Lingering"
};

/** Lock protecting the @c threads ordered dictionary.
 *
 * For locking rules, see declaration thereof.
 */
IRQ_SPINLOCK_INITIALIZE(threads_lock);

/** Ordered dictionary of all threads by their address (i.e. pointer to
 * the thread_t structure).
 *
 * When a thread is found in the @c threads ordered dictionary, it is
 * guaranteed to exist as long as the @c threads_lock is held.
 *
 * Members are of type thread_t.
 */
odict_t threads;

IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
static thread_id_t last_tid = 0;

static slab_cache_t *thread_cache;

#ifdef CONFIG_FPU
slab_cache_t *fpu_context_cache;
#endif

static void *threads_getkey(odlink_t *);
static int threads_cmp(void *, void *);

/** Thread wrapper.
 *
 * This wrapper is provided to ensure that every thread makes a call to
 * thread_exit() when its implementing function returns.
 *
 * interrupts_disable() is assumed.
 *
 */
static void cushion(void)
{
	void (*f)(void *) = THREAD->thread_code;
	void *arg = THREAD->thread_arg;
	THREAD->last_cycle = get_cycle();

	/* This is where each thread wakes up after its creation */
	irq_spinlock_unlock(&THREAD->lock, false);
	interrupts_enable();

	f(arg);

	/* Accumulate accounting to the task */
	irq_spinlock_lock(&THREAD->lock, true);
	if (!THREAD->uncounted) {
		thread_update_accounting(true);
		uint64_t ucycles = THREAD->ucycles;
		THREAD->ucycles = 0;
		uint64_t kcycles = THREAD->kcycles;
		THREAD->kcycles = 0;

		irq_spinlock_pass(&THREAD->lock, &TASK->lock);
		TASK->ucycles += ucycles;
		TASK->kcycles += kcycles;
		irq_spinlock_unlock(&TASK->lock, true);
	} else
		irq_spinlock_unlock(&THREAD->lock, true);

	thread_exit();

	/* Not reached */
}

/** Initialization and allocation for thread_t structure
 *
 */
static errno_t thr_constructor(void *obj, unsigned int kmflags)
{
	thread_t *thread = (thread_t *) obj;

	irq_spinlock_initialize(&thread->lock, "thread_t_lock");
	link_initialize(&thread->rq_link);
	link_initialize(&thread->wq_link);
	link_initialize(&thread->th_link);

	/* call the architecture-specific part of the constructor */
	thr_constructor_arch(thread);

#ifdef CONFIG_FPU
	thread->saved_fpu_context = slab_alloc(fpu_context_cache,
	    FRAME_ATOMIC | kmflags);
	if (!thread->saved_fpu_context)
		return ENOMEM;
#endif /* CONFIG_FPU */

	/*
	 * Allocate the kernel stack from low memory to prevent an infinite
	 * nesting of TLB-misses when accessing the stack from the part of the
	 * TLB-miss handler written in C.
	 *
	 * Note that low memory is safe to be used for the stack as it will be
	 * covered by the kernel identity mapping, which guarantees not to
	 * nest TLB-misses infinitely (either via some hardware mechanism or
	 * by the construction of the assembly-language part of the TLB-miss
	 * handler).
	 *
	 * This restriction can be lifted once each architecture provides
	 * a similar guarantee, for example by locking the kernel stack
	 * in the TLB whenever it is allocated from high memory and the
	 * thread is being scheduled to run.
	 */
	kmflags |= FRAME_LOWMEM;
	kmflags &= ~FRAME_HIGHMEM;

	// NOTE: All kernel stacks must be aligned to STACK_SIZE,
	// see get_stack_base().

	uintptr_t stack_phys =
	    frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);
	if (!stack_phys) {
#ifdef CONFIG_FPU
		assert(thread->saved_fpu_context);
		slab_free(fpu_context_cache, thread->saved_fpu_context);
#endif
		return ENOMEM;
	}

	thread->kstack = (uint8_t *) PA2KA(stack_phys);

#ifdef CONFIG_UDEBUG
	mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
#endif

	return EOK;
}

/** Destruction of thread_t object */
static size_t thr_destructor(void *obj)
{
	thread_t *thread = (thread_t *) obj;

	/* call the architecture-specific part of the destructor */
	thr_destructor_arch(thread);

	frame_free(KA2PA(thread->kstack), STACK_FRAMES);

#ifdef CONFIG_FPU
	assert(thread->saved_fpu_context);
	slab_free(fpu_context_cache, thread->saved_fpu_context);
#endif

	return STACK_FRAMES; /* number of frames freed */
}

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
	THREAD = NULL;

	atomic_store(&nrdy, 0);
	thread_cache = slab_cache_create("thread_t", sizeof(thread_t), 0,
	    thr_constructor, thr_destructor, 0);

#ifdef CONFIG_FPU
	fpu_context_cache = slab_cache_create("fpu_context_t",
	    sizeof(fpu_context_t), FPU_CONTEXT_ALIGN, NULL, NULL, 0);
#endif

	odict_initialize(&threads, threads_getkey, threads_cmp);
}

/** Wire thread to the given CPU
 *
 * @param thread Thread to be wired.
 * @param cpu CPU to wire the thread to.
 *
 */
void thread_wire(thread_t *thread, cpu_t *cpu)
{
	irq_spinlock_lock(&thread->lock, true);
	thread->cpu = cpu;
	thread->wired = true;
	irq_spinlock_unlock(&thread->lock, true);
}
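
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a caller that wants a freshly created thread to stay on a particular
 * processor would wire it before readying it. The worker function name,
 * the use of THREAD_FLAG_NONE and the index into the cpus array are
 * assumptions made for the example only.
 *
 *	thread_t *t = thread_create(worker, NULL, TASK, THREAD_FLAG_NONE, "w");
 *	if (t != NULL) {
 *		thread_wire(t, &cpus[0]);
 *		thread_ready(t);
 *	}
 */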

/** Invoked right before thread_ready() readies the thread. The thread is locked. */
static void before_thread_is_ready(thread_t *thread)
{
	assert(irq_spinlock_locked(&thread->lock));
}

/** Make thread ready
 *
 * Switch thread to the ready state.
 *
 * @param thread Thread to make ready.
 *
 */
void thread_ready(thread_t *thread)
{
	irq_spinlock_lock(&thread->lock, true);

	assert(thread->state != Ready);

	before_thread_is_ready(thread);

	int i = (thread->priority < RQ_COUNT - 1) ?
	    ++thread->priority : thread->priority;

	cpu_t *cpu;
	if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) {
		/* Cannot ready to another CPU */
		assert(thread->cpu != NULL);
		cpu = thread->cpu;
	} else if (thread->stolen) {
		/* Ready to the stealing CPU */
		cpu = CPU;
	} else if (thread->cpu) {
		/* Prefer the CPU on which the thread ran last */
		assert(thread->cpu != NULL);
		cpu = thread->cpu;
	} else {
		cpu = CPU;
	}

	thread->state = Ready;

	irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));

	/*
	 * Append thread to respective ready queue
	 * on respective processor.
	 */

	list_append(&thread->rq_link, &cpu->rq[i].rq);
	cpu->rq[i].n++;
	irq_spinlock_unlock(&(cpu->rq[i].lock), true);

	atomic_inc(&nrdy);
	atomic_inc(&cpu->nrdy);
}

/** Create new thread
 *
 * Create a new thread.
 *
 * @param func Thread's implementing function.
 * @param arg Thread's implementing function argument.
 * @param task Task to which the thread belongs. The caller must
 * guarantee that the task won't cease to exist during the
 * call. The task's lock may not be held.
 * @param flags Thread flags.
 * @param name Symbolic name (a copy is made).
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (*func)(void *), void *arg, task_t *task,
    thread_flags_t flags, const char *name)
{
	thread_t *thread = (thread_t *) slab_alloc(thread_cache, FRAME_ATOMIC);
	if (!thread)
		return NULL;

	if (thread_create_arch(thread, flags) != EOK) {
		slab_free(thread_cache, thread);
		return NULL;
	}

	/* Not needed, but good for debugging */
	memsetb(thread->kstack, STACK_SIZE, 0);

	irq_spinlock_lock(&tidlock, true);
	thread->tid = ++last_tid;
	irq_spinlock_unlock(&tidlock, true);

	memset(&thread->saved_context, 0, sizeof(thread->saved_context));
	context_set(&thread->saved_context, FADDR(cushion),
	    (uintptr_t) thread->kstack, STACK_SIZE);

	current_initialize((current_t *) thread->kstack);

	ipl_t ipl = interrupts_disable();
	thread->saved_context.ipl = interrupts_read();
	interrupts_restore(ipl);

	str_cpy(thread->name, THREAD_NAME_BUFLEN, name);

	thread->thread_code = func;
	thread->thread_arg = arg;
	thread->ticks = -1;
	thread->ucycles = 0;
	thread->kcycles = 0;
	thread->uncounted =
	    ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
	thread->priority = -1; /* Start in rq[0] */
	thread->cpu = NULL;
	thread->wired = false;
	thread->stolen = false;
	thread->uspace =
	    ((flags & THREAD_FLAG_USPACE) == THREAD_FLAG_USPACE);

	thread->nomigrate = 0;
	thread->state = Entering;

	timeout_initialize(&thread->sleep_timeout);
	thread->sleep_interruptible = false;
	thread->sleep_composable = false;
	thread->sleep_queue = NULL;
	thread->timeout_pending = false;

	thread->in_copy_from_uspace = false;
	thread->in_copy_to_uspace = false;

	thread->interrupted = false;
	thread->detached = false;
	waitq_initialize(&thread->join_wq);

	thread->task = task;

	thread->fpu_context_exists = false;
	thread->fpu_context_engaged = false;

	odlink_initialize(&thread->lthreads);

#ifdef CONFIG_UDEBUG
	/* Initialize debugging stuff */
	thread->btrace = false;
	udebug_thread_initialize(&thread->udebug);
#endif

	if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
		thread_attach(thread, task);

	return thread;
}
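
/*
 * Usage sketch (editorial addition, not part of the original file): kernel
 * code typically pairs thread_create() with thread_ready() to make the new
 * thread runnable. The function name example_worker and the use of
 * THREAD_FLAG_NONE are assumptions made for the example.
 *
 *	static void example_worker(void *arg)
 *	{
 *		(void) arg;
 *		// do work, then return; cushion() then calls thread_exit()
 *	}
 *
 *	thread_t *t = thread_create(example_worker, NULL, TASK,
 *	    THREAD_FLAG_NONE, "example");
 *	if (t != NULL)
 *		thread_ready(t);
 */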

/** Destroy thread memory structure
 *
 * Detach thread from all queues, CPUs, etc., and destroy it.
 *
 * @param thread Thread to be destroyed.
 * @param irq_res Indicate whether it should unlock thread->lock
 * in interrupts-restore mode.
 *
 */
void thread_destroy(thread_t *thread, bool irq_res)
{
	assert(irq_spinlock_locked(&thread->lock));
	assert((thread->state == Exiting) || (thread->state == Lingering));
	assert(thread->task);
	assert(thread->cpu);

	irq_spinlock_lock(&thread->cpu->lock, false);
	if (thread->cpu->fpu_owner == thread)
		thread->cpu->fpu_owner = NULL;
	irq_spinlock_unlock(&thread->cpu->lock, false);

	irq_spinlock_pass(&thread->lock, &threads_lock);

	odict_remove(&thread->lthreads);

	irq_spinlock_pass(&threads_lock, &thread->task->lock);

	/*
	 * Detach from the containing task.
	 */
	list_remove(&thread->th_link);
	irq_spinlock_unlock(&thread->task->lock, irq_res);

	/*
	 * Drop the reference to the containing task.
	 */
	task_release(thread->task);
	slab_free(thread_cache, thread);
}

/** Make the thread visible to the system.
 *
 * Attach the thread structure to the current task and make it visible in the
 * @c threads ordered dictionary.
 *
 * @param thread Thread to be attached to the task.
 * @param task Task to which the thread is to be attached.
 *
 */
void thread_attach(thread_t *thread, task_t *task)
{
	/*
	 * Attach to the specified task.
	 */
	irq_spinlock_lock(&task->lock, true);

	/* Hold a reference to the task. */
	task_hold(task);

	/* Must not count kbox thread into lifecount */
	if (thread->uspace)
		atomic_inc(&task->lifecount);

	list_append(&thread->th_link, &task->threads);

	irq_spinlock_pass(&task->lock, &threads_lock);

	/*
	 * Register this thread in the system-wide dictionary.
	 */
	odict_insert(&thread->lthreads, &threads, NULL);
	irq_spinlock_unlock(&threads_lock, true);
}

/** Terminate thread.
 *
 * End current thread execution and switch it to the exiting state.
 * All pending timeouts are executed.
 *
 */
void thread_exit(void)
{
	if (THREAD->uspace) {
#ifdef CONFIG_UDEBUG
		/* Generate udebug THREAD_E event */
		udebug_thread_e_event();

		/*
		 * This thread will not execute any code or system calls from
		 * now on.
		 */
		udebug_stoppable_begin();
#endif
		if (atomic_predec(&TASK->lifecount) == 0) {
			/*
			 * We are the last userspace thread in the task that
			 * still has not exited. With the exception of the
			 * moment the task was created, new userspace threads
			 * can only be created by threads of the same task.
			 * We are safe to perform cleanup.
			 *
			 */
			ipc_cleanup();
			futex_task_cleanup();
			sys_waitq_task_cleanup();
			LOG("Cleanup of task %" PRIu64 " completed.", TASK->taskid);
		}
	}

restart:
	irq_spinlock_lock(&THREAD->lock, true);
	if (THREAD->timeout_pending) {
		/* Busy waiting for timeouts in progress */
		irq_spinlock_unlock(&THREAD->lock, true);
		goto restart;
	}

	THREAD->state = Exiting;
	irq_spinlock_unlock(&THREAD->lock, true);

	scheduler();

	/* Not reached */
	while (true)
		;
}

/** Interrupts an existing thread so that it may exit as soon as possible.
 *
 * Threads that are blocked waiting for a synchronization primitive
 * are woken up with a return code of EINTR if the
 * blocking call was interruptible. See waitq_sleep_timeout().
 *
 * The caller must guarantee the thread object is valid during the entire
 * function, e.g. by holding the threads_lock lock.
 *
 * Interrupted threads automatically exit when returning back to user space.
 *
 * @param thread A valid thread object. The caller must guarantee it
 * will remain valid until thread_interrupt() exits.
 */
void thread_interrupt(thread_t *thread)
{
	assert(thread != NULL);

	irq_spinlock_lock(&thread->lock, true);

	thread->interrupted = true;
	bool sleeping = (thread->state == Sleeping);

	irq_spinlock_unlock(&thread->lock, true);

	if (sleeping)
		waitq_interrupt_sleep(thread);
}

/** Returns true if the thread was interrupted.
 *
 * @param thread A valid thread object. The caller must guarantee it will
 * be alive during the entire call.
 * @return true if the thread was already interrupted via thread_interrupt().
 */
bool thread_interrupted(thread_t *thread)
{
	assert(thread != NULL);

	bool interrupted;

	irq_spinlock_lock(&thread->lock, true);
	interrupted = thread->interrupted;
	irq_spinlock_unlock(&thread->lock, true);

	return interrupted;
}
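
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a long-running kernel thread can poll thread_interrupted() to exit
 * cooperatively once somebody calls thread_interrupt() on it. The
 * work_step() helper is hypothetical.
 *
 *	static void example_service(void *arg)
 *	{
 *		(void) arg;
 *		while (!thread_interrupted(THREAD))
 *			work_step();
 *		// returning lets cushion() call thread_exit()
 *	}
 */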

/** Prevent the current thread from being migrated to another processor. */
void thread_migration_disable(void)
{
	assert(THREAD);

	THREAD->nomigrate++;
}

/** Allow the current thread to be migrated to another processor. */
void thread_migration_enable(void)
{
	assert(THREAD);
	assert(THREAD->nomigrate > 0);

	if (THREAD->nomigrate > 0)
		THREAD->nomigrate--;
}

/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(uint32_t sec)
{
	/*
	 * Sleep in 1000 second steps to support
	 * full argument range
	 */
	while (sec > 0) {
		uint32_t period = (sec > 1000) ? 1000 : sec;

		thread_usleep(period * 1000000);
		sec -= period;
	}
}
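
/*
 * Editorial note (not part of the original file): the 1000-second step keeps
 * the thread_usleep() argument within uint32_t range. One step converts to at
 * most 1000 * 1,000,000 = 10^9 microseconds, which is below the uint32_t
 * maximum of 4,294,967,295, whereas converting the full argument range
 * (up to 2^32 - 1 seconds) to microseconds at once would overflow.
 */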

/** Wait for another thread to exit.
 *
 * @param thread Thread to join on exit.
 * @param usec Timeout in microseconds.
 * @param flags Mode of operation.
 *
 * @return An error code from errno.h or an error code from synch.h.
 *
 */
errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
{
	if (thread == THREAD)
		return EINVAL;

	/*
	 * Since thread join can only be called once on an undetached thread,
	 * the thread pointer is guaranteed to be still valid.
	 */

	irq_spinlock_lock(&thread->lock, true);
	assert(!thread->detached);
	irq_spinlock_unlock(&thread->lock, true);

	return waitq_sleep_timeout(&thread->join_wq, usec, flags, NULL);

	// FIXME: join should deallocate the thread.
	// Current code calls detach after join, that's contrary to how
	// join is used in other threading APIs.
}
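
/*
 * Usage sketch (editorial addition, not part of the original file): as the
 * FIXME above notes, current callers join and then detach so that the
 * lingering thread structure gets freed. SYNCH_NO_TIMEOUT and
 * SYNCH_FLAGS_NONE are the usual arguments for an unbounded,
 * non-interruptible wait; treat the exact names as assumptions of this
 * sketch rather than a statement about every call site.
 *
 *	errno_t rc = thread_join_timeout(t, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
 *	if (rc == EOK)
 *		thread_detach(t);
 */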

/** Detach thread.
 *
 * Mark the thread as detached. If the thread is already
 * in the Lingering state, deallocate its resources.
 *
 * @param thread Thread to be detached.
 *
 */
void thread_detach(thread_t *thread)
{
	/*
	 * Since the thread is expected not to be already detached,
	 * pointer to it must be still valid.
	 */
	irq_spinlock_lock(&thread->lock, true);
	assert(!thread->detached);

	if (thread->state == Lingering) {
		/*
		 * Unlock &thread->lock and restore
		 * interrupts in thread_destroy().
		 */
		thread_destroy(thread, true);
		return;
	} else {
		thread->detached = true;
	}

	irq_spinlock_unlock(&thread->lock, true);
}

/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(uint32_t usec)
{
	waitq_t wq;

	waitq_initialize(&wq);

	(void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
}

static void thread_print(thread_t *thread, bool additional)
{
	uint64_t ucycles, kcycles;
	char usuffix, ksuffix;
	order_suffix(thread->ucycles, &ucycles, &usuffix);
	order_suffix(thread->kcycles, &kcycles, &ksuffix);

	char *name;
	if (str_cmp(thread->name, "uinit") == 0)
		name = thread->task->name;
	else
		name = thread->name;

#ifdef __32_BITS__
	if (additional)
		printf("%-8" PRIu64 " %10p %10p %9" PRIu64 "%c %9" PRIu64 "%c ",
		    thread->tid, thread->thread_code, thread->kstack,
		    ucycles, usuffix, kcycles, ksuffix);
	else
		printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 "\n",
		    thread->tid, name, thread, thread_states[thread->state],
		    thread->task, thread->task->container);
#endif

#ifdef __64_BITS__
	if (additional)
		printf("%-8" PRIu64 " %18p %18p\n"
		    " %9" PRIu64 "%c %9" PRIu64 "%c ",
		    thread->tid, thread->thread_code, thread->kstack,
		    ucycles, usuffix, kcycles, ksuffix);
	else
		printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n",
		    thread->tid, name, thread, thread_states[thread->state],
		    thread->task, thread->task->container);
#endif

	if (additional) {
		if (thread->cpu)
			printf("%-5u", thread->cpu->id);
		else
			printf("none ");

		if (thread->state == Sleeping) {
#ifdef __32_BITS__
			printf(" %10p", thread->sleep_queue);
#endif

#ifdef __64_BITS__
			printf(" %18p", thread->sleep_queue);
#endif
		}

		printf("\n");
	}
}

/** Print list of threads debug info
 *
 * @param additional Print additional information.
 *
 */
void thread_print_list(bool additional)
{
	thread_t *thread;

	/* Messing with thread structures, avoid deadlock */
	irq_spinlock_lock(&threads_lock, true);

#ifdef __32_BITS__
	if (additional)
		printf("[id ] [code ] [stack ] [ucycles ] [kcycles ]"
		    " [cpu] [waitqueue]\n");
	else
		printf("[id ] [name ] [address ] [state ] [task ]"
		    " [ctn]\n");
#endif

#ifdef __64_BITS__
	if (additional) {
		printf("[id ] [code ] [stack ]\n"
		    " [ucycles ] [kcycles ] [cpu] [waitqueue ]\n");
	} else
		printf("[id ] [name ] [address ] [state ]"
		    " [task ] [ctn]\n");
#endif

	thread = thread_first();
	while (thread != NULL) {
		thread_print(thread, additional);
		thread = thread_next(thread);
	}

	irq_spinlock_unlock(&threads_lock, true);
}

/** Check whether thread exists.
 *
 * Note that threads_lock must be already held and
 * interrupts must be already disabled.
 *
 * @param thread Pointer to thread.
 *
 * @return True if the thread is known to the system, false otherwise.
 *
 */
bool thread_exists(thread_t *thread)
{
	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	odlink_t *odlink = odict_find_eq(&threads, thread, NULL);
	return odlink != NULL;
}

/** Update accounting of current thread.
 *
 * Note that THREAD->lock must be already held and
 * interrupts must be already disabled.
 *
 * @param user True to update user accounting, false for kernel.
 *
 */
void thread_update_accounting(bool user)
{
	uint64_t time = get_cycle();

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&THREAD->lock));

	if (user)
		THREAD->ucycles += time - THREAD->last_cycle;
	else
		THREAD->kcycles += time - THREAD->last_cycle;

	THREAD->last_cycle = time;
}

/** Find thread structure corresponding to thread ID.
 *
 * The threads_lock must be already held by the caller of this function and
 * interrupts must be disabled.
 *
 * @param thread_id Thread ID.
 *
 * @return Thread structure address or NULL if there is no such thread ID.
 *
 */
thread_t *thread_find_by_id(thread_id_t thread_id)
{
	thread_t *thread;

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	thread = thread_first();
	while (thread != NULL) {
		if (thread->tid == thread_id)
			return thread;

		thread = thread_next(thread);
	}

	return NULL;
}

/** Get count of threads.
 *
 * @return Number of threads in the system
 */
size_t thread_count(void)
{
	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	return odict_count(&threads);
}

/** Get first thread.
 *
 * @return Pointer to first thread or @c NULL if there are none.
 */
thread_t *thread_first(void)
{
	odlink_t *odlink;

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	odlink = odict_first(&threads);
	if (odlink == NULL)
		return NULL;

	return odict_get_instance(odlink, thread_t, lthreads);
}

/** Get next thread.
 *
 * @param cur Current thread
 * @return Pointer to next thread or @c NULL if there are no more threads.
 */
thread_t *thread_next(thread_t *cur)
{
	odlink_t *odlink;

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	odlink = odict_next(&cur->lthreads, &threads);
	if (odlink == NULL)
		return NULL;

	return odict_get_instance(odlink, thread_t, lthreads);
}
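
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * iterating over all threads follows the same pattern thread_print_list()
 * uses above. The loop must run with threads_lock held and interrupts
 * disabled, which irq_spinlock_lock(..., true) takes care of.
 *
 *	irq_spinlock_lock(&threads_lock, true);
 *	for (thread_t *t = thread_first(); t != NULL; t = thread_next(t)) {
 *		// inspect t; it cannot disappear while threads_lock is held
 *	}
 *	irq_spinlock_unlock(&threads_lock, true);
 */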

#ifdef CONFIG_UDEBUG

void thread_stack_trace(thread_id_t thread_id)
{
	irq_spinlock_lock(&threads_lock, true);

	thread_t *thread = thread_find_by_id(thread_id);
	if (thread == NULL) {
		printf("No such thread.\n");
		irq_spinlock_unlock(&threads_lock, true);
		return;
	}

	irq_spinlock_lock(&thread->lock, false);

	/*
	 * Schedule a stack trace to be printed
	 * just before the thread is scheduled next.
	 *
	 * If the thread is sleeping then try to interrupt
	 * the sleep. Any request for printing a uspace stack
	 * trace from within the kernel should always be
	 * considered a last-resort debugging means, therefore
	 * forcing the thread's sleep to be interrupted
	 * is probably justifiable.
	 */

	bool sleeping = false;
	istate_t *istate = thread->udebug.uspace_state;
	if (istate != NULL) {
		printf("Scheduling thread stack trace.\n");
		thread->btrace = true;
		if (thread->state == Sleeping)
			sleeping = true;
	} else
		printf("Thread interrupt state not available.\n");

	irq_spinlock_unlock(&thread->lock, false);

	if (sleeping)
		waitq_interrupt_sleep(thread);

	irq_spinlock_unlock(&threads_lock, true);
}

#endif /* CONFIG_UDEBUG */

/** Get key function for the @c threads ordered dictionary.
 *
 * @param odlink Link
 * @return Pointer to thread structure cast as 'void *'
 */
static void *threads_getkey(odlink_t *odlink)
{
	thread_t *thread = odict_get_instance(odlink, thread_t, lthreads);
	return (void *) thread;
}

/** Key comparison function for the @c threads ordered dictionary.
 *
 * @param a Pointer to thread A
 * @param b Pointer to thread B
 * @return -1, 0, 1 iff pointer to A is greater than, equal to, less than B
 */
static int threads_cmp(void *a, void *b)
{
	if (a > b)
		return -1;
	else if (a == b)
		return 0;
	else
		return +1;
}

/** Process syscall to create new thread.
 *
 */
sys_errno_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name,
    size_t name_len, thread_id_t *uspace_thread_id)
{
	if (name_len > THREAD_NAME_BUFLEN - 1)
		name_len = THREAD_NAME_BUFLEN - 1;

	char namebuf[THREAD_NAME_BUFLEN];
	errno_t rc = copy_from_uspace(namebuf, uspace_name, name_len);
	if (rc != EOK)
		return (sys_errno_t) rc;

	namebuf[name_len] = 0;

	/*
	 * In case of failure, kernel_uarg will be deallocated in this function.
	 * In case of success, kernel_uarg will be freed in uinit().
	 */
	uspace_arg_t *kernel_uarg =
	    (uspace_arg_t *) malloc(sizeof(uspace_arg_t));
	if (!kernel_uarg)
		return (sys_errno_t) ENOMEM;

	rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
	if (rc != EOK) {
		free(kernel_uarg);
		return (sys_errno_t) rc;
	}

	thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
	    THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf);
	if (thread) {
		if (uspace_thread_id != NULL) {
			rc = copy_to_uspace(uspace_thread_id, &thread->tid,
			    sizeof(thread->tid));
			if (rc != EOK) {
				/*
				 * We have encountered a failure, but the thread
				 * has already been created. We need to undo its
				 * creation now.
				 */

				/*
				 * The new thread structure is initialized, but
				 * is still not visible to the system.
				 * We can safely deallocate it.
				 */
				slab_free(thread_cache, thread);
				free(kernel_uarg);

				return (sys_errno_t) rc;
			}
		}

#ifdef CONFIG_UDEBUG
		/*
		 * Generate udebug THREAD_B event and attach the thread.
		 * This must be done atomically (with the debug locks held),
		 * otherwise we would either miss some thread or receive
		 * THREAD_B events for threads that already existed
		 * and could be detected with THREAD_READ before.
		 */
		udebug_thread_b_event_attach(thread, TASK);
#else
		thread_attach(thread, TASK);
#endif
		thread_ready(thread);

		return 0;
	} else
		free(kernel_uarg);

	return (sys_errno_t) ENOMEM;
}

/** Process syscall to terminate thread.
 *
 */
sys_errno_t sys_thread_exit(int uspace_status)
{
	thread_exit();
}

/** Syscall for getting TID.
 *
 * @param uspace_thread_id Userspace address of 8-byte buffer where to store
 * current thread ID.
 *
 * @return 0 on success or an error code from @ref errno.h.
 *
 */
sys_errno_t sys_thread_get_id(thread_id_t *uspace_thread_id)
{
	/*
	 * No need to acquire lock on THREAD because tid
	 * remains constant for the lifespan of the thread.
	 *
	 */
	return (sys_errno_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
	    sizeof(THREAD->tid));
}

/** Syscall wrapper for sleeping. */
sys_errno_t sys_thread_usleep(uint32_t usec)
{
	thread_usleep(usec);
	return 0;
}

sys_errno_t sys_thread_udelay(uint32_t usec)
{
	delay(usec);
	return 0;
}

/** @}
 */