source: mainline/kernel/generic/src/proc/thread.c @ 94e75cf

Last change on this file since 94e75cf was 5a5269d, checked in by GitHub <noreply@…>, 6 years ago

Change type of uspace pointers in kernel from pointer type to numeric (#170)

From kernel's perspective, userspace addresses are not valid pointers,
and can only be used in calls to copy_to/from_uspace().
Therefore, we change the type of those arguments and variables to
uspace_addr_t, which is an alias for sysarg_t.

This allows the compiler to catch accidental direct accesses to
userspace addresses.

Additionally, to avoid losing the type information in code,
a macro uspace_ptr(type) is used that translates to uspace_addr_t.
It makes no functional difference, but it allows keeping the type information
in the code in case we implement some sort of static checking for it in the future.

However, ccheck doesn't like that, so instead of using uspace_ptr(char),
we use uspace_ptr_char, which is defined as
#define uspace_ptr_char uspace_ptr(char).
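
As an editor's sketch of the scheme the commit describes (the exact
definitions live in the kernel headers and may differ in detail):

    /* Userspace addresses are plain numbers from the kernel's view. */
    typedef sysarg_t uspace_addr_t;

    /* Keep the pointee type visible in source; the compiler sees only a
       numeric type, so accidental direct dereferences fail to compile. */
    #define uspace_ptr(type) uspace_addr_t
    #define uspace_ptr_char uspace_ptr(char)

    /* Data must cross the boundary explicitly, as in sys_thread_create(): */
    char namebuf[THREAD_NAME_BUFLEN];
    errno_t rc = copy_from_uspace(namebuf, uspace_name, name_len);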

/*
 * Copyright (c) 2010 Jakub Jermar
 * Copyright (c) 2018 Jiri Svoboda
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_generic_proc
 * @{
 */

/**
 * @file
 * @brief Thread management functions.
 */

#include <assert.h>
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch/cycle.h>
#include <arch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/syswaitq.h>
#include <cpu.h>
#include <str.h>
#include <context.h>
#include <adt/list.h>
#include <adt/odict.h>
#include <time/clock.h>
#include <time/timeout.h>
#include <time/delay.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <mem.h>
#include <stdio.h>
#include <stdlib.h>
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>
#include <debug.h>

/** Thread states */
const char *thread_states[] = {
    "Invalid",
    "Running",
    "Sleeping",
    "Ready",
    "Entering",
    "Exiting",
    "Lingering"
};

/** Lock protecting the @c threads ordered dictionary.
 *
 * For locking rules, see the declaration thereof.
 */
IRQ_SPINLOCK_INITIALIZE(threads_lock);

/** Ordered dictionary of all threads by their address (i.e. pointer to
 * the thread_t structure).
 *
 * When a thread is found in the @c threads ordered dictionary, it is
 * guaranteed to exist as long as the @c threads_lock is held.
 *
 * Members are of type thread_t.
 */
odict_t threads;

IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
static thread_id_t last_tid = 0;

static slab_cache_t *thread_cache;

#ifdef CONFIG_FPU
slab_cache_t *fpu_context_cache;
#endif

static void *threads_getkey(odlink_t *);
static int threads_cmp(void *, void *);

/** Thread wrapper.
 *
 * This wrapper is provided to ensure that every thread makes a call to
 * thread_exit() when its implementing function returns.
 *
 * Interrupts are assumed to be disabled on entry.
 *
 */
static void cushion(void)
{
    void (*f)(void *) = THREAD->thread_code;
    void *arg = THREAD->thread_arg;
    THREAD->last_cycle = get_cycle();

    /* This is where each thread wakes up after its creation */
    irq_spinlock_unlock(&THREAD->lock, false);
    interrupts_enable();

    f(arg);

    /* Accumulate accounting to the task */
    irq_spinlock_lock(&THREAD->lock, true);
    if (!THREAD->uncounted) {
        thread_update_accounting(true);
        uint64_t ucycles = THREAD->ucycles;
        THREAD->ucycles = 0;
        uint64_t kcycles = THREAD->kcycles;
        THREAD->kcycles = 0;

        irq_spinlock_pass(&THREAD->lock, &TASK->lock);
        TASK->ucycles += ucycles;
        TASK->kcycles += kcycles;
        irq_spinlock_unlock(&TASK->lock, true);
    } else
        irq_spinlock_unlock(&THREAD->lock, true);

    thread_exit();

    /* Not reached */
}

/** Initialization and allocation for thread_t structure
 *
 */
static errno_t thr_constructor(void *obj, unsigned int kmflags)
{
    thread_t *thread = (thread_t *) obj;

    irq_spinlock_initialize(&thread->lock, "thread_t_lock");
    link_initialize(&thread->rq_link);
    link_initialize(&thread->wq_link);
    link_initialize(&thread->th_link);

    /* call the architecture-specific part of the constructor */
    thr_constructor_arch(thread);

#ifdef CONFIG_FPU
    thread->saved_fpu_context = slab_alloc(fpu_context_cache,
        FRAME_ATOMIC | kmflags);
    if (!thread->saved_fpu_context)
        return ENOMEM;
#endif /* CONFIG_FPU */

    /*
     * Allocate the kernel stack from the low-memory to prevent an infinite
     * nesting of TLB-misses when accessing the stack from the part of the
     * TLB-miss handler written in C.
     *
     * Note that low-memory is safe to be used for the stack as it will be
     * covered by the kernel identity mapping, which guarantees not to
     * nest TLB-misses infinitely (either via some hardware mechanism or
     * by the construction of the assembly-language part of the TLB-miss
     * handler).
     *
     * This restriction can be lifted once each architecture provides
     * a similar guarantee, for example, by locking the kernel stack
     * in the TLB whenever it is allocated from the high-memory and the
     * thread is being scheduled to run.
     */
    kmflags |= FRAME_LOWMEM;
    kmflags &= ~FRAME_HIGHMEM;

    // NOTE: All kernel stacks must be aligned to STACK_SIZE,
    // see get_stack_base().

    uintptr_t stack_phys =
        frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);
    if (!stack_phys) {
#ifdef CONFIG_FPU
        assert(thread->saved_fpu_context);
        slab_free(fpu_context_cache, thread->saved_fpu_context);
#endif
        return ENOMEM;
    }

    thread->kstack = (uint8_t *) PA2KA(stack_phys);

#ifdef CONFIG_UDEBUG
    mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
#endif

    return EOK;
}

/** Destruction of thread_t object */
static size_t thr_destructor(void *obj)
{
    thread_t *thread = (thread_t *) obj;

    /* call the architecture-specific part of the destructor */
    thr_destructor_arch(thread);

    frame_free(KA2PA(thread->kstack), STACK_FRAMES);

#ifdef CONFIG_FPU
    assert(thread->saved_fpu_context);
    slab_free(fpu_context_cache, thread->saved_fpu_context);
#endif

    return STACK_FRAMES; /* number of frames freed */
}

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
    THREAD = NULL;

    atomic_store(&nrdy, 0);
    thread_cache = slab_cache_create("thread_t", sizeof(thread_t), 0,
        thr_constructor, thr_destructor, 0);

#ifdef CONFIG_FPU
    fpu_context_cache = slab_cache_create("fpu_context_t",
        sizeof(fpu_context_t), FPU_CONTEXT_ALIGN, NULL, NULL, 0);
#endif

    odict_initialize(&threads, threads_getkey, threads_cmp);
}

/** Wire thread to the given CPU
 *
 * @param thread Thread to wire.
 * @param cpu    CPU to wire the thread to.
 *
 */
void thread_wire(thread_t *thread, cpu_t *cpu)
{
    irq_spinlock_lock(&thread->lock, true);
    thread->cpu = cpu;
    thread->wired = true;
    irq_spinlock_unlock(&thread->lock, true);
}

/** Invoked right before thread_ready() readies the thread. The thread is locked. */
static void before_thread_is_ready(thread_t *thread)
{
    assert(irq_spinlock_locked(&thread->lock));
}

/** Make thread ready
 *
 * Switch thread to the ready state.
 *
 * @param thread Thread to make ready.
 *
 */
void thread_ready(thread_t *thread)
{
    irq_spinlock_lock(&thread->lock, true);

    assert(thread->state != Ready);

    before_thread_is_ready(thread);

    int i = (thread->priority < RQ_COUNT - 1) ?
        ++thread->priority : thread->priority;

    cpu_t *cpu;
    if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) {
        /* Cannot ready to another CPU */
        assert(thread->cpu != NULL);
        cpu = thread->cpu;
    } else if (thread->stolen) {
        /* Ready to the stealing CPU */
        cpu = CPU;
    } else if (thread->cpu) {
        /* Prefer the CPU on which the thread ran last */
        cpu = thread->cpu;
    } else {
        cpu = CPU;
    }

    thread->state = Ready;

    irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));

    /*
     * Append thread to respective ready queue
     * on respective processor.
     */

    list_append(&thread->rq_link, &cpu->rq[i].rq);
    cpu->rq[i].n++;
    irq_spinlock_unlock(&(cpu->rq[i].lock), true);

    atomic_inc(&nrdy);
    atomic_inc(&cpu->nrdy);
}

/** Create new thread
 *
 * Create a new thread.
 *
 * @param func  Thread's implementing function.
 * @param arg   Thread's implementing function argument.
 * @param task  Task to which the thread belongs. The caller must
 *              guarantee that the task won't cease to exist during the
 *              call. The task's lock may not be held.
 * @param flags Thread flags.
 * @param name  Symbolic name (a copy is made).
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (*func)(void *), void *arg, task_t *task,
    thread_flags_t flags, const char *name)
{
    thread_t *thread = (thread_t *) slab_alloc(thread_cache, FRAME_ATOMIC);
    if (!thread)
        return NULL;

    if (thread_create_arch(thread, flags) != EOK) {
        slab_free(thread_cache, thread);
        return NULL;
    }

    /* Not needed, but good for debugging */
    memsetb(thread->kstack, STACK_SIZE, 0);

    irq_spinlock_lock(&tidlock, true);
    thread->tid = ++last_tid;
    irq_spinlock_unlock(&tidlock, true);

    memset(&thread->saved_context, 0, sizeof(thread->saved_context));
    context_set(&thread->saved_context, FADDR(cushion),
        (uintptr_t) thread->kstack, STACK_SIZE);

    current_initialize((current_t *) thread->kstack);

    ipl_t ipl = interrupts_disable();
    thread->saved_context.ipl = interrupts_read();
    interrupts_restore(ipl);

    str_cpy(thread->name, THREAD_NAME_BUFLEN, name);

    thread->thread_code = func;
    thread->thread_arg = arg;
    thread->ticks = -1;
    thread->ucycles = 0;
    thread->kcycles = 0;
    thread->uncounted =
        ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
    thread->priority = -1; /* Start in rq[0] */
    thread->cpu = NULL;
    thread->wired = false;
    thread->stolen = false;
    thread->uspace =
        ((flags & THREAD_FLAG_USPACE) == THREAD_FLAG_USPACE);

    thread->nomigrate = 0;
    thread->state = Entering;

    timeout_initialize(&thread->sleep_timeout);
    thread->sleep_interruptible = false;
    thread->sleep_composable = false;
    thread->sleep_queue = NULL;
    thread->timeout_pending = false;

    thread->in_copy_from_uspace = false;
    thread->in_copy_to_uspace = false;

    thread->interrupted = false;
    thread->detached = false;
    waitq_initialize(&thread->join_wq);

    thread->task = task;

    thread->fpu_context_exists = false;
    thread->fpu_context_engaged = false;

    odlink_initialize(&thread->lthreads);

#ifdef CONFIG_UDEBUG
    /* Initialize debugging stuff */
    thread->btrace = false;
    udebug_thread_initialize(&thread->udebug);
#endif

    if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
        thread_attach(thread, task);

    return thread;
}
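
/*
 * Editor's illustration (not part of the original file): a typical
 * in-kernel caller pairs thread_create() with thread_ready(). Here
 * 'worker' is a hypothetical void worker(void *) function, and
 * THREAD_FLAG_NONE is assumed to be the empty flag set from the
 * thread header.
 *
 *     thread_t *t = thread_create(worker, NULL, TASK,
 *         THREAD_FLAG_NONE, "worker");
 *     if (t != NULL)
 *         thread_ready(t);
 */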

/** Destroy thread memory structure
 *
 * Detach thread from all queues, CPUs etc. and destroy it.
 *
 * @param thread  Thread to be destroyed.
 * @param irq_res Indicates whether interrupts should be restored
 *                when unlocking thread->lock.
 *
 */
void thread_destroy(thread_t *thread, bool irq_res)
{
    assert(irq_spinlock_locked(&thread->lock));
    assert((thread->state == Exiting) || (thread->state == Lingering));
    assert(thread->task);
    assert(thread->cpu);

    irq_spinlock_lock(&thread->cpu->lock, false);
    if (thread->cpu->fpu_owner == thread)
        thread->cpu->fpu_owner = NULL;
    irq_spinlock_unlock(&thread->cpu->lock, false);

    irq_spinlock_pass(&thread->lock, &threads_lock);

    odict_remove(&thread->lthreads);

    irq_spinlock_pass(&threads_lock, &thread->task->lock);

    /*
     * Detach from the containing task.
     */
    list_remove(&thread->th_link);
    irq_spinlock_unlock(&thread->task->lock, irq_res);

    /*
     * Drop the reference to the containing task.
     */
    task_release(thread->task);
    slab_free(thread_cache, thread);
}

/** Make the thread visible to the system.
 *
 * Attach the thread structure to the current task and make it visible in the
 * @c threads ordered dictionary.
 *
 * @param thread Thread to be attached to the task.
 * @param task   Task to which the thread is to be attached.
 *
 */
void thread_attach(thread_t *thread, task_t *task)
{
    /*
     * Attach to the specified task.
     */
    irq_spinlock_lock(&task->lock, true);

    /* Hold a reference to the task. */
    task_hold(task);

    /* Must not count kbox thread into lifecount */
    if (thread->uspace)
        atomic_inc(&task->lifecount);

    list_append(&thread->th_link, &task->threads);

    irq_spinlock_pass(&task->lock, &threads_lock);

    /*
     * Register this thread in the system-wide dictionary.
     */
    odict_insert(&thread->lthreads, &threads, NULL);
    irq_spinlock_unlock(&threads_lock, true);
}

/** Terminate thread.
 *
 * End current thread execution and switch it to the exiting state.
 * All pending timeouts are executed.
 *
 */
void thread_exit(void)
{
    if (THREAD->uspace) {
#ifdef CONFIG_UDEBUG
        /* Generate udebug THREAD_E event */
        udebug_thread_e_event();

        /*
         * This thread will not execute any code or system calls from
         * now on.
         */
        udebug_stoppable_begin();
#endif
        if (atomic_predec(&TASK->lifecount) == 0) {
            /*
             * We are the last userspace thread in the task that
             * still has not exited. With the exception of the
             * moment the task was created, new userspace threads
             * can only be created by threads of the same task.
             * We are safe to perform cleanup.
             */
            ipc_cleanup();
            sys_waitq_task_cleanup();
            LOG("Cleanup of task %" PRIu64 " completed.", TASK->taskid);
        }
    }

restart:
    irq_spinlock_lock(&THREAD->lock, true);
    if (THREAD->timeout_pending) {
        /* Busy waiting for timeouts in progress */
        irq_spinlock_unlock(&THREAD->lock, true);
        goto restart;
    }

    THREAD->state = Exiting;
    irq_spinlock_unlock(&THREAD->lock, true);

    scheduler();

    /* Not reached */
    while (true)
        ;
}

/** Interrupt an existing thread so that it may exit as soon as possible.
 *
 * Threads that are blocked waiting for a synchronization primitive
 * are woken up with a return code of EINTR if the
 * blocking call was interruptible. See waitq_sleep_timeout().
 *
 * The caller must guarantee the thread object is valid during the entire
 * function, e.g. by holding the threads_lock lock.
 *
 * Interrupted threads automatically exit when returning back to user space.
 *
 * @param thread A valid thread object. The caller must guarantee it
 *               will remain valid until thread_interrupt() exits.
 */
void thread_interrupt(thread_t *thread)
{
    assert(thread != NULL);

    irq_spinlock_lock(&thread->lock, true);

    thread->interrupted = true;
    bool sleeping = (thread->state == Sleeping);

    irq_spinlock_unlock(&thread->lock, true);

    if (sleeping)
        waitq_interrupt_sleep(thread);
}

/** Return true if the thread was interrupted.
 *
 * @param thread A valid thread object. The caller must guarantee it will
 *               be alive during the entire call.
 * @return true if the thread was already interrupted via thread_interrupt().
 */
bool thread_interrupted(thread_t *thread)
{
    assert(thread != NULL);

    bool interrupted;

    irq_spinlock_lock(&thread->lock, true);
    interrupted = thread->interrupted;
    irq_spinlock_unlock(&thread->lock, true);

    return interrupted;
}

/** Prevent the current thread from being migrated to another processor. */
void thread_migration_disable(void)
{
    assert(THREAD);

    THREAD->nomigrate++;
}

/** Allow the current thread to be migrated to another processor. */
void thread_migration_enable(void)
{
    assert(THREAD);
    assert(THREAD->nomigrate > 0);

    if (THREAD->nomigrate > 0)
        THREAD->nomigrate--;
}

/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(uint32_t sec)
{
    /*
     * Sleep in 1000-second steps to support the full argument range:
     * 1000 s is 10^9 us, which still fits into the uint32_t argument
     * of thread_usleep() (UINT32_MAX is roughly 4.29 * 10^9).
     */
    while (sec > 0) {
        uint32_t period = (sec > 1000) ? 1000 : sec;

        thread_usleep(period * 1000000);
        sec -= period;
    }
}

/** Wait for another thread to exit.
 *
 * @param thread Thread to join on exit.
 * @param usec   Timeout in microseconds.
 * @param flags  Mode of operation.
 *
 * @return An error code from errno.h or an error code from synch.h.
 *
 */
errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
{
    if (thread == THREAD)
        return EINVAL;

    /*
     * Since thread join can only be called once on an undetached thread,
     * the thread pointer is guaranteed to be still valid.
     */

    irq_spinlock_lock(&thread->lock, true);
    assert(!thread->detached);
    irq_spinlock_unlock(&thread->lock, true);

    return waitq_sleep_timeout(&thread->join_wq, usec, flags, NULL);

    // FIXME: join should deallocate the thread.
    // Current code calls detach after join, that's contrary to how
    // join is used in other threading APIs.
}
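
/*
 * Editor's illustration (not part of the original file): given the
 * semantics noted in the FIXME above, a caller currently joins and then
 * detaches. SYNCH_NO_TIMEOUT and SYNCH_FLAGS_NONE are assumed to come
 * from the synch headers.
 *
 *     errno_t rc = thread_join_timeout(t, SYNCH_NO_TIMEOUT,
 *         SYNCH_FLAGS_NONE);
 *     if (rc == EOK)
 *         thread_detach(t);
 */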

/** Detach thread.
 *
 * Mark the thread as detached. If the thread is already
 * in the Lingering state, deallocate its resources.
 *
 * @param thread Thread to be detached.
 *
 */
void thread_detach(thread_t *thread)
{
    /*
     * Since the thread is expected not to be already detached,
     * the pointer to it must still be valid.
     */
    irq_spinlock_lock(&thread->lock, true);
    assert(!thread->detached);

    if (thread->state == Lingering) {
        /*
         * Unlock &thread->lock and restore
         * interrupts in thread_destroy().
         */
        thread_destroy(thread, true);
        return;
    } else {
        thread->detached = true;
    }

    irq_spinlock_unlock(&thread->lock, true);
}

/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(uint32_t usec)
{
    waitq_t wq;

    waitq_initialize(&wq);

    (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
}

static void thread_print(thread_t *thread, bool additional)
{
    uint64_t ucycles, kcycles;
    char usuffix, ksuffix;
    order_suffix(thread->ucycles, &ucycles, &usuffix);
    order_suffix(thread->kcycles, &kcycles, &ksuffix);

    char *name;
    if (str_cmp(thread->name, "uinit") == 0)
        name = thread->task->name;
    else
        name = thread->name;

#ifdef __32_BITS__
    if (additional)
        printf("%-8" PRIu64 " %10p %10p %9" PRIu64 "%c %9" PRIu64 "%c ",
            thread->tid, thread->thread_code, thread->kstack,
            ucycles, usuffix, kcycles, ksuffix);
    else
        printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 "\n",
            thread->tid, name, thread, thread_states[thread->state],
            thread->task, thread->task->container);
#endif

#ifdef __64_BITS__
    if (additional)
        printf("%-8" PRIu64 " %18p %18p\n"
            "         %9" PRIu64 "%c %9" PRIu64 "%c ",
            thread->tid, thread->thread_code, thread->kstack,
            ucycles, usuffix, kcycles, ksuffix);
    else
        printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n",
            thread->tid, name, thread, thread_states[thread->state],
            thread->task, thread->task->container);
#endif

    if (additional) {
        if (thread->cpu)
            printf("%-5u", thread->cpu->id);
        else
            printf("none ");

        if (thread->state == Sleeping) {
#ifdef __32_BITS__
            printf(" %10p", thread->sleep_queue);
#endif

#ifdef __64_BITS__
            printf(" %18p", thread->sleep_queue);
#endif
        }

        printf("\n");
    }
}

/** Print list of threads debug info
 *
 * @param additional Print additional information.
 *
 */
void thread_print_list(bool additional)
{
    thread_t *thread;

    /* Messing with thread structures, avoid deadlock */
    irq_spinlock_lock(&threads_lock, true);

#ifdef __32_BITS__
    if (additional)
        printf("[id    ] [code    ] [stack   ] [ucycles ] [kcycles ]"
            " [cpu] [waitqueue]\n");
    else
        printf("[id    ] [name        ] [address ] [state  ] [task    ]"
            " [ctn]\n");
#endif

#ifdef __64_BITS__
    if (additional) {
        printf("[id    ] [code            ] [stack           ]\n"
            "         [ucycles ] [kcycles ] [cpu] [waitqueue       ]\n");
    } else
        printf("[id    ] [name        ] [address         ] [state  ]"
            " [task            ] [ctn]\n");
#endif

    thread = thread_first();
    while (thread != NULL) {
        thread_print(thread, additional);
        thread = thread_next(thread);
    }

    irq_spinlock_unlock(&threads_lock, true);
}

/** Check whether a thread exists.
 *
 * Note that threads_lock must already be held and
 * interrupts must already be disabled.
 *
 * @param thread Pointer to thread.
 *
 * @return True if the thread is known to the system, false otherwise.
 *
 */
bool thread_exists(thread_t *thread)
{
    assert(interrupts_disabled());
    assert(irq_spinlock_locked(&threads_lock));

    odlink_t *odlink = odict_find_eq(&threads, thread, NULL);
    return odlink != NULL;
}

/** Update accounting of current thread.
 *
 * Note that THREAD->lock must already be held and
 * interrupts must already be disabled.
 *
 * @param user True to update user accounting, false for kernel.
 *
 */
void thread_update_accounting(bool user)
{
    uint64_t time = get_cycle();

    assert(interrupts_disabled());
    assert(irq_spinlock_locked(&THREAD->lock));

    if (user)
        THREAD->ucycles += time - THREAD->last_cycle;
    else
        THREAD->kcycles += time - THREAD->last_cycle;

    THREAD->last_cycle = time;
}

/** Find thread structure corresponding to thread ID.
 *
 * The threads_lock must already be held by the caller of this function and
 * interrupts must be disabled.
 *
 * @param thread_id Thread ID.
 *
 * @return Thread structure address or NULL if there is no such thread ID.
 *
 */
thread_t *thread_find_by_id(thread_id_t thread_id)
{
    thread_t *thread;

    assert(interrupts_disabled());
    assert(irq_spinlock_locked(&threads_lock));

    thread = thread_first();
    while (thread != NULL) {
        if (thread->tid == thread_id)
            return thread;

        thread = thread_next(thread);
    }

    return NULL;
}

/** Get count of threads.
 *
 * @return Number of threads in the system
 */
size_t thread_count(void)
{
    assert(interrupts_disabled());
    assert(irq_spinlock_locked(&threads_lock));

    return odict_count(&threads);
}

/** Get first thread.
 *
 * @return Pointer to first thread or @c NULL if there are none.
 */
thread_t *thread_first(void)
{
    odlink_t *odlink;

    assert(interrupts_disabled());
    assert(irq_spinlock_locked(&threads_lock));

    odlink = odict_first(&threads);
    if (odlink == NULL)
        return NULL;

    return odict_get_instance(odlink, thread_t, lthreads);
}

/** Get next thread.
 *
 * @param cur Current thread
 * @return Pointer to next thread or @c NULL if there are no more threads.
 */
thread_t *thread_next(thread_t *cur)
{
    odlink_t *odlink;

    assert(interrupts_disabled());
    assert(irq_spinlock_locked(&threads_lock));

    odlink = odict_next(&cur->lthreads, &threads);
    if (odlink == NULL)
        return NULL;

    return odict_get_instance(odlink, thread_t, lthreads);
}
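
/*
 * Editor's illustration (not part of the original file): the asserts
 * above require callers to hold threads_lock with interrupts disabled
 * while iterating, as thread_print_list() does:
 *
 *     irq_spinlock_lock(&threads_lock, true);
 *     for (thread_t *t = thread_first(); t != NULL; t = thread_next(t))
 *         process(t);  // hypothetical per-thread work
 *     irq_spinlock_unlock(&threads_lock, true);
 */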

#ifdef CONFIG_UDEBUG

void thread_stack_trace(thread_id_t thread_id)
{
    irq_spinlock_lock(&threads_lock, true);

    thread_t *thread = thread_find_by_id(thread_id);
    if (thread == NULL) {
        printf("No such thread.\n");
        irq_spinlock_unlock(&threads_lock, true);
        return;
    }

    irq_spinlock_lock(&thread->lock, false);

    /*
     * Schedule a stack trace to be printed
     * just before the thread is scheduled next.
     *
     * If the thread is sleeping then try to interrupt
     * the sleep. Any request for printing a uspace stack
     * trace from within the kernel should always be
     * considered a last-resort debugging means, therefore
     * forcing the thread's sleep to be interrupted
     * is probably justifiable.
     */

    bool sleeping = false;
    istate_t *istate = thread->udebug.uspace_state;
    if (istate != NULL) {
        printf("Scheduling thread stack trace.\n");
        thread->btrace = true;
        if (thread->state == Sleeping)
            sleeping = true;
    } else
        printf("Thread interrupt state not available.\n");

    irq_spinlock_unlock(&thread->lock, false);

    if (sleeping)
        waitq_interrupt_sleep(thread);

    irq_spinlock_unlock(&threads_lock, true);
}

#endif /* CONFIG_UDEBUG */

/** Get key function for the @c threads ordered dictionary.
 *
 * @param odlink Link
 * @return Pointer to thread structure cast as 'void *'
 */
static void *threads_getkey(odlink_t *odlink)
{
    thread_t *thread = odict_get_instance(odlink, thread_t, lthreads);
    return (void *) thread;
}

/** Key comparison function for the @c threads ordered dictionary.
 *
 * Note that the ordering is descending by address; the dictionary only
 * requires the ordering to be consistent.
 *
 * @param a Pointer to thread A
 * @param b Pointer to thread B
 * @return -1, 0, +1 if pointer A is greater than, equal to, or less than
 *         pointer B, respectively
 */
static int threads_cmp(void *a, void *b)
{
    if (a > b)
        return -1;
    else if (a == b)
        return 0;
    else
        return +1;
}

/** Process syscall to create new thread.
 *
 */
sys_errno_t sys_thread_create(uspace_ptr_uspace_arg_t uspace_uarg, uspace_ptr_char uspace_name,
    size_t name_len, uspace_ptr_thread_id_t uspace_thread_id)
{
    if (name_len > THREAD_NAME_BUFLEN - 1)
        name_len = THREAD_NAME_BUFLEN - 1;

    char namebuf[THREAD_NAME_BUFLEN];
    errno_t rc = copy_from_uspace(namebuf, uspace_name, name_len);
    if (rc != EOK)
        return (sys_errno_t) rc;

    namebuf[name_len] = 0;

    /*
     * In case of failure, kernel_uarg will be deallocated in this function.
     * In case of success, kernel_uarg will be freed in uinit().
     */
    uspace_arg_t *kernel_uarg =
        (uspace_arg_t *) malloc(sizeof(uspace_arg_t));
    if (!kernel_uarg)
        return (sys_errno_t) ENOMEM;

    rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
    if (rc != EOK) {
        free(kernel_uarg);
        return (sys_errno_t) rc;
    }

    thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
        THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf);
    if (thread) {
        if (uspace_thread_id) {
            rc = copy_to_uspace(uspace_thread_id, &thread->tid,
                sizeof(thread->tid));
            if (rc != EOK) {
                /*
                 * We have encountered a failure, but the thread
                 * has already been created. We need to undo its
                 * creation now.
                 */

                /*
                 * The new thread structure is initialized, but
                 * is still not visible to the system.
                 * We can safely deallocate it.
                 */
                slab_free(thread_cache, thread);
                free(kernel_uarg);

                return (sys_errno_t) rc;
            }
        }

#ifdef CONFIG_UDEBUG
        /*
         * Generate udebug THREAD_B event and attach the thread.
         * This must be done atomically (with the debug locks held),
         * otherwise we would either miss some thread or receive
         * THREAD_B events for threads that already existed
         * and could be detected with THREAD_READ before.
         */
        udebug_thread_b_event_attach(thread, TASK);
#else
        thread_attach(thread, TASK);
#endif
        thread_ready(thread);

        return 0;
    } else
        free(kernel_uarg);

    return (sys_errno_t) ENOMEM;
}

/** Process syscall to terminate thread.
 *
 */
sys_errno_t sys_thread_exit(int uspace_status)
{
    thread_exit();
}

/** Syscall for getting TID.
 *
 * @param uspace_thread_id Userspace address of 8-byte buffer where to store
 *                         current thread ID.
 *
 * @return 0 on success or an error code from @ref errno.h.
 *
 */
sys_errno_t sys_thread_get_id(uspace_ptr_thread_id_t uspace_thread_id)
{
    /*
     * No need to acquire lock on THREAD because tid
     * remains constant for the lifespan of the thread.
     */
    return (sys_errno_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
        sizeof(THREAD->tid));
}

/** Syscall wrapper for sleeping. */
sys_errno_t sys_thread_usleep(uint32_t usec)
{
    thread_usleep(usec);
    return 0;
}

sys_errno_t sys_thread_udelay(uint32_t usec)
{
    delay(usec);
    return 0;
}

/** @}
 */