source: mainline/kernel/generic/src/proc/thread.c@ c1b073b7

Last change on this file was c1b073b7, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 3 years ago

Remove some unnecessary #ifdefs

%p does not care about a specified number of digits,
so that distinction was unnecessary to begin with.
Splitting the line for 64-bit pointers is also more harmful
than helpful; the line is not that long.
For spacing, the #ifdef was replaced with a regular C if.
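
As a sketch of that last change (hypothetical shape, not the actual diff), the compile-time selection becomes an ordinary C condition, the same if (sizeof(void *) <= 4) test now visible in thread_print_list(); the condition is a compile-time constant, so the dead branch is still eliminated:

	/* Before (sketch): branch chosen by the preprocessor. */
	#ifdef __32_BITS__
		printf("[address ]\n");
	#else
		printf("[address         ]\n");
	#endif

	/*
	 * After (sketch): ordinary C; sizeof(void *) is a compile-time
	 * constant, so the unused branch is optimized away anyway.
	 */
	if (sizeof(void *) <= 4)
		printf("[address ]\n");
	else
		printf("[address         ]\n");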

/*
 * Copyright (c) 2010 Jakub Jermar
 * Copyright (c) 2018 Jiri Svoboda
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_generic_proc
 * @{
 */

/**
 * @file
 * @brief Thread management functions.
 */

#include <assert.h>
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch/cycle.h>
#include <arch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/syswaitq.h>
#include <cpu.h>
#include <str.h>
#include <context.h>
#include <adt/list.h>
#include <adt/odict.h>
#include <time/clock.h>
#include <time/timeout.h>
#include <time/delay.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <mem.h>
#include <stdio.h>
#include <stdlib.h>
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>
#include <debug.h>

/** Thread states */
const char *thread_states[] = {
	"Invalid",
	"Running",
	"Sleeping",
	"Ready",
	"Entering",
	"Exiting",
	"Lingering"
};

/** Lock protecting the @c threads ordered dictionary.
 *
 * For locking rules, see the declaration thereof.
 */
IRQ_SPINLOCK_INITIALIZE(threads_lock);

/** Ordered dictionary of all threads by their address (i.e. pointer to
 * the thread_t structure).
 *
 * When a thread is found in the @c threads ordered dictionary, it is
 * guaranteed to exist as long as the @c threads_lock is held.
 *
 * Members are of type thread_t.
 */
odict_t threads;

IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
static thread_id_t last_tid = 0;

static slab_cache_t *thread_cache;

#ifdef CONFIG_FPU
slab_cache_t *fpu_context_cache;
#endif

static void *threads_getkey(odlink_t *);
static int threads_cmp(void *, void *);

/** Thread wrapper.
 *
 * This wrapper is provided to ensure that every thread makes a call to
 * thread_exit() when its implementing function returns.
 *
 * The thread is entered with interrupts disabled (as after
 * interrupts_disable()).
 *
 */
static void cushion(void)
{
	void (*f)(void *) = THREAD->thread_code;
	void *arg = THREAD->thread_arg;
	THREAD->last_cycle = get_cycle();

	/* This is where each thread wakes up after its creation */
	irq_spinlock_unlock(&THREAD->lock, false);
	interrupts_enable();

	f(arg);

	/* Accumulate accounting to the task */
	irq_spinlock_lock(&THREAD->lock, true);
	if (!THREAD->uncounted) {
		thread_update_accounting(true);
		uint64_t ucycles = THREAD->ucycles;
		THREAD->ucycles = 0;
		uint64_t kcycles = THREAD->kcycles;
		THREAD->kcycles = 0;

		irq_spinlock_pass(&THREAD->lock, &TASK->lock);
		TASK->ucycles += ucycles;
		TASK->kcycles += kcycles;
		irq_spinlock_unlock(&TASK->lock, true);
	} else
		irq_spinlock_unlock(&THREAD->lock, true);

	thread_exit();

	/* Not reached */
}

/** Initialization and allocation for thread_t structure
 *
 */
static errno_t thr_constructor(void *obj, unsigned int kmflags)
{
	thread_t *thread = (thread_t *) obj;

	irq_spinlock_initialize(&thread->lock, "thread_t_lock");
	link_initialize(&thread->rq_link);
	link_initialize(&thread->wq_link);
	link_initialize(&thread->th_link);

	/* call the architecture-specific part of the constructor */
	thr_constructor_arch(thread);

#ifdef CONFIG_FPU
	thread->saved_fpu_context = slab_alloc(fpu_context_cache,
	    FRAME_ATOMIC | kmflags);
	if (!thread->saved_fpu_context)
		return ENOMEM;
#endif /* CONFIG_FPU */

	/*
	 * Allocate the kernel stack from the low-memory to prevent an infinite
	 * nesting of TLB-misses when accessing the stack from the part of the
	 * TLB-miss handler written in C.
	 *
	 * Note that low-memory is safe to be used for the stack as it will be
	 * covered by the kernel identity mapping, which guarantees not to
	 * nest TLB-misses infinitely (either via some hardware mechanism or
	 * by the construction of the assembly-language part of the TLB-miss
	 * handler).
	 *
	 * This restriction can be lifted once each architecture provides
	 * a similar guarantee, for example, by locking the kernel stack
	 * in the TLB whenever it is allocated from the high-memory and the
	 * thread is being scheduled to run.
	 */
	kmflags |= FRAME_LOWMEM;
	kmflags &= ~FRAME_HIGHMEM;

	/*
	 * NOTE: All kernel stacks must be aligned to STACK_SIZE,
	 * see CURRENT.
	 */

	uintptr_t stack_phys =
	    frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);
	if (!stack_phys) {
#ifdef CONFIG_FPU
		assert(thread->saved_fpu_context);
		slab_free(fpu_context_cache, thread->saved_fpu_context);
#endif
		return ENOMEM;
	}

	thread->kstack = (uint8_t *) PA2KA(stack_phys);

#ifdef CONFIG_UDEBUG
	mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
#endif

	return EOK;
}

/** Destruction of thread_t object */
static size_t thr_destructor(void *obj)
{
	thread_t *thread = (thread_t *) obj;

	/* call the architecture-specific part of the destructor */
	thr_destructor_arch(thread);

	frame_free(KA2PA(thread->kstack), STACK_FRAMES);

#ifdef CONFIG_FPU
	assert(thread->saved_fpu_context);
	slab_free(fpu_context_cache, thread->saved_fpu_context);
#endif

	return STACK_FRAMES; /* number of frames freed */
}

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
	THREAD = NULL;

	atomic_store(&nrdy, 0);
	thread_cache = slab_cache_create("thread_t", sizeof(thread_t), 0,
	    thr_constructor, thr_destructor, 0);

#ifdef CONFIG_FPU
	fpu_context_cache = slab_cache_create("fpu_context_t",
	    sizeof(fpu_context_t), FPU_CONTEXT_ALIGN, NULL, NULL, 0);
#endif

	odict_initialize(&threads, threads_getkey, threads_cmp);
}

/** Wire thread to the given CPU
 *
 * @param thread Thread to be wired.
 * @param cpu CPU to wire the thread to.
 *
 */
void thread_wire(thread_t *thread, cpu_t *cpu)
{
	irq_spinlock_lock(&thread->lock, true);
	thread->cpu = cpu;
	thread->wired = true;
	irq_spinlock_unlock(&thread->lock, true);
}

/** Invoked right before thread_ready() readies the thread. thread is locked. */
static void before_thread_is_ready(thread_t *thread)
{
	assert(irq_spinlock_locked(&thread->lock));
}

/** Make thread ready
 *
 * Switch thread to the ready state.
 *
 * @param thread Thread to make ready.
 *
 */
void thread_ready(thread_t *thread)
{
	irq_spinlock_lock(&thread->lock, true);

	assert(thread->state != Ready);

	before_thread_is_ready(thread);

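	/*
	 * Each time a thread becomes ready it is pushed one run queue
	 * level lower, saturating at the lowest-priority queue
	 * (RQ_COUNT - 1).
	 */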
	int i = (thread->priority < RQ_COUNT - 1) ?
	    ++thread->priority : thread->priority;

	cpu_t *cpu;
	if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) {
		/* Cannot ready to another CPU */
		assert(thread->cpu != NULL);
		cpu = thread->cpu;
	} else if (thread->stolen) {
		/* Ready to the stealing CPU */
		cpu = CPU;
	} else if (thread->cpu) {
		/* Prefer the CPU on which the thread ran last */
		assert(thread->cpu != NULL);
		cpu = thread->cpu;
	} else {
		cpu = CPU;
	}

	thread->state = Ready;

	irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));

	/*
	 * Append thread to respective ready queue
	 * on respective processor.
	 */

	list_append(&thread->rq_link, &cpu->rq[i].rq);
	cpu->rq[i].n++;
	irq_spinlock_unlock(&(cpu->rq[i].lock), true);

	atomic_inc(&nrdy);
	atomic_inc(&cpu->nrdy);
}

/** Create new thread
 *
 * Create a new thread.
 *
 * @param func Thread's implementing function.
 * @param arg Thread's implementing function argument.
 * @param task Task to which the thread belongs. The caller must
 * guarantee that the task won't cease to exist during the
 * call. The task's lock may not be held.
 * @param flags Thread flags.
 * @param name Symbolic name (a copy is made).
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (*func)(void *), void *arg, task_t *task,
    thread_flags_t flags, const char *name)
{
	thread_t *thread = (thread_t *) slab_alloc(thread_cache, FRAME_ATOMIC);
	if (!thread)
		return NULL;

	if (thread_create_arch(thread, flags) != EOK) {
		slab_free(thread_cache, thread);
		return NULL;
	}

	/* Not needed, but good for debugging */
	memsetb(thread->kstack, STACK_SIZE, 0);

	irq_spinlock_lock(&tidlock, true);
	thread->tid = ++last_tid;
	irq_spinlock_unlock(&tidlock, true);

	memset(&thread->saved_context, 0, sizeof(thread->saved_context));
	context_set(&thread->saved_context, FADDR(cushion),
	    (uintptr_t) thread->kstack, STACK_SIZE);

	current_initialize((current_t *) thread->kstack);

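	/*
	 * interrupts_read() is taken while interrupts are disabled, so the
	 * saved context starts with interrupts off; cushion() enables them
	 * again after the initial unlock.
	 */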
	ipl_t ipl = interrupts_disable();
	thread->saved_context.ipl = interrupts_read();
	interrupts_restore(ipl);

	str_cpy(thread->name, THREAD_NAME_BUFLEN, name);

	thread->thread_code = func;
	thread->thread_arg = arg;
	thread->ucycles = 0;
	thread->kcycles = 0;
	thread->uncounted =
	    ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
	thread->priority = -1; /* Start in rq[0] */
	thread->cpu = NULL;
	thread->wired = false;
	thread->stolen = false;
	thread->uspace =
	    ((flags & THREAD_FLAG_USPACE) == THREAD_FLAG_USPACE);

	thread->nomigrate = 0;
	thread->state = Entering;

	timeout_initialize(&thread->sleep_timeout);
	thread->sleep_interruptible = false;
	thread->sleep_composable = false;
	thread->sleep_queue = NULL;
	thread->timeout_pending = false;

	thread->in_copy_from_uspace = false;
	thread->in_copy_to_uspace = false;

	thread->interrupted = false;
	thread->detached = false;
	waitq_initialize(&thread->join_wq);

	thread->task = task;

	thread->fpu_context_exists = false;
	thread->fpu_context_engaged = false;

	odlink_initialize(&thread->lthreads);

#ifdef CONFIG_UDEBUG
	/* Initialize debugging stuff */
	thread->btrace = false;
	udebug_thread_initialize(&thread->udebug);
#endif

	if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
		thread_attach(thread, task);

	return thread;
}

/** Destroy thread memory structure
 *
 * Detach thread from all queues, cpus etc. and destroy it.
 *
 * @param thread Thread to be destroyed.
 * @param irq_res Indicate whether it should unlock thread->lock
 * in interrupts-restore mode.
 *
 */
void thread_destroy(thread_t *thread, bool irq_res)
{
	assert(irq_spinlock_locked(&thread->lock));
	assert((thread->state == Exiting) || (thread->state == Lingering));
	assert(thread->task);
	assert(thread->cpu);

	irq_spinlock_lock(&thread->cpu->lock, false);
	if (thread->cpu->fpu_owner == thread)
		thread->cpu->fpu_owner = NULL;
	irq_spinlock_unlock(&thread->cpu->lock, false);

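	/*
	 * irq_spinlock_pass() unlocks the first lock and locks the second,
	 * carrying the saved interrupt state over to the new lock.
	 */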
	irq_spinlock_pass(&thread->lock, &threads_lock);

	odict_remove(&thread->lthreads);

	irq_spinlock_pass(&threads_lock, &thread->task->lock);

	/*
	 * Detach from the containing task.
	 */
	list_remove(&thread->th_link);
	irq_spinlock_unlock(&thread->task->lock, irq_res);

	/*
	 * Drop the reference to the containing task.
	 */
	task_release(thread->task);
	slab_free(thread_cache, thread);
}

/** Make the thread visible to the system.
 *
 * Attach the thread structure to the current task and make it visible in the
 * @c threads ordered dictionary.
 *
 * @param thread Thread to be attached to the task.
 * @param task Task to which the thread is to be attached.
 *
 */
void thread_attach(thread_t *thread, task_t *task)
{
	/*
	 * Attach to the specified task.
	 */
	irq_spinlock_lock(&task->lock, true);

	/* Hold a reference to the task. */
	task_hold(task);

	/* Must not count kbox thread into lifecount */
	if (thread->uspace)
		atomic_inc(&task->lifecount);

	list_append(&thread->th_link, &task->threads);

	irq_spinlock_pass(&task->lock, &threads_lock);

	/*
	 * Register this thread in the system-wide dictionary.
	 */
	odict_insert(&thread->lthreads, &threads, NULL);
	irq_spinlock_unlock(&threads_lock, true);
}

/** Terminate thread.
 *
 * End current thread execution and switch it to the exiting state.
 * All pending timeouts are executed.
 *
 */
void thread_exit(void)
{
	if (THREAD->uspace) {
#ifdef CONFIG_UDEBUG
		/* Generate udebug THREAD_E event */
		udebug_thread_e_event();

		/*
		 * This thread will not execute any code or system calls from
		 * now on.
		 */
		udebug_stoppable_begin();
#endif
		if (atomic_predec(&TASK->lifecount) == 0) {
			/*
			 * We are the last userspace thread in the task that
			 * still has not exited. With the exception of the
			 * moment the task was created, new userspace threads
			 * can only be created by threads of the same task.
			 * We are safe to perform cleanup.
			 *
			 */
			ipc_cleanup();
			sys_waitq_task_cleanup();
			LOG("Cleanup of task %" PRIu64 " completed.", TASK->taskid);
		}
	}

restart:
	irq_spinlock_lock(&THREAD->lock, true);
	if (THREAD->timeout_pending) {
		/* Busy waiting for timeouts in progress */
		irq_spinlock_unlock(&THREAD->lock, true);
		goto restart;
	}

	THREAD->state = Exiting;
	irq_spinlock_unlock(&THREAD->lock, true);

	scheduler();

	panic("should never be reached");
}

/** Interrupts an existing thread so that it may exit as soon as possible.
 *
 * Threads that are blocked waiting for a synchronization primitive
 * are woken up with a return code of EINTR if the
 * blocking call was interruptible. See waitq_sleep_timeout().
 *
 * The caller must guarantee the thread object is valid during the entire
 * function, e.g. by holding the threads_lock lock.
 *
 * Interrupted threads automatically exit when returning back to user space.
 *
 * @param thread A valid thread object. The caller must guarantee it
 * will remain valid until thread_interrupt() exits.
 */
void thread_interrupt(thread_t *thread)
{
	assert(thread != NULL);

	irq_spinlock_lock(&thread->lock, true);

	thread->interrupted = true;
	bool sleeping = (thread->state == Sleeping);

	irq_spinlock_unlock(&thread->lock, true);

	if (sleeping)
		waitq_interrupt_sleep(thread);
}

/** Returns true if the thread was interrupted.
 *
 * @param thread A valid thread object. The caller must guarantee it will
 * stay alive during the entire call.
 * @return true if the thread was already interrupted via thread_interrupt().
 */
bool thread_interrupted(thread_t *thread)
{
	assert(thread != NULL);

	bool interrupted;

	irq_spinlock_lock(&thread->lock, true);
	interrupted = thread->interrupted;
	irq_spinlock_unlock(&thread->lock, true);

	return interrupted;
}

/** Prevent the current thread from being migrated to another processor. */
void thread_migration_disable(void)
{
	assert(THREAD);

	THREAD->nomigrate++;
}

/** Allow the current thread to be migrated to another processor. */
void thread_migration_enable(void)
{
	assert(THREAD);
	assert(THREAD->nomigrate > 0);

	if (THREAD->nomigrate > 0)
		THREAD->nomigrate--;
}

/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(uint32_t sec)
{
	/*
	 * Sleep in steps of at most 1000 seconds so that the
	 * microsecond count passed to thread_usleep() below
	 * fits into its uint32_t argument.
	 */
	while (sec > 0) {
		uint32_t period = (sec > 1000) ? 1000 : sec;

		thread_usleep(period * 1000000);
		sec -= period;
	}
}

/** Wait for another thread to exit.
 *
 * @param thread Thread to join on exit.
 * @param usec Timeout in microseconds.
 * @param flags Mode of operation.
 *
 * @return An error code from errno.h or an error code from synch.h.
 *
 */
errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
{
	if (thread == THREAD)
		return EINVAL;

	/*
	 * Since thread join can only be called once on an undetached thread,
	 * the thread pointer is guaranteed to be still valid.
	 */

	irq_spinlock_lock(&thread->lock, true);
	assert(!thread->detached);
	irq_spinlock_unlock(&thread->lock, true);

	return waitq_sleep_timeout(&thread->join_wq, usec, flags, NULL);

	// FIXME: join should deallocate the thread.
	// Current code calls detach after join, that's contrary to how
	// join is used in other threading APIs.
}

/** Detach thread.
 *
 * Mark the thread as detached. If the thread is already
 * in the Lingering state, deallocate its resources.
 *
 * @param thread Thread to be detached.
 *
 */
void thread_detach(thread_t *thread)
{
	/*
	 * Since the thread is expected not to be already detached,
	 * pointer to it must be still valid.
	 */
	irq_spinlock_lock(&thread->lock, true);
	assert(!thread->detached);

	if (thread->state == Lingering) {
		/*
		 * Unlock &thread->lock and restore
		 * interrupts in thread_destroy().
		 */
		thread_destroy(thread, true);
		return;
	} else {
		thread->detached = true;
	}

	irq_spinlock_unlock(&thread->lock, true);
}

/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(uint32_t usec)
{
	waitq_t wq;

	waitq_initialize(&wq);

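	/*
	 * Nobody ever wakes up this private wait queue,
	 * so the sleep can only end by timing out.
	 */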
	(void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
}

static void thread_print(thread_t *thread, bool additional)
{
	uint64_t ucycles, kcycles;
	char usuffix, ksuffix;
	order_suffix(thread->ucycles, &ucycles, &usuffix);
	order_suffix(thread->kcycles, &kcycles, &ksuffix);

	char *name;
	if (str_cmp(thread->name, "uinit") == 0)
		name = thread->task->name;
	else
		name = thread->name;

	if (additional)
		printf("%-8" PRIu64 " %p %p %9" PRIu64 "%c %9" PRIu64 "%c ",
		    thread->tid, thread->thread_code, thread->kstack,
		    ucycles, usuffix, kcycles, ksuffix);
	else
		printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
		    thread->tid, name, thread, thread_states[thread->state],
		    thread->task, thread->task->container);

	if (additional) {
		if (thread->cpu)
			printf("%-5u", thread->cpu->id);
		else
			printf("none ");

		if (thread->state == Sleeping) {
			printf(" %p", thread->sleep_queue);
		}

		printf("\n");
	}
}

/** Print list of threads debug info
 *
 * @param additional Print additional information.
 *
 */
void thread_print_list(bool additional)
{
	thread_t *thread;

	/* Messing with thread structures, avoid deadlock */
	irq_spinlock_lock(&threads_lock, true);

	if (sizeof(void *) <= 4) {
		if (additional)
			printf("[id ] [code ] [stack ] [ucycles ] [kcycles ]"
			    " [cpu] [waitqueue]\n");
		else
			printf("[id ] [name ] [address ] [state ] [task ]"
			    " [ctn]\n");
	} else {
		if (additional) {
			printf("[id ] [code ] [stack ] [ucycles ] [kcycles ]"
			    " [cpu] [waitqueue ]\n");
		} else
			printf("[id ] [name ] [address ] [state ]"
			    " [task ] [ctn]\n");
	}

	thread = thread_first();
	while (thread != NULL) {
		thread_print(thread, additional);
		thread = thread_next(thread);
	}

	irq_spinlock_unlock(&threads_lock, true);
}

/** Check whether a thread exists.
 *
 * Note that threads_lock must be already held and
 * interrupts must be already disabled.
 *
 * @param thread Pointer to thread.
 *
 * @return True if the thread is known to the system, false otherwise.
 *
 */
bool thread_exists(thread_t *thread)
{
	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	odlink_t *odlink = odict_find_eq(&threads, thread, NULL);
	return odlink != NULL;
}

/** Update accounting of current thread.
 *
 * Note that THREAD->lock must be already held and
 * interrupts must be already disabled.
 *
 * @param user True to update user accounting, false for kernel.
 *
 */
void thread_update_accounting(bool user)
{
	uint64_t time = get_cycle();

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&THREAD->lock));

	if (user)
		THREAD->ucycles += time - THREAD->last_cycle;
	else
		THREAD->kcycles += time - THREAD->last_cycle;

	THREAD->last_cycle = time;
}

/** Find thread structure corresponding to thread ID.
 *
 * The threads_lock must be already held by the caller of this function and
 * interrupts must be disabled.
 *
 * @param thread_id Thread ID.
 *
 * @return Thread structure address or NULL if there is no such thread ID.
 *
 */
thread_t *thread_find_by_id(thread_id_t thread_id)
{
	thread_t *thread;

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

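	/*
	 * The dictionary is ordered by thread address, not by thread ID,
	 * so looking a thread up by ID is a linear walk.
	 */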
	thread = thread_first();
	while (thread != NULL) {
		if (thread->tid == thread_id)
			return thread;

		thread = thread_next(thread);
	}

	return NULL;
}

/** Get count of threads.
 *
 * @return Number of threads in the system
 */
size_t thread_count(void)
{
	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	return odict_count(&threads);
}

/** Get first thread.
 *
 * @return Pointer to first thread or @c NULL if there are none.
 */
thread_t *thread_first(void)
{
	odlink_t *odlink;

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	odlink = odict_first(&threads);
	if (odlink == NULL)
		return NULL;

	return odict_get_instance(odlink, thread_t, lthreads);
}

/** Get next thread.
 *
 * @param cur Current thread
 * @return Pointer to next thread or @c NULL if there are no more threads.
 */
thread_t *thread_next(thread_t *cur)
{
	odlink_t *odlink;

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&threads_lock));

	odlink = odict_next(&cur->lthreads, &threads);
	if (odlink == NULL)
		return NULL;

	return odict_get_instance(odlink, thread_t, lthreads);
}

#ifdef CONFIG_UDEBUG

void thread_stack_trace(thread_id_t thread_id)
{
	irq_spinlock_lock(&threads_lock, true);

	thread_t *thread = thread_find_by_id(thread_id);
	if (thread == NULL) {
		printf("No such thread.\n");
		irq_spinlock_unlock(&threads_lock, true);
		return;
	}

	irq_spinlock_lock(&thread->lock, false);

	/*
	 * Schedule a stack trace to be printed
	 * just before the thread is scheduled next.
	 *
	 * If the thread is sleeping then try to interrupt
	 * the sleep. Any request for printing a uspace stack
	 * trace from within the kernel should always be
	 * considered a debugging means of last resort, therefore
	 * forcing the thread's sleep to be interrupted
	 * is probably justifiable.
	 */

	bool sleeping = false;
	istate_t *istate = thread->udebug.uspace_state;
	if (istate != NULL) {
		printf("Scheduling thread stack trace.\n");
		thread->btrace = true;
		if (thread->state == Sleeping)
			sleeping = true;
	} else
		printf("Thread interrupt state not available.\n");

	irq_spinlock_unlock(&thread->lock, false);

	if (sleeping)
		waitq_interrupt_sleep(thread);

	irq_spinlock_unlock(&threads_lock, true);
}

#endif /* CONFIG_UDEBUG */

/** Get key function for the @c threads ordered dictionary.
 *
 * @param odlink Link
 * @return Pointer to thread structure cast as 'void *'
 */
static void *threads_getkey(odlink_t *odlink)
{
	thread_t *thread = odict_get_instance(odlink, thread_t, lthreads);
	return (void *) thread;
}

/** Key comparison function for the @c threads ordered dictionary.
 *
 * Note that the keys are compared in descending pointer order; any
 * consistent total order works for the dictionary.
 *
 * @param a Pointer to thread A
 * @param b Pointer to thread B
 * @return -1, 0, 1 iff pointer to A is greater than, equal to, less
 * than pointer to B
 */
static int threads_cmp(void *a, void *b)
{
	if (a > b)
		return -1;
	else if (a == b)
		return 0;
	else
		return +1;
}

/** Process syscall to create new thread.
 *
 */
sys_errno_t sys_thread_create(uspace_ptr_uspace_arg_t uspace_uarg, uspace_ptr_char uspace_name,
    size_t name_len, uspace_ptr_thread_id_t uspace_thread_id)
{
	if (name_len > THREAD_NAME_BUFLEN - 1)
		name_len = THREAD_NAME_BUFLEN - 1;

	char namebuf[THREAD_NAME_BUFLEN];
	errno_t rc = copy_from_uspace(namebuf, uspace_name, name_len);
	if (rc != EOK)
		return (sys_errno_t) rc;

	namebuf[name_len] = 0;

	/*
	 * In case of failure, kernel_uarg will be deallocated in this function.
	 * In case of success, kernel_uarg will be freed in uinit().
	 */
	uspace_arg_t *kernel_uarg =
	    (uspace_arg_t *) malloc(sizeof(uspace_arg_t));
	if (!kernel_uarg)
		return (sys_errno_t) ENOMEM;

	rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
	if (rc != EOK) {
		free(kernel_uarg);
		return (sys_errno_t) rc;
	}

	thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
	    THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf);
	if (thread) {
		if (uspace_thread_id) {
			rc = copy_to_uspace(uspace_thread_id, &thread->tid,
			    sizeof(thread->tid));
			if (rc != EOK) {
				/*
				 * We have encountered a failure, but the thread
				 * has already been created. We need to undo its
				 * creation now.
				 */

				/*
				 * The new thread structure is initialized, but
				 * is still not visible to the system.
				 * We can safely deallocate it.
				 */
				slab_free(thread_cache, thread);
				free(kernel_uarg);

				return (sys_errno_t) rc;
			}
		}

#ifdef CONFIG_UDEBUG
		/*
		 * Generate udebug THREAD_B event and attach the thread.
		 * This must be done atomically (with the debug locks held),
		 * otherwise we would either miss some thread or receive
		 * THREAD_B events for threads that already existed
		 * and could be detected with THREAD_READ before.
		 */
		udebug_thread_b_event_attach(thread, TASK);
#else
		thread_attach(thread, TASK);
#endif
		thread_ready(thread);

		return 0;
	} else
		free(kernel_uarg);

	return (sys_errno_t) ENOMEM;
}

/** Process syscall to terminate thread.
 *
 */
sys_errno_t sys_thread_exit(int uspace_status)
{
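	/* thread_exit() never returns, hence no return statement. */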
	thread_exit();
}

/** Syscall for getting TID.
 *
 * @param uspace_thread_id Userspace address of 8-byte buffer where to store
 * current thread ID.
 *
 * @return 0 on success or an error code from @ref errno.h.
 *
 */
sys_errno_t sys_thread_get_id(uspace_ptr_thread_id_t uspace_thread_id)
{
	/*
	 * No need to acquire lock on THREAD because tid
	 * remains constant for the lifespan of the thread.
	 *
	 */
	return (sys_errno_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
	    sizeof(THREAD->tid));
}

/** Syscall wrapper for sleeping. */
sys_errno_t sys_thread_usleep(uint32_t usec)
{
	thread_usleep(usec);
	return 0;
}

sys_errno_t sys_thread_udelay(uint32_t usec)
{
	delay(usec);
	return 0;
}

/** @}
 */