source: mainline/kernel/generic/src/proc/thread.c@ 036e97c

Last change on this file since 036e97c was edc64c0, checked in by Jakub Jermar <jakub@…>, 7 years ago

Zero out new thread's register context

This removes the information leak in which the new thread inherited some
register values from the thread which created it. Also, now each thread
begins execution with a well-defined register state.
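
The change described above is visible in thread_create() below, where the new
thread's saved register context is zeroed before its initial entry point and
stack are set (excerpt from the listing, line numbers omitted):

    memset(&thread->saved_context, 0, sizeof(thread->saved_context));
    context_set(&thread->saved_context, FADDR(cushion),
        (uintptr_t) thread->kstack, STACK_SIZE);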

Line 
1/*
2 * Copyright (c) 2010 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup genericproc
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Thread management functions.
36 */
37
38#include <assert.h>
39#include <proc/scheduler.h>
40#include <proc/thread.h>
41#include <proc/task.h>
42#include <mm/frame.h>
43#include <mm/page.h>
44#include <arch/asm.h>
45#include <arch/cycle.h>
46#include <arch.h>
47#include <synch/spinlock.h>
48#include <synch/waitq.h>
49#include <synch/workqueue.h>
50#include <synch/rcu.h>
51#include <cpu.h>
52#include <str.h>
53#include <context.h>
54#include <adt/avl.h>
55#include <adt/list.h>
56#include <time/clock.h>
57#include <time/timeout.h>
58#include <time/delay.h>
59#include <config.h>
60#include <arch/interrupt.h>
61#include <smp/ipi.h>
62#include <arch/faddr.h>
63#include <atomic.h>
64#include <mem.h>
65#include <print.h>
66#include <mm/slab.h>
67#include <main/uinit.h>
68#include <syscall/copy.h>
69#include <errno.h>
70
71/** Thread states */
72const char *thread_states[] = {
73 "Invalid",
74 "Running",
75 "Sleeping",
76 "Ready",
77 "Entering",
78 "Exiting",
79 "Lingering"
80};
81
82typedef struct {
83 thread_id_t thread_id;
84 thread_t *thread;
85} thread_iterator_t;
86
87/** Lock protecting the threads_tree AVL tree.
88 *
89 * For locking rules, see the declaration thereof.
90 *
91 */
92IRQ_SPINLOCK_INITIALIZE(threads_lock);
93
94/** AVL tree of all threads.
95 *
96 * When a thread is found in the threads_tree AVL tree, it is guaranteed to
97 * exist as long as the threads_lock is held.
98 *
99 */
100avltree_t threads_tree;
101
102IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
103static thread_id_t last_tid = 0;
104
105static slab_cache_t *thread_cache;
106
107#ifdef CONFIG_FPU
108slab_cache_t *fpu_context_cache;
109#endif
110
111/** Thread wrapper.
112 *
113 * This wrapper is provided to ensure that every thread makes a call to
114 * thread_exit() when its implementing function returns.
115 *
116 * interrupts_disable() is assumed.
117 *
118 */
119static void cushion(void)
120{
121 void (*f)(void *) = THREAD->thread_code;
122 void *arg = THREAD->thread_arg;
123 THREAD->last_cycle = get_cycle();
124
125 /* This is where each thread wakes up after its creation */
126 irq_spinlock_unlock(&THREAD->lock, false);
127 interrupts_enable();
128
129 f(arg);
130
131 /* Accumulate accounting to the task */
132 irq_spinlock_lock(&THREAD->lock, true);
133 if (!THREAD->uncounted) {
134 thread_update_accounting(true);
135 uint64_t ucycles = THREAD->ucycles;
136 THREAD->ucycles = 0;
137 uint64_t kcycles = THREAD->kcycles;
138 THREAD->kcycles = 0;
139
140 irq_spinlock_pass(&THREAD->lock, &TASK->lock);
141 TASK->ucycles += ucycles;
142 TASK->kcycles += kcycles;
143 irq_spinlock_unlock(&TASK->lock, true);
144 } else
145 irq_spinlock_unlock(&THREAD->lock, true);
146
147 thread_exit();
148
149 /* Not reached */
150}
151
152/** Initialization and allocation for thread_t structure
153 *
154 */
155static errno_t thr_constructor(void *obj, unsigned int kmflags)
156{
157 thread_t *thread = (thread_t *) obj;
158
159 irq_spinlock_initialize(&thread->lock, "thread_t_lock");
160 link_initialize(&thread->rq_link);
161 link_initialize(&thread->wq_link);
162 link_initialize(&thread->th_link);
163
164 /* call the architecture-specific part of the constructor */
165 thr_constructor_arch(thread);
166
167#ifdef CONFIG_FPU
168#ifdef CONFIG_FPU_LAZY
169 thread->saved_fpu_context = NULL;
170#else /* CONFIG_FPU_LAZY */
171 thread->saved_fpu_context = slab_alloc(fpu_context_cache, kmflags);
172 if (!thread->saved_fpu_context)
173 return ENOMEM;
174#endif /* CONFIG_FPU_LAZY */
175#endif /* CONFIG_FPU */
176
177 /*
178 * Allocate the kernel stack from low memory to prevent an infinite
179 * nesting of TLB-misses when accessing the stack from the part of the
180 * TLB-miss handler written in C.
181 *
182 * Note that low memory is safe to use for the stack as it will be
183 * covered by the kernel identity mapping, which guarantees not to
184 * nest TLB-misses infinitely (either via some hardware mechanism or
185 * by the construction of the assembly-language part of the TLB-miss
186 * handler).
187 *
188 * This restriction can be lifted once each architecture provides
189 * a similar guarantee, for example by locking the kernel stack
190 * in the TLB whenever it is allocated from high memory and the
191 * thread is being scheduled to run.
192 */
193 kmflags |= FRAME_LOWMEM;
194 kmflags &= ~FRAME_HIGHMEM;
195
196 uintptr_t stack_phys =
197 frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);
198 if (!stack_phys) {
199#ifdef CONFIG_FPU
200 if (thread->saved_fpu_context)
201 slab_free(fpu_context_cache, thread->saved_fpu_context);
202#endif
203 return ENOMEM;
204 }
205
206 thread->kstack = (uint8_t *) PA2KA(stack_phys);
207
208#ifdef CONFIG_UDEBUG
209 mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
210#endif
211
212 return EOK;
213}
214
215/** Destruction of thread_t object */
216static size_t thr_destructor(void *obj)
217{
218 thread_t *thread = (thread_t *) obj;
219
220 /* call the architecture-specific part of the destructor */
221 thr_destructor_arch(thread);
222
223 frame_free(KA2PA(thread->kstack), STACK_FRAMES);
224
225#ifdef CONFIG_FPU
226 if (thread->saved_fpu_context)
227 slab_free(fpu_context_cache, thread->saved_fpu_context);
228#endif
229
230 return STACK_FRAMES; /* number of frames freed */
231}
232
233/** Initialize threads
234 *
235 * Initialize kernel threads support.
236 *
237 */
238void thread_init(void)
239{
240 THREAD = NULL;
241
242 atomic_set(&nrdy, 0);
243 thread_cache = slab_cache_create("thread_t", sizeof(thread_t), 0,
244 thr_constructor, thr_destructor, 0);
245
246#ifdef CONFIG_FPU
247 fpu_context_cache = slab_cache_create("fpu_context_t",
248 sizeof(fpu_context_t), FPU_CONTEXT_ALIGN, NULL, NULL, 0);
249#endif
250
251 avltree_create(&threads_tree);
252}
253
254/** Wire thread to the given CPU
255 *
256 * @param cpu CPU to wire the thread to.
257 *
258 */
259void thread_wire(thread_t *thread, cpu_t *cpu)
260{
261 irq_spinlock_lock(&thread->lock, true);
262 thread->cpu = cpu;
263 thread->wired = true;
264 irq_spinlock_unlock(&thread->lock, true);
265}
266
267/** Invoked right before thread_ready() readies the thread. thread is locked. */
268static void before_thread_is_ready(thread_t *thread)
269{
270 assert(irq_spinlock_locked(&thread->lock));
271 workq_before_thread_is_ready(thread);
272}
273
274/** Make thread ready
275 *
276 * Switch thread to the ready state.
277 *
278 * @param thread Thread to make ready.
279 *
280 */
281void thread_ready(thread_t *thread)
282{
283 irq_spinlock_lock(&thread->lock, true);
284
285 assert(thread->state != Ready);
286
287 before_thread_is_ready(thread);
288
289 int i = (thread->priority < RQ_COUNT - 1) ?
290 ++thread->priority : thread->priority;
291
292 cpu_t *cpu;
293 if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) {
294 /* Cannot ready to another CPU */
295 assert(thread->cpu != NULL);
296 cpu = thread->cpu;
297 } else if (thread->stolen) {
298 /* Ready to the stealing CPU */
299 cpu = CPU;
300 } else if (thread->cpu) {
301 /* Prefer the CPU on which the thread ran last */
302 assert(thread->cpu != NULL);
303 cpu = thread->cpu;
304 } else {
305 cpu = CPU;
306 }
307
308 thread->state = Ready;
309
310 irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
311
312 /*
313 * Append thread to respective ready queue
314 * on respective processor.
315 */
316
317 list_append(&thread->rq_link, &cpu->rq[i].rq);
318 cpu->rq[i].n++;
319 irq_spinlock_unlock(&(cpu->rq[i].lock), true);
320
321 atomic_inc(&nrdy);
322 atomic_inc(&cpu->nrdy);
323}
324
325/** Create new thread
326 *
327 * Create a new thread.
328 *
329 * @param func Thread's implementing function.
330 * @param arg Thread's implementing function argument.
331 * @param task Task to which the thread belongs. The caller must
332 * guarantee that the task won't cease to exist during the
333 * call. The task's lock may not be held.
334 * @param flags Thread flags.
335 * @param name Symbolic name (a copy is made).
336 *
337 * @return New thread's structure on success, NULL on failure.
338 *
339 */
340thread_t *thread_create(void (*func)(void *), void *arg, task_t *task,
341 thread_flags_t flags, const char *name)
342{
343 thread_t *thread = (thread_t *) slab_alloc(thread_cache, 0);
344 if (!thread)
345 return NULL;
346
347 /* Not needed, but good for debugging */
348 memsetb(thread->kstack, STACK_SIZE, 0);
349
350 irq_spinlock_lock(&tidlock, true);
351 thread->tid = ++last_tid;
352 irq_spinlock_unlock(&tidlock, true);
353
354 memset(&thread->saved_context, 0, sizeof(thread->saved_context));
355 context_set(&thread->saved_context, FADDR(cushion),
356 (uintptr_t) thread->kstack, STACK_SIZE);
357
358 the_initialize((the_t *) thread->kstack);
359
360 ipl_t ipl = interrupts_disable();
361 thread->saved_context.ipl = interrupts_read();
362 interrupts_restore(ipl);
363
364 str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
365
366 thread->thread_code = func;
367 thread->thread_arg = arg;
368 thread->ticks = -1;
369 thread->ucycles = 0;
370 thread->kcycles = 0;
371 thread->uncounted =
372 ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
373 thread->priority = -1; /* Start in rq[0] */
374 thread->cpu = NULL;
375 thread->wired = false;
376 thread->stolen = false;
377 thread->uspace =
378 ((flags & THREAD_FLAG_USPACE) == THREAD_FLAG_USPACE);
379
380 thread->nomigrate = 0;
381 thread->state = Entering;
382
383 timeout_initialize(&thread->sleep_timeout);
384 thread->sleep_interruptible = false;
385 thread->sleep_composable = false;
386 thread->sleep_queue = NULL;
387 thread->timeout_pending = false;
388
389 thread->in_copy_from_uspace = false;
390 thread->in_copy_to_uspace = false;
391
392 thread->interrupted = false;
393 thread->detached = false;
394 waitq_initialize(&thread->join_wq);
395
396 thread->task = task;
397
398 thread->workq = NULL;
399
400 thread->fpu_context_exists = false;
401 thread->fpu_context_engaged = false;
402
403 avltree_node_initialize(&thread->threads_tree_node);
404 thread->threads_tree_node.key = (uintptr_t) thread;
405
406#ifdef CONFIG_UDEBUG
407 /* Initialize debugging stuff */
408 thread->btrace = false;
409 udebug_thread_initialize(&thread->udebug);
410#endif
411
412 /* Might depend on previous initialization */
413 thread_create_arch(thread);
414
415 rcu_thread_init(thread);
416
417 if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
418 thread_attach(thread, task);
419
420 return thread;
421}
422
423/** Destroy thread memory structure
424 *
425 * Detach thread from all queues, cpus etc. and destroy it.
426 *
427 * @param thread Thread to be destroyed.
428 * @param irq_res Indicate whether it should unlock thread->lock
429 * in interrupts-restore mode.
430 *
431 */
432void thread_destroy(thread_t *thread, bool irq_res)
433{
434 assert(irq_spinlock_locked(&thread->lock));
435 assert((thread->state == Exiting) || (thread->state == Lingering));
436 assert(thread->task);
437 assert(thread->cpu);
438
439 irq_spinlock_lock(&thread->cpu->lock, false);
440 if (thread->cpu->fpu_owner == thread)
441 thread->cpu->fpu_owner = NULL;
442 irq_spinlock_unlock(&thread->cpu->lock, false);
443
444 irq_spinlock_pass(&thread->lock, &threads_lock);
445
446 avltree_delete(&threads_tree, &thread->threads_tree_node);
447
448 irq_spinlock_pass(&threads_lock, &thread->task->lock);
449
450 /*
451 * Detach from the containing task.
452 */
453 list_remove(&thread->th_link);
454 irq_spinlock_unlock(&thread->task->lock, irq_res);
455
456 /*
457 * Drop the reference to the containing task.
458 */
459 task_release(thread->task);
460 slab_free(thread_cache, thread);
461}
462
463/** Make the thread visible to the system.
464 *
465 * Attach the thread structure to the current task and make it visible in the
466 * threads_tree.
467 *
468 * @param t Thread to be attached to the task.
469 * @param task Task to which the thread is to be attached.
470 *
471 */
472void thread_attach(thread_t *thread, task_t *task)
473{
474 /*
475 * Attach to the specified task.
476 */
477 irq_spinlock_lock(&task->lock, true);
478
479 /* Hold a reference to the task. */
480 task_hold(task);
481
482 /* Must not count kbox thread into lifecount */
483 if (thread->uspace)
484 atomic_inc(&task->lifecount);
485
486 list_append(&thread->th_link, &task->threads);
487
488 irq_spinlock_pass(&task->lock, &threads_lock);
489
490 /*
491 * Register this thread in the system-wide list.
492 */
493 avltree_insert(&threads_tree, &thread->threads_tree_node);
494 irq_spinlock_unlock(&threads_lock, true);
495}
496
497/** Terminate thread.
498 *
499 * End current thread execution and switch it to the exiting state.
500 * All pending timeouts are executed.
501 *
502 */
503void thread_exit(void)
504{
505 if (THREAD->uspace) {
506#ifdef CONFIG_UDEBUG
507 /* Generate udebug THREAD_E event */
508 udebug_thread_e_event();
509
510 /*
511 * This thread will not execute any code or system calls from
512 * now on.
513 */
514 udebug_stoppable_begin();
515#endif
516 if (atomic_predec(&TASK->lifecount) == 0) {
517 /*
518 * We are the last userspace thread in the task that
519 * still has not exited. With the exception of the
520 * moment the task was created, new userspace threads
521 * can only be created by threads of the same task.
522 * We are safe to perform cleanup.
523 *
524 */
525 ipc_cleanup();
526 futex_task_cleanup();
527 LOG("Cleanup of task %" PRIu64 " completed.", TASK->taskid);
528 }
529 }
530
531restart:
532 irq_spinlock_lock(&THREAD->lock, true);
533 if (THREAD->timeout_pending) {
534 /* Busy waiting for timeouts in progress */
535 irq_spinlock_unlock(&THREAD->lock, true);
536 goto restart;
537 }
538
539 THREAD->state = Exiting;
540 irq_spinlock_unlock(&THREAD->lock, true);
541
542 scheduler();
543
544 /* Not reached */
545 while (true)
546 ;
547}
548
549/** Interrupts an existing thread so that it may exit as soon as possible.
550 *
551 * Threads that are blocked waiting for a synchronization primitive
552 * are woken up with a return code of EINTR if the
553 * blocking call was interruptible. See waitq_sleep_timeout().
554 *
555 * The caller must guarantee the thread object is valid during the entire
556 * function, e.g. by holding the threads_lock lock.
557 *
558 * Interrupted threads automatically exit when returning back to user space.
559 *
560 * @param thread A valid thread object. The caller must guarantee it
561 * will remain valid until thread_interrupt() exits.
562 */
563void thread_interrupt(thread_t *thread)
564{
565 assert(thread != NULL);
566
567 irq_spinlock_lock(&thread->lock, true);
568
569 thread->interrupted = true;
570 bool sleeping = (thread->state == Sleeping);
571
572 irq_spinlock_unlock(&thread->lock, true);
573
574 if (sleeping)
575 waitq_interrupt_sleep(thread);
576}
577
578/** Returns true if the thread was interrupted.
579 *
580 * @param thread A valid thread object. The caller must guarantee it will
581 * be alive during the entire call.
582 * @return true if the thread was already interrupted via thread_interrupt().
583 */
584bool thread_interrupted(thread_t *thread)
585{
586 assert(thread != NULL);
587
588 bool interrupted;
589
590 irq_spinlock_lock(&thread->lock, true);
591 interrupted = thread->interrupted;
592 irq_spinlock_unlock(&thread->lock, true);
593
594 return interrupted;
595}
596
597/** Prevent the current thread from being migrated to another processor. */
598void thread_migration_disable(void)
599{
600 assert(THREAD);
601
602 THREAD->nomigrate++;
603}
604
605/** Allow the current thread to be migrated to another processor. */
606void thread_migration_enable(void)
607{
608 assert(THREAD);
609 assert(THREAD->nomigrate > 0);
610
611 if (THREAD->nomigrate > 0)
612 THREAD->nomigrate--;
613}
614
615/** Thread sleep
616 *
617 * Suspend execution of the current thread.
618 *
619 * @param sec Number of seconds to sleep.
620 *
621 */
622void thread_sleep(uint32_t sec)
623{
624 /*
625 * Sleep in 1000 second steps to support
626 * full argument range
627 */
628 while (sec > 0) {
629 uint32_t period = (sec > 1000) ? 1000 : sec;
630
631 thread_usleep(period * 1000000);
632 sec -= period;
633 }
634}
635
636/** Wait for another thread to exit.
637 *
638 * @param thread Thread to join on exit.
639 * @param usec Timeout in microseconds.
640 * @param flags Mode of operation.
641 *
642 * @return An error code from errno.h or an error code from synch.h.
643 *
644 */
645errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
646{
647 if (thread == THREAD)
648 return EINVAL;
649
650 /*
651 * Since thread join can only be called once on an undetached thread,
652 * the thread pointer is guaranteed to be still valid.
653 */
654
655 irq_spinlock_lock(&thread->lock, true);
656 assert(!thread->detached);
657 irq_spinlock_unlock(&thread->lock, true);
658
659 return waitq_sleep_timeout(&thread->join_wq, usec, flags, NULL);
660}
661
662/** Detach thread.
663 *
664 * Mark the thread as detached. If the thread is already
665 * in the Lingering state, deallocate its resources.
666 *
667 * @param thread Thread to be detached.
668 *
669 */
670void thread_detach(thread_t *thread)
671{
672 /*
673 * Since the thread is expected not to be detached already,
674 * the pointer to it must still be valid.
675 */
676 irq_spinlock_lock(&thread->lock, true);
677 assert(!thread->detached);
678
679 if (thread->state == Lingering) {
680 /*
681 * Unlock &thread->lock and restore
682 * interrupts in thread_destroy().
683 */
684 thread_destroy(thread, true);
685 return;
686 } else {
687 thread->detached = true;
688 }
689
690 irq_spinlock_unlock(&thread->lock, true);
691}
692
693/** Thread usleep
694 *
695 * Suspend execution of the current thread.
696 *
697 * @param usec Number of microseconds to sleep.
698 *
699 */
700void thread_usleep(uint32_t usec)
701{
702 waitq_t wq;
703
704 waitq_initialize(&wq);
705
706 (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
707}
708
709static bool thread_walker(avltree_node_t *node, void *arg)
710{
711 bool *additional = (bool *) arg;
712 thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);
713
714 uint64_t ucycles, kcycles;
715 char usuffix, ksuffix;
716 order_suffix(thread->ucycles, &ucycles, &usuffix);
717 order_suffix(thread->kcycles, &kcycles, &ksuffix);
718
719 char *name;
720 if (str_cmp(thread->name, "uinit") == 0)
721 name = thread->task->name;
722 else
723 name = thread->name;
724
725#ifdef __32_BITS__
726 if (*additional)
727 printf("%-8" PRIu64 " %10p %10p %9" PRIu64 "%c %9" PRIu64 "%c ",
728 thread->tid, thread->thread_code, thread->kstack,
729 ucycles, usuffix, kcycles, ksuffix);
730 else
731 printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 "\n",
732 thread->tid, name, thread, thread_states[thread->state],
733 thread->task, thread->task->container);
734#endif
735
736#ifdef __64_BITS__
737 if (*additional)
738 printf("%-8" PRIu64 " %18p %18p\n"
739 " %9" PRIu64 "%c %9" PRIu64 "%c ",
740 thread->tid, thread->thread_code, thread->kstack,
741 ucycles, usuffix, kcycles, ksuffix);
742 else
743 printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n",
744 thread->tid, name, thread, thread_states[thread->state],
745 thread->task, thread->task->container);
746#endif
747
748 if (*additional) {
749 if (thread->cpu)
750 printf("%-5u", thread->cpu->id);
751 else
752 printf("none ");
753
754 if (thread->state == Sleeping) {
755#ifdef __32_BITS__
756 printf(" %10p", thread->sleep_queue);
757#endif
758
759#ifdef __64_BITS__
760 printf(" %18p", thread->sleep_queue);
761#endif
762 }
763
764 printf("\n");
765 }
766
767 return true;
768}
769
770/** Print list of threads debug info
771 *
772 * @param additional Print additional information.
773 *
774 */
775void thread_print_list(bool additional)
776{
777 /* Messing with thread structures, avoid deadlock */
778 irq_spinlock_lock(&threads_lock, true);
779
780#ifdef __32_BITS__
781 if (additional)
782 printf("[id ] [code ] [stack ] [ucycles ] [kcycles ]"
783 " [cpu] [waitqueue]\n");
784 else
785 printf("[id ] [name ] [address ] [state ] [task ]"
786 " [ctn]\n");
787#endif
788
789#ifdef __64_BITS__
790 if (additional) {
791 printf("[id ] [code ] [stack ]\n"
792 " [ucycles ] [kcycles ] [cpu] [waitqueue ]\n");
793 } else
794 printf("[id ] [name ] [address ] [state ]"
795 " [task ] [ctn]\n");
796#endif
797
798 avltree_walk(&threads_tree, thread_walker, &additional);
799
800 irq_spinlock_unlock(&threads_lock, true);
801}
802
803/** Check whether thread exists.
804 *
805 * Note that threads_lock must be already held and
806 * interrupts must be already disabled.
807 *
808 * @param thread Pointer to thread.
809 *
810 * @return True if thread t is known to the system, false otherwise.
811 *
812 */
813bool thread_exists(thread_t *thread)
814{
815 assert(interrupts_disabled());
816 assert(irq_spinlock_locked(&threads_lock));
817
818 avltree_node_t *node =
819 avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) thread));
820
821 return node != NULL;
822}
823
824/** Update accounting of current thread.
825 *
826 * Note that thread_lock on THREAD must be already held and
827 * interrupts must be already disabled.
828 *
829 * @param user True to update user accounting, false for kernel.
830 *
831 */
832void thread_update_accounting(bool user)
833{
834 uint64_t time = get_cycle();
835
836 assert(interrupts_disabled());
837 assert(irq_spinlock_locked(&THREAD->lock));
838
839 if (user)
840 THREAD->ucycles += time - THREAD->last_cycle;
841 else
842 THREAD->kcycles += time - THREAD->last_cycle;
843
844 THREAD->last_cycle = time;
845}
846
847static bool thread_search_walker(avltree_node_t *node, void *arg)
848{
849 thread_t *thread =
850 (thread_t *) avltree_get_instance(node, thread_t, threads_tree_node);
851 thread_iterator_t *iterator = (thread_iterator_t *) arg;
852
853 if (thread->tid == iterator->thread_id) {
854 iterator->thread = thread;
855 return false;
856 }
857
858 return true;
859}
860
861/** Find thread structure corresponding to thread ID.
862 *
863 * The threads_lock must be already held by the caller of this function and
864 * interrupts must be disabled.
865 *
866 * @param id Thread ID.
867 *
868 * @return Thread structure address or NULL if there is no such thread ID.
869 *
870 */
871thread_t *thread_find_by_id(thread_id_t thread_id)
872{
873 assert(interrupts_disabled());
874 assert(irq_spinlock_locked(&threads_lock));
875
876 thread_iterator_t iterator;
877
878 iterator.thread_id = thread_id;
879 iterator.thread = NULL;
880
881 avltree_walk(&threads_tree, thread_search_walker, (void *) &iterator);
882
883 return iterator.thread;
884}
885
886#ifdef CONFIG_UDEBUG
887
888void thread_stack_trace(thread_id_t thread_id)
889{
890 irq_spinlock_lock(&threads_lock, true);
891
892 thread_t *thread = thread_find_by_id(thread_id);
893 if (thread == NULL) {
894 printf("No such thread.\n");
895 irq_spinlock_unlock(&threads_lock, true);
896 return;
897 }
898
899 irq_spinlock_lock(&thread->lock, false);
900
901 /*
902 * Schedule a stack trace to be printed
903 * just before the thread is scheduled next.
904 *
905 * If the thread is sleeping then try to interrupt
906 * the sleep. Any request for printing a uspace stack
907 * trace from within the kernel should always be
908 * considered a last-resort debugging means; therefore,
909 * forcing the thread's sleep to be interrupted
910 * is probably justifiable.
911 */
912
913 bool sleeping = false;
914 istate_t *istate = thread->udebug.uspace_state;
915 if (istate != NULL) {
916 printf("Scheduling thread stack trace.\n");
917 thread->btrace = true;
918 if (thread->state == Sleeping)
919 sleeping = true;
920 } else
921 printf("Thread interrupt state not available.\n");
922
923 irq_spinlock_unlock(&thread->lock, false);
924
925 if (sleeping)
926 waitq_interrupt_sleep(thread);
927
928 irq_spinlock_unlock(&threads_lock, true);
929}
930
931#endif /* CONFIG_UDEBUG */
932
933/** Process syscall to create new thread.
934 *
935 */
936sys_errno_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name,
937 size_t name_len, thread_id_t *uspace_thread_id)
938{
939 if (name_len > THREAD_NAME_BUFLEN - 1)
940 name_len = THREAD_NAME_BUFLEN - 1;
941
942 char namebuf[THREAD_NAME_BUFLEN];
943 errno_t rc = copy_from_uspace(namebuf, uspace_name, name_len);
944 if (rc != EOK)
945 return (sys_errno_t) rc;
946
947 namebuf[name_len] = 0;
948
949 /*
950 * In case of failure, kernel_uarg will be deallocated in this function.
951 * In case of success, kernel_uarg will be freed in uinit().
952 */
953 uspace_arg_t *kernel_uarg =
954 (uspace_arg_t *) malloc(sizeof(uspace_arg_t));
955 if (!kernel_uarg)
956 return (sys_errno_t) ENOMEM;
957
958 rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
959 if (rc != EOK) {
960 free(kernel_uarg);
961 return (sys_errno_t) rc;
962 }
963
964 thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
965 THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf);
966 if (thread) {
967 if (uspace_thread_id != NULL) {
968 rc = copy_to_uspace(uspace_thread_id, &thread->tid,
969 sizeof(thread->tid));
970 if (rc != EOK) {
971 /*
972 * We have encountered a failure, but the thread
973 * has already been created. We need to undo its
974 * creation now.
975 */
976
977 /*
978 * The new thread structure is initialized, but
979 * is still not visible to the system.
980 * We can safely deallocate it.
981 */
982 slab_free(thread_cache, thread);
983 free(kernel_uarg);
984
985 return (sys_errno_t) rc;
986 }
987 }
988
989#ifdef CONFIG_UDEBUG
990 /*
991 * Generate udebug THREAD_B event and attach the thread.
992 * This must be done atomically (with the debug locks held),
993 * otherwise we would either miss some thread or receive
994 * THREAD_B events for threads that already existed
995 * and could be detected with THREAD_READ before.
996 */
997 udebug_thread_b_event_attach(thread, TASK);
998#else
999 thread_attach(thread, TASK);
1000#endif
1001 thread_ready(thread);
1002
1003 return 0;
1004 } else
1005 free(kernel_uarg);
1006
1007 return (sys_errno_t) ENOMEM;
1008}
1009
1010/** Process syscall to terminate thread.
1011 *
1012 */
1013sys_errno_t sys_thread_exit(int uspace_status)
1014{
1015 thread_exit();
1016}
1017
1018/** Syscall for getting TID.
1019 *
1020 * @param uspace_thread_id Userspace address of 8-byte buffer where to store
1021 * current thread ID.
1022 *
1023 * @return 0 on success or an error code from @ref errno.h.
1024 *
1025 */
1026sys_errno_t sys_thread_get_id(thread_id_t *uspace_thread_id)
1027{
1028 /*
1029 * No need to acquire lock on THREAD because tid
1030 * remains constant for the lifespan of the thread.
1031 *
1032 */
1033 return (sys_errno_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
1034 sizeof(THREAD->tid));
1035}
1036
1037/** Syscall wrapper for sleeping. */
1038sys_errno_t sys_thread_usleep(uint32_t usec)
1039{
1040 thread_usleep(usec);
1041 return 0;
1042}
1043
1044sys_errno_t sys_thread_udelay(uint32_t usec)
1045{
1046 delay(usec);
1047 return 0;
1048}
1049
1050/** @}
1051 */
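
Usage illustration (not part of thread.c): a minimal sketch of how kernel code
might create and start a thread with the API above. It assumes the declarations
from proc/thread.h and proc/task.h are in scope; THREAD_FLAG_NONE and the other
flag names should be checked against the current headers.

    /* Hypothetical example, not part of the file above. */
    static void worker(void *arg)
    {
        (void) arg;
        /* ... do the work; when this function returns, cushion() calls
           thread_exit() on the thread's behalf. */
    }

    static void spawn_worker(void)
    {
        /*
         * Create a kernel thread in the current task. Because
         * THREAD_FLAG_NOATTACH is not passed, thread_create() attaches
         * the thread to the task automatically.
         */
        thread_t *thread = thread_create(worker, NULL, TASK,
            THREAD_FLAG_NONE, "worker");
        if (thread == NULL)
            return;  /* out of memory */

        /* Switch the thread to the Ready state and enqueue it on a CPU. */
        thread_ready(thread);
    }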