source: mainline/kernel/generic/src/proc/thread.c@5df1963

Last change on this file was 5df1963, checked in by Martin Decky <martin@…>, 12 years ago:

The bitmap frame allocator does not keep track of the size of allocated frame blocks; to avoid memory leaks, the number of allocated frames needs to be passed explicitly during deallocation.

/*
 * Copyright (c) 2010 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Thread management functions.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch/cycle.h>
#include <arch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <cpu.h>
#include <str.h>
#include <context.h>
#include <adt/avl.h>
#include <adt/list.h>
#include <time/clock.h>
#include <time/timeout.h>
#include <time/delay.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <memstr.h>
#include <print.h>
#include <mm/slab.h>
#include <debug.h>
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>

/** Thread states */
const char *thread_states[] = {
    "Invalid",
    "Running",
    "Sleeping",
    "Ready",
    "Entering",
    "Exiting",
    "Lingering"
};

typedef struct {
    thread_id_t thread_id;
    thread_t *thread;
} thread_iterator_t;

/** Lock protecting the threads_tree AVL tree.
 *
 * For locking rules, see the declaration thereof.
 *
 */
IRQ_SPINLOCK_INITIALIZE(threads_lock);

/** AVL tree of all threads.
 *
 * When a thread is found in the threads_tree AVL tree, it is guaranteed to
 * exist as long as the threads_lock is held.
 *
 */
avltree_t threads_tree;

IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
static thread_id_t last_tid = 0;

static slab_cache_t *thread_slab;

#ifdef CONFIG_FPU
slab_cache_t *fpu_context_slab;
#endif

/** Thread wrapper.
 *
 * This wrapper is provided to ensure that every thread makes a call to
 * thread_exit() when its implementing function returns.
 *
 * interrupts_disable() is assumed.
 *
 */
static void cushion(void)
{
    void (*f)(void *) = THREAD->thread_code;
    void *arg = THREAD->thread_arg;
    THREAD->last_cycle = get_cycle();

    /* This is where each thread wakes up after its creation */
    irq_spinlock_unlock(&THREAD->lock, false);
    interrupts_enable();

    f(arg);

    /* Accumulate accounting to the task */
    irq_spinlock_lock(&THREAD->lock, true);
    if (!THREAD->uncounted) {
        thread_update_accounting(true);
        uint64_t ucycles = THREAD->ucycles;
        THREAD->ucycles = 0;
        uint64_t kcycles = THREAD->kcycles;
        THREAD->kcycles = 0;

        irq_spinlock_pass(&THREAD->lock, &TASK->lock);
        TASK->ucycles += ucycles;
        TASK->kcycles += kcycles;
        irq_spinlock_unlock(&TASK->lock, true);
    } else
        irq_spinlock_unlock(&THREAD->lock, true);

    thread_exit();

    /* Not reached */
}

/** Initialization and allocation for thread_t structure
 *
 */
static int thr_constructor(void *obj, unsigned int kmflags)
{
    thread_t *thread = (thread_t *) obj;

    irq_spinlock_initialize(&thread->lock, "thread_t_lock");
    link_initialize(&thread->rq_link);
    link_initialize(&thread->wq_link);
    link_initialize(&thread->th_link);

    /* call the architecture-specific part of the constructor */
    thr_constructor_arch(thread);

#ifdef CONFIG_FPU
#ifdef CONFIG_FPU_LAZY
    thread->saved_fpu_context = NULL;
#else /* CONFIG_FPU_LAZY */
    thread->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
    if (!thread->saved_fpu_context)
        return -1;
#endif /* CONFIG_FPU_LAZY */
#endif /* CONFIG_FPU */

    /*
     * Allocate the kernel stack from low memory to prevent an infinite
     * nesting of TLB misses when accessing the stack from the part of the
     * TLB-miss handler written in C.
     *
     * Note that low memory is safe to use for the stack as it will be
     * covered by the kernel identity mapping, which guarantees not to
     * nest TLB misses infinitely (either via some hardware mechanism or
     * by the construction of the assembly-language part of the TLB-miss
     * handler).
     *
     * This restriction can be lifted once each architecture provides
     * a similar guarantee, for example by locking the kernel stack
     * in the TLB whenever it is allocated from the high memory and the
     * thread is being scheduled to run.
     */
    kmflags |= FRAME_LOWMEM;
    kmflags &= ~FRAME_HIGHMEM;

    thread->kstack = (uint8_t *)
        PA2KA(frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1));
    if (!thread->kstack) {
#ifdef CONFIG_FPU
        if (thread->saved_fpu_context)
            slab_free(fpu_context_slab, thread->saved_fpu_context);
#endif
        return -1;
    }

#ifdef CONFIG_UDEBUG
    mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
#endif

    return 0;
}

/** Destruction of thread_t object */
static size_t thr_destructor(void *obj)
{
    thread_t *thread = (thread_t *) obj;

    /* call the architecture-specific part of the destructor */
    thr_destructor_arch(thread);

    frame_free(KA2PA(thread->kstack), STACK_FRAMES);

#ifdef CONFIG_FPU
    if (thread->saved_fpu_context)
        slab_free(fpu_context_slab, thread->saved_fpu_context);
#endif

    return 1; /* One page freed */
}

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
    THREAD = NULL;

    atomic_set(&nrdy, 0);
    thread_slab = slab_cache_create("thread_t", sizeof(thread_t), 0,
        thr_constructor, thr_destructor, 0);

#ifdef CONFIG_FPU
    fpu_context_slab = slab_cache_create("fpu_context_t",
        sizeof(fpu_context_t), FPU_CONTEXT_ALIGN, NULL, NULL, 0);
#endif

    avltree_create(&threads_tree);
}

/** Wire thread to the given CPU
 *
 * @param thread Thread to be wired.
 * @param cpu    CPU to wire the thread to.
 *
 */
void thread_wire(thread_t *thread, cpu_t *cpu)
{
    irq_spinlock_lock(&thread->lock, true);
    thread->cpu = cpu;
    thread->wired = true;
    irq_spinlock_unlock(&thread->lock, true);
}

/** Make thread ready
 *
 * Switch thread to the ready state.
 *
 * @param thread Thread to make ready.
 *
 */
void thread_ready(thread_t *thread)
{
    irq_spinlock_lock(&thread->lock, true);

    ASSERT(thread->state != Ready);

    int i = (thread->priority < RQ_COUNT - 1) ?
        ++thread->priority : thread->priority;

    cpu_t *cpu;
    if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) {
        ASSERT(thread->cpu != NULL);
        cpu = thread->cpu;
    } else
        cpu = CPU;

    thread->state = Ready;

    irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));

    /*
     * Append thread to the respective ready queue
     * on the respective processor.
     */
    list_append(&thread->rq_link, &cpu->rq[i].rq);
    cpu->rq[i].n++;
    irq_spinlock_unlock(&(cpu->rq[i].lock), true);

    atomic_inc(&nrdy);
    // FIXME: Why is the avg value not used?
    // avg = atomic_get(&nrdy) / config.cpu_active;
    atomic_inc(&cpu->nrdy);
}

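/*
 * Illustrative note (not part of the original source): since the run-queue
 * index above pre-increments thread->priority, a thread created with
 * priority -1 ("start in rq[0]") enters rq[0] on its first thread_ready()
 * and drifts one queue lower (rq[1], rq[2], ...) on each subsequent
 * wakeup, saturating at rq[RQ_COUNT - 1].
 */
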
/** Create new thread
 *
 * Create a new thread.
 *
 * @param func  Thread's implementing function.
 * @param arg   Thread's implementing function argument.
 * @param task  Task to which the thread belongs. The caller must
 *              guarantee that the task won't cease to exist during the
 *              call. The task's lock may not be held.
 * @param flags Thread flags.
 * @param name  Symbolic name (a copy is made).
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (*func)(void *), void *arg, task_t *task,
    thread_flags_t flags, const char *name)
{
    thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0);
    if (!thread)
        return NULL;

    /* Not needed, but good for debugging */
    memsetb(thread->kstack, STACK_SIZE, 0);

    irq_spinlock_lock(&tidlock, true);
    thread->tid = ++last_tid;
    irq_spinlock_unlock(&tidlock, true);

    context_save(&thread->saved_context);
    context_set(&thread->saved_context, FADDR(cushion),
        (uintptr_t) thread->kstack, STACK_SIZE);

    the_initialize((the_t *) thread->kstack);

    ipl_t ipl = interrupts_disable();
    thread->saved_context.ipl = interrupts_read();
    interrupts_restore(ipl);

    str_cpy(thread->name, THREAD_NAME_BUFLEN, name);

    thread->thread_code = func;
    thread->thread_arg = arg;
    thread->ticks = -1;
    thread->ucycles = 0;
    thread->kcycles = 0;
    thread->uncounted =
        ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
    thread->priority = -1; /* Start in rq[0] */
    thread->cpu = NULL;
    thread->wired = false;
    thread->stolen = false;
    thread->uspace =
        ((flags & THREAD_FLAG_USPACE) == THREAD_FLAG_USPACE);

    thread->nomigrate = 0;
    thread->state = Entering;

    timeout_initialize(&thread->sleep_timeout);
    thread->sleep_interruptible = false;
    thread->sleep_queue = NULL;
    thread->timeout_pending = false;

    thread->in_copy_from_uspace = false;
    thread->in_copy_to_uspace = false;

    thread->interrupted = false;
    thread->detached = false;
    waitq_initialize(&thread->join_wq);

    thread->task = task;

    thread->fpu_context_exists = false;
    thread->fpu_context_engaged = false;

    avltree_node_initialize(&thread->threads_tree_node);
    thread->threads_tree_node.key = (uintptr_t) thread;

#ifdef CONFIG_UDEBUG
    /* Initialize debugging stuff */
    thread->btrace = false;
    udebug_thread_initialize(&thread->udebug);
#endif

    /* Might depend on previous initialization */
    thread_create_arch(thread);

    if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
        thread_attach(thread, task);

    return thread;
}

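/*
 * Usage sketch (illustrative only; worker() is a hypothetical function,
 * not defined in this file): a typical kernel caller creates a thread
 * and immediately makes it ready:
 *
 *     static void worker(void *arg)
 *     {
 *         // do work, then return; cushion() then calls thread_exit()
 *     }
 *
 *     thread_t *t = thread_create(worker, NULL, TASK, 0, "worker");
 *     if (t != NULL)
 *         thread_ready(t);
 *
 * With THREAD_FLAG_NOATTACH the caller must call thread_attach() itself
 * before thread_ready(), as sys_thread_create() below demonstrates.
 */
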
/** Destroy thread memory structure
 *
 * Detach thread from all queues, CPUs etc. and destroy it.
 *
 * @param thread  Thread to be destroyed.
 * @param irq_res Indicate whether it should unlock thread->lock
 *                in interrupts-restore mode.
 *
 */
void thread_destroy(thread_t *thread, bool irq_res)
{
    ASSERT(irq_spinlock_locked(&thread->lock));
    ASSERT((thread->state == Exiting) || (thread->state == Lingering));
    ASSERT(thread->task);
    ASSERT(thread->cpu);

    irq_spinlock_lock(&thread->cpu->lock, false);
    if (thread->cpu->fpu_owner == thread)
        thread->cpu->fpu_owner = NULL;
    irq_spinlock_unlock(&thread->cpu->lock, false);

    irq_spinlock_pass(&thread->lock, &threads_lock);

    avltree_delete(&threads_tree, &thread->threads_tree_node);

    irq_spinlock_pass(&threads_lock, &thread->task->lock);

    /*
     * Detach from the containing task.
     */
    list_remove(&thread->th_link);
    irq_spinlock_unlock(&thread->task->lock, irq_res);

    /*
     * Drop the reference to the containing task.
     */
    task_release(thread->task);
    slab_free(thread_slab, thread);
}

/** Make the thread visible to the system.
 *
 * Attach the thread structure to the current task and make it visible in the
 * threads_tree.
 *
 * @param thread Thread to be attached to the task.
 * @param task   Task to which the thread is to be attached.
 *
 */
void thread_attach(thread_t *thread, task_t *task)
{
    /*
     * Attach to the specified task.
     */
    irq_spinlock_lock(&task->lock, true);

    /* Hold a reference to the task. */
    task_hold(task);

    /* Must not count kbox thread into lifecount */
    if (thread->uspace)
        atomic_inc(&task->lifecount);

    list_append(&thread->th_link, &task->threads);

    irq_spinlock_pass(&task->lock, &threads_lock);

    /*
     * Register this thread in the system-wide list.
     */
    avltree_insert(&threads_tree, &thread->threads_tree_node);
    irq_spinlock_unlock(&threads_lock, true);
}

/** Terminate thread.
 *
 * End current thread execution and switch it to the exiting state.
 * All pending timeouts are executed.
 *
 */
void thread_exit(void)
{
    if (THREAD->uspace) {
#ifdef CONFIG_UDEBUG
        /* Generate udebug THREAD_E event */
        udebug_thread_e_event();

        /*
         * This thread will not execute any code or system calls from
         * now on.
         */
        udebug_stoppable_begin();
#endif
        if (atomic_predec(&TASK->lifecount) == 0) {
            /*
             * We are the last userspace thread in the task that
             * still has not exited. With the exception of the
             * moment the task was created, new userspace threads
             * can only be created by threads of the same task.
             * We are safe to perform cleanup.
             */
            ipc_cleanup();
            futex_cleanup();
            LOG("Cleanup of task %" PRIu64 " completed.", TASK->taskid);
        }
    }

restart:
    irq_spinlock_lock(&THREAD->lock, true);
    if (THREAD->timeout_pending) {
        /* Busy waiting for timeouts in progress */
        irq_spinlock_unlock(&THREAD->lock, true);
        goto restart;
    }

    THREAD->state = Exiting;
    irq_spinlock_unlock(&THREAD->lock, true);

    scheduler();

    /* Not reached */
    while (true);
}

/** Prevent the current thread from being migrated to another processor. */
void thread_migration_disable(void)
{
    ASSERT(THREAD);

    THREAD->nomigrate++;
}

/** Allow the current thread to be migrated to another processor. */
void thread_migration_enable(void)
{
    ASSERT(THREAD);
    ASSERT(THREAD->nomigrate > 0);

    if (THREAD->nomigrate > 0)
        THREAD->nomigrate--;
}

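/*
 * Illustrative pairing (sketch, not from the original source): code that
 * must not change CPUs brackets itself with the two calls above; the
 * nomigrate counter makes the sections nestable:
 *
 *     thread_migration_disable();
 *     // ... touch CPU-local state safely ...
 *     thread_migration_enable();
 */
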
/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(uint32_t sec)
{
    /*
     * Sleep in 1000-second steps to support the full argument range:
     * 1000 s is 10^9 us, which still fits into uint32_t, whereas
     * converting the whole argument to microseconds at once could
     * overflow.
     */
    while (sec > 0) {
        uint32_t period = (sec > 1000) ? 1000 : sec;

        thread_usleep(period * 1000000);
        sec -= period;
    }
}

/** Wait for another thread to exit.
 *
 * @param thread Thread to join on exit.
 * @param usec   Timeout in microseconds.
 * @param flags  Mode of operation.
 *
 * @return An error code from errno.h or an error code from synch.h.
 *
 */
int thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
{
    if (thread == THREAD)
        return EINVAL;

    /*
     * Since thread join can only be called once on an undetached thread,
     * the thread pointer is guaranteed to be still valid.
     */

    irq_spinlock_lock(&thread->lock, true);
    ASSERT(!thread->detached);
    irq_spinlock_unlock(&thread->lock, true);

    return waitq_sleep_timeout(&thread->join_wq, usec, flags);
}

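/*
 * Illustrative lifecycle sketch (SYNCH_NO_TIMEOUT and SYNCH_FLAGS_NONE are
 * assumed to come from synch.h; they are not used elsewhere in this file):
 * a creator that reclaims a thread synchronously joins it and then
 * detaches it, letting thread_detach() free the lingering thread:
 *
 *     (void) thread_join_timeout(t, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
 *     thread_detach(t);
 */
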
/** Detach thread.
 *
 * Mark the thread as detached. If the thread is already
 * in the Lingering state, deallocate its resources.
 *
 * @param thread Thread to be detached.
 *
 */
void thread_detach(thread_t *thread)
{
    /*
     * Since the thread is expected not to be already detached,
     * the pointer to it must still be valid.
     */
    irq_spinlock_lock(&thread->lock, true);
    ASSERT(!thread->detached);

    if (thread->state == Lingering) {
        /*
         * Unlock &thread->lock and restore
         * interrupts in thread_destroy().
         */
        thread_destroy(thread, true);
        return;
    } else {
        thread->detached = true;
    }

    irq_spinlock_unlock(&thread->lock, true);
}

/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(uint32_t usec)
{
    waitq_t wq;

    waitq_initialize(&wq);

    /* The local wait queue has no wakers, so the sleep ends via timeout. */
    (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
}

static bool thread_walker(avltree_node_t *node, void *arg)
{
    bool *additional = (bool *) arg;
    thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);

    uint64_t ucycles, kcycles;
    char usuffix, ksuffix;
    order_suffix(thread->ucycles, &ucycles, &usuffix);
    order_suffix(thread->kcycles, &kcycles, &ksuffix);

    char *name;
    if (str_cmp(thread->name, "uinit") == 0)
        name = thread->task->name;
    else
        name = thread->name;

#ifdef __32_BITS__
    if (*additional)
        printf("%-8" PRIu64 " %10p %10p %9" PRIu64 "%c %9" PRIu64 "%c ",
            thread->tid, thread->thread_code, thread->kstack,
            ucycles, usuffix, kcycles, ksuffix);
    else
        printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 "\n",
            thread->tid, name, thread, thread_states[thread->state],
            thread->task, thread->task->container);
#endif

#ifdef __64_BITS__
    if (*additional)
        printf("%-8" PRIu64 " %18p %18p\n"
            "         %9" PRIu64 "%c %9" PRIu64 "%c ",
            thread->tid, thread->thread_code, thread->kstack,
            ucycles, usuffix, kcycles, ksuffix);
    else
        printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n",
            thread->tid, name, thread, thread_states[thread->state],
            thread->task, thread->task->container);
#endif

    if (*additional) {
        if (thread->cpu)
            printf("%-5u", thread->cpu->id);
        else
            printf("none ");

        if (thread->state == Sleeping) {
#ifdef __32_BITS__
            printf(" %10p", thread->sleep_queue);
#endif

#ifdef __64_BITS__
            printf(" %18p", thread->sleep_queue);
#endif
        }

        printf("\n");
    }

    return true;
}

/** Print list of threads debug info
 *
 * @param additional Print additional information.
 *
 */
void thread_print_list(bool additional)
{
    /* Messing with thread structures, avoid deadlock */
    irq_spinlock_lock(&threads_lock, true);

#ifdef __32_BITS__
    if (additional)
        printf("[id    ] [code    ] [stack   ] [ucycles ] [kcycles ]"
            " [cpu] [waitqueue]\n");
    else
        printf("[id    ] [name        ] [address ] [state ] [task    ]"
            " [ctn]\n");
#endif

#ifdef __64_BITS__
    if (additional) {
        printf("[id    ] [code            ] [stack           ]\n"
            "         [ucycles ] [kcycles ] [cpu] [waitqueue       ]\n");
    } else
        printf("[id    ] [name        ] [address         ] [state ]"
            " [task            ] [ctn]\n");
#endif

    avltree_walk(&threads_tree, thread_walker, &additional);

    irq_spinlock_unlock(&threads_lock, true);
}

/** Check whether thread exists.
 *
 * Note that threads_lock must be already held and
 * interrupts must be already disabled.
 *
 * @param thread Pointer to thread.
 *
 * @return True if the thread is known to the system, false otherwise.
 *
 */
bool thread_exists(thread_t *thread)
{
    ASSERT(interrupts_disabled());
    ASSERT(irq_spinlock_locked(&threads_lock));

    avltree_node_t *node =
        avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) thread));

    return node != NULL;
}

/** Update accounting of current thread.
 *
 * Note that thread_lock on THREAD must be already held and
 * interrupts must be already disabled.
 *
 * @param user True to update user accounting, false for kernel.
 *
 */
void thread_update_accounting(bool user)
{
    uint64_t time = get_cycle();

    ASSERT(interrupts_disabled());
    ASSERT(irq_spinlock_locked(&THREAD->lock));

    if (user)
        THREAD->ucycles += time - THREAD->last_cycle;
    else
        THREAD->kcycles += time - THREAD->last_cycle;

    THREAD->last_cycle = time;
}

static bool thread_search_walker(avltree_node_t *node, void *arg)
{
    thread_t *thread =
        (thread_t *) avltree_get_instance(node, thread_t, threads_tree_node);
    thread_iterator_t *iterator = (thread_iterator_t *) arg;

    if (thread->tid == iterator->thread_id) {
        iterator->thread = thread;
        return false;
    }

    return true;
}

/** Find thread structure corresponding to thread ID.
 *
 * The threads_lock must be already held by the caller of this function and
 * interrupts must be disabled.
 *
 * @param thread_id Thread ID.
 *
 * @return Thread structure address or NULL if there is no such thread ID.
 *
 */
thread_t *thread_find_by_id(thread_id_t thread_id)
{
    ASSERT(interrupts_disabled());
    ASSERT(irq_spinlock_locked(&threads_lock));

    thread_iterator_t iterator;

    iterator.thread_id = thread_id;
    iterator.thread = NULL;

    avltree_walk(&threads_tree, thread_search_walker, (void *) &iterator);

    return iterator.thread;
}

#ifdef CONFIG_UDEBUG

void thread_stack_trace(thread_id_t thread_id)
{
    irq_spinlock_lock(&threads_lock, true);

    thread_t *thread = thread_find_by_id(thread_id);
    if (thread == NULL) {
        printf("No such thread.\n");
        irq_spinlock_unlock(&threads_lock, true);
        return;
    }

    irq_spinlock_lock(&thread->lock, false);

    /*
     * Schedule a stack trace to be printed
     * just before the thread is scheduled next.
     *
     * If the thread is sleeping then try to interrupt
     * the sleep. Any request for printing a uspace stack
     * trace from within the kernel should always be
     * considered a last-resort debugging means, therefore
     * forcing the thread's sleep to be interrupted
     * is probably justifiable.
     */

    bool sleeping = false;
    istate_t *istate = thread->udebug.uspace_state;
    if (istate != NULL) {
        printf("Scheduling thread stack trace.\n");
        thread->btrace = true;
        if (thread->state == Sleeping)
            sleeping = true;
    } else
        printf("Thread interrupt state not available.\n");

    irq_spinlock_unlock(&thread->lock, false);

    if (sleeping)
        waitq_interrupt_sleep(thread);

    irq_spinlock_unlock(&threads_lock, true);
}

#endif /* CONFIG_UDEBUG */

/** Process syscall to create new thread.
 *
 */
sysarg_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name,
    size_t name_len, thread_id_t *uspace_thread_id)
{
    if (name_len > THREAD_NAME_BUFLEN - 1)
        name_len = THREAD_NAME_BUFLEN - 1;

    char namebuf[THREAD_NAME_BUFLEN];
    int rc = copy_from_uspace(namebuf, uspace_name, name_len);
    if (rc != 0)
        return (sysarg_t) rc;

    namebuf[name_len] = 0;

    /*
     * In case of failure, kernel_uarg will be deallocated in this function.
     * In case of success, kernel_uarg will be freed in uinit().
     */
    uspace_arg_t *kernel_uarg =
        (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);

    rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
    if (rc != 0) {
        free(kernel_uarg);
        return (sysarg_t) rc;
    }

    thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
        THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf);
    if (thread) {
        if (uspace_thread_id != NULL) {
            rc = copy_to_uspace(uspace_thread_id, &thread->tid,
                sizeof(thread->tid));
            if (rc != 0) {
                /*
                 * We have encountered a failure, but the thread
                 * has already been created. We need to undo its
                 * creation now.
                 */

                /*
                 * The new thread structure is initialized, but
                 * is still not visible to the system.
                 * We can safely deallocate it.
                 */
                slab_free(thread_slab, thread);
                free(kernel_uarg);

                return (sysarg_t) rc;
            }
        }

#ifdef CONFIG_UDEBUG
        /*
         * Generate udebug THREAD_B event and attach the thread.
         * This must be done atomically (with the debug locks held),
         * otherwise we would either miss some thread or receive
         * THREAD_B events for threads that already existed
         * and could be detected with THREAD_READ before.
         */
        udebug_thread_b_event_attach(thread, TASK);
#else
        thread_attach(thread, TASK);
#endif
        thread_ready(thread);

        return 0;
    } else
        free(kernel_uarg);

    return (sysarg_t) ENOMEM;
}

/** Process syscall to terminate thread.
 *
 */
sysarg_t sys_thread_exit(int uspace_status)
{
    thread_exit();

    /* Unreachable */
    return 0;
}

/** Syscall for getting TID.
 *
 * @param uspace_thread_id Userspace address of 8-byte buffer where to store
 *                         current thread ID.
 *
 * @return 0 on success or an error code from @ref errno.h.
 *
 */
sysarg_t sys_thread_get_id(thread_id_t *uspace_thread_id)
{
    /*
     * No need to acquire lock on THREAD because tid
     * remains constant for the lifespan of the thread.
     */
    return (sysarg_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
        sizeof(THREAD->tid));
}

/** Syscall wrapper for sleeping. */
sysarg_t sys_thread_usleep(uint32_t usec)
{
    thread_usleep(usec);
    return 0;
}

/** Syscall wrapper for short busy-wait delays. */
sysarg_t sys_thread_udelay(uint32_t usec)
{
    delay(usec);
    return 0;
}

/** @}
 */