source: mainline/generic/src/proc/thread.c@ 279952c

Last change on this file since 279952c was 0182a665, checked in by Jakub Jermar <jakub@…>, 19 years ago

Fix double thread_join() in ktaskgc.
In thread_create(), lock TASK with interrupts disabled again.
thread_join_timeout() can use ordinary waitq_sleep_timeout().

  • Property mode set to 100644
File size: 13.5 KB
Rev Line
[f761f1eb]1/*
2 * Copyright (C) 2001-2004 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[9179d0a]29/**
30 * @file thread.c
31 * @brief Thread management functions.
32 */
33
[f761f1eb]34#include <proc/scheduler.h>
35#include <proc/thread.h>
36#include <proc/task.h>
[0f250f9]37#include <proc/uarg.h>
[f761f1eb]38#include <mm/frame.h>
39#include <mm/page.h>
40#include <arch/asm.h>
41#include <arch.h>
42#include <synch/synch.h>
43#include <synch/spinlock.h>
44#include <synch/waitq.h>
45#include <synch/rwlock.h>
46#include <cpu.h>
47#include <func.h>
48#include <context.h>
[016acbe]49#include <adt/btree.h>
[5c9a08b]50#include <adt/list.h>
[f761f1eb]51#include <typedefs.h>
52#include <time/clock.h>
[4ffa9e0]53#include <config.h>
54#include <arch/interrupt.h>
[26a8604f]55#include <smp/ipi.h>
[f2ffad4]56#include <arch/faddr.h>
[23684b7]57#include <atomic.h>
[9c0a9b3]58#include <memstr.h>
[55ab0f1]59#include <print.h>
[266294a9]60#include <mm/slab.h>
61#include <debug.h>
[9f52563]62#include <main/uinit.h>
[e3c762cd]63#include <syscall/copy.h>
64#include <errno.h>
[f761f1eb]65
[fe19611]66
67/** Thread states */
68char *thread_states[] = {
69 "Invalid",
70 "Running",
71 "Sleeping",
72 "Ready",
73 "Entering",
74 "Exiting",
75 "Undead"
76};
[f761f1eb]77
[88169d9]78/** Lock protecting the threads_btree B+tree. For locking rules, see declaration thereof. */
[016acbe]79SPINLOCK_INITIALIZE(threads_lock);
[88169d9]80
81/** B+tree of all threads.
82 *
83 * When a thread is found in the threads_btree B+tree, it is guaranteed to exist as long
84 * as the threads_lock is held.
85 */
86btree_t threads_btree;
[f761f1eb]87
[dc747e3]88SPINLOCK_INITIALIZE(tidlock);
[f761f1eb]89__u32 last_tid = 0;
90
[266294a9]91static slab_cache_t *thread_slab;
[f76fed4]92#ifdef ARCH_HAS_FPU
93slab_cache_t *fpu_context_slab;
94#endif
[266294a9]95
[70527f1]96/** Thread wrapper
97 *
98 * This wrapper is provided to ensure that every thread
[f761f1eb]99 * makes a call to thread_exit() when its implementing
100 * function returns.
101 *
[22f7769]102 * interrupts_disable() is assumed.
[70527f1]103 *
[f761f1eb]104 */
[e16e036a]105static void cushion(void)
[f761f1eb]106{
[43114c5]107 void (*f)(void *) = THREAD->thread_code;
108 void *arg = THREAD->thread_arg;
[f761f1eb]109
[3e1607f]110 /* this is where each thread wakes up after its creation */
[43114c5]111 spinlock_unlock(&THREAD->lock);
[22f7769]112 interrupts_enable();
[f761f1eb]113
114 f(arg);
115 thread_exit();
116 /* not reached */
117}
118
[266294a9]119/** Initialization and allocation for thread_t structure */
120static int thr_constructor(void *obj, int kmflags)
121{
122 thread_t *t = (thread_t *)obj;
[085d973]123 pfn_t pfn;
[2a46e10]124 int status;
[266294a9]125
126 spinlock_initialize(&t->lock, "thread_t_lock");
127 link_initialize(&t->rq_link);
128 link_initialize(&t->wq_link);
129 link_initialize(&t->th_link);
130
[f76fed4]131#ifdef ARCH_HAS_FPU
132# ifdef CONFIG_FPU_LAZY
133 t->saved_fpu_context = NULL;
134# else
135 t->saved_fpu_context = slab_alloc(fpu_context_slab,kmflags);
136 if (!t->saved_fpu_context)
137 return -1;
138# endif
139#endif
140
[a82500ce]141 pfn = frame_alloc_rc(STACK_FRAMES, FRAME_KA | kmflags,&status);
[f76fed4]142 if (status) {
143#ifdef ARCH_HAS_FPU
144 if (t->saved_fpu_context)
145 slab_free(fpu_context_slab,t->saved_fpu_context);
146#endif
[266294a9]147 return -1;
[f76fed4]148 }
[2a46e10]149 t->kstack = (__u8 *)PA2KA(PFN2ADDR(pfn));
[266294a9]150
151 return 0;
152}
153
154/** Destruction of thread_t object */
155static int thr_destructor(void *obj)
156{
157 thread_t *t = (thread_t *)obj;
158
[085d973]159 frame_free(ADDR2PFN(KA2PA(t->kstack)));
[f76fed4]160#ifdef ARCH_HAS_FPU
161 if (t->saved_fpu_context)
162 slab_free(fpu_context_slab,t->saved_fpu_context);
163#endif
[266294a9]164 return 1; /* One page freed */
165}
[70527f1]166
167/** Initialize threads
168 *
169 * Initialize kernel threads support.
170 *
171 */
[f761f1eb]172void thread_init(void)
173{
[43114c5]174 THREAD = NULL;
[80d2bdb]175 atomic_set(&nrdy,0);
[266294a9]176 thread_slab = slab_cache_create("thread_slab",
177 sizeof(thread_t),0,
178 thr_constructor, thr_destructor, 0);
[f76fed4]179#ifdef ARCH_HAS_FPU
180 fpu_context_slab = slab_cache_create("fpu_slab",
181 sizeof(fpu_context_t),
182 FPU_CONTEXT_ALIGN,
183 NULL, NULL, 0);
184#endif
[f761f1eb]185
[016acbe]186 btree_create(&threads_btree);
187}
[70527f1]188
189/** Make thread ready
190 *
191 * Switch thread t to the ready state.
192 *
193 * @param t Thread to make ready.
194 *
195 */
[f761f1eb]196void thread_ready(thread_t *t)
197{
198 cpu_t *cpu;
199 runq_t *r;
[22f7769]200 ipl_t ipl;
[80d2bdb]201 int i, avg;
[f761f1eb]202
[22f7769]203 ipl = interrupts_disable();
[f761f1eb]204
205 spinlock_lock(&t->lock);
206
[fbcfd458]207 ASSERT(! (t->state == Ready));
208
[22f7769]209 i = (t->priority < RQ_COUNT -1) ? ++t->priority : t->priority;
[f761f1eb]210
[8262010]211 cpu = CPU;
[f761f1eb]212 if (t->flags & X_WIRED) {
213 cpu = t->cpu;
214 }
[81c4c6da]215 t->state = Ready;
[f761f1eb]216 spinlock_unlock(&t->lock);
217
[70527f1]218 /*
[f761f1eb]219 * Append t to respective ready queue on respective processor.
220 */
221 r = &cpu->rq[i];
222 spinlock_lock(&r->lock);
223 list_append(&t->rq_link, &r->rq_head);
224 r->n++;
225 spinlock_unlock(&r->lock);
226
[59e07c91]227 atomic_inc(&nrdy);
[80d2bdb]228 avg = atomic_get(&nrdy) / config.cpu_active;
[248fc1a]229 atomic_inc(&cpu->nrdy);
[76cec1e]230
[22f7769]231 interrupts_restore(ipl);
[f761f1eb]232}
233
[266294a9]234/** Destroy thread memory structure
235 *
236 * Detach thread from all queues, cpus etc. and destroy it.
237 *
238 * Assume thread->lock is held (it will be released by this function).
239 */
240void thread_destroy(thread_t *t)
241{
[7509ddc]242 bool destroy_task = false;
243
[c778c1a]244 ASSERT(t->state == Exiting || t->state == Undead);
[266294a9]245 ASSERT(t->task);
246 ASSERT(t->cpu);
247
248 spinlock_lock(&t->cpu->lock);
249 if(t->cpu->fpu_owner==t)
250 t->cpu->fpu_owner=NULL;
251 spinlock_unlock(&t->cpu->lock);
252
[7509ddc]253 spinlock_unlock(&t->lock);
254
255 spinlock_lock(&threads_lock);
256 btree_remove(&threads_btree, (btree_key_t) ((__address ) t), NULL);
257 spinlock_unlock(&threads_lock);
258
[266294a9]259 /*
260 * Detach from the containing task.
261 */
262 spinlock_lock(&t->task->lock);
263 list_remove(&t->th_link);
[7509ddc]264 if (--t->task->refcount == 0) {
265 t->task->accept_new_threads = false;
266 destroy_task = true;
267 }
268 spinlock_unlock(&t->task->lock);
[266294a9]269
[7509ddc]270 if (destroy_task)
271 task_destroy(t->task);
[266294a9]272
273 slab_free(thread_slab, t);
274}
275
[70527f1]276/** Create new thread
277 *
278 * Create a new thread.
279 *
280 * @param func Thread's implementing function.
281 * @param arg Thread's implementing function argument.
282 * @param task Task to which the thread belongs.
283 * @param flags Thread flags.
[ff14c520]284 * @param name Symbolic name.
[70527f1]285 *
286 * @return New thread's structure on success, NULL on failure.
287 *
288 */
[ff14c520]289thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags, char *name)
[f761f1eb]290{
291 thread_t *t;
[bb68433]292 ipl_t ipl;
293
[266294a9]294 t = (thread_t *) slab_alloc(thread_slab, 0);
[2a46e10]295 if (!t)
296 return NULL;
[3fa424a9]297
298 thread_create_arch(t);
[f761f1eb]299
[bb68433]300 /* Not needed, but good for debugging */
[b6d4566]301 memsetb((__address)t->kstack, THREAD_STACK_SIZE * 1<<STACK_FRAMES, 0);
[bb68433]302
303 ipl = interrupts_disable();
304 spinlock_lock(&tidlock);
305 t->tid = ++last_tid;
306 spinlock_unlock(&tidlock);
307 interrupts_restore(ipl);
308
309 context_save(&t->saved_context);
310 context_set(&t->saved_context, FADDR(cushion), (__address) t->kstack, THREAD_STACK_SIZE);
311
312 the_initialize((the_t *) t->kstack);
313
314 ipl = interrupts_disable();
315 t->saved_context.ipl = interrupts_read();
316 interrupts_restore(ipl);
317
[9f52563]318 memcpy(t->name, name, THREAD_NAME_BUFLEN);
319
[bb68433]320 t->thread_code = func;
321 t->thread_arg = arg;
322 t->ticks = -1;
323 t->priority = -1; /* start in rq[0] */
324 t->cpu = NULL;
325 t->flags = 0;
326 t->state = Entering;
327 t->call_me = NULL;
328 t->call_me_with = NULL;
329
330 timeout_initialize(&t->sleep_timeout);
[116d1ef4]331 t->sleep_interruptible = false;
[bb68433]332 t->sleep_queue = NULL;
333 t->timeout_pending = 0;
[e3c762cd]334
335 t->in_copy_from_uspace = false;
336 t->in_copy_to_uspace = false;
[7509ddc]337
338 t->interrupted = false;
[48e7dd6]339 t->join_type = None;
[fe19611]340 t->detached = false;
341 waitq_initialize(&t->join_wq);
342
[bb68433]343 t->rwlock_holder_type = RWLOCK_NONE;
[6a27d63]344
[bb68433]345 t->task = task;
346
[6f8a426]347 t->fpu_context_exists = 0;
348 t->fpu_context_engaged = 0;
[bb68433]349
[7509ddc]350 /*
351 * Attach to the containing task.
352 */
[0182a665]353 ipl = interrupts_disable();
[7509ddc]354 spinlock_lock(&task->lock);
355 if (!task->accept_new_threads) {
356 spinlock_unlock(&task->lock);
357 slab_free(thread_slab, t);
[0182a665]358 interrupts_restore(ipl);
[7509ddc]359 return NULL;
360 }
361 list_append(&t->th_link, &task->th_head);
[b91bb65]362 if (task->refcount++ == 0)
363 task->main_thread = t;
[7509ddc]364 spinlock_unlock(&task->lock);
365
[bb68433]366 /*
367 * Register this thread in the system-wide list.
368 */
369 spinlock_lock(&threads_lock);
[b7f364e]370 btree_insert(&threads_btree, (btree_key_t) ((__address) t), (void *) t, NULL);
[bb68433]371 spinlock_unlock(&threads_lock);
372
373 interrupts_restore(ipl);
[6f8a426]374
[f761f1eb]375 return t;
376}
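/*
 * Editorial usage sketch, not part of the original file: the typical
 * create-and-run sequence built from thread_create() and thread_ready(),
 * modeled on sys_thread_create() below. The worker function, its argument
 * and the thread name are hypothetical.
 */
static void demo_worker(void *arg)
{
	/* Do the work; simply returning falls through to thread_exit() via cushion(). */
}

static void demo_spawn(void)
{
	thread_t *t;

	/* The new thread starts in the Entering state, attached to the current task. */
	t = thread_create(demo_worker, NULL, TASK, 0, "demo_worker");
	if (t)
		thread_ready(t);	/* Switch it to Ready and enqueue it on a run queue. */
}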
377
[0182a665]378/** Terminate thread.
[70527f1]379 *
380 * End current thread execution and switch it to the exiting
381 * state. All pending timeouts are executed.
382 *
383 */
[f761f1eb]384void thread_exit(void)
385{
[22f7769]386 ipl_t ipl;
[f761f1eb]387
388restart:
[22f7769]389 ipl = interrupts_disable();
[43114c5]390 spinlock_lock(&THREAD->lock);
391 if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */
392 spinlock_unlock(&THREAD->lock);
[22f7769]393 interrupts_restore(ipl);
[f761f1eb]394 goto restart;
395 }
[43114c5]396 THREAD->state = Exiting;
397 spinlock_unlock(&THREAD->lock);
[f761f1eb]398 scheduler();
[874621f]399
400 /* Not reached */
401 while (1)
402 ;
[f761f1eb]403}
404
[70527f1]405
406/** Thread sleep
407 *
408 * Suspend execution of the current thread.
409 *
410 * @param sec Number of seconds to sleep.
411 *
412 */
[f761f1eb]413void thread_sleep(__u32 sec)
414{
[76cec1e]415 thread_usleep(sec*1000000);
[f761f1eb]416}
[70527f1]417
[fe19611]418/** Wait for another thread to exit.
419 *
420 * @param t Thread to join on exit.
421 * @param usec Timeout in microseconds.
422 * @param flags Mode of operation.
423 *
424 * @return An error code from errno.h or an error code from synch.h.
425 */
426int thread_join_timeout(thread_t *t, __u32 usec, int flags)
427{
428 ipl_t ipl;
429 int rc;
430
431 if (t == THREAD)
432 return EINVAL;
433
434 /*
435 * Since thread join can only be called once on an undetached thread,
436 * the thread pointer is guaranteed to be still valid.
437 */
438
439 ipl = interrupts_disable();
440 spinlock_lock(&t->lock);
441 ASSERT(!t->detached);
442 spinlock_unlock(&t->lock);
[7b3e7f4]443 interrupts_restore(ipl);
[fe19611]444
[0182a665]445 rc = waitq_sleep_timeout(&t->join_wq, usec, flags);
446
[fe19611]447 return rc;
448}
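/*
 * Editorial usage sketch, not part of the original file: waiting for a worker
 * thread and reaping it, in the spirit of the ktaskgc fix mentioned in the
 * changeset message. The one-second timeout is arbitrary; SYNCH_FLAGS_NONE and
 * the ESYNCH_* return codes are assumed to come from <synch/synch.h>.
 */
static void demo_reap(thread_t *t)
{
	int rc;

	rc = thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE);
	if (rc != ESYNCH_TIMEOUT && rc != ESYNCH_INTERRUPTED) {
		/* The join succeeded: t is Undead and can now be reaped. */
		thread_detach(t);
	}
}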
449
450/** Detach thread.
451 *
452 * Mark the thread as detached. If the thread is already in the Undead state,
453 * deallocate its resources.
454 *
455 * @param t Thread to be detached.
456 */
457void thread_detach(thread_t *t)
458{
459 ipl_t ipl;
460
461 /*
462 * Since the thread is expected not to be detached yet,
463 * the pointer to it is guaranteed to be still valid.
464 */
465 ipl = interrupts_disable();
466 spinlock_lock(&t->lock);
467 ASSERT(!t->detached);
468 if (t->state == Undead) {
469 thread_destroy(t); /* unlocks &t->lock */
470 interrupts_restore(ipl);
471 return;
472 } else {
473 t->detached = true;
474 }
475 spinlock_unlock(&t->lock);
476 interrupts_restore(ipl);
477}
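/*
 * Editorial sketch, not part of the original file: the fire-and-forget
 * variant of the thread lifecycle. A thread that nobody will ever join is
 * detached right after creation, so its resources are freed automatically
 * once it reaches the Undead state. The worker and the name are hypothetical.
 */
static void demo_spawn_detached(void (* func)(void *), void *arg)
{
	thread_t *t;

	t = thread_create(func, arg, TASK, 0, "detached_worker");
	if (t) {
		thread_detach(t);	/* No thread_join_timeout() will ever follow. */
		thread_ready(t);
	}
}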
478
[70527f1]479/** Thread usleep
480 *
481 * Suspend execution of the current thread.
482 *
483 * @param usec Number of microseconds to sleep.
484 *
485 */
[f761f1eb]486void thread_usleep(__u32 usec)
487{
488 waitq_t wq;
489
490 waitq_initialize(&wq);
491
[116d1ef4]492 (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
[f761f1eb]493}
494
[70527f1]495/** Register thread out-of-context invocation
496 *
497 * Register a function and its argument to be executed
498 * on next context switch to the current thread.
499 *
500 * @param call_me Out-of-context function.
501 * @param call_me_with Out-of-context function argument.
502 *
503 */
[f761f1eb]504void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
505{
[22f7769]506 ipl_t ipl;
[f761f1eb]507
[22f7769]508 ipl = interrupts_disable();
[43114c5]509 spinlock_lock(&THREAD->lock);
510 THREAD->call_me = call_me;
511 THREAD->call_me_with = call_me_with;
512 spinlock_unlock(&THREAD->lock);
[22f7769]513 interrupts_restore(ipl);
[f761f1eb]514}
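/*
 * Editorial sketch, not part of the original file: registering an
 * out-of-context hook for the current thread. The hook and its argument are
 * hypothetical; per the doc comment above, they are executed on the next
 * context switch to the registering thread.
 */
static void demo_hook(void *arg)
{
	/* Runs outside the normal flow of the thread that registered it. */
}

static void demo_register_hook(void)
{
	thread_register_call_me(demo_hook, NULL);
}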
[55ab0f1]515
516/** Print a list of all threads and their debug info */
517void thread_print_list(void)
518{
519 link_t *cur;
520 ipl_t ipl;
521
522 /* Messing with thread structures, avoid deadlock */
523 ipl = interrupts_disable();
524 spinlock_lock(&threads_lock);
525
[016acbe]526 for (cur = threads_btree.leaf_head.next; cur != &threads_btree.leaf_head; cur = cur->next) {
527 btree_node_t *node;
528 int i;
529
530 node = list_get_instance(cur, btree_node_t, leaf_link);
531 for (i = 0; i < node->keys; i++) {
532 thread_t *t;
533
534 t = (thread_t *) node->value[i];
[280a27e]535 printf("%s: address=%#zX, tid=%zd, state=%s, task=%#zX, code=%#zX, stack=%#zX, cpu=",
[016acbe]536 t->name, t, t->tid, thread_states[t->state], t->task, t->thread_code, t->kstack);
537 if (t->cpu)
[3de6dd7a]538 printf("cpu%zd", t->cpu->id);
[016acbe]539 else
540 printf("none");
[3de6dd7a]541 if (t->state == Sleeping) {
542 printf(", kst=%#zX", t->kstack);
543 printf(", wq=%#zX", t->sleep_queue);
544 }
[016acbe]545 printf("\n");
546 }
[55ab0f1]547 }
548
549 spinlock_unlock(&threads_lock);
[37c57f2]550 interrupts_restore(ipl);
[55ab0f1]551}
[9f52563]552
[016acbe]553/** Check whether thread exists.
554 *
555 * Note that threads_lock must be already held and
556 * interrupts must be already disabled.
557 *
558 * @param t Pointer to thread.
559 *
560 * @return True if thread t is known to the system, false otherwise.
561 */
562bool thread_exists(thread_t *t)
563{
564 btree_node_t *leaf;
565
[b7f364e]566 return btree_search(&threads_btree, (btree_key_t) ((__address) t), &leaf) != NULL;
[016acbe]567}
568
[9f52563]569/** Process syscall to create new thread.
570 *
571 */
[0f250f9]572__native sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name)
[9f52563]573{
[68091bd]574 thread_t *t;
575 char namebuf[THREAD_NAME_BUFLEN];
[45fb65c]576 uspace_arg_t *kernel_uarg;
[9f52563]577 __u32 tid;
[e3c762cd]578 int rc;
[9f52563]579
[e3c762cd]580 rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN);
581 if (rc != 0)
582 return (__native) rc;
[0f250f9]583
584 kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
[e3c762cd]585 rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
586 if (rc != 0) {
587 free(kernel_uarg);
588 return (__native) rc;
589 }
[9f52563]590
[68091bd]591 if ((t = thread_create(uinit, kernel_uarg, TASK, 0, namebuf))) {
[9f52563]592 tid = t->tid;
[68091bd]593 thread_ready(t);
[9f52563]594 return (__native) tid;
[68091bd]595 } else {
[0f250f9]596 free(kernel_uarg);
[68091bd]597 }
[9f52563]598
[e3c762cd]599 return (__native) ENOMEM;
[9f52563]600}
601
602/** Process syscall to terminate thread.
603 *
604 */
[0f250f9]605__native sys_thread_exit(int uspace_status)
[9f52563]606{
[68091bd]607 thread_exit();
608 /* Unreachable */
609 return 0;
[9f52563]610}