source: mainline/kernel/generic/src/proc/task.c@ ff3a34b

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since ff3a34b was f6d2c81, checked in by Jakub Jermar <jakub@…>, 18 years ago

Fix two memory leaks.

In kernel, kernel_uarg structure needs to be deallocated when a thread
with userspace context is destroyed.

In userspace, the return value of SYS_THREAD_CREATE must be checked
for error conditions; in case of error, uarg and the stack must be
freed.

  • Property mode set to 100644
File size: 13.3 KB
RevLine 
[f761f1eb]1/*
[df4ed85]2 * Copyright (c) 2001-2004 Jakub Jermar
[f761f1eb]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[cc73a8a1]29/** @addtogroup genericproc
[b45c443]30 * @{
31 */
32
[9179d0a]33/**
[b45c443]34 * @file
[9179d0a]35 * @brief Task management.
36 */
37
[5be1923]38#include <main/uinit.h>
[f761f1eb]39#include <proc/thread.h>
40#include <proc/task.h>
[0f250f9]41#include <proc/uarg.h>
[20d50a1]42#include <mm/as.h>
[085d973]43#include <mm/slab.h>
[31d8e10]44#include <atomic.h>
[f761f1eb]45#include <synch/spinlock.h>
[5573942]46#include <synch/waitq.h>
[f761f1eb]47#include <arch.h>
48#include <panic.h>
[7f6e755]49#include <adt/btree.h>
[5c9a08b]50#include <adt/list.h>
[6d9c49a]51#include <ipc/ipc.h>
[1077d91]52#include <security/cap.h>
[6d9c49a]53#include <memstr.h>
[37c57f2]54#include <print.h>
[d4b5542]55#include <lib/elf.h>
[7509ddc]56#include <errno.h>
[95155b0c]57#include <func.h>
[e3c762cd]58#include <syscall/copy.h>
[0dbc4e7]59#include <console/klog.h>
[8e5e78f]60
[a84af84]61#ifndef LOADED_PROG_STACK_PAGES_NO
62#define LOADED_PROG_STACK_PAGES_NO 1
63#endif
[8e5e78f]64
[88169d9]65/** Spinlock protecting the tasks_btree B+tree. */
[dc747e3]66SPINLOCK_INITIALIZE(tasks_lock);
[88169d9]67
68/** B+tree of active tasks.
69 *
[6f4495f5]70 * The task is guaranteed to exist after it was found in the tasks_btree as
71 * long as:
[88169d9]72 * @li the tasks_lock is held,
[6f4495f5]73 * @li the task's lock is held when task's lock is acquired before releasing
74 * tasks_lock or
[7bb6b06]75 * @li the task's refcount is greater than 0
[88169d9]76 *
77 */
[7f6e755]78btree_t tasks_btree;
[88169d9]79
[286e03d]80static task_id_t task_counter = 0;
[70527f1]81
[b91bb65]82static void ktaskclnp(void *arg);
[48e7dd6]83static void ktaskgc(void *arg);
[7509ddc]84
[70527f1]85/** Initialize tasks
86 *
87 * Initialize kernel tasks support.
88 *
89 */
[f761f1eb]90void task_init(void)
91{
[43114c5]92 TASK = NULL;
[7f6e755]93 btree_create(&tasks_btree);
[f761f1eb]94}
95
[f74bbaf]96/** Kill all tasks except the current task.
97 *
98 */
99void task_done(void)
100{
101 task_t *t;
102 do { /* Repeat until there are any tasks except TASK */
103
104 /* Messing with task structures, avoid deadlock */
105 ipl_t ipl = interrupts_disable();
106 spinlock_lock(&tasks_lock);
107
108 t = NULL;
109 link_t *cur;
[f6d2c81]110 for (cur = tasks_btree.leaf_head.next;
111 cur != &tasks_btree.leaf_head; cur = cur->next) {
112 btree_node_t *node;
113
114 node = list_get_instance(cur, btree_node_t, leaf_link);
[f74bbaf]115
116 unsigned int i;
117 for (i = 0; i < node->keys; i++) {
118 if ((task_t *) node->value[i] != TASK) {
119 t = (task_t *) node->value[i];
120 break;
121 }
122 }
123 }
124
125 if (t != NULL) {
126 task_id_t id = t->taskid;
127
128 spinlock_unlock(&tasks_lock);
129 interrupts_restore(ipl);
130
131#ifdef CONFIG_DEBUG
132 printf("Killing task %llu\n", id);
133#endif
134 task_kill(id);
135 } else {
136 spinlock_unlock(&tasks_lock);
137 interrupts_restore(ipl);
138 }
139
140 } while (t != NULL);
141}
[70527f1]142
/** Create new task
 *
 * Create new task with no threads.
 *
 * @param as   Task's address space.
 * @param name Symbolic name. The pointer is stored directly (not
 *             copied), so the caller's string must outlive the task.
 *
 * @return New task's structure
 *
 */
task_t *task_create(as_t *as, char *name)
{
	ipl_t ipl;
	task_t *ta;
	int i;

	/* Kernel heap allocation; flags 0 — presumably may block, TODO confirm. */
	ta = (task_t *) malloc(sizeof(task_t), 0);

	/* Architecture-specific part of the task structure. */
	task_create_arch(ta);

	spinlock_initialize(&ta->lock, "task_ta_lock");
	list_initialize(&ta->th_head);
	ta->as = as;
	ta->name = name;
	ta->main_thread = NULL;
	ta->refcount = 0;
	ta->context = CONTEXT;

	ta->capabilities = 0;
	ta->accept_new_threads = true;
	ta->cycles = 0;

	/* IPC initialization: answerbox and all phones start disconnected. */
	ipc_answerbox_init(&ta->answerbox);
	for (i = 0; i < IPC_MAX_PHONES; i++)
		ipc_phone_init(&ta->phones[i]);
	/*
	 * Connect phone 0 to the name service (ipc_phone_0), but only if
	 * the context check between the two tasks passes.
	 */
	if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
	    ta->context)))
		ipc_phone_connect(&ta->phones[0], ipc_phone_0);
	atomic_set(&ta->active_calls, 0);

	mutex_initialize(&ta->futexes_lock);
	btree_create(&ta->futexes);

	ipl = interrupts_disable();

	/*
	 * Increment address space reference count.
	 */
	atomic_inc(&as->refcount);

	spinlock_lock(&tasks_lock);

	/* Allocate a unique task ID and publish the task in the B+tree. */
	ta->taskid = ++task_counter;
	btree_insert(&tasks_btree, (btree_key_t) ta->taskid, (void *) ta, NULL);

	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);

	return ta;
}
203
/** Destroy task.
 *
 * Releases the resources held by the task structure: its
 * architecture-specific part, the futex B+tree, the reference to the
 * address space (destroying the address space when this was the last
 * reference) and finally the structure itself.
 *
 * @param t Task to be destroyed.
 */
void task_destroy(task_t *t)
{
	task_destroy_arch(t);
	btree_destroy(&t->futexes);

	/* Drop the address space reference taken in task_create(). */
	if (atomic_predec(&t->as->refcount) == 0)
		as_destroy(t->as);

	free(t);
	/*
	 * NOTE(review): clearing the current-task pointer here assumes
	 * task_destroy() runs in the context of the dying task — confirm.
	 */
	TASK = NULL;
}
219
[5be1923]220/** Create new task with 1 thread and run it
[ff14c520]221 *
[9179d0a]222 * @param program_addr Address of program executable image.
[ff14c520]223 * @param name Program name.
[5be1923]224 *
[7f0837c]225 * @return Task of the running program or NULL on error.
[5be1923]226 */
[f6d2c81]227task_t *task_run_program(void *program_addr, char *name)
[5be1923]228{
229 as_t *as;
230 as_area_t *a;
231 int rc;
[b91bb65]232 thread_t *t1, *t2;
[5be1923]233 task_t *task;
[0f250f9]234 uspace_arg_t *kernel_uarg;
[5be1923]235
236 as = as_create(0);
[a0bb10ef]237 ASSERT(as);
[5be1923]238
[649799a]239 rc = elf_load((elf_header_t *) program_addr, as);
[5be1923]240 if (rc != EE_OK) {
[482826d]241 as_destroy(as);
[5be1923]242 return NULL;
243 }
244
[0f250f9]245 kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
[6f4495f5]246 kernel_uarg->uspace_entry =
247 (void *) ((elf_header_t *) program_addr)->e_entry;
[0f250f9]248 kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
249 kernel_uarg->uspace_thread_function = NULL;
250 kernel_uarg->uspace_thread_arg = NULL;
251 kernel_uarg->uspace_uarg = NULL;
[9f52563]252
[ff14c520]253 task = task_create(as, name);
[a0bb10ef]254 ASSERT(task);
255
[5be1923]256 /*
257 * Create the data as_area.
258 */
[6f4495f5]259 a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
260 LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
261 AS_AREA_ATTR_NONE, &anon_backend, NULL);
[5be1923]262
[b91bb65]263 /*
264 * Create the main thread.
265 */
[6f4495f5]266 t1 = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
267 "uinit", false);
[b91bb65]268 ASSERT(t1);
[a0bb10ef]269
[b91bb65]270 /*
271 * Create killer thread for the new task.
272 */
[62b6d17]273 t2 = thread_create(ktaskgc, t1, task, 0, "ktaskgc", true);
[b91bb65]274 ASSERT(t2);
275 thread_ready(t2);
276
277 thread_ready(t1);
278
[5be1923]279 return task;
280}
[37c57f2]281
[ec55358]282/** Syscall for reading task ID from userspace.
283 *
[6f4495f5]284 * @param uspace_task_id Userspace address of 8-byte buffer where to store
285 * current task ID.
[ec55358]286 *
[e3c762cd]287 * @return 0 on success or an error code from @ref errno.h.
[ec55358]288 */
[7f1c620]289unative_t sys_task_get_id(task_id_t *uspace_task_id)
[ec55358]290{
291 /*
292 * No need to acquire lock on TASK because taskid
293 * remains constant for the lifespan of the task.
294 */
[6f4495f5]295 return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
296 sizeof(TASK->taskid));
[ec55358]297}
298
[9a8d91b]299/** Find task structure corresponding to task ID.
300 *
301 * The tasks_lock must be already held by the caller of this function
302 * and interrupts must be disabled.
303 *
304 * @param id Task ID.
305 *
306 * @return Task structure address or NULL if there is no such task ID.
307 */
308task_t *task_find_by_id(task_id_t id)
309{
310 btree_node_t *leaf;
311
312 return (task_t *) btree_search(&tasks_btree, (btree_key_t) id, &leaf);
313}
314
/** Get accounting data of given task.
 *
 * Sums the cycles already accumulated on the task with the cycles of
 * all its counted threads. If the current thread belongs to the task,
 * its accounting is refreshed first.
 *
 * Note that task lock of 't' must be already held and
 * interrupts must be already disabled.
 *
 * @param t Pointer to the task.
 *
 * @return Total number of cycles consumed by the task and its threads.
 */
uint64_t task_get_accounting(task_t *t)
{
	/* Accumulated value of task */
	uint64_t ret = t->cycles;

	/* Current values of threads */
	link_t *cur;
	for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
		thread_t *thr = list_get_instance(cur, thread_t, th_link);

		spinlock_lock(&thr->lock);
		/* Process only counted threads */
		if (!thr->uncounted) {
			if (thr == THREAD) {
				/* Update accounting of current thread */
				thread_update_accounting();
			}
			ret += thr->cycles;
		}
		spinlock_unlock(&thr->lock);
	}

	return ret;
}
347
/** Kill task.
 *
 * Removes the task from the tasks B+tree, spawns a ktaskclnp cleanup
 * thread inside the task and interrupts all of the task's other
 * threads. Task ID 1 (presumably the init/name-service task — TODO
 * confirm) may not be killed.
 *
 * @param id ID of the task to be killed.
 *
 * @return 0 on success or an error code from errno.h
 */
int task_kill(task_id_t id)
{
	ipl_t ipl;
	task_t *ta;
	thread_t *t;
	link_t *cur;

	if (id == 1)
		return EPERM;

	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);

	if (!(ta = task_find_by_id(id))) {
		spinlock_unlock(&tasks_lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	/*
	 * Pin the task with a temporary reference so it survives the
	 * removal from the B+tree below.
	 */
	spinlock_lock(&ta->lock);
	ta->refcount++;
	spinlock_unlock(&ta->lock);

	btree_remove(&tasks_btree, ta->taskid, NULL);
	spinlock_unlock(&tasks_lock);

	/* Cleanup thread runs inside the dying task itself. */
	t = thread_create(ktaskclnp, NULL, ta, 0, "ktaskclnp", true);

	spinlock_lock(&ta->lock);
	ta->accept_new_threads = false;
	ta->refcount--;

	/*
	 * Interrupt all threads except ktaskclnp.
	 */
	for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
		thread_t *thr;
		bool sleeping = false;

		thr = list_get_instance(cur, thread_t, th_link);
		if (thr == t)
			continue;

		spinlock_lock(&thr->lock);
		thr->interrupted = true;
		if (thr->state == Sleeping)
			sleeping = true;
		spinlock_unlock(&thr->lock);

		/* Sleeping threads must be woken up to notice the flag. */
		if (sleeping)
			waitq_interrupt_sleep(thr);
	}

	spinlock_unlock(&ta->lock);
	interrupts_restore(ipl);

	/* Start the cleanup thread only if its creation succeeded. */
	if (t)
		thread_ready(t);

	return 0;
}
415
/** Print task list
 *
 * Walks the tasks B+tree and prints one line per task: ID, name,
 * context, structure and address-space pointers, accumulated cycles,
 * thread/refcount, active IPC calls and connected phones.
 */
void task_print_list(void)
{
	link_t *cur;
	ipl_t ipl;

	/* Messing with task structures, avoid deadlock */
	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);

	printf("taskid name       ctx address    as         cycles     threads "
	    "calls  callee\n");
	printf("------ ---------- --- ---------- ---------- ---------- ------- " "------ ------>\n");

	/* Iterate over all leaf nodes of the B+tree. */
	for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head;
	    cur = cur->next) {
		btree_node_t *node;
		unsigned int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		for (i = 0; i < node->keys; i++) {
			task_t *t;
			int j;

			t = (task_t *) node->value[i];

			spinlock_lock(&t->lock);

			/* Scale the cycle count to a human-readable suffix. */
			uint64_t cycles;
			char suffix;
			order(task_get_accounting(t), &cycles, &suffix);

			printf("%-6llu %-10s %-3ld %#10zx %#10zx %9llu%c %7zd "
			    "%6zd", t->taskid, t->name, t->context, t, t->as,
			    cycles, suffix, t->refcount,
			    atomic_get(&t->active_calls));
			/* List connected phones as "slot:callee". */
			for (j = 0; j < IPC_MAX_PHONES; j++) {
				if (t->phones[j].callee)
					printf(" %zd:%#zx", j,
					    t->phones[j].callee);
			}
			printf("\n");

			spinlock_unlock(&t->lock);
		}
	}

	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);
}
[7509ddc]466
/** Kernel thread used to cleanup the task after it is killed.
 *
 * Repeatedly joins and detaches every thread of the dying task other
 * than itself and the task's main thread (the main thread is joined by
 * ktaskgc). Once no joinable thread remains, tears down the task's IPC
 * and futex state.
 *
 * @param arg Unused.
 */
void ktaskclnp(void *arg)
{
	ipl_t ipl;
	thread_t *t = NULL, *main_thread;
	link_t *cur;
	bool again;

	thread_detach(THREAD);

loop:
	ipl = interrupts_disable();
	spinlock_lock(&TASK->lock);

	main_thread = TASK->main_thread;

	/*
	 * Find a thread to join.
	 */
	again = false;
	for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
		t = list_get_instance(cur, thread_t, th_link);

		spinlock_lock(&t->lock);
		if (t == THREAD) {
			/* Never join ourselves. */
			spinlock_unlock(&t->lock);
			continue;
		} else if (t == main_thread) {
			/* The main thread is handled by ktaskgc. */
			spinlock_unlock(&t->lock);
			continue;
		} else if (t->join_type != None) {
			/* Someone else is already joining this thread. */
			spinlock_unlock(&t->lock);
			again = true;
			continue;
		} else {
			/* Claim the thread for joining. */
			t->join_type = TaskClnp;
			spinlock_unlock(&t->lock);
			again = false;
			break;
		}
	}

	spinlock_unlock(&TASK->lock);
	interrupts_restore(ipl);

	if (again) {
		/*
		 * Other cleanup (e.g. ktaskgc) is in progress.
		 */
		scheduler();
		goto loop;
	}

	/*
	 * If the loop broke out with a claimed thread, t != THREAD;
	 * if it ran to completion, t is the last list entry examined
	 * or THREAD when no other thread remains.
	 */
	if (t != THREAD) {
		ASSERT(t != main_thread);	/* uninit is joined and detached
						 * in ktaskgc */
		thread_join(t);
		thread_detach(t);
		goto loop;	/* go for another thread */
	}

	/*
	 * Now there are no other threads in this task
	 * and no new threads can be created.
	 */

	ipc_cleanup();
	futex_cleanup();
	klog_printf("Cleanup of task %llu completed.", TASK->taskid);
}
[b91bb65]537
/** Kernel thread used to kill the userspace task when its main thread exits.
 *
 * This thread waits until the main userspace thread (i.e. uninit) exits.
 * When this happens, the task is killed. In the meantime, exited threads
 * are garbage collected.
 *
 * @param arg Pointer to the thread structure of the task's main thread.
 */
void ktaskgc(void *arg)
{
	thread_t *t = (thread_t *) arg;
loop:
	/*
	 * Userspace threads cannot detach themselves,
	 * therefore the thread pointer is guaranteed to be valid.
	 */
	if (thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE) ==
	    ESYNCH_TIMEOUT) {	/* sleep uninterruptibly here! */
		ipl_t ipl;
		link_t *cur;
		thread_t *thr = NULL;

		/*
		 * The join timed out. Try to do some garbage collection of
		 * Undead threads.
		 */
more_gc:
		ipl = interrupts_disable();
		spinlock_lock(&TASK->lock);

		/*
		 * Pick one Undead, not-yet-claimed thread (other than the
		 * main thread) and mark it as being joined by us.
		 */
		for (cur = TASK->th_head.next; cur != &TASK->th_head;
		    cur = cur->next) {
			thr = list_get_instance(cur, thread_t, th_link);
			spinlock_lock(&thr->lock);
			if (thr != t && thr->state == Undead &&
			    thr->join_type == None) {
				thr->join_type = TaskGC;
				spinlock_unlock(&thr->lock);
				break;
			}
			spinlock_unlock(&thr->lock);
			/* Reset so a full pass without a hit leaves thr NULL. */
			thr = NULL;
		}
		spinlock_unlock(&TASK->lock);
		interrupts_restore(ipl);

		if (thr) {
			/* Reap the claimed thread, then look for more. */
			thread_join(thr);
			thread_detach(thr);
			scheduler();
			goto more_gc;
		}

		/* Nothing to collect; go back to waiting on the main thread. */
		goto loop;
	}
	/* Main thread exited: detach it and kill the whole task. */
	thread_detach(t);
	task_kill(TASK->taskid);
}
[b45c443]596
[cc73a8a1]597/** @}
[b45c443]598 */
Note: See TracBrowser for help on using the repository browser.