source: mainline/kernel/generic/src/proc/task.c@ 5573942

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 5573942 was 5573942, checked in by Jakub Jermar <jakub@…>, 18 years ago

Revert thread_interrupt_sleep() to waitq_interrupt_sleep().
I'd prefer that this, IMO, waitq related stuff stays together.

  • Property mode set to 100644
File size: 12.5 KB
RevLine 
[f761f1eb]1/*
[df4ed85]2 * Copyright (c) 2001-2004 Jakub Jermar
[f761f1eb]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[cc73a8a1]29/** @addtogroup genericproc
[b45c443]30 * @{
31 */
32
[9179d0a]33/**
[b45c443]34 * @file
[9179d0a]35 * @brief Task management.
36 */
37
[5be1923]38#include <main/uinit.h>
[f761f1eb]39#include <proc/thread.h>
40#include <proc/task.h>
[0f250f9]41#include <proc/uarg.h>
[20d50a1]42#include <mm/as.h>
[085d973]43#include <mm/slab.h>
[f761f1eb]44#include <synch/spinlock.h>
[5573942]45#include <synch/waitq.h>
[f761f1eb]46#include <arch.h>
47#include <panic.h>
[7f6e755]48#include <adt/btree.h>
[5c9a08b]49#include <adt/list.h>
[6d9c49a]50#include <ipc/ipc.h>
[1077d91]51#include <security/cap.h>
[6d9c49a]52#include <memstr.h>
[37c57f2]53#include <print.h>
[d4b5542]54#include <lib/elf.h>
[7509ddc]55#include <errno.h>
[95155b0c]56#include <func.h>
[e3c762cd]57#include <syscall/copy.h>
[0dbc4e7]58#include <console/klog.h>
[8e5e78f]59
[a84af84]60#ifndef LOADED_PROG_STACK_PAGES_NO
61#define LOADED_PROG_STACK_PAGES_NO 1
62#endif
[8e5e78f]63
/** Spinlock protecting the tasks_btree B+tree. */
SPINLOCK_INITIALIZE(tasks_lock);

/** B+tree of active tasks.
 *
 * The task is guaranteed to exist after it was found in the tasks_btree as
 * long as:
 * @li the tasks_lock is held,
 * @li the task's lock is held when task's lock is acquired before releasing
 *     tasks_lock or
 * @li the task's refcount is greater than 0
 *
 */
btree_t tasks_btree;

/** Monotonic counter used to hand out unique task IDs; incremented under
 * tasks_lock in task_create(), so IDs start at 1 and are never reused. */
static task_id_t task_counter = 0;

/* Kernel threads servicing task teardown; see definitions below. */
static void ktaskclnp(void *arg);	/* joins and reaps a killed task's threads */
static void ktaskgc(void *arg);		/* kills the task when its main thread exits */
[7509ddc]83
[70527f1]84/** Initialize tasks
85 *
86 * Initialize kernel tasks support.
87 *
88 */
[f761f1eb]89void task_init(void)
90{
[43114c5]91 TASK = NULL;
[7f6e755]92 btree_create(&tasks_btree);
[f761f1eb]93}
94
[70527f1]95
96/** Create new task
97 *
98 * Create new task with no threads.
99 *
[20d50a1]100 * @param as Task's address space.
[ff14c520]101 * @param name Symbolic name.
[70527f1]102 *
[5be1923]103 * @return New task's structure
[70527f1]104 *
105 */
[ff14c520]106task_t *task_create(as_t *as, char *name)
[f761f1eb]107{
[22f7769]108 ipl_t ipl;
[f761f1eb]109 task_t *ta;
[2ba7810]110 int i;
[f761f1eb]111
[bb68433]112 ta = (task_t *) malloc(sizeof(task_t), 0);
113
[963074b3]114 task_create_arch(ta);
115
[bb68433]116 spinlock_initialize(&ta->lock, "task_ta_lock");
117 list_initialize(&ta->th_head);
118 ta->as = as;
[ff14c520]119 ta->name = name;
[b91bb65]120 ta->main_thread = NULL;
[7509ddc]121 ta->refcount = 0;
[cfffb290]122 ta->context = CONTEXT;
[7509ddc]123
[1077d91]124 ta->capabilities = 0;
[7509ddc]125 ta->accept_new_threads = true;
[0313ff0]126 ta->cycles = 0;
[2ba7810]127
[6d9c49a]128 ipc_answerbox_init(&ta->answerbox);
[cfffb290]129 for (i = 0; i < IPC_MAX_PHONES; i++)
[2ba7810]130 ipc_phone_init(&ta->phones[i]);
[6f4495f5]131 if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
132 ta->context)))
[2ba7810]133 ipc_phone_connect(&ta->phones[0], ipc_phone_0);
[5f62ef9]134 atomic_set(&ta->active_calls, 0);
[4fded58]135
136 mutex_initialize(&ta->futexes_lock);
137 btree_create(&ta->futexes);
[bb68433]138
139 ipl = interrupts_disable();
[482826d]140
141 /*
142 * Increment address space reference count.
143 * TODO: Reconsider the locking scheme.
144 */
145 mutex_lock(&as->lock);
146 as->refcount++;
147 mutex_unlock(&as->lock);
148
[bb68433]149 spinlock_lock(&tasks_lock);
[286e03d]150
151 ta->taskid = ++task_counter;
[b7f364e]152 btree_insert(&tasks_btree, (btree_key_t) ta->taskid, (void *) ta, NULL);
[286e03d]153
[bb68433]154 spinlock_unlock(&tasks_lock);
155 interrupts_restore(ipl);
156
[f761f1eb]157 return ta;
158}
159
/** Destroy task.
 *
 * Releases architecture-specific state and the futex B+tree, drops the
 * task's reference to its address space (destroying the address space when
 * this was the last reference), and frees the task structure itself.
 *
 * NOTE(review): clearing TASK at the end suggests this runs on the thread
 * of the dying task itself — confirm against callers.
 *
 * @param t Task to be destroyed.
 */
void task_destroy(task_t *t)
{
	task_destroy_arch(t);
	btree_destroy(&t->futexes);

	mutex_lock_active(&t->as->lock);
	if (--t->as->refcount == 0) {
		/* Last reference: the lock must be released before
		 * as_destroy() tears the address space down. */
		mutex_unlock(&t->as->lock);
		as_destroy(t->as);
		/*
		 * t->as is destroyed.
		 */
	} else
		mutex_unlock(&t->as->lock);
	
	free(t);
	TASK = NULL;
}
182
/** Create new task with 1 thread and run it
 *
 * Loads an ELF image into a fresh address space, creates a task around it,
 * sets up a userspace stack area, and starts two kernel-managed threads:
 * the task's main userspace thread ("uinit") and a watchdog/GC thread
 * ("ktaskgc") that kills the task when the main thread exits.
 *
 * @param program_addr Address of program executable image.
 * @param name Program name. Stored by pointer; must outlive the task.
 *
 * @return Task of the running program or NULL on error.
 */
task_t * task_run_program(void *program_addr, char *name)
{
	as_t *as;
	as_area_t *a;
	int rc;
	thread_t *t1, *t2;
	task_t *task;
	uspace_arg_t *kernel_uarg;

	as = as_create(0);
	ASSERT(as);

	rc = elf_load((elf_header_t *) program_addr, as);
	if (rc != EE_OK) {
		/* Loading failed: the only resource acquired so far is
		 * the address space, so release it and bail out. */
		as_destroy(as);
		return NULL;
	}
	
	/* Arguments handed to uinit() describing where userspace
	 * execution starts; only entry point and stack are set. */
	kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
	kernel_uarg->uspace_entry =
	    (void *) ((elf_header_t *) program_addr)->e_entry;
	kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
	kernel_uarg->uspace_thread_function = NULL;
	kernel_uarg->uspace_thread_arg = NULL;
	kernel_uarg->uspace_uarg = NULL;
	
	task = task_create(as, name);
	ASSERT(task);

	/*
	 * Create the data as_area.
	 * (Serves as the userspace stack, anonymous-backed, at
	 * USTACK_ADDRESS.)
	 */
	a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
	    LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
	    AS_AREA_ATTR_NONE, &anon_backend, NULL);

	/*
	 * Create the main thread.
	 * (Not detached — ktaskgc joins it; see ktaskgc().)
	 */
	t1 = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
	    "uinit", false);
	ASSERT(t1);
	
	/*
	 * Create killer thread for the new task.
	 * (Runs ktaskgc with the main thread as its argument.)
	 */
	t2 = thread_create(ktaskgc, t1, task, 0, "ktaskgc", true);
	ASSERT(t2);
	thread_ready(t2);

	thread_ready(t1);
	
	return task;
}
[37c57f2]244
[ec55358]245/** Syscall for reading task ID from userspace.
246 *
[6f4495f5]247 * @param uspace_task_id Userspace address of 8-byte buffer where to store
248 * current task ID.
[ec55358]249 *
[e3c762cd]250 * @return 0 on success or an error code from @ref errno.h.
[ec55358]251 */
[7f1c620]252unative_t sys_task_get_id(task_id_t *uspace_task_id)
[ec55358]253{
254 /*
255 * No need to acquire lock on TASK because taskid
256 * remains constant for the lifespan of the task.
257 */
[6f4495f5]258 return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
259 sizeof(TASK->taskid));
[ec55358]260}
261
[9a8d91b]262/** Find task structure corresponding to task ID.
263 *
264 * The tasks_lock must be already held by the caller of this function
265 * and interrupts must be disabled.
266 *
267 * @param id Task ID.
268 *
269 * @return Task structure address or NULL if there is no such task ID.
270 */
271task_t *task_find_by_id(task_id_t id)
272{
273 btree_node_t *leaf;
274
275 return (task_t *) btree_search(&tasks_btree, (btree_key_t) id, &leaf);
276}
277
/** Get accounting data of given task.
 *
 * Sums the task's accumulated cycle count with the current cycle counts
 * of all its live, counted threads. If the calling thread belongs to the
 * walked list, its accounting is refreshed first so the result is
 * up to date.
 *
 * Note that task lock of 't' must be already held and
 * interrupts must be already disabled.
 *
 * @param t Pointer to the task.
 *
 * @return Total cycles consumed by the task and its counted threads.
 */
uint64_t task_get_accounting(task_t *t)
{
	/* Accumulated value of task */
	uint64_t ret = t->cycles;
	
	/* Current values of threads */
	link_t *cur;
	for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
		thread_t *thr = list_get_instance(cur, thread_t, th_link);
		
		spinlock_lock(&thr->lock);
		/* Process only counted threads */
		if (!thr->uncounted) {
			if (thr == THREAD) {
				/* Update accounting of current thread */
				thread_update_accounting();
			}
			ret += thr->cycles;
		}
		spinlock_unlock(&thr->lock);
	}
	
	return ret;
}
310
/** Kill task.
 *
 * Unregisters the task from the global B+tree, forbids creation of new
 * threads, interrupts all existing threads, and hands the actual cleanup
 * over to a freshly created ktaskclnp kernel thread.
 *
 * @param id ID of the task to be killed. Task 1 (presumably the kernel
 *	or init task — confirm) is protected and cannot be killed.
 *
 * @return 0 on success or an error code from errno.h
 */
int task_kill(task_id_t id)
{
	ipl_t ipl;
	task_t *ta;
	thread_t *t;
	link_t *cur;

	if (id == 1)
		return EPERM;
	
	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);

	if (!(ta = task_find_by_id(id))) {
		spinlock_unlock(&tasks_lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	/* Take a temporary reference so the task stays valid after we
	 * remove it from tasks_btree and drop tasks_lock. */
	spinlock_lock(&ta->lock);
	ta->refcount++;
	spinlock_unlock(&ta->lock);

	btree_remove(&tasks_btree, ta->taskid, NULL);
	spinlock_unlock(&tasks_lock);
	
	/* Cleanup thread; created before accept_new_threads is cleared
	 * so it is the last thread admitted into the task. */
	t = thread_create(ktaskclnp, NULL, ta, 0, "ktaskclnp", true);
	
	spinlock_lock(&ta->lock);
	ta->accept_new_threads = false;
	/* Drop the temporary reference taken above. */
	ta->refcount--;

	/*
	 * Interrupt all threads except ktaskclnp.
	 */
	for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
		thread_t *thr;
		bool sleeping = false;
		
		thr = list_get_instance(cur, thread_t, th_link);
		if (thr == t)
			continue;
		
		spinlock_lock(&thr->lock);
		thr->interrupted = true;
		if (thr->state == Sleeping)
			sleeping = true;
		spinlock_unlock(&thr->lock);
		
		/* Wake sleepers; done after releasing thr->lock,
		 * presumably to respect waitq lock ordering — confirm. */
		if (sleeping)
			waitq_interrupt_sleep(thr);
	}
	
	spinlock_unlock(&ta->lock);
	interrupts_restore(ipl);
	
	/* thread_create() may have returned NULL; only schedule the
	 * cleanup thread if it actually exists. */
	if (t)
		thread_ready(t);

	return 0;
}
378
/** Print task list
 *
 * Walks the leaf nodes of tasks_btree and prints one line per task:
 * ID, name, context, addresses, accumulated cycles, refcount, active
 * IPC calls and connected phones. Interrupts are disabled and both
 * tasks_lock and each task's lock are taken while printing.
 */
void task_print_list(void)
{
	link_t *cur;
	ipl_t ipl;
	
	/* Messing with thread structures, avoid deadlock */
	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);

	printf("taskid name ctx address as cycles threads "
	    "calls callee\n");
	printf("------ ---------- --- ---------- ---------- ---------- ------- " "------ ------>\n");

	/* Iterate over all leaf nodes of the B+tree via its leaf list. */
	for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head;
	    cur = cur->next) {
		btree_node_t *node;
		int i;
		
		node = list_get_instance(cur, btree_node_t, leaf_link);
		for (i = 0; i < node->keys; i++) {
			task_t *t;
			int j;
			
			t = (task_t *) node->value[i];
		
			spinlock_lock(&t->lock);
			
			/* Scale the cycle count to a human-readable
			 * value with a unit suffix. */
			uint64_t cycles;
			char suffix;
			order(task_get_accounting(t), &cycles, &suffix);
			
			printf("%-6lld %-10s %-3ld %#10zx %#10zx %9llu%c %7zd "
			    "%6zd", t->taskid, t->name, t->context, t, t->as,
			    cycles, suffix, t->refcount,
			    atomic_get(&t->active_calls));
			for (j = 0; j < IPC_MAX_PHONES; j++) {
				if (t->phones[j].callee)
					printf(" %zd:%#zx", j,
					    t->phones[j].callee);
			}
			printf("\n");
			
			spinlock_unlock(&t->lock);
		}
	}

	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);
}
[7509ddc]429
/** Kernel thread used to cleanup the task after it is killed.
 *
 * Repeatedly scans the task's thread list and joins every thread except
 * itself and the task's main thread (that one is handled by ktaskgc).
 * When only this thread remains, it tears down the task's IPC and futex
 * state. Runs in the context of the dying task (uses TASK).
 *
 * @param arg Unused.
 */
void ktaskclnp(void *arg)
{
	ipl_t ipl;
	thread_t *t = NULL, *main_thread;
	link_t *cur;
	bool again;

	thread_detach(THREAD);

loop:
	ipl = interrupts_disable();
	spinlock_lock(&TASK->lock);
	
	main_thread = TASK->main_thread;
	
	/*
	 * Find a thread to join.
	 */
	again = false;
	for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
		t = list_get_instance(cur, thread_t, th_link);

		spinlock_lock(&t->lock);
		if (t == THREAD) {
			/* Never join ourselves. */
			spinlock_unlock(&t->lock);
			continue;
		} else if (t == main_thread) {
			/* The main thread belongs to ktaskgc. */
			spinlock_unlock(&t->lock);
			continue;
		} else if (t->join_type != None) {
			/* Someone else already claimed this thread;
			 * come back later. */
			spinlock_unlock(&t->lock);
			again = true;
			continue;
		} else {
			/* Claim the thread for ourselves before
			 * dropping its lock. */
			t->join_type = TaskClnp;
			spinlock_unlock(&t->lock);
			again = false;
			break;
		}
	}
	
	spinlock_unlock(&TASK->lock);
	interrupts_restore(ipl);
	
	if (again) {
		/*
		 * Other cleanup (e.g. ktaskgc) is in progress.
		 */
		scheduler();
		goto loop;
	}
	
	if (t != THREAD) {
		ASSERT(t != main_thread);	/* uninit is joined and detached
						 * in ktaskgc */
		thread_join(t);
		thread_detach(t);
		goto loop;	/* go for another thread */
	}
	
	/*
	 * Now there are no other threads in this task
	 * and no new threads can be created.
	 */

	ipc_cleanup();
	futex_cleanup();
	klog_printf("Cleanup of task %lld completed.", TASK->taskid);
}
[b91bb65]500
/** Kernel thread used to kill the userspace task when its main thread exits.
 *
 * This thread waits until the main userspace thread (i.e. uninit) exits.
 * When this happens, the task is killed. In the meantime, exited threads
 * are garbage collected.
 *
 * @param arg Pointer to the thread structure of the task's main thread.
 */
void ktaskgc(void *arg)
{
	thread_t *t = (thread_t *) arg;
loop:
	/*
	 * Userspace threads cannot detach themselves,
	 * therefore the thread pointer is guaranteed to be valid.
	 */
	if (thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE) ==
	    ESYNCH_TIMEOUT) {	/* sleep uninterruptibly here! */
		ipl_t ipl;
		link_t *cur;
		thread_t *thr = NULL;
		
		/*
		 * The join timed out. Try to do some garbage collection of
		 * Undead threads.
		 */
more_gc:
		ipl = interrupts_disable();
		spinlock_lock(&TASK->lock);
		
		/* Pick one unclaimed Undead thread (other than the main
		 * thread) and mark it as ours before unlocking. */
		for (cur = TASK->th_head.next; cur != &TASK->th_head;
		    cur = cur->next) {
			thr = list_get_instance(cur, thread_t, th_link);
			spinlock_lock(&thr->lock);
			if (thr != t && thr->state == Undead &&
			    thr->join_type == None) {
				thr->join_type = TaskGC;
				spinlock_unlock(&thr->lock);
				break;
			}
			spinlock_unlock(&thr->lock);
			/* Reset so a full scan with no match leaves
			 * thr == NULL. */
			thr = NULL;
		}
		spinlock_unlock(&TASK->lock);
		interrupts_restore(ipl);
		
		if (thr) {
			/* Reap the claimed thread, yield, then look
			 * for more garbage. */
			thread_join(thr);
			thread_detach(thr);
			scheduler();
			goto more_gc;
		}
		
		goto loop;
	}
	/* The main thread has exited: detach it and kill the whole task. */
	thread_detach(t);
	task_kill(TASK->taskid);
}
[b45c443]559
[cc73a8a1]560/** @}
[b45c443]561 */
Note: See TracBrowser for help on using the repository browser.