source: mainline/kernel/generic/src/proc/task.c@ 201abde

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 201abde was 201abde, checked in by Martin Decky <martin@…>, 18 years ago

make thread ID 64 bit (task ID is 64 bit already)
cleanup thread syscalls

  • Property mode set to 100644
File size: 12.3 KB
Line 
1/*
2 * Copyright (c) 2001-2004 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup genericproc
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Task management.
36 */
37
38#include <main/uinit.h>
39#include <proc/thread.h>
40#include <proc/task.h>
41#include <proc/uarg.h>
42#include <mm/as.h>
43#include <mm/slab.h>
44#include <atomic.h>
45#include <synch/spinlock.h>
46#include <synch/waitq.h>
47#include <arch.h>
48#include <panic.h>
49#include <adt/btree.h>
50#include <adt/list.h>
51#include <ipc/ipc.h>
52#include <security/cap.h>
53#include <memstr.h>
54#include <print.h>
55#include <lib/elf.h>
56#include <errno.h>
57#include <func.h>
58#include <syscall/copy.h>
59#include <console/klog.h>
60
/** Number of pages allocated for the stack of a loaded userspace program
 * (can be overridden by architecture-specific configuration). */
#ifndef LOADED_PROG_STACK_PAGES_NO
#define LOADED_PROG_STACK_PAGES_NO 1
#endif

/** Spinlock protecting the tasks_btree B+tree. */
SPINLOCK_INITIALIZE(tasks_lock);

/** B+tree of active tasks.
 *
 * The task is guaranteed to exist after it was found in the tasks_btree as
 * long as:
 * @li the tasks_lock is held,
 * @li the task's lock is held when task's lock is acquired before releasing
 *     tasks_lock or
 * @li the task's refcount is greater than 0
 *
 */
btree_t tasks_btree;

/** Source of unique task IDs; incremented under tasks_lock in task_create(). */
static task_id_t task_counter = 0;

/* Kernel threads: post-kill task cleanup and main-thread garbage collection. */
static void ktaskclnp(void *arg);
static void ktaskgc(void *arg);
84
85/** Initialize tasks
86 *
87 * Initialize kernel tasks support.
88 *
89 */
90void task_init(void)
91{
92 TASK = NULL;
93 btree_create(&tasks_btree);
94}
95
96
/** Create new task
 *
 * Create new task with no threads.
 *
 * @param as Task's address space.
 * @param name Symbolic name (the pointer is stored, not copied).
 *
 * @return New task's structure
 *
 */
task_t *task_create(as_t *as, char *name)
{
	ipl_t ipl;
	task_t *ta;
	int i;
	
	/* NOTE(review): allocation with flags 0 is not NULL-checked below —
	 * presumably the slab allocator may block but not fail with these
	 * flags; confirm against the allocator's contract. */
	ta = (task_t *) malloc(sizeof(task_t), 0);

	/* Initialize the architecture-specific part of the task structure. */
	task_create_arch(ta);

	spinlock_initialize(&ta->lock, "task_ta_lock");
	list_initialize(&ta->th_head);
	ta->as = as;
	ta->name = name;
	ta->main_thread = NULL;
	ta->refcount = 0;
	ta->context = CONTEXT;

	ta->capabilities = 0;
	ta->accept_new_threads = true;
	ta->cycles = 0;
	
	/* IPC setup: answerbox, all phones, and — when the context check
	 * allows it — an initial connection from phone 0 to ipc_phone_0. */
	ipc_answerbox_init(&ta->answerbox);
	for (i = 0; i < IPC_MAX_PHONES; i++)
		ipc_phone_init(&ta->phones[i]);
	if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
	    ta->context)))
		ipc_phone_connect(&ta->phones[0], ipc_phone_0);
	atomic_set(&ta->active_calls, 0);

	mutex_initialize(&ta->futexes_lock);
	btree_create(&ta->futexes);
	
	ipl = interrupts_disable();

	/*
	 * Increment address space reference count.
	 */
	atomic_inc(&as->refcount);

	spinlock_lock(&tasks_lock);

	/* Assign a unique ID and register the task in the global B+tree. */
	ta->taskid = ++task_counter;
	btree_insert(&tasks_btree, (btree_key_t) ta->taskid, (void *) ta, NULL);

	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);

	return ta;
}
157
/** Destroy task.
 *
 * Releases the task's resources: architecture-specific state, the futex
 * B+tree, the reference to its address space (destroying the address
 * space when this was the last reference) and the structure itself.
 *
 * @param t Task to be destroyed.
 */
void task_destroy(task_t *t)
{
	task_destroy_arch(t);
	btree_destroy(&t->futexes);

	/* Drop this task's address space reference; tear the address
	 * space down if nobody else holds one. */
	if (atomic_predec(&t->as->refcount) == 0)
		as_destroy(t->as);
	
	free(t);
	/* NOTE(review): TASK is cleared unconditionally — presumably this
	 * function only ever runs in the context of the dying task itself;
	 * confirm, since destroying a foreign task here would clear the
	 * wrong CPU's current-task pointer. */
	TASK = NULL;
}
173
/** Create new task with 1 thread and run it
 *
 * @param program_addr Address of program executable image.
 * @param name Program name.
 *
 * @return Task of the running program or NULL on error.
 */
task_t * task_run_program(void *program_addr, char *name)
{
	as_t *as;
	as_area_t *a;
	int rc;
	thread_t *t1, *t2;
	task_t *task;
	uspace_arg_t *kernel_uarg;

	as = as_create(0);
	ASSERT(as);

	/* Map the ELF image into the freshly created address space. */
	rc = elf_load((elf_header_t *) program_addr, as);
	if (rc != EE_OK) {
		as_destroy(as);
		return NULL;
	}
	
	/* Arguments handed to uinit(): only entry point and stack top are
	 * meaningful for the main thread; the rest stays NULL. */
	kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
	kernel_uarg->uspace_entry =
	    (void *) ((elf_header_t *) program_addr)->e_entry;
	kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
	kernel_uarg->uspace_thread_function = NULL;
	kernel_uarg->uspace_thread_arg = NULL;
	kernel_uarg->uspace_uarg = NULL;
	
	task = task_create(as, name);
	ASSERT(task);

	/*
	 * Create the data as_area.
	 */
	/* NOTE(review): despite the comment above, this area is created at
	 * USTACK_ADDRESS with LOADED_PROG_STACK_PAGES_NO pages, i.e. it is
	 * the userspace stack; the returned pointer 'a' is never checked. */
	a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
	    LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
	    AS_AREA_ATTR_NONE, &anon_backend, NULL);

	/*
	 * Create the main thread.
	 */
	t1 = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
	    "uinit", false);
	ASSERT(t1);
	
	/*
	 * Create killer thread for the new task.
	 */
	t2 = thread_create(ktaskgc, t1, task, 0, "ktaskgc", true);
	ASSERT(t2);
	/* The garbage collector is made ready before the main thread so it
	 * is already watching t1 when t1 starts running. */
	thread_ready(t2);

	thread_ready(t1);
	
	return task;
}
235
236/** Syscall for reading task ID from userspace.
237 *
238 * @param uspace_task_id Userspace address of 8-byte buffer where to store
239 * current task ID.
240 *
241 * @return 0 on success or an error code from @ref errno.h.
242 */
243unative_t sys_task_get_id(task_id_t *uspace_task_id)
244{
245 /*
246 * No need to acquire lock on TASK because taskid
247 * remains constant for the lifespan of the task.
248 */
249 return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
250 sizeof(TASK->taskid));
251}
252
253/** Find task structure corresponding to task ID.
254 *
255 * The tasks_lock must be already held by the caller of this function
256 * and interrupts must be disabled.
257 *
258 * @param id Task ID.
259 *
260 * @return Task structure address or NULL if there is no such task ID.
261 */
262task_t *task_find_by_id(task_id_t id)
263{
264 btree_node_t *leaf;
265
266 return (task_t *) btree_search(&tasks_btree, (btree_key_t) id, &leaf);
267}
268
269/** Get accounting data of given task.
270 *
271 * Note that task lock of 't' must be already held and
272 * interrupts must be already disabled.
273 *
274 * @param t Pointer to thread.
275 *
276 */
277uint64_t task_get_accounting(task_t *t)
278{
279 /* Accumulated value of task */
280 uint64_t ret = t->cycles;
281
282 /* Current values of threads */
283 link_t *cur;
284 for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
285 thread_t *thr = list_get_instance(cur, thread_t, th_link);
286
287 spinlock_lock(&thr->lock);
288 /* Process only counted threads */
289 if (!thr->uncounted) {
290 if (thr == THREAD) {
291 /* Update accounting of current thread */
292 thread_update_accounting();
293 }
294 ret += thr->cycles;
295 }
296 spinlock_unlock(&thr->lock);
297 }
298
299 return ret;
300}
301
/** Kill task.
 *
 * Unregisters the task, spawns a ktaskclnp cleanup thread inside it and
 * interrupts all of its other threads.
 *
 * @param id ID of the task to be killed.
 *
 * @return 0 on success or an error code from errno.h
 */
int task_kill(task_id_t id)
{
	ipl_t ipl;
	task_t *ta;
	thread_t *t;
	link_t *cur;

	/* Task 1 (presumably the initial/kernel task) may not be killed. */
	if (id == 1)
		return EPERM;
	
	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);

	if (!(ta = task_find_by_id(id))) {
		spinlock_unlock(&tasks_lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	/* Pin the task with a temporary reference so it survives being
	 * removed from the B+tree below. */
	spinlock_lock(&ta->lock);
	ta->refcount++;
	spinlock_unlock(&ta->lock);

	btree_remove(&tasks_btree, ta->taskid, NULL);
	spinlock_unlock(&tasks_lock);
	
	/* Spawn the cleanup thread inside the dying task (may fail and
	 * return NULL; see the 'if (t)' check at the end). */
	t = thread_create(ktaskclnp, NULL, ta, 0, "ktaskclnp", true);
	
	spinlock_lock(&ta->lock);
	ta->accept_new_threads = false;
	ta->refcount--;

	/*
	 * Interrupt all threads except ktaskclnp.
	 */
	for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
		thread_t *thr;
		bool sleeping = false;
		
		thr = list_get_instance(cur, thread_t, th_link);
		if (thr == t)
			continue;
		
		spinlock_lock(&thr->lock);
		thr->interrupted = true;
		if (thr->state == Sleeping)
			sleeping = true;
		spinlock_unlock(&thr->lock);
		
		/* Sleeping threads will not notice 'interrupted' on their
		 * own; wake them out of their wait queue explicitly. */
		if (sleeping)
			waitq_interrupt_sleep(thr);
	}
	
	spinlock_unlock(&ta->lock);
	interrupts_restore(ipl);
	
	/* Only schedule the cleanup thread once all other threads have
	 * been marked interrupted. */
	if (t)
		thread_ready(t);

	return 0;
}
369
/** Print task list
 *
 * Walks the leaf nodes of tasks_btree and prints one row per task:
 * ID, name, context, task and address-space pointers, humanized cycle
 * count, refcount, active IPC calls and connected phone callees.
 */
void task_print_list(void)
{
	link_t *cur;
	ipl_t ipl;
	
	/* Messing with thread structures, avoid deadlock */
	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);

	printf("taskid name ctx address as cycles threads "
	    "calls callee\n");
	printf("------ ---------- --- ---------- ---------- ---------- ------- " "------ ------>\n");

	/* Iterate directly over the B+tree's linked list of leaf nodes. */
	for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head;
	    cur = cur->next) {
		btree_node_t *node;
		unsigned int i;
		
		node = list_get_instance(cur, btree_node_t, leaf_link);
		for (i = 0; i < node->keys; i++) {
			task_t *t;
			int j;

			t = (task_t *) node->value[i];
		
			spinlock_lock(&t->lock);
			
			/* Humanize the cycle count (value + unit suffix). */
			uint64_t cycles;
			char suffix;
			order(task_get_accounting(t), &cycles, &suffix);
			
			/* NOTE(review): pointers t and t->as are printed via
			 * %zx — assumes pointers fit size_t here; verify on
			 * all supported architectures. */
			printf("%-6llu %-10s %-3ld %#10zx %#10zx %9llu%c %7zd "
			    "%6zd", t->taskid, t->name, t->context, t, t->as,
			    cycles, suffix, t->refcount,
			    atomic_get(&t->active_calls));
			for (j = 0; j < IPC_MAX_PHONES; j++) {
				if (t->phones[j].callee)
					printf(" %zd:%#zx", j,
					    t->phones[j].callee);
			}
			printf("\n");
			
			spinlock_unlock(&t->lock);
		}
	}

	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);
}
420
/** Kernel thread used to cleanup the task after it is killed.
 *
 * Runs as a thread of the dying task (TASK). Repeatedly claims, joins
 * and detaches the task's remaining threads — skipping itself and the
 * main thread, which is handled by ktaskgc — and once no other thread
 * is left, tears down the task's IPC and futex state.
 *
 * @param arg Unused.
 */
void ktaskclnp(void *arg)
{
	ipl_t ipl;
	thread_t *t = NULL, *main_thread;
	link_t *cur;
	bool again;

	thread_detach(THREAD);

loop:
	ipl = interrupts_disable();
	spinlock_lock(&TASK->lock);

	main_thread = TASK->main_thread;
	
	/*
	 * Find a thread to join.
	 */
	again = false;
	for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
		t = list_get_instance(cur, thread_t, th_link);

		spinlock_lock(&t->lock);
		if (t == THREAD) {
			/* Never join ourselves. */
			spinlock_unlock(&t->lock);
			continue;
		} else if (t == main_thread) {
			/* The main thread is ktaskgc's responsibility. */
			spinlock_unlock(&t->lock);
			continue;
		} else if (t->join_type != None) {
			/* Someone else (e.g. ktaskgc) is joining this thread
			 * already; remember to retry later. */
			spinlock_unlock(&t->lock);
			again = true;
			continue;
		} else {
			/* Claim the thread for cleanup and stop scanning. */
			t->join_type = TaskClnp;
			spinlock_unlock(&t->lock);
			again = false;
			break;
		}
	}
	
	spinlock_unlock(&TASK->lock);
	interrupts_restore(ipl);
	
	if (again) {
		/*
		 * Other cleanup (e.g. ktaskgc) is in progress.
		 */
		scheduler();
		goto loop;
	}
	
	/* NOTE(review): when the scan finds nothing to claim, t holds the
	 * last list entry examined; the termination test below relies on
	 * that entry being THREAD once only this thread remains — verify
	 * against the thread list ordering guarantees. */
	if (t != THREAD) {
		ASSERT(t != main_thread);	/* uninit is joined and detached
						 * in ktaskgc */
		thread_join(t);
		thread_detach(t);
		goto loop;	/* go for another thread */
	}
	
	/*
	 * Now there are no other threads in this task
	 * and no new threads can be created.
	 */
	
	ipc_cleanup();
	futex_cleanup();
	klog_printf("Cleanup of task %llu completed.", TASK->taskid);
}
491
/** Kernel thread used to kill the userspace task when its main thread exits.
 *
 * This thread waits until the main userspace thread (i.e. uinit) exits.
 * When this happens, the task is killed. In the meantime, exited threads
 * are garbage collected.
 *
 * @param arg Pointer to the thread structure of the task's main thread.
 */
void ktaskgc(void *arg)
{
	thread_t *t = (thread_t *) arg;
loop:
	/*
	 * Userspace threads cannot detach themselves,
	 * therefore the thread pointer is guaranteed to be valid.
	 */
	if (thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE) ==
	    ESYNCH_TIMEOUT) {	/* sleep uninterruptibly here! */
		ipl_t ipl;
		link_t *cur;
		thread_t *thr = NULL;
		
		/*
		 * The join timed out. Try to do some garbage collection of
		 * Undead threads.
		 */
more_gc:
		ipl = interrupts_disable();
		spinlock_lock(&TASK->lock);
		
		/* Claim one Undead thread (other than the main thread) that
		 * nobody is joining yet; 'thr' stays NULL if none exists. */
		for (cur = TASK->th_head.next; cur != &TASK->th_head;
		    cur = cur->next) {
			thr = list_get_instance(cur, thread_t, th_link);
			spinlock_lock(&thr->lock);
			if (thr != t && thr->state == Undead &&
			    thr->join_type == None) {
				thr->join_type = TaskGC;
				spinlock_unlock(&thr->lock);
				break;
			}
			spinlock_unlock(&thr->lock);
			thr = NULL;
		}
		spinlock_unlock(&TASK->lock);
		interrupts_restore(ipl);
		
		if (thr) {
			/* Reap the claimed thread, yield the CPU and look
			 * for further Undead threads. */
			thread_join(thr);
			thread_detach(thr);
			scheduler();
			goto more_gc;
		}
		
		goto loop;
	}
	/* The main thread has exited: release it and kill the whole task. */
	thread_detach(t);
	task_kill(TASK->taskid);
}
550
551/** @}
552 */
Note: See TracBrowser for help on using the repository browser.