source: mainline/kernel/generic/src/proc/task.c@ 5ab1648

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 5ab1648 was 5ab1648, checked in by Martin Decky <martin@…>, 16 years ago

remove obsolete TASK_WAIT notification

  • Property mode set to 100644
File size: 10.8 KB
Line 
1/*
2 * Copyright (c) 2001-2004 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup genericproc
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Task management.
36 */
37
38#include <proc/thread.h>
39#include <proc/task.h>
40#include <mm/as.h>
41#include <mm/slab.h>
42#include <atomic.h>
43#include <synch/spinlock.h>
44#include <synch/waitq.h>
45#include <arch.h>
46#include <arch/barrier.h>
47#include <adt/avl.h>
48#include <adt/btree.h>
49#include <adt/list.h>
50#include <ipc/ipc.h>
51#include <ipc/ipcrsc.h>
52#include <print.h>
53#include <errno.h>
54#include <func.h>
55#include <string.h>
56#include <syscall/copy.h>
57#include <macros.h>
58#include <ipc/event.h>
59
/** Spinlock protecting the tasks_tree AVL tree. */
SPINLOCK_INITIALIZE(tasks_lock);

/** AVL tree of active tasks.
 *
 * The task is guaranteed to exist after it was found in the tasks_tree as
 * long as:
 * @li the tasks_lock is held,
 * @li the task's lock is held when task's lock is acquired before releasing
 *     tasks_lock or
 * @li the task's refcount is greater than 0
 *
 */
avltree_t tasks_tree;

/** Monotonic counter used to mint unique task IDs.
 *
 * Incremented only in task_create() while tasks_lock is held.
 */
static task_id_t task_counter = 0;

/* Forward declarations. */
static void task_kill_internal(task_t *);
79
80/** Initialize kernel tasks support. */
81void task_init(void)
82{
83 TASK = NULL;
84 avltree_create(&tasks_tree);
85}
86
87/*
88 * The idea behind this walker is to kill and count all tasks different from
89 * TASK.
90 */
91static bool task_done_walker(avltree_node_t *node, void *arg)
92{
93 task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
94 unsigned *cnt = (unsigned *) arg;
95
96 if (t != TASK) {
97 (*cnt)++;
98#ifdef CONFIG_DEBUG
99 printf("[%"PRIu64"] ", t->taskid);
100#endif
101 task_kill_internal(t);
102 }
103
104 return true; /* continue the walk */
105}
106
107/** Kill all tasks except the current task. */
108void task_done(void)
109{
110 unsigned tasks_left;
111
112 do { /* Repeat until there are any tasks except TASK */
113 /* Messing with task structures, avoid deadlock */
114#ifdef CONFIG_DEBUG
115 printf("Killing tasks... ");
116#endif
117 ipl_t ipl = interrupts_disable();
118 spinlock_lock(&tasks_lock);
119 tasks_left = 0;
120 avltree_walk(&tasks_tree, task_done_walker, &tasks_left);
121 spinlock_unlock(&tasks_lock);
122 interrupts_restore(ipl);
123 thread_sleep(1);
124#ifdef CONFIG_DEBUG
125 printf("\n");
126#endif
127 } while (tasks_left);
128}
129
/** Create new task with no threads.
 *
 * Allocates and initializes a task structure, connects its phone 0 to
 * the first task (if compatible), attaches it to the supplied address
 * space and registers it in the global task tree under a fresh ID.
 *
 * @param as   Task's address space.
 * @param name Symbolic name (a copy is made).
 *
 * @return New task's structure.
 *
 */
task_t *task_create(as_t *as, char *name)
{
	ipl_t ipl;
	task_t *ta;
	int i;

	/* Flags 0 — presumably a blocking allocation; verify against mm/slab. */
	ta = (task_t *) malloc(sizeof(task_t), 0);

	/* Architecture-specific part of the task structure. */
	task_create_arch(ta);

	spinlock_initialize(&ta->lock, "task_ta_lock");
	list_initialize(&ta->th_head);
	ta->as = as;

	/* Copy the name and force NUL termination at the buffer end. */
	memcpy(ta->name, name, TASK_NAME_BUFLEN);
	ta->name[TASK_NAME_BUFLEN - 1] = 0;

	/* No references and no threads yet. */
	atomic_set(&ta->refcount, 0);
	atomic_set(&ta->lifecount, 0);
	ta->context = CONTEXT;

	ta->capabilities = 0;
	ta->cycles = 0;

#ifdef CONFIG_UDEBUG
	/* Init debugging stuff */
	udebug_task_init(&ta->udebug);

	/* Init kbox stuff */
	ipc_answerbox_init(&ta->kb.box, ta);
	ta->kb.thread = NULL;
	mutex_initialize(&ta->kb.cleanup_lock, MUTEX_PASSIVE);
	ta->kb.finished = false;
#endif

	/* IPC: answerbox plus one phone slot per possible connection. */
	ipc_answerbox_init(&ta->answerbox, ta);
	for (i = 0; i < IPC_MAX_PHONES; i++)
		ipc_phone_init(&ta->phones[i]);
	/* Connect phone 0 to the first task when contexts are compatible. */
	if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
	    ta->context)))
		ipc_phone_connect(&ta->phones[0], ipc_phone_0);
	atomic_set(&ta->active_calls, 0);

	mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE);
	btree_create(&ta->futexes);

	ipl = interrupts_disable();

	/*
	 * Increment address space reference count.
	 */
	atomic_inc(&as->refcount);

	/* Assign a unique ID and publish the task in the global tree. */
	spinlock_lock(&tasks_lock);
	ta->taskid = ++task_counter;
	avltree_node_initialize(&ta->tasks_tree_node);
	ta->tasks_tree_node.key = ta->taskid;
	avltree_insert(&tasks_tree, &ta->tasks_tree_node);
	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);

	return ta;
}
201
/** Destroy task.
 *
 * Unregisters the task, tears down its architecture-specific and
 * dynamically allocated state, drops its address space reference and
 * frees the structure itself.
 *
 * @param t Task to be destroyed.
 */
void task_destroy(task_t *t)
{
	/*
	 * Remove the task from the tasks_tree AVL tree.
	 */
	spinlock_lock(&tasks_lock);
	avltree_delete(&tasks_tree, &t->tasks_tree_node);
	spinlock_unlock(&tasks_lock);

	/*
	 * Perform architecture specific task destruction.
	 */
	task_destroy_arch(t);

	/*
	 * Free up dynamically allocated state.
	 */
	btree_destroy(&t->futexes);

	/*
	 * Drop our reference to the address space.
	 */
	if (atomic_predec(&t->as->refcount) == 0)
		as_destroy(t->as);

	free(t);
	TASK = NULL;
}
234
235/** Syscall for reading task ID from userspace.
236 *
237 * @param uspace_task_id userspace address of 8-byte buffer
238 * where to store current task ID.
239 *
240 * @return Zero on success or an error code from @ref errno.h.
241 */
242unative_t sys_task_get_id(task_id_t *uspace_task_id)
243{
244 /*
245 * No need to acquire lock on TASK because taskid remains constant for
246 * the lifespan of the task.
247 */
248 return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
249 sizeof(TASK->taskid));
250}
251
252/** Syscall for setting the task name.
253 *
254 * The name simplifies identifying the task in the task list.
255 *
256 * @param name The new name for the task. (typically the same
257 * as the command used to execute it).
258 *
259 * @return 0 on success or an error code from @ref errno.h.
260 */
261unative_t sys_task_set_name(const char *uspace_name, size_t name_len)
262{
263 int rc;
264 char namebuf[TASK_NAME_BUFLEN];
265
266 /* Cap length of name and copy it from userspace. */
267
268 if (name_len > TASK_NAME_BUFLEN - 1)
269 name_len = TASK_NAME_BUFLEN - 1;
270
271 rc = copy_from_uspace(namebuf, uspace_name, name_len);
272 if (rc != 0)
273 return (unative_t) rc;
274
275 namebuf[name_len] = '\0';
276 str_cpy(TASK->name, TASK_NAME_BUFLEN, namebuf);
277
278 return EOK;
279}
280
281/** Find task structure corresponding to task ID.
282 *
283 * The tasks_lock must be already held by the caller of this function and
284 * interrupts must be disabled.
285 *
286 * @param id Task ID.
287 *
288 * @return Task structure address or NULL if there is no such task
289 * ID.
290 */
291task_t *task_find_by_id(task_id_t id) { avltree_node_t *node;
292
293 node = avltree_search(&tasks_tree, (avltree_key_t) id);
294
295 if (node)
296 return avltree_get_instance(node, task_t, tasks_tree_node);
297 return NULL;
298}
299
300/** Get accounting data of given task.
301 *
302 * Note that task lock of 't' must be already held and interrupts must be
303 * already disabled.
304 *
305 * @param t Pointer to thread.
306 *
307 * @return Number of cycles used by the task and all its threads
308 * so far.
309 */
310uint64_t task_get_accounting(task_t *t)
311{
312 /* Accumulated value of task */
313 uint64_t ret = t->cycles;
314
315 /* Current values of threads */
316 link_t *cur;
317 for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
318 thread_t *thr = list_get_instance(cur, thread_t, th_link);
319
320 spinlock_lock(&thr->lock);
321 /* Process only counted threads */
322 if (!thr->uncounted) {
323 if (thr == THREAD) {
324 /* Update accounting of current thread */
325 thread_update_accounting();
326 }
327 ret += thr->cycles;
328 }
329 spinlock_unlock(&thr->lock);
330 }
331
332 return ret;
333}
334
/** Interrupt all threads of a task.
 *
 * Marks every thread of @a ta as interrupted and wakes up those that
 * are sleeping. Both call sites in this file (task_done_walker() and
 * task_kill()) hold tasks_lock while calling this.
 *
 * @param ta Task whose threads are to be interrupted.
 */
static void task_kill_internal(task_t *ta)
{
	link_t *cur;

	/*
	 * Interrupt all threads.
	 */
	spinlock_lock(&ta->lock);
	for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
		thread_t *thr;
		bool sleeping = false;

		thr = list_get_instance(cur, thread_t, th_link);

		/* Record the sleep state under the thread lock... */
		spinlock_lock(&thr->lock);
		thr->interrupted = true;
		if (thr->state == Sleeping)
			sleeping = true;
		spinlock_unlock(&thr->lock);

		/* ...but interrupt the sleep after dropping it. */
		if (sleeping)
			waitq_interrupt_sleep(thr);
	}
	spinlock_unlock(&ta->lock);
}
360
361/** Kill task.
362 *
363 * This function is idempotent.
364 * It signals all the task's threads to bail it out.
365 *
366 * @param id ID of the task to be killed.
367 *
368 * @return Zero on success or an error code from errno.h.
369 */
370int task_kill(task_id_t id)
371{
372 ipl_t ipl;
373 task_t *ta;
374
375 if (id == 1)
376 return EPERM;
377
378 ipl = interrupts_disable();
379 spinlock_lock(&tasks_lock);
380 if (!(ta = task_find_by_id(id))) {
381 spinlock_unlock(&tasks_lock);
382 interrupts_restore(ipl);
383 return ENOENT;
384 }
385 task_kill_internal(ta);
386 spinlock_unlock(&tasks_lock);
387 interrupts_restore(ipl);
388 return 0;
389}
390
/** AVL tree walker that prints one line of the task list.
 *
 * Called with tasks_lock held by task_print_list(); takes the task's
 * own lock for the duration of the printout.
 *
 * NOTE(review): the column widths in the format strings may have been
 * collapsed by whitespace-mangling extraction — verify alignment
 * against the headers printed by task_print_list().
 *
 * @param node Tree node of the task to print.
 * @param arg  Unused.
 *
 * @return Always true (print every task).
 */
static bool task_print_walker(avltree_node_t *node, void *arg)
{
	task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
	int j;

	spinlock_lock(&t->lock);

	/* Scale the cycle count to a human-readable magnitude suffix. */
	uint64_t cycles;
	char suffix;
	order(task_get_accounting(t), &cycles, &suffix);

	/* Pointer columns are 10 chars wide on 32-bit, 18 on 64-bit. */
#ifdef __32_BITS__
	printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %10p %10p %9" PRIu64
	    "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
	    suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
#endif

#ifdef __64_BITS__
	printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %18p %18p %9" PRIu64
	    "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
	    suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
#endif

	/* List connected phones as index:callee pairs. */
	for (j = 0; j < IPC_MAX_PHONES; j++) {
		if (t->phones[j].callee)
			printf(" %d:%p", j, t->phones[j].callee);
	}
	printf("\n");

	spinlock_unlock(&t->lock);
	return true;
}
423
/** Print task list
 *
 * Prints a header followed by one line per task (via
 * task_print_walker()) while holding tasks_lock with interrupts
 * disabled.
 *
 * NOTE(review): the header strings appear whitespace-collapsed,
 * likely by extraction — confirm the column spacing matches the
 * separator rows and the walker's field widths.
 */
void task_print_list(void)
{
	ipl_t ipl;

	/* Messing with task structures, avoid deadlock */
	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);

#ifdef __32_BITS__
	printf("taskid name ctx address as "
	    "cycles threads calls callee\n");
	printf("------ ------------ --- ---------- ---------- "
	    "---------- ------- ------ ------>\n");
#endif

#ifdef __64_BITS__
	printf("taskid name ctx address as "
	    "cycles threads calls callee\n");
	printf("------ ------------ --- ------------------ ------------------ "
	    "---------- ------- ------ ------>\n");
#endif

	avltree_walk(&tasks_tree, task_print_walker, NULL);

	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);
}
452
453/** @}
454 */
Note: See TracBrowser for help on using the repository browser.