source: mainline/kernel/generic/src/proc/task.c@ 121966e

Branches containing this file: mainline, lfn, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Last change on this file since 121966e was 121966e, checked in by Jakub Jermar <jakub@…>, 16 years ago

task_done() should not kill tasks one by one and wait until each dies.
Instead, the kill needs to be signalled to all tasks and repeated as
long as there are any user tasks. Otherwise completion of a task kill
may depend on death of another task with a higher ID due to unanswered
calls.

  • Property mode set to 100644
File size: 11.2 KB
Line 
1/*
2 * Copyright (c) 2001-2004 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup genericproc
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Task management.
36 */
37
38#include <proc/thread.h>
39#include <proc/task.h>
40#include <mm/as.h>
41#include <mm/slab.h>
42#include <atomic.h>
43#include <synch/spinlock.h>
44#include <synch/waitq.h>
45#include <arch.h>
46#include <arch/barrier.h>
47#include <adt/avl.h>
48#include <adt/btree.h>
49#include <adt/list.h>
50#include <ipc/ipc.h>
51#include <ipc/ipcrsc.h>
52#include <print.h>
53#include <errno.h>
54#include <func.h>
55#include <string.h>
56#include <syscall/copy.h>
57#include <macros.h>
58#include <ipc/event.h>
59
/** Spinlock protecting the tasks_tree AVL tree. */
SPINLOCK_INITIALIZE(tasks_lock);

/** AVL tree of active tasks.
 *
 * The task is guaranteed to exist after it was found in the tasks_tree as
 * long as:
 *
 * @li the tasks_lock is held,
 * @li the task's lock is held when task's lock is acquired before releasing
 *     tasks_lock or
 * @li the task's refcount is greater than 0
 *
 */
avltree_t tasks_tree;

/** Monotonic counter used to assign unique task IDs; incremented under
 * tasks_lock in task_create(). */
static task_id_t task_counter = 0;

/* Forward declarations. */
static void task_kill_internal(task_t *);
79
80/** Initialize kernel tasks support. */
81void task_init(void)
82{
83 TASK = NULL;
84 avltree_create(&tasks_tree);
85}
86
87/*
88 * The idea behind this walker is to kill and count all tasks different from
89 * TASK.
90 */
91static bool task_done_walker(avltree_node_t *node, void *arg)
92{
93 task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
94 unsigned *cnt = (unsigned *) arg;
95
96 if (t != TASK) {
97 (*cnt)++;
98#ifdef CONFIG_DEBUG
99 printf("[%"PRIu64"] ", t->taskid);
100#endif
101 task_kill_internal(t);
102 }
103
104 return true; /* continue the walk */
105}
106
107/** Kill all tasks except the current task. */
108void task_done(void)
109{
110 unsigned tasks_left;
111
112 do { /* Repeat until there are any tasks except TASK */
113 /* Messing with task structures, avoid deadlock */
114#ifdef CONFIG_DEBUG
115 printf("Killing tasks... ");
116#endif
117 ipl_t ipl = interrupts_disable();
118 spinlock_lock(&tasks_lock);
119 tasks_left = 0;
120 avltree_walk(&tasks_tree, task_done_walker, &tasks_left);
121 spinlock_unlock(&tasks_lock);
122 interrupts_restore(ipl);
123 thread_sleep(1);
124#ifdef CONFIG_DEBUG
125 printf("\n");
126#endif
127 } while (tasks_left);
128}
129
/** Create new task with no threads.
 *
 * Allocates and initializes a task structure, connects its phone 0 to
 * ipc_phone_0's task when context rules permit, assigns a unique task ID
 * and publishes the task in the global tasks_tree.
 *
 * @param as Task's address space.
 * @param name Symbolic name (a copy is made).
 *
 * @return New task's structure.
 *
 */
task_t *task_create(as_t *as, char *name)
{
	ipl_t ipl;
	task_t *ta;
	int i;

	/* Flags 0 — presumably a blocking allocation that cannot fail;
	   the result is not NULL-checked anywhere below. TODO confirm. */
	ta = (task_t *) malloc(sizeof(task_t), 0);

	task_create_arch(ta);

	spinlock_initialize(&ta->lock, "task_ta_lock");
	list_initialize(&ta->th_head);
	ta->as = as;

	/* Copy the name and force NUL-termination of the copy. */
	memcpy(ta->name, name, TASK_NAME_BUFLEN);
	ta->name[TASK_NAME_BUFLEN - 1] = 0;

	atomic_set(&ta->refcount, 0);
	atomic_set(&ta->lifecount, 0);
	ta->context = CONTEXT;

	ta->capabilities = 0;
	ta->cycles = 0;

#ifdef CONFIG_UDEBUG
	/* Init debugging stuff */
	udebug_task_init(&ta->udebug);

	/* Init kbox stuff */
	ipc_answerbox_init(&ta->kb.box, ta);
	ta->kb.thread = NULL;
	mutex_initialize(&ta->kb.cleanup_lock, MUTEX_PASSIVE);
	ta->kb.finished = false;
#endif

	ipc_answerbox_init(&ta->answerbox, ta);
	for (i = 0; i < IPC_MAX_PHONES; i++)
		ipc_phone_init(&ta->phones[i]);
	/* Connect phone 0 to ipc_phone_0's task if the contexts allow it. */
	if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
	    ta->context)))
		ipc_phone_connect(&ta->phones[0], ipc_phone_0);
	atomic_set(&ta->active_calls, 0);

	mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE);
	btree_create(&ta->futexes);

	ipl = interrupts_disable();

	/*
	 * Increment address space reference count.
	 */
	atomic_inc(&as->refcount);

	/* Assign a unique ID and publish the task in the global tree. */
	spinlock_lock(&tasks_lock);
	ta->taskid = ++task_counter;
	avltree_node_initialize(&ta->tasks_tree_node);
	ta->tasks_tree_node.key = ta->taskid;
	avltree_insert(&tasks_tree, &ta->tasks_tree_node);
	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);

	/*
	 * Notify about task creation.
	 */
	if (event_is_subscribed(EVENT_WAIT))
		event_notify_3(EVENT_WAIT, TASK_CREATE, LOWER32(ta->taskid),
		    UPPER32(ta->taskid));

	return ta;
}
208
/** Destroy task.
 *
 * Unpublishes the task, tears down its architecture-specific and
 * dynamically allocated state, drops the address space reference and
 * frees the structure.
 *
 * @param t Task to be destroyed.
 */
void task_destroy(task_t *t)
{
	/*
	 * Remove the task from the global AVL tree of tasks.
	 */
	spinlock_lock(&tasks_lock);
	avltree_delete(&tasks_tree, &t->tasks_tree_node);
	spinlock_unlock(&tasks_lock);

	/*
	 * Perform architecture specific task destruction.
	 */
	task_destroy_arch(t);

	/*
	 * Free up dynamically allocated state.
	 */
	btree_destroy(&t->futexes);

	/*
	 * Drop our reference to the address space.
	 */
	if (atomic_predec(&t->as->refcount) == 0)
		as_destroy(t->as);

	/*
	 * Notify about task destruction.
	 */
	if (event_is_subscribed(EVENT_WAIT))
		event_notify_3(EVENT_WAIT, TASK_DESTROY, LOWER32(t->taskid),
		    UPPER32(t->taskid));

	free(t);
	/* NOTE(review): unconditionally clears the current-task pointer —
	   presumably task_destroy() only ever runs for the current task;
	   verify against callers. */
	TASK = NULL;
}
248
249/** Syscall for reading task ID from userspace.
250 *
251 * @param uspace_task_id userspace address of 8-byte buffer
252 * where to store current task ID.
253 *
254 * @return Zero on success or an error code from @ref errno.h.
255 */
256unative_t sys_task_get_id(task_id_t *uspace_task_id)
257{
258 /*
259 * No need to acquire lock on TASK because taskid remains constant for
260 * the lifespan of the task.
261 */
262 return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
263 sizeof(TASK->taskid));
264}
265
266/** Syscall for setting the task name.
267 *
268 * The name simplifies identifying the task in the task list.
269 *
270 * @param name The new name for the task. (typically the same
271 * as the command used to execute it).
272 *
273 * @return 0 on success or an error code from @ref errno.h.
274 */
275unative_t sys_task_set_name(const char *uspace_name, size_t name_len)
276{
277 int rc;
278 char namebuf[TASK_NAME_BUFLEN];
279
280 /* Cap length of name and copy it from userspace. */
281
282 if (name_len > TASK_NAME_BUFLEN - 1)
283 name_len = TASK_NAME_BUFLEN - 1;
284
285 rc = copy_from_uspace(namebuf, uspace_name, name_len);
286 if (rc != 0)
287 return (unative_t) rc;
288
289 namebuf[name_len] = '\0';
290 str_cpy(TASK->name, TASK_NAME_BUFLEN, namebuf);
291
292 return EOK;
293}
294
295/** Find task structure corresponding to task ID.
296 *
297 * The tasks_lock must be already held by the caller of this function and
298 * interrupts must be disabled.
299 *
300 * @param id Task ID.
301 *
302 * @return Task structure address or NULL if there is no such task
303 * ID.
304 */
305task_t *task_find_by_id(task_id_t id) { avltree_node_t *node;
306
307 node = avltree_search(&tasks_tree, (avltree_key_t) id);
308
309 if (node)
310 return avltree_get_instance(node, task_t, tasks_tree_node);
311 return NULL;
312}
313
/** Get accounting data of given task.
 *
 * Note that task lock of 't' must be already held and interrupts must be
 * already disabled.
 *
 * @param t Pointer to the task.
 *
 * @return Number of cycles used by the task and all its threads
 *         so far.
 */
uint64_t task_get_accounting(task_t *t)
{
	/* Accumulated value of task */
	uint64_t ret = t->cycles;

	/* Add the current cycle counters of the task's live threads. */
	link_t *cur;
	for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
		thread_t *thr = list_get_instance(cur, thread_t, th_link);

		spinlock_lock(&thr->lock);
		/* Process only counted threads */
		if (!thr->uncounted) {
			if (thr == THREAD) {
				/* Update accounting of current thread */
				thread_update_accounting();
			}
			ret += thr->cycles;
		}
		spinlock_unlock(&thr->lock);
	}

	return ret;
}
348
/** Signal the kill to all threads of a task.
 *
 * Marks every thread of @a ta as interrupted and wakes up those found
 * sleeping so they can observe the flag.  Does not wait for the threads
 * to actually exit.  The caller must hold tasks_lock (see task_kill()
 * and task_done_walker()).
 *
 * @param ta Task whose threads are to be interrupted.
 */
static void task_kill_internal(task_t *ta)
{
	link_t *cur;

	/*
	 * Interrupt all threads.
	 */
	spinlock_lock(&ta->lock);
	for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
		thread_t *thr;
		bool sleeping = false;

		thr = list_get_instance(cur, thread_t, th_link);

		spinlock_lock(&thr->lock);
		thr->interrupted = true;
		if (thr->state == Sleeping)
			sleeping = true;
		spinlock_unlock(&thr->lock);

		/* Wake the sleeper after dropping its lock. */
		if (sleeping)
			waitq_interrupt_sleep(thr);
	}
	spinlock_unlock(&ta->lock);
}
374
375/** Kill task.
376 *
377 * This function is idempotent.
378 * It signals all the task's threads to bail it out.
379 *
380 * @param id ID of the task to be killed.
381 *
382 * @return Zero on success or an error code from errno.h.
383 */
384int task_kill(task_id_t id)
385{
386 ipl_t ipl;
387 task_t *ta;
388
389 if (id == 1)
390 return EPERM;
391
392 ipl = interrupts_disable();
393 spinlock_lock(&tasks_lock);
394 if (!(ta = task_find_by_id(id))) {
395 spinlock_unlock(&tasks_lock);
396 interrupts_restore(ipl);
397 return ENOENT;
398 }
399 task_kill_internal(ta);
400 spinlock_unlock(&tasks_lock);
401 interrupts_restore(ipl);
402 return 0;
403}
404
/* AVL walker printing one task-list line (ID, name, context, addresses,
 * cycles, reference/call counts and connected phones) for each task. */
static bool task_print_walker(avltree_node_t *node, void *arg)
{
	task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
	int j;

	spinlock_lock(&t->lock);

	/* Scale the raw cycle count down to a value with a unit suffix. */
	uint64_t cycles;
	char suffix;
	order(task_get_accounting(t), &cycles, &suffix);

#ifdef __32_BITS__
	printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %10p %10p %9" PRIu64
	    "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
	    suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
#endif

#ifdef __64_BITS__
	/* Same line, but 64-bit pointers need wider columns. */
	printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %18p %18p %9" PRIu64
	    "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
	    suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
#endif

	/* Append one "index:callee" pair per connected phone. */
	for (j = 0; j < IPC_MAX_PHONES; j++) {
		if (t->phones[j].callee)
			printf(" %d:%p", j, t->phones[j].callee);
	}
	printf("\n");

	spinlock_unlock(&t->lock);
	return true;
}
437
/** Print task list
 *
 * Prints a header matching task_print_walker()'s columns, then one
 * line per task.  Takes tasks_lock with interrupts disabled for the
 * whole walk.
 */
void task_print_list(void)
{
	ipl_t ipl;

	/* Messing with task structures, avoid deadlock */
	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);

	/* NOTE(review): column-alignment spaces inside the header strings
	   may have been collapsed by the source extraction — verify the
	   headers line up with task_print_walker()'s output. */
#ifdef __32_BITS__
	printf("taskid name ctx address as "
	    "cycles threads calls callee\n");
	printf("------ ------------ --- ---------- ---------- "
	    "---------- ------- ------ ------>\n");
#endif

#ifdef __64_BITS__
	printf("taskid name ctx address as "
	    "cycles threads calls callee\n");
	printf("------ ------------ --- ------------------ ------------------ "
	    "---------- ------- ------ ------>\n");
#endif

	avltree_walk(&tasks_tree, task_print_walker, NULL);

	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);
}
466
467/** @}
468 */
Note: See TracBrowser for help on using the repository browser.