source: mainline/kernel/generic/src/proc/task.c@ 08a19ba

Last change on this file was 08a19ba, checked in by Jakub Jermar <jakub@…>, 17 years ago

Support for active mutexes. Active mutexes implement busy waiting, pretty much
in the same way as spinlocks, but can be passed to condition variables, which is
the motivation for this enhancement.

/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Task management.
 */

#include <main/uinit.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/uarg.h>
#include <mm/as.h>
#include <mm/slab.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <arch.h>
#include <arch/barrier.h>
#include <panic.h>
#include <adt/avl.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <ipc/ipc.h>
#include <security/cap.h>
#include <memstr.h>
#include <print.h>
#include <lib/elf.h>
#include <errno.h>
#include <func.h>
#include <syscall/copy.h>

#ifndef LOADED_PROG_STACK_PAGES_NO
#define LOADED_PROG_STACK_PAGES_NO 1
#endif

/** Spinlock protecting the tasks_tree AVL tree. */
SPINLOCK_INITIALIZE(tasks_lock);

/** AVL tree of active tasks.
 *
 * A task found in the tasks_tree is guaranteed to exist as long as:
 * @li the tasks_lock is held,
 * @li the task's lock is held, provided it was acquired before
 *     releasing the tasks_lock, or
 * @li the task's refcount is greater than 0.
 *
 */
avltree_t tasks_tree;
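
/*
 * Illustrative sketch (not part of the original file): pinning a task via
 * its reference count, per the third guarantee above. The helper name
 * task_pin_example() is made up for the example; the lookup pattern
 * mirrors the one used by task_kill() below.
 */
#if 0
static task_t *task_pin_example(task_id_t id)
{
	ipl_t ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);

	task_t *t = task_find_by_id(id);
	if (t != NULL) {
		/* The reference keeps the task alive after we unlock. */
		atomic_inc(&t->refcount);
	}

	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);

	return t;	/* the caller must eventually drop the reference */
}
#endif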

static task_id_t task_counter = 0;

/** Initialize tasks
 *
 * Initialize kernel tasks support.
 *
 */
void task_init(void)
{
	TASK = NULL;
	avltree_create(&tasks_tree);
}

/*
 * The idea behind this walker is to remember a single task different from
 * TASK.
 */
static bool task_done_walker(avltree_node_t *node, void *arg)
{
	task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
	task_t **tp = (task_t **) arg;

	if (t != TASK) {
		*tp = t;
		return false; /* stop walking */
	}

	return true; /* continue the walk */
}

/** Kill all tasks except the current task.
 *
 */
void task_done(void)
{
	task_t *t;
	do { /* Repeat while there are tasks other than TASK */

		/* Messing with task structures, avoid deadlock */
		ipl_t ipl = interrupts_disable();
		spinlock_lock(&tasks_lock);

		t = NULL;
		avltree_walk(&tasks_tree, task_done_walker, &t);

		if (t != NULL) {
			task_id_t id = t->taskid;

			spinlock_unlock(&tasks_lock);
			interrupts_restore(ipl);

#ifdef CONFIG_DEBUG
			printf("Killing task %" PRIu64 "\n", id);
#endif
			task_kill(id);
			thread_usleep(10000);
		} else {
			spinlock_unlock(&tasks_lock);
			interrupts_restore(ipl);
		}

	} while (t != NULL);
}

/** Create new task
 *
 * Create new task with no threads.
 *
 * @param as Task's address space.
 * @param name Symbolic name.
 *
 * @return New task's structure
 *
 */
task_t *task_create(as_t *as, char *name)
{
	ipl_t ipl;
	task_t *ta;
	int i;

	ta = (task_t *) malloc(sizeof(task_t), 0);

	task_create_arch(ta);

	spinlock_initialize(&ta->lock, "task_ta_lock");
	list_initialize(&ta->th_head);
	ta->as = as;
	ta->name = name;
	atomic_set(&ta->refcount, 0);
	atomic_set(&ta->lifecount, 0);
	ta->context = CONTEXT;

	ta->capabilities = 0;
	ta->cycles = 0;

	ipc_answerbox_init(&ta->answerbox, ta);
	for (i = 0; i < IPC_MAX_PHONES; i++)
		ipc_phone_init(&ta->phones[i]);
	if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
	    ta->context)))
		ipc_phone_connect(&ta->phones[0], ipc_phone_0);
	atomic_set(&ta->active_calls, 0);

	mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE);
	btree_create(&ta->futexes);

	ipl = interrupts_disable();

	/*
	 * Increment address space reference count.
	 */
	atomic_inc(&as->refcount);

	spinlock_lock(&tasks_lock);
	ta->taskid = ++task_counter;
	avltree_node_initialize(&ta->tasks_tree_node);
	ta->tasks_tree_node.key = ta->taskid;
	avltree_insert(&tasks_tree, &ta->tasks_tree_node);
	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);

	return ta;
}
/** Destroy task.
 *
 * @param t Task to be destroyed.
 */
void task_destroy(task_t *t)
{
	/*
	 * Remove the task from the tasks AVL tree.
	 */
	spinlock_lock(&tasks_lock);
	avltree_delete(&tasks_tree, &t->tasks_tree_node);
	spinlock_unlock(&tasks_lock);

	/*
	 * Perform architecture specific task destruction.
	 */
	task_destroy_arch(t);

	/*
	 * Free up dynamically allocated state.
	 */
	btree_destroy(&t->futexes);

	/*
	 * Drop our reference to the address space.
	 */
	if (atomic_predec(&t->as->refcount) == 0)
		as_destroy(t->as);

	free(t);
	TASK = NULL;
}

/** Syscall for reading task ID from userspace.
 *
 * @param uspace_task_id Userspace address of 8-byte buffer where to store
 *                       current task ID.
 *
 * @return 0 on success or an error code from @ref errno.h.
 */
unative_t sys_task_get_id(task_id_t *uspace_task_id)
{
	/*
	 * No need to acquire lock on TASK because taskid
	 * remains constant for the lifespan of the task.
	 */
	return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
	    sizeof(TASK->taskid));
}

unative_t sys_task_spawn(void *image, size_t size)
{
	void *kimage = malloc(size, 0);
	if (kimage == NULL)
		return ENOMEM;

	int rc = copy_from_uspace(kimage, image, size);
	if (rc != EOK) {
		/* Do not leak the kernel copy of the image. */
		free(kimage);
		return rc;
	}

	/*
	 * Not very efficient; it would be better to call it on the code
	 * pages only, but this whole function is a temporary hack anyway
	 * and will one day be dropped in favor of the userspace dynamic
	 * loader.
	 */
	smc_coherence_block(kimage, size);

	uspace_arg_t *kernel_uarg;
	kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
	if (kernel_uarg == NULL) {
		free(kimage);
		return ENOMEM;
	}

	kernel_uarg->uspace_entry =
	    (void *) ((elf_header_t *) kimage)->e_entry;
	kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
	kernel_uarg->uspace_thread_function = NULL;
	kernel_uarg->uspace_thread_arg = NULL;
	kernel_uarg->uspace_uarg = NULL;

	as_t *as = as_create(0);
	if (as == NULL) {
		free(kernel_uarg);
		free(kimage);
		return ENOMEM;
	}

	unsigned int erc = elf_load((elf_header_t *) kimage, as);
	if (erc != EE_OK) {
		as_destroy(as);
		free(kernel_uarg);
		free(kimage);
		return ENOENT;
	}

	as_area_t *area = as_area_create(as,
	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
	    LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
	    AS_AREA_ATTR_NONE, &anon_backend, NULL);
	if (area == NULL) {
		as_destroy(as);
		free(kernel_uarg);
		free(kimage);
		return ENOMEM;
	}

	task_t *task = task_create(as, "app");
	if (task == NULL) {
		as_destroy(as);
		free(kernel_uarg);
		free(kimage);
		return ENOENT;
	}

	// FIXME: control the capabilities
	cap_set(task, cap_get(TASK));

	thread_t *thread = thread_create(uinit, kernel_uarg, task,
	    THREAD_FLAG_USPACE, "user", false);
	if (thread == NULL) {
		task_destroy(task);
		as_destroy(as);
		free(kernel_uarg);
		free(kimage);
		return ENOENT;
	}

	thread_ready(thread);

	return EOK;
}

/** Find task structure corresponding to task ID.
 *
 * The tasks_lock must be already held by the caller of this function
 * and interrupts must be disabled.
 *
 * @param id Task ID.
 *
 * @return Task structure address or NULL if there is no such task ID.
 */
task_t *task_find_by_id(task_id_t id)
{
	avltree_node_t *node;

	node = avltree_search(&tasks_tree, (avltree_key_t) id);

	if (node)
		return avltree_get_instance(node, task_t, tasks_tree_node);
	return NULL;
}
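
/*
 * Illustrative sketch (not part of the original file): the locking
 * discipline that task_find_by_id() expects from its callers, the same
 * pattern task_kill() below follows. The helper name
 * task_lookup_example() is made up for the example.
 */
#if 0
static void task_lookup_example(task_id_t id)
{
	ipl_t ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);

	task_t *t = task_find_by_id(id);
	if (t != NULL) {
		/* Take the task's lock before dropping tasks_lock. */
		spinlock_lock(&t->lock);
		/* ... safely inspect or modify *t here ... */
		spinlock_unlock(&t->lock);
	}

	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);
}
#endif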

/** Get accounting data of given task.
 *
 * Note that the task lock of 't' must be already held and
 * interrupts must be already disabled.
 *
 * @param t Pointer to the task.
 *
 * @return Number of cycles used by the task and all its threads so far.
 */
uint64_t task_get_accounting(task_t *t)
{
	/* Accumulated value of task */
	uint64_t ret = t->cycles;

	/* Current values of threads */
	link_t *cur;
	for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
		thread_t *thr = list_get_instance(cur, thread_t, th_link);

		spinlock_lock(&thr->lock);
		/* Process only counted threads */
		if (!thr->uncounted) {
			if (thr == THREAD) {
				/* Update accounting of current thread */
				thread_update_accounting();
			}
			ret += thr->cycles;
		}
		spinlock_unlock(&thr->lock);
	}

	return ret;
}
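
/*
 * Illustrative sketch (not part of the original file): reading a task's
 * accounting data under the locks this function requires, mirroring what
 * task_print_walker() below does. The helper name
 * task_accounting_example() is made up for the example.
 */
#if 0
static void task_accounting_example(task_t *t)
{
	ipl_t ipl = interrupts_disable();
	spinlock_lock(&t->lock);

	uint64_t cycles = task_get_accounting(t);

	spinlock_unlock(&t->lock);
	interrupts_restore(ipl);

	printf("%" PRIu64 " cycles\n", cycles);
}
#endif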

/** Kill task.
 *
 * This function is idempotent.
 * It signals all of the task's threads to exit.
 *
 * @param id ID of the task to be killed.
 *
 * @return 0 on success or an error code from errno.h.
 */
int task_kill(task_id_t id)
{
	ipl_t ipl;
	task_t *ta;
	link_t *cur;

	if (id == 1)
		return EPERM;

	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);
	if (!(ta = task_find_by_id(id))) {
		spinlock_unlock(&tasks_lock);
		interrupts_restore(ipl);
		return ENOENT;
	}
	spinlock_unlock(&tasks_lock);

	/*
	 * Interrupt all threads except ktaskclnp.
	 */
	spinlock_lock(&ta->lock);
	for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
		thread_t *thr;
		bool sleeping = false;

		thr = list_get_instance(cur, thread_t, th_link);

		spinlock_lock(&thr->lock);
		thr->interrupted = true;
		if (thr->state == Sleeping)
			sleeping = true;
		spinlock_unlock(&thr->lock);

		if (sleeping)
			waitq_interrupt_sleep(thr);
	}
	spinlock_unlock(&ta->lock);
	interrupts_restore(ipl);

	return 0;
}

static bool task_print_walker(avltree_node_t *node, void *arg)
{
	task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
	int j;

	spinlock_lock(&t->lock);

	uint64_t cycles;
	char suffix;
	order(task_get_accounting(t), &cycles, &suffix);

#ifdef __32_BITS__
	printf("%-6" PRIu64 " %-10s %-3" PRIu32 " %10p %10p %9" PRIu64
	    "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
	    suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
#endif

#ifdef __64_BITS__
	printf("%-6" PRIu64 " %-10s %-3" PRIu32 " %18p %18p %9" PRIu64
	    "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
	    suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
#endif

	for (j = 0; j < IPC_MAX_PHONES; j++) {
		if (t->phones[j].callee)
			printf(" %d:%p", j, t->phones[j].callee);
	}
	printf("\n");

	spinlock_unlock(&t->lock);
	return true;
}

/** Print task list */
void task_print_list(void)
{
	ipl_t ipl;

	/* Messing with task structures, avoid deadlock */
	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);

#ifdef __32_BITS__
	printf("taskid name       ctx address    as         "
	    "cycles     threads calls  callee\n");
	printf("------ ---------- --- ---------- ---------- "
	    "---------- ------- ------ ------>\n");
#endif

#ifdef __64_BITS__
	printf("taskid name       ctx address            as                 "
	    "cycles     threads calls  callee\n");
	printf("------ ---------- --- ------------------ ------------------ "
	    "---------- ------- ------ ------>\n");
#endif

	avltree_walk(&tasks_tree, task_print_walker, NULL);

	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);
}

/** @}
 */