source: mainline/kernel/generic/src/proc/task.c@ 04d66804

Branches: serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Last change on this file since 04d66804 was 03a8a8e, checked in by Jakub Jermar <jakub@…>, 13 years ago

Link each phone to its containing task.

This makes it possible to set the call's sender reliably using just the
info stored in the phone used to make the call.

  • Property mode set to 100644
File size: 14.9 KB
RevLine 
[f761f1eb]1/*
[278b4a30]2 * Copyright (c) 2010 Jakub Jermar
[f761f1eb]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[cc73a8a1]29/** @addtogroup genericproc
[b45c443]30 * @{
31 */
32
[9179d0a]33/**
[b45c443]34 * @file
[5ba201d]35 * @brief Task management.
[9179d0a]36 */
37
[f761f1eb]38#include <proc/thread.h>
39#include <proc/task.h>
[20d50a1]40#include <mm/as.h>
[085d973]41#include <mm/slab.h>
[31d8e10]42#include <atomic.h>
[f761f1eb]43#include <synch/spinlock.h>
[5573942]44#include <synch/waitq.h>
[f761f1eb]45#include <arch.h>
[8605b24]46#include <arch/barrier.h>
[b76a2217]47#include <adt/avl.h>
[7f6e755]48#include <adt/btree.h>
[5c9a08b]49#include <adt/list.h>
[6d9c49a]50#include <ipc/ipc.h>
[c98e6ee]51#include <ipc/ipcrsc.h>
[5d0500c]52#include <ipc/event.h>
[37c57f2]53#include <print.h>
[7509ddc]54#include <errno.h>
[95155b0c]55#include <func.h>
[19f857a]56#include <str.h>
[41df2827]57#include <memstr.h>
[e3c762cd]58#include <syscall/copy.h>
[95ad426]59#include <macros.h>
[8e5e78f]60
/** Spinlock protecting the tasks_tree AVL tree. */
IRQ_SPINLOCK_INITIALIZE(tasks_lock);

/** AVL tree of active tasks.
 *
 * The task is guaranteed to exist after it was found in the tasks_tree as
 * long as:
 *
 * @li the tasks_lock is held,
 * @li the task's lock is held when task's lock is acquired before releasing
 *     tasks_lock or
 * @li the task's refcount is greater than 0
 *
 */
avltree_t tasks_tree;

/* Monotonic counter used to mint unique task IDs (see task_create()). */
static task_id_t task_counter = 0;

/* Slab cache backing all task_t allocations. */
static slab_cache_t *task_slab;

/* Forward declarations. */
static void task_kill_internal(task_t *);
static int tsk_constructor(void *, unsigned int);
[121966e]84
[da1bafb]85/** Initialize kernel tasks support.
86 *
87 */
[f761f1eb]88void task_init(void)
89{
[43114c5]90 TASK = NULL;
[b76a2217]91 avltree_create(&tasks_tree);
[f97f1e51]92 task_slab = slab_cache_create("task_t", sizeof(task_t), 0,
[59ee56f]93 tsk_constructor, NULL, 0);
[b76a2217]94}
95
[da1bafb]96/** Task finish walker.
97 *
[121966e]98 * The idea behind this walker is to kill and count all tasks different from
[814c4f5]99 * TASK.
[da1bafb]100 *
[b76a2217]101 */
102static bool task_done_walker(avltree_node_t *node, void *arg)
103{
[da1bafb]104 task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);
105 size_t *cnt = (size_t *) arg;
[5ba201d]106
[da1bafb]107 if (task != TASK) {
[121966e]108 (*cnt)++;
[da1bafb]109
[121966e]110#ifdef CONFIG_DEBUG
[da1bafb]111 printf("[%"PRIu64"] ", task->taskid);
[121966e]112#endif
[da1bafb]113
114 task_kill_internal(task);
[b76a2217]115 }
[5ba201d]116
117 /* Continue the walk */
118 return true;
[f761f1eb]119}
120
[da1bafb]121/** Kill all tasks except the current task.
122 *
123 */
[f74bbaf]124void task_done(void)
125{
[da1bafb]126 size_t tasks_left;
[5ba201d]127
[da1bafb]128 /* Repeat until there are any tasks except TASK */
129 do {
[121966e]130#ifdef CONFIG_DEBUG
131 printf("Killing tasks... ");
132#endif
[da1bafb]133
134 irq_spinlock_lock(&tasks_lock, true);
[121966e]135 tasks_left = 0;
136 avltree_walk(&tasks_tree, task_done_walker, &tasks_left);
[da1bafb]137 irq_spinlock_unlock(&tasks_lock, true);
138
[121966e]139 thread_sleep(1);
[da1bafb]140
[f74bbaf]141#ifdef CONFIG_DEBUG
[121966e]142 printf("\n");
143#endif
[da1bafb]144 } while (tasks_left > 0);
[f74bbaf]145}
[70527f1]146
[da1bafb]147int tsk_constructor(void *obj, unsigned int kmflags)
[59ee56f]148{
[da1bafb]149 task_t *task = (task_t *) obj;
150
151 atomic_set(&task->refcount, 0);
152 atomic_set(&task->lifecount, 0);
[5ba201d]153
[da1bafb]154 irq_spinlock_initialize(&task->lock, "task_t_lock");
155 mutex_initialize(&task->futexes_lock, MUTEX_PASSIVE);
[5ba201d]156
[55b77d9]157 list_initialize(&task->threads);
[5ba201d]158
[da1bafb]159 ipc_answerbox_init(&task->answerbox, task);
[5ba201d]160
[da1bafb]161 size_t i;
[59ee56f]162 for (i = 0; i < IPC_MAX_PHONES; i++)
[03a8a8e]163 ipc_phone_init(&task->phones[i], task);
[86939b1]164
165 spinlock_initialize(&task->active_calls_lock, "active_calls_lock");
166 list_initialize(&task->active_calls);
[5ba201d]167
[59ee56f]168#ifdef CONFIG_UDEBUG
169 /* Init kbox stuff */
[da1bafb]170 task->kb.thread = NULL;
171 ipc_answerbox_init(&task->kb.box, task);
172 mutex_initialize(&task->kb.cleanup_lock, MUTEX_PASSIVE);
[59ee56f]173#endif
[5ba201d]174
[59ee56f]175 return 0;
176}
177
[814c4f5]178/** Create new task with no threads.
[70527f1]179 *
[5ba201d]180 * @param as Task's address space.
181 * @param name Symbolic name (a copy is made).
[70527f1]182 *
[5ba201d]183 * @return New task's structure.
[70527f1]184 *
185 */
[a000878c]186task_t *task_create(as_t *as, const char *name)
[f761f1eb]187{
[da1bafb]188 task_t *task = (task_t *) slab_alloc(task_slab, 0);
189 task_create_arch(task);
190
191 task->as = as;
192 str_cpy(task->name, TASK_NAME_BUFLEN, name);
193
[473d5d2]194 task->container = CONTAINER;
[da1bafb]195 task->capabilities = 0;
196 task->ucycles = 0;
197 task->kcycles = 0;
198
199 task->ipc_info.call_sent = 0;
[be06914]200 task->ipc_info.call_received = 0;
[da1bafb]201 task->ipc_info.answer_sent = 0;
[be06914]202 task->ipc_info.answer_received = 0;
203 task->ipc_info.irq_notif_received = 0;
[da1bafb]204 task->ipc_info.forwarded = 0;
[5d0500c]205
206 event_task_init(task);
[da1bafb]207
[c33f39f]208 task->answerbox.active = true;
209
[9a1b20c]210#ifdef CONFIG_UDEBUG
211 /* Init debugging stuff */
[da1bafb]212 udebug_task_init(&task->udebug);
[5ba201d]213
[9a1b20c]214 /* Init kbox stuff */
[c33f39f]215 task->kb.box.active = true;
[da1bafb]216 task->kb.finished = false;
[9a1b20c]217#endif
[5ba201d]218
[59ee56f]219 if ((ipc_phone_0) &&
[473d5d2]220 (container_check(ipc_phone_0->task->container, task->container)))
[c33f39f]221 (void) ipc_phone_connect(&task->phones[0], ipc_phone_0);
[5ba201d]222
[da1bafb]223 btree_create(&task->futexes);
[bb68433]224
[6193351]225 /*
226 * Get a reference to the address space.
227 */
[da1bafb]228 as_hold(task->as);
229
230 irq_spinlock_lock(&tasks_lock, true);
231
232 task->taskid = ++task_counter;
233 avltree_node_initialize(&task->tasks_tree_node);
234 task->tasks_tree_node.key = task->taskid;
235 avltree_insert(&tasks_tree, &task->tasks_tree_node);
236
237 irq_spinlock_unlock(&tasks_lock, true);
238
239 return task;
[f761f1eb]240}
241
[7509ddc]242/** Destroy task.
243 *
[da1bafb]244 * @param task Task to be destroyed.
[5ba201d]245 *
[7509ddc]246 */
[da1bafb]247void task_destroy(task_t *task)
[7509ddc]248{
[ea7890e7]249 /*
250 * Remove the task from the task B+tree.
251 */
[da1bafb]252 irq_spinlock_lock(&tasks_lock, true);
253 avltree_delete(&tasks_tree, &task->tasks_tree_node);
254 irq_spinlock_unlock(&tasks_lock, true);
[5ba201d]255
[ea7890e7]256 /*
257 * Perform architecture specific task destruction.
258 */
[da1bafb]259 task_destroy_arch(task);
[5ba201d]260
[ea7890e7]261 /*
262 * Free up dynamically allocated state.
263 */
[da1bafb]264 btree_destroy(&task->futexes);
[5ba201d]265
[ea7890e7]266 /*
267 * Drop our reference to the address space.
268 */
[da1bafb]269 as_release(task->as);
[31e8ddd]270
[da1bafb]271 slab_free(task_slab, task);
[7509ddc]272}
273
[278b4a30]274/** Hold a reference to a task.
275 *
276 * Holding a reference to a task prevents destruction of that task.
277 *
[da1bafb]278 * @param task Task to be held.
279 *
[278b4a30]280 */
[da1bafb]281void task_hold(task_t *task)
[278b4a30]282{
[da1bafb]283 atomic_inc(&task->refcount);
[278b4a30]284}
285
286/** Release a reference to a task.
287 *
288 * The last one to release a reference to a task destroys the task.
289 *
[da1bafb]290 * @param task Task to be released.
291 *
[278b4a30]292 */
[da1bafb]293void task_release(task_t *task)
[278b4a30]294{
[da1bafb]295 if ((atomic_predec(&task->refcount)) == 0)
296 task_destroy(task);
[278b4a30]297}
298
#ifdef __32_BITS__

/** Syscall for reading task ID from userspace (32 bits)
 *
 * On 32-bit systems the 64-bit task ID does not fit into a single
 * sysarg_t return value, so it is copied out through a user-space
 * buffer instead.
 *
 * @param uspace_taskid Pointer to user-space buffer
 *                      where to store current task ID.
 *
 * @return Zero on success or an error code from @ref errno.h.
 *
 */
sysarg_t sys_task_get_id(sysarg64_t *uspace_taskid)
{
	/*
	 * No need to acquire lock on TASK because taskid remains constant for
	 * the lifespan of the task.
	 */
	return (sysarg_t) copy_to_uspace(uspace_taskid, &TASK->taskid,
	    sizeof(TASK->taskid));
}

#endif /* __32_BITS__ */
320
#ifdef __64_BITS__

/** Syscall for reading task ID from userspace (64 bits)
 *
 * On 64-bit systems the task ID fits into a sysarg_t and can be
 * returned directly.
 *
 * @return Current task ID.
 *
 */
sysarg_t sys_task_get_id(void)
{
	/*
	 * No need to acquire lock on TASK because taskid remains constant for
	 * the lifespan of the task.
	 */
	return TASK->taskid;
}

#endif /* __64_BITS__ */
338
/** Syscall for setting the task name.
 *
 * The name simplifies identifying the task in the task list.
 *
 * @param uspace_name The new name for the task. (typically the same
 *                    as the command used to execute it).
 * @param name_len    Length of the name in bytes (capped to
 *                    TASK_NAME_BUFLEN - 1).
 *
 * @return 0 on success or an error code from @ref errno.h.
 *
 */
sysarg_t sys_task_set_name(const char *uspace_name, size_t name_len)
{
	char namebuf[TASK_NAME_BUFLEN];

	/* Cap length of name and copy it from userspace. */
	if (name_len > TASK_NAME_BUFLEN - 1)
		name_len = TASK_NAME_BUFLEN - 1;

	int rc = copy_from_uspace(namebuf, uspace_name, name_len);
	if (rc != 0)
		return (sysarg_t) rc;

	/* The copied region is not guaranteed to be NUL-terminated. */
	namebuf[name_len] = '\0';

	/*
	 * As the task name is referenced also from the
	 * threads, lock the threads' lock for the course
	 * of the update.
	 */

	irq_spinlock_lock(&tasks_lock, true);
	irq_spinlock_lock(&TASK->lock, false);
	irq_spinlock_lock(&threads_lock, false);

	/* Set task name */
	str_cpy(TASK->name, TASK_NAME_BUFLEN, namebuf);

	irq_spinlock_unlock(&threads_lock, false);
	irq_spinlock_unlock(&TASK->lock, false);
	irq_spinlock_unlock(&tasks_lock, true);

	return EOK;
}
382
[1e9f8ab]383/** Syscall to forcefully terminate a task
384 *
385 * @param uspace_taskid Pointer to task ID in user space.
386 *
387 * @return 0 on success or an error code from @ref errno.h.
388 *
389 */
390sysarg_t sys_task_kill(task_id_t *uspace_taskid)
391{
392 task_id_t taskid;
[5bcf1f9]393 int rc = copy_from_uspace(&taskid, uspace_taskid, sizeof(taskid));
[1e9f8ab]394 if (rc != 0)
395 return (sysarg_t) rc;
[5bcf1f9]396
[1e9f8ab]397 return (sysarg_t) task_kill(taskid);
398}
399
[9a8d91b]400/** Find task structure corresponding to task ID.
401 *
[814c4f5]402 * The tasks_lock must be already held by the caller of this function and
403 * interrupts must be disabled.
[9a8d91b]404 *
[5ba201d]405 * @param id Task ID.
406 *
[e1b6742]407 * @return Task structure address or NULL if there is no such task ID.
[9a8d91b]408 *
409 */
[5ba201d]410task_t *task_find_by_id(task_id_t id)
411{
[1d432f9]412 ASSERT(interrupts_disabled());
413 ASSERT(irq_spinlock_locked(&tasks_lock));
414
[e1b6742]415 avltree_node_t *node =
416 avltree_search(&tasks_tree, (avltree_key_t) id);
[5ba201d]417
[b76a2217]418 if (node)
[da1bafb]419 return avltree_get_instance(node, task_t, tasks_tree_node);
[5ba201d]420
[b76a2217]421 return NULL;
[9a8d91b]422}
423
/** Get accounting data of given task.
 *
 * Sums the task's accumulated cycle counters with the live counters of
 * all its (counted) threads.
 *
 * Note that task lock of 'task' must be already held and interrupts must be
 * already disabled.
 *
 * @param task    Pointer to the task.
 * @param ucycles Out pointer to sum of all user cycles.
 * @param kcycles Out pointer to sum of all kernel cycles.
 *
 */
void task_get_accounting(task_t *task, uint64_t *ucycles, uint64_t *kcycles)
{
	ASSERT(interrupts_disabled());
	ASSERT(irq_spinlock_locked(&task->lock));

	/* Accumulated values of task */
	uint64_t uret = task->ucycles;
	uint64_t kret = task->kcycles;

	/* Current values of threads */
	list_foreach(task->threads, cur) {
		thread_t *thread = list_get_instance(cur, thread_t, th_link);

		irq_spinlock_lock(&thread->lock, false);

		/* Process only counted threads */
		if (!thread->uncounted) {
			if (thread == THREAD) {
				/* Update accounting of current thread */
				thread_update_accounting(false);
			}

			uret += thread->ucycles;
			kret += thread->kcycles;
		}

		irq_spinlock_unlock(&thread->lock, false);
	}

	*ucycles = uret;
	*kcycles = kret;
}
466
/** Interrupt all threads of a task so that they eventually exit.
 *
 * All callers in this file invoke this with interrupts disabled and
 * tasks_lock held; the task's lock and threads_lock are taken here.
 *
 * @param task Task whose threads are to be interrupted.
 *
 */
static void task_kill_internal(task_t *task)
{
	irq_spinlock_lock(&task->lock, false);
	irq_spinlock_lock(&threads_lock, false);

	/*
	 * Interrupt all threads.
	 */

	list_foreach(task->threads, cur) {
		thread_t *thread = list_get_instance(cur, thread_t, th_link);
		bool sleeping = false;

		irq_spinlock_lock(&thread->lock, false);

		thread->interrupted = true;
		if (thread->state == Sleeping)
			sleeping = true;

		irq_spinlock_unlock(&thread->lock, false);

		/* Wake sleeping threads so they can notice the interruption. */
		if (sleeping)
			waitq_interrupt_sleep(thread);
	}

	irq_spinlock_unlock(&threads_lock, false);
	irq_spinlock_unlock(&task->lock, false);
}
495
[7509ddc]496/** Kill task.
[ea7890e7]497 *
498 * This function is idempotent.
499 * It signals all the task's threads to bail it out.
[7509ddc]500 *
[5ba201d]501 * @param id ID of the task to be killed.
502 *
503 * @return Zero on success or an error code from errno.h.
[7509ddc]504 *
505 */
506int task_kill(task_id_t id)
507{
[9b6aae6]508 if (id == 1)
509 return EPERM;
[7509ddc]510
[da1bafb]511 irq_spinlock_lock(&tasks_lock, true);
512
513 task_t *task = task_find_by_id(id);
514 if (!task) {
515 irq_spinlock_unlock(&tasks_lock, true);
[7509ddc]516 return ENOENT;
517 }
[da1bafb]518
519 task_kill_internal(task);
520 irq_spinlock_unlock(&tasks_lock, true);
521
522 return EOK;
[7509ddc]523}
524
/** Kill the currently running task.
 *
 * Optionally emits a FAULT event first, then signals all of the task's
 * threads to exit and terminates the calling thread. Does not return.
 *
 * @param notify Send out fault notifications.
 *
 */
void task_kill_self(bool notify)
{
	/*
	 * User space can subscribe for FAULT events to take action
	 * whenever a task faults (to take a dump, run a debugger, etc.).
	 * The notification is always available, but unless udebug is enabled,
	 * that's all you get.
	 */
	if (notify) {
		/* Notify the subscriber that a fault occurred. */
		if (event_notify_3(EVENT_FAULT, false, LOWER32(TASK->taskid),
		    UPPER32(TASK->taskid), (sysarg_t) THREAD) == EOK) {
#ifdef CONFIG_UDEBUG
			/* Wait for a debugging session. */
			udebug_thread_fault();
#endif
		}
	}

	irq_spinlock_lock(&tasks_lock, true);
	task_kill_internal(TASK);
	irq_spinlock_unlock(&tasks_lock, true);

	/* Terminate the calling thread as well; never returns. */
	thread_exit();
}
557
/** Process syscall to terminate the current task.
 *
 * @param notify Send out fault notifications.
 *
 * @return Never returns; the EOK below only satisfies the compiler.
 *
 */
sysarg_t sys_task_exit(sysarg_t notify)
{
	task_kill_self(notify);

	/* Unreachable */
	return EOK;
}
570
/** AVL walker that prints one line of the task list for each task.
 *
 * @param node Tree node of the task to print.
 * @param arg  Pointer to bool; when true, print additional information
 *             (refcount, phone connections) instead of the basic columns.
 *
 * @return Always true to continue the walk.
 *
 */
static bool task_print_walker(avltree_node_t *node, void *arg)
{
	bool *additional = (bool *) arg;
	task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);
	irq_spinlock_lock(&task->lock, false);

	uint64_t ucycles;
	uint64_t kcycles;
	char usuffix, ksuffix;
	task_get_accounting(task, &ucycles, &kcycles);
	/* Scale raw cycle counts to a human-readable magnitude suffix. */
	order_suffix(ucycles, &ucycles, &usuffix);
	order_suffix(kcycles, &kcycles, &ksuffix);

/* Pointer column widths differ between 32-bit and 64-bit builds. */
#ifdef __32_BITS__
	if (*additional)
		printf("%-8" PRIu64 " %9" PRIua, task->taskid,
		    atomic_get(&task->refcount));
	else
		printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %10p %10p"
		    " %9" PRIu64 "%c %9" PRIu64 "%c\n", task->taskid,
		    task->name, task->container, task, task->as,
		    ucycles, usuffix, kcycles, ksuffix);
#endif

#ifdef __64_BITS__
	if (*additional)
		printf("%-8" PRIu64 " %9" PRIu64 "%c %9" PRIu64 "%c "
		    "%9" PRIua, task->taskid, ucycles, usuffix, kcycles,
		    ksuffix, atomic_get(&task->refcount));
	else
		printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %18p %18p\n",
		    task->taskid, task->name, task->container, task, task->as);
#endif

	if (*additional) {
		/* List all connected phones of the task. */
		size_t i;
		for (i = 0; i < IPC_MAX_PHONES; i++) {
			if (task->phones[i].callee)
				printf(" %zu:%p", i, task->phones[i].callee);
		}
		printf("\n");
	}

	irq_spinlock_unlock(&task->lock, false);
	return true;
}
617
/** Print task list
 *
 * Prints a header matching task_print_walker()'s output format and then
 * walks the task tree, one line per task.
 *
 * @param additional Print additional information.
 *
 */
void task_print_list(bool additional)
{
	/* Messing with task structures, avoid deadlock */
	irq_spinlock_lock(&tasks_lock, true);

/* Header columns differ between 32-bit and 64-bit builds. */
#ifdef __32_BITS__
	if (additional)
		printf("[id ] [threads] [calls] [callee\n");
	else
		printf("[id ] [name ] [ctn] [address ] [as ]"
		    " [ucycles ] [kcycles ]\n");
#endif

#ifdef __64_BITS__
	if (additional)
		printf("[id ] [ucycles ] [kcycles ] [threads] [calls]"
		    " [callee\n");
	else
		printf("[id ] [name ] [ctn] [address ]"
		    " [as ]\n");
#endif

	avltree_walk(&tasks_tree, task_print_walker, &additional);

	irq_spinlock_unlock(&tasks_lock, true);
}
[7509ddc]649
[cc73a8a1]650/** @}
[b45c443]651 */
Note: See TracBrowser for help on using the repository browser.