source: mainline/kernel/generic/src/proc/task.c@ 3be9d10

Last change on this file since 3be9d10 was eadaeae8, checked in by Jakub Jermar <jakub@…>, 7 years ago

Make capability handles type-safe

Define distinct pointer types for the handles of the supported
capability types and use them instead of integer handles. This makes it
virtually impossible to pass a non-handle, or a handle of a different
type, where the proper handle is expected. Also turn cap_handle_t into
an "untyped" capability handle that can be assigned to and from the
"typed" handles.

This commit also fixes a bug in the msim-con driver, which wrongly used
the IRQ number instead of the IRQ capability handle to unregister the
IRQ.

This commit also fixes the incorrect use of a capability handle in
place of an error code in libusbhost.
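
As a minimal illustrative sketch of the idea (the definitions and the function
below are hypothetical stand-ins, not the actual HelenOS declarations):

    /* Distinct incomplete-struct pointer types make handles type-safe. */
    typedef struct cap_phone_handle *cap_phone_handle_t;  /* phone capability */
    typedef struct cap_irq_handle *cap_irq_handle_t;      /* IRQ capability */

    /* An "untyped" handle that the typed handles convert to and from. */
    typedef void *cap_handle_t;

    /* A function taking a typed handle now rejects plain integers or
     * handles of another capability type at compile time. */
    errno_t example_unregister_irq(cap_irq_handle_t ihandle);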

/*
 * Copyright (c) 2010 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Task management.
 */

#include <assert.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/as.h>
#include <mm/slab.h>
#include <atomic.h>
#include <synch/futex.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <arch.h>
#include <arch/barrier.h>
#include <adt/avl.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <cap/cap.h>
#include <ipc/ipc.h>
#include <ipc/ipcrsc.h>
#include <ipc/event.h>
#include <print.h>
#include <errno.h>
#include <halt.h>
#include <str.h>
#include <syscall/copy.h>
#include <macros.h>

/** Spinlock protecting the tasks_tree AVL tree. */
IRQ_SPINLOCK_INITIALIZE(tasks_lock);

/** AVL tree of active tasks.
 *
 * A task found in the tasks_tree is guaranteed to keep existing as long as:
 *
 * @li the tasks_lock is held,
 * @li the task's lock is held, provided it was acquired before releasing
 *     tasks_lock, or
 * @li the task's refcount is greater than 0.
 *
 */
avltree_t tasks_tree;
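
/*
 * Illustrative sketch (not part of the original file) of guarantee (3) above:
 * taking a reference under tasks_lock keeps the task valid after the lock is
 * dropped. The wrapper function itself is hypothetical.
 */
#if 0
static task_t *example_lookup_and_hold(task_id_t id)
{
    irq_spinlock_lock(&tasks_lock, true);
    task_t *task = task_find_by_id(id);
    if (task)
        task_hold(task);    /* refcount > 0 keeps the task alive */
    irq_spinlock_unlock(&tasks_lock, true);

    /* 'task' may be used here; the caller must eventually task_release() it. */
    return task;
}
#endif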

static task_id_t task_counter = 0;

static slab_cache_t *task_cache;

/* Forward declarations. */
static void task_kill_internal(task_t *);
static errno_t tsk_constructor(void *, unsigned int);
static size_t tsk_destructor(void *obj);

/** Initialize kernel tasks support.
 *
 */
void task_init(void)
{
    TASK = NULL;
    avltree_create(&tasks_tree);
    task_cache = slab_cache_create("task_t", sizeof(task_t), 0,
        tsk_constructor, tsk_destructor, 0);
}

/** Task finish walker.
 *
 * The idea behind this walker is to kill and count all tasks other than
 * TASK.
 *
 */
static bool task_done_walker(avltree_node_t *node, void *arg)
{
    task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);
    size_t *cnt = (size_t *) arg;

    if (task != TASK) {
        (*cnt)++;

#ifdef CONFIG_DEBUG
        printf("[%"PRIu64"] ", task->taskid);
#endif

        task_kill_internal(task);
    }

    /* Continue the walk */
    return true;
}

/** Kill all tasks except the current task.
 *
 */
void task_done(void)
{
    size_t tasks_left;

    if (ipc_box_0) {
        task_t *task_0 = ipc_box_0->task;
        ipc_box_0 = NULL;
        /*
         * The first task is held by kinit(); we need to release it, or
         * it will never finish cleanup.
         */
        task_release(task_0);
    }

    /* Repeat while there are any tasks other than TASK. */
    do {
#ifdef CONFIG_DEBUG
        printf("Killing tasks... ");
#endif

        irq_spinlock_lock(&tasks_lock, true);
        tasks_left = 0;
        avltree_walk(&tasks_tree, task_done_walker, &tasks_left);
        irq_spinlock_unlock(&tasks_lock, true);

        thread_sleep(1);

#ifdef CONFIG_DEBUG
        printf("\n");
#endif
    } while (tasks_left > 0);
}

errno_t tsk_constructor(void *obj, unsigned int kmflags)
{
    task_t *task = (task_t *) obj;

    errno_t rc = caps_task_alloc(task);
    if (rc != EOK)
        return rc;

    atomic_set(&task->refcount, 0);
    atomic_set(&task->lifecount, 0);

    irq_spinlock_initialize(&task->lock, "task_t_lock");

    list_initialize(&task->threads);

    ipc_answerbox_init(&task->answerbox, task);

    spinlock_initialize(&task->active_calls_lock, "active_calls_lock");
    list_initialize(&task->active_calls);

#ifdef CONFIG_UDEBUG
    /* Init kbox stuff */
    task->kb.thread = NULL;
    ipc_answerbox_init(&task->kb.box, task);
    mutex_initialize(&task->kb.cleanup_lock, MUTEX_PASSIVE);
#endif

    return EOK;
}

size_t tsk_destructor(void *obj)
{
    task_t *task = (task_t *) obj;

    caps_task_free(task);
    return 0;
}

/** Create new task with no threads.
 *
 * @param as   Task's address space.
 * @param name Symbolic name (a copy is made).
 *
 * @return New task's structure.
 *
 */
task_t *task_create(as_t *as, const char *name)
{
    task_t *task = (task_t *) slab_alloc(task_cache, 0);
    if (task == NULL) {
        return NULL;
    }

    task_create_arch(task);

    task->as = as;
    str_cpy(task->name, TASK_NAME_BUFLEN, name);

    task->container = CONTAINER;
    task->perms = 0;
    task->ucycles = 0;
    task->kcycles = 0;

    caps_task_init(task);

    task->ipc_info.call_sent = 0;
    task->ipc_info.call_received = 0;
    task->ipc_info.answer_sent = 0;
    task->ipc_info.answer_received = 0;
    task->ipc_info.irq_notif_received = 0;
    task->ipc_info.forwarded = 0;

    event_task_init(task);

    task->answerbox.active = true;

#ifdef CONFIG_UDEBUG
    /* Init debugging stuff */
    udebug_task_init(&task->udebug);

    /* Init kbox stuff */
    task->kb.box.active = true;
    task->kb.finished = false;
#endif

    if ((ipc_box_0) &&
        (container_check(ipc_box_0->task->container, task->container))) {
        cap_phone_handle_t phone_handle;
        errno_t rc = phone_alloc(task, true, &phone_handle, NULL);
        if (rc != EOK) {
            task->as = NULL;
            task_destroy_arch(task);
            slab_free(task_cache, task);
            return NULL;
        }

        kobject_t *phone_obj = kobject_get(task, phone_handle,
            KOBJECT_TYPE_PHONE);
        (void) ipc_phone_connect(phone_obj->phone, ipc_box_0);
    }

    futex_task_init(task);

    /*
     * Get a reference to the address space.
     */
    as_hold(task->as);

    irq_spinlock_lock(&tasks_lock, true);

    task->taskid = ++task_counter;
    avltree_node_initialize(&task->tasks_tree_node);
    task->tasks_tree_node.key = task->taskid;
    avltree_insert(&tasks_tree, &task->tasks_tree_node);

    irq_spinlock_unlock(&tasks_lock, true);

    return task;
}
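
/*
 * Illustrative sketch (not part of the original file): a caller typically
 * pairs task_create() with a freshly created address space. It is assumed
 * that as_create() from <mm/as.h> is usable in the calling context; the name
 * string and the failure cleanup are purely illustrative.
 */
#if 0
as_t *as = as_create(0);                    /* new, empty address space */
task_t *task = task_create(as, "example");  /* takes its own 'as' reference and
                                               registers itself in tasks_tree */
if (task == NULL) {
    /* clean up 'as' here (omitted in this sketch) */
}
#endif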

/** Destroy task.
 *
 * @param task Task to be destroyed.
 *
 */
void task_destroy(task_t *task)
{
    /*
     * Remove the task from the task AVL tree.
     */
    irq_spinlock_lock(&tasks_lock, true);
    avltree_delete(&tasks_tree, &task->tasks_tree_node);
    irq_spinlock_unlock(&tasks_lock, true);

    /*
     * Perform architecture specific task destruction.
     */
    task_destroy_arch(task);

    /*
     * Free up dynamically allocated state.
     */
    futex_task_deinit(task);

    /*
     * Drop our reference to the address space.
     */
    as_release(task->as);

    slab_free(task_cache, task);
}

/** Hold a reference to a task.
 *
 * Holding a reference to a task prevents destruction of that task.
 *
 * @param task Task to be held.
 *
 */
void task_hold(task_t *task)
{
    atomic_inc(&task->refcount);
}

/** Release a reference to a task.
 *
 * The last one to release a reference to a task destroys the task.
 *
 * @param task Task to be released.
 *
 */
void task_release(task_t *task)
{
    if ((atomic_predec(&task->refcount)) == 0)
        task_destroy(task);
}

#ifdef __32_BITS__

/** Syscall for reading task ID from userspace (32 bits)
 *
 * @param uspace_taskid Pointer to user-space buffer
 *                      where to store current task ID.
 *
 * @return Zero on success or an error code from @ref errno.h.
 *
 */
sys_errno_t sys_task_get_id(sysarg64_t *uspace_taskid)
{
    /*
     * No need to acquire lock on TASK because taskid remains constant for
     * the lifespan of the task.
     */
    return (sys_errno_t) copy_to_uspace(uspace_taskid, &TASK->taskid,
        sizeof(TASK->taskid));
}

#endif /* __32_BITS__ */

#ifdef __64_BITS__

/** Syscall for reading task ID from userspace (64 bits)
 *
 * @return Current task ID.
 *
 */
sysarg_t sys_task_get_id(void)
{
    /*
     * No need to acquire lock on TASK because taskid remains constant for
     * the lifespan of the task.
     */
    return TASK->taskid;
}

#endif /* __64_BITS__ */
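
/*
 * Illustrative note (not part of the original file): task_id_t is 64 bits
 * wide, so on 32-bit targets it cannot be returned in a single sysarg_t and
 * is copied out through a user-space pointer instead, while 64-bit targets
 * return it directly. A user-space wrapper might look roughly like this
 * (sketch only; the real libc wrapper may differ):
 */
#if 0
task_id_t task_get_id(void)
{
#ifdef __32_BITS__
    sysarg64_t task_id;
    (void) __SYSCALL1(SYS_TASK_GET_ID, (sysarg_t) &task_id);
    return (task_id_t) task_id;
#else
    return (task_id_t) __SYSCALL0(SYS_TASK_GET_ID);
#endif
}
#endif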

/** Syscall for setting the task name.
 *
 * The name simplifies identifying the task in the task list.
 *
 * @param name The new name for the task (typically the same
 *             as the command used to execute it).
 *
 * @return 0 on success or an error code from @ref errno.h.
 *
 */
sys_errno_t sys_task_set_name(const char *uspace_name, size_t name_len)
{
    char namebuf[TASK_NAME_BUFLEN];

    /* Cap length of name and copy it from userspace. */
    if (name_len > TASK_NAME_BUFLEN - 1)
        name_len = TASK_NAME_BUFLEN - 1;

    errno_t rc = copy_from_uspace(namebuf, uspace_name, name_len);
    if (rc != EOK)
        return (sys_errno_t) rc;

    namebuf[name_len] = '\0';

    /*
     * As the task name is also referenced from the
     * threads, hold the threads' lock for the duration
     * of the update.
     */

    irq_spinlock_lock(&tasks_lock, true);
    irq_spinlock_lock(&TASK->lock, false);
    irq_spinlock_lock(&threads_lock, false);

    /* Set task name */
    str_cpy(TASK->name, TASK_NAME_BUFLEN, namebuf);

    irq_spinlock_unlock(&threads_lock, false);
    irq_spinlock_unlock(&TASK->lock, false);
    irq_spinlock_unlock(&tasks_lock, true);

    return EOK;
}
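
/*
 * Illustrative sketch (not part of the original file): a name longer than
 * TASK_NAME_BUFLEN - 1 bytes is silently truncated rather than rejected.
 * The call below is hypothetical; in reality uspace_name is a user-space
 * pointer handed in by the syscall path, not a kernel string.
 */
#if 0
const char *name = "an-unreasonably-long-example-task-name";
sys_errno_t rc = sys_task_set_name(name, str_size(name));
/* rc == EOK, but TASK->name holds only the first TASK_NAME_BUFLEN - 1 bytes. */
#endif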

/** Syscall to forcefully terminate a task
 *
 * @param uspace_taskid Pointer to task ID in user space.
 *
 * @return 0 on success or an error code from @ref errno.h.
 *
 */
sys_errno_t sys_task_kill(task_id_t *uspace_taskid)
{
    task_id_t taskid;
    errno_t rc = copy_from_uspace(&taskid, uspace_taskid, sizeof(taskid));
    if (rc != EOK)
        return (sys_errno_t) rc;

    return (sys_errno_t) task_kill(taskid);
}

/** Find task structure corresponding to task ID.
 *
 * The tasks_lock must be already held by the caller of this function and
 * interrupts must be disabled.
 *
 * @param id Task ID.
 *
 * @return Task structure address or NULL if there is no such task ID.
 *
 */
task_t *task_find_by_id(task_id_t id)
{
    assert(interrupts_disabled());
    assert(irq_spinlock_locked(&tasks_lock));

    avltree_node_t *node =
        avltree_search(&tasks_tree, (avltree_key_t) id);

    if (node)
        return avltree_get_instance(node, task_t, tasks_tree_node);

    return NULL;
}
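
/*
 * Illustrative sketch (not part of the original file): satisfying the two
 * asserts above. Taking tasks_lock with interrupt disabling covers both; the
 * result may only be used before the unlock unless a reference is taken
 * (see task_hold()/task_release()). task_kill() below follows this pattern;
 * 'id' here is a hypothetical local.
 */
#if 0
irq_spinlock_lock(&tasks_lock, true);   /* disables interrupts, takes the lock */
task_t *task = task_find_by_id(id);
if (task) {
    /* operate on 'task' here, or task_hold() it for use after the unlock */
}
irq_spinlock_unlock(&tasks_lock, true);
#endif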

/** Get accounting data of given task.
 *
 * Note that the task's lock must already be held and interrupts must be
 * disabled.
 *
 * @param task    Pointer to the task.
 * @param ucycles Out pointer to sum of all user cycles.
 * @param kcycles Out pointer to sum of all kernel cycles.
 *
 */
void task_get_accounting(task_t *task, uint64_t *ucycles, uint64_t *kcycles)
{
    assert(interrupts_disabled());
    assert(irq_spinlock_locked(&task->lock));

    /* Accumulated values of task */
    uint64_t uret = task->ucycles;
    uint64_t kret = task->kcycles;

    /* Current values of threads */
    list_foreach(task->threads, th_link, thread_t, thread) {
        irq_spinlock_lock(&thread->lock, false);

        /* Process only counted threads */
        if (!thread->uncounted) {
            if (thread == THREAD) {
                /* Update accounting of current thread */
                thread_update_accounting(false);
            }

            uret += thread->ucycles;
            kret += thread->kcycles;
        }

        irq_spinlock_unlock(&thread->lock, false);
    }

    *ucycles = uret;
    *kcycles = kret;
}
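
/*
 * Illustrative sketch (not part of the original file): reading a task's
 * accounting under the required lock and scaling it for display the same way
 * task_print_walker() below does. 'task' is a hypothetical, already-held task.
 */
#if 0
uint64_t ucycles, kcycles;
char usuffix, ksuffix;

irq_spinlock_lock(&task->lock, true);   /* disables interrupts, locks the task */
task_get_accounting(task, &ucycles, &kcycles);
irq_spinlock_unlock(&task->lock, true);

order_suffix(ucycles, &ucycles, &usuffix);   /* scale to a human-readable unit */
order_suffix(kcycles, &kcycles, &ksuffix);
printf("user: %" PRIu64 "%c, kernel: %" PRIu64 "%c\n",
    ucycles, usuffix, kcycles, ksuffix);
#endif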

static void task_kill_internal(task_t *task)
{
    irq_spinlock_lock(&task->lock, false);
    irq_spinlock_lock(&threads_lock, false);

    /*
     * Interrupt all threads.
     */

    list_foreach(task->threads, th_link, thread_t, thread) {
        bool sleeping = false;

        irq_spinlock_lock(&thread->lock, false);

        thread->interrupted = true;
        if (thread->state == Sleeping)
            sleeping = true;

        irq_spinlock_unlock(&thread->lock, false);

        if (sleeping)
            waitq_interrupt_sleep(thread);
    }

    irq_spinlock_unlock(&threads_lock, false);
    irq_spinlock_unlock(&task->lock, false);
}

/** Kill task.
 *
 * This function is idempotent.
 * It signals all the task's threads to bail out.
 *
 * @param id ID of the task to be killed.
 *
 * @return Zero on success or an error code from errno.h.
 *
 */
errno_t task_kill(task_id_t id)
{
    if (id == 1)
        return EPERM;

    irq_spinlock_lock(&tasks_lock, true);

    task_t *task = task_find_by_id(id);
    if (!task) {
        irq_spinlock_unlock(&tasks_lock, true);
        return ENOENT;
    }

    task_kill_internal(task);
    irq_spinlock_unlock(&tasks_lock, true);

    return EOK;
}

/** Kill the currently running task.
 *
 * @param notify Send out fault notifications.
 *
 */
void task_kill_self(bool notify)
{
    /*
     * User space can subscribe to FAULT events to take action
     * whenever a task faults (to take a dump, run a debugger, etc.).
     * The notification is always available, but unless udebug is enabled,
     * that's all you get.
     */
    if (notify) {
        /* Notify the subscriber that a fault occurred. */
        if (event_notify_3(EVENT_FAULT, false, LOWER32(TASK->taskid),
            UPPER32(TASK->taskid), (sysarg_t) THREAD) == EOK) {
#ifdef CONFIG_UDEBUG
            /* Wait for a debugging session. */
            udebug_thread_fault();
#endif
        }
    }

    irq_spinlock_lock(&tasks_lock, true);
    task_kill_internal(TASK);
    irq_spinlock_unlock(&tasks_lock, true);

    thread_exit();
}

/** Process syscall to terminate the current task.
 *
 * @param notify Send out fault notifications.
 *
 */
sys_errno_t sys_task_exit(sysarg_t notify)
{
    task_kill_self(notify);

    /* Unreachable */
    return EOK;
}

static bool task_print_walker(avltree_node_t *node, void *arg)
{
    bool *additional = (bool *) arg;
    task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);
    irq_spinlock_lock(&task->lock, false);

    uint64_t ucycles;
    uint64_t kcycles;
    char usuffix, ksuffix;
    task_get_accounting(task, &ucycles, &kcycles);
    order_suffix(ucycles, &ucycles, &usuffix);
    order_suffix(kcycles, &kcycles, &ksuffix);

#ifdef __32_BITS__
    if (*additional)
        printf("%-8" PRIu64 " %9" PRIua, task->taskid,
            atomic_get(&task->refcount));
    else
        printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %10p %10p"
            " %9" PRIu64 "%c %9" PRIu64 "%c\n", task->taskid,
            task->name, task->container, task, task->as,
            ucycles, usuffix, kcycles, ksuffix);
#endif

#ifdef __64_BITS__
    if (*additional)
        printf("%-8" PRIu64 " %9" PRIu64 "%c %9" PRIu64 "%c "
            "%9" PRIua "\n", task->taskid, ucycles, usuffix, kcycles,
            ksuffix, atomic_get(&task->refcount));
    else
        printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %18p %18p\n",
            task->taskid, task->name, task->container, task, task->as);
#endif

    irq_spinlock_unlock(&task->lock, false);
    return true;
}

/** Print task list
 *
 * @param additional Print additional information.
 *
 */
void task_print_list(bool additional)
{
    /* Messing with task structures, avoid deadlock */
    irq_spinlock_lock(&tasks_lock, true);

#ifdef __32_BITS__
    if (additional)
        printf("[id    ] [threads] [calls] [callee\n");
    else
        printf("[id    ] [name        ] [ctn] [address ] [as      ]"
            " [ucycles ] [kcycles ]\n");
#endif

#ifdef __64_BITS__
    if (additional)
        printf("[id    ] [ucycles ] [kcycles ] [threads] [calls]"
            " [callee\n");
    else
        printf("[id    ] [name        ] [ctn] [address         ]"
            " [as              ]\n");
#endif

    avltree_walk(&tasks_tree, task_print_walker, &additional);

    irq_spinlock_unlock(&tasks_lock, true);
}

/** @}
 */