/*
 * Copyright (c) 2010 Stanislav Kozina
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericps
 * @{
 */

/**
 * @file
 * @brief Process listing.
*/ #include #include #include #include #include #include #include #include #include static size_t count; static size_t max_count; #define WRITE_TASK_ID(dst, i, src) copy_to_uspace(dst + i, src, sizeof(task_id_t)) #define WRITE_THREAD_INFO(dst, i, src) copy_to_uspace(dst+i, src, sizeof(thread_info_t)) static bool task_walker(avltree_node_t *node, void *arg) { task_t *t = avltree_get_instance(node, task_t, tasks_tree_node); task_id_t *ids = (task_id_t *)arg; spinlock_lock(&t->lock); ++count; if (count > max_count) { spinlock_unlock(&t->lock); return false; } WRITE_TASK_ID(ids, count - 1, &t->taskid); spinlock_unlock(&t->lock); return true; } size_t sys_ps_get_tasks(task_id_t *uspace_ids, size_t size) { ipl_t ipl; /* Messing with task structures, avoid deadlock */ ipl = interrupts_disable(); spinlock_lock(&tasks_lock); count = 0; max_count = size / sizeof(task_id_t); avltree_walk(&tasks_tree, task_walker, uspace_ids); spinlock_unlock(&tasks_lock); interrupts_restore(ipl); return count; } static uint64_t get_task_memory(as_t *as) { mutex_lock(&as->lock); size_t result = 0; link_t *cur; for (cur = as->as_area_btree.leaf_head.next; cur != &as->as_area_btree.leaf_head; cur = cur->next) { btree_node_t *node; node = list_get_instance(cur, btree_node_t, leaf_link); unsigned int i; for (i = 0; i < node->keys; i++) { as_area_t *area = node->value[i]; mutex_lock(&area->lock); result += area->pages; mutex_unlock(&area->lock); } } mutex_unlock(&as->lock); return result * PAGE_SIZE; } int sys_ps_get_task_info(task_id_t *uspace_id, task_info_t *uspace_info) { ipl_t ipl; ipl = interrupts_disable(); task_id_t id; copy_from_uspace(&id, uspace_id, sizeof(task_id_t)); spinlock_lock(&tasks_lock); task_t *t = task_find_by_id(id); if (!t) { spinlock_unlock(&tasks_lock); return ENOENT; } spinlock_lock(&t->lock); spinlock_unlock(&tasks_lock); copy_to_uspace(&uspace_info->taskid, &t->taskid, sizeof(task_id_t)); copy_to_uspace(uspace_info->name, t->name, sizeof(t->name)); uint64_t ucycles; 
uint64_t kcycles; task_get_accounting(t, &ucycles, &kcycles); copy_to_uspace(&uspace_info->ucycles, &ucycles, sizeof(uint64_t)); copy_to_uspace(&uspace_info->kcycles, &kcycles, sizeof(uint64_t)); task_ipc_info_t ipc_info; ipc_info.call_sent = t->ipc_info.call_sent; ipc_info.call_recieved = t->ipc_info.call_recieved; ipc_info.answer_sent = t->ipc_info.answer_sent; ipc_info.answer_recieved = t->ipc_info.answer_recieved; ipc_info.irq_notif_recieved = t->ipc_info.irq_notif_recieved; ipc_info.forwarded = t->ipc_info.forwarded; copy_to_uspace(&uspace_info->ipc_info, &ipc_info, sizeof(task_ipc_info_t)); uint64_t memory = get_task_memory(t->as); copy_to_uspace(&uspace_info->virt_mem, &memory, sizeof(memory)); int thread_count = atomic_get(&t->refcount); copy_to_uspace(&uspace_info->thread_count, &thread_count, sizeof(thread_count)); spinlock_unlock(&t->lock); interrupts_restore(ipl); return 0; } static bool thread_walker(avltree_node_t *node, void *arg) { thread_t *t = avltree_get_instance(node, thread_t, threads_tree_node); thread_info_t *infos = (thread_info_t *)arg; thread_info_t result; spinlock_lock(&t->lock); ++count; if (count > max_count) { spinlock_unlock(&t->lock); return false; } result.tid = t->tid; ASSERT(t->task); result.taskid = t->task->taskid; result.state = t->state; result.priority = t->priority; result.ucycles = t->ucycles; result.kcycles = t->kcycles; if (t->cpu) result.cpu = t->cpu->id; else result.cpu = -1; WRITE_THREAD_INFO(infos, count - 1, &result); spinlock_unlock(&t->lock); return true; } int sys_ps_get_threads(thread_info_t *uspace_infos, size_t size) { ipl_t ipl; ipl = interrupts_disable(); spinlock_lock(&threads_lock); count = 0; max_count = size / sizeof(thread_info_t); avltree_walk(&threads_tree, thread_walker, uspace_infos); spinlock_unlock(&threads_lock); interrupts_restore(ipl); return count; } /** @} */