/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Task management.
 */

#include <main/uinit.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/uarg.h>
#include <mm/as.h>
#include <mm/slab.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <arch.h>
#include <panic.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <ipc/ipc.h>
#include <security/cap.h>
#include <memstr.h>
#include <print.h>
#include <lib/elf.h>
#include <errno.h>
#include <func.h>
#include <syscall/copy.h>

#ifndef LOADED_PROG_STACK_PAGES_NO
#define LOADED_PROG_STACK_PAGES_NO 1
#endif

/** Spinlock protecting the tasks_btree B+tree. */
SPINLOCK_INITIALIZE(tasks_lock);

/** B+tree of active tasks.
 *
 * A task found in the tasks_btree is guaranteed to exist as long as:
 * @li the tasks_lock is held,
 * @li the task's lock is held (provided it was acquired before tasks_lock
 *     was released), or
 * @li the task's refcount is greater than 0.
 *
 */
btree_t tasks_btree;

static task_id_t task_counter = 0;

/** Initialize tasks
 *
 * Initialize kernel task support.
 *
 */
void task_init(void)
{
	TASK = NULL;
	btree_create(&tasks_btree);
}

/** Kill all tasks except the current task.
 *
 */
void task_done(void)
{
	task_t *t;
	do {
		/* Repeat while there are tasks other than TASK. */
		
		/* Messing with task structures, avoid deadlock */
		ipl_t ipl = interrupts_disable();
		spinlock_lock(&tasks_lock);
		
		t = NULL;
		link_t *cur;
		for (cur = tasks_btree.leaf_head.next;
		    cur != &tasks_btree.leaf_head; cur = cur->next) {
			btree_node_t *node;
			node = list_get_instance(cur, btree_node_t, leaf_link);
			
			unsigned int i;
			for (i = 0; i < node->keys; i++) {
				if ((task_t *) node->value[i] != TASK) {
					t = (task_t *) node->value[i];
					break;
				}
			}
		}
		
		if (t != NULL) {
			task_id_t id = t->taskid;
			
			spinlock_unlock(&tasks_lock);
			interrupts_restore(ipl);
			
#ifdef CONFIG_DEBUG
			printf("Killing task %llu\n", id);
#endif
			task_kill(id);
		} else {
			spinlock_unlock(&tasks_lock);
			interrupts_restore(ipl);
		}
		
	} while (t != NULL);
}

/** Create new task
 *
 * Create a new task with no threads.
 *
 * @param as Task's address space.
 * @param name Symbolic name.
 *
 * @return New task's structure.
 *
 */
task_t *task_create(as_t *as, char *name)
{
	ipl_t ipl;
	task_t *ta;
	int i;
	
	ta = (task_t *) malloc(sizeof(task_t), 0);

	task_create_arch(ta);

	spinlock_initialize(&ta->lock, "task_ta_lock");
	list_initialize(&ta->th_head);
	ta->as = as;
	ta->name = name;
	atomic_set(&ta->refcount, 0);
	atomic_set(&ta->lifecount, 0);
	ta->context = CONTEXT;

	ta->capabilities = 0;
	ta->cycles = 0;
	
	ipc_answerbox_init(&ta->answerbox);
	for (i = 0; i < IPC_MAX_PHONES; i++)
		ipc_phone_init(&ta->phones[i]);
	if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
	    ta->context)))
		ipc_phone_connect(&ta->phones[0], ipc_phone_0);
	atomic_set(&ta->active_calls, 0);

	mutex_initialize(&ta->futexes_lock);
	btree_create(&ta->futexes);
	
	ipl = interrupts_disable();

	/*
	 * Increment address space reference count.
	 */
	atomic_inc(&as->refcount);

	spinlock_lock(&tasks_lock);
	ta->taskid = ++task_counter;
	btree_insert(&tasks_btree, (btree_key_t) ta->taskid, (void *) ta, NULL);
	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);

	return ta;
}

/** Destroy task.
 *
 * @param t Task to be destroyed.
 */
void task_destroy(task_t *t)
{
	/*
	 * Remove the task from the task B+tree.
	 */
	spinlock_lock(&tasks_lock);
	btree_remove(&tasks_btree, t->taskid, NULL);
	spinlock_unlock(&tasks_lock);

	/*
	 * Perform architecture specific task destruction.
	 */
	task_destroy_arch(t);

	/*
	 * Free up dynamically allocated state.
	 */
	btree_destroy(&t->futexes);

	/*
	 * Drop our reference to the address space.
	 */
	if (atomic_predec(&t->as->refcount) == 0)
		as_destroy(t->as);
	
	free(t);
	TASK = NULL;
}
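/*
 * Usage sketch (illustrative, not part of the original code): the lookup
 * discipline implied by the tasks_btree comment above. A task found under
 * tasks_lock may only be used after its own lock is taken, and taking the
 * task's lock before releasing tasks_lock keeps the task pinned. The helper
 * name task_print_name is hypothetical.
 */
#if 0
static void task_print_name(task_id_t id)
{
	ipl_t ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);
	
	task_t *t = task_find_by_id(id);
	if (t) {
		/* Pin the task before dropping tasks_lock. */
		spinlock_lock(&t->lock);
		spinlock_unlock(&tasks_lock);
		printf("task %llu: %s\n", t->taskid, t->name);
		spinlock_unlock(&t->lock);
	} else
		spinlock_unlock(&tasks_lock);
	
	interrupts_restore(ipl);
}
#endif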
/** Create a new task with one thread and run it
 *
 * @param program_addr Address of program executable image.
 * @param name Program name.
 *
 * @return Task of the running program or NULL on error.
 */
task_t *task_run_program(void *program_addr, char *name)
{
	as_t *as;
	as_area_t *a;
	int rc;
	thread_t *t;
	task_t *task;
	uspace_arg_t *kernel_uarg;

	as = as_create(0);
	ASSERT(as);

	rc = elf_load((elf_header_t *) program_addr, as);
	if (rc != EE_OK) {
		as_destroy(as);
		return NULL;
	}
	
	kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
	kernel_uarg->uspace_entry =
	    (void *) ((elf_header_t *) program_addr)->e_entry;
	kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
	kernel_uarg->uspace_thread_function = NULL;
	kernel_uarg->uspace_thread_arg = NULL;
	kernel_uarg->uspace_uarg = NULL;
	
	task = task_create(as, name);
	ASSERT(task);

	/*
	 * Create the stack as_area.
	 */
	a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
	    LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
	    AS_AREA_ATTR_NONE, &anon_backend, NULL);

	/*
	 * Create the main thread.
	 */
	t = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
	    "uinit", false);
	ASSERT(t);
	
	thread_ready(t);

	return task;
}

/** Syscall for reading task ID from userspace.
 *
 * @param uspace_task_id Userspace address of an 8-byte buffer in which to
 * store the current task ID.
 *
 * @return 0 on success or an error code from @ref errno.h.
 */
unative_t sys_task_get_id(task_id_t *uspace_task_id)
{
	/*
	 * No need to acquire lock on TASK because taskid
	 * remains constant for the lifespan of the task.
	 */
	return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
	    sizeof(TASK->taskid));
}

/** Find task structure corresponding to task ID.
 *
 * The tasks_lock must already be held by the caller of this function and
 * interrupts must be disabled.
 *
 * @param id Task ID.
 *
 * @return Task structure address or NULL if there is no such task ID.
 */
task_t *task_find_by_id(task_id_t id)
{
	btree_node_t *leaf;
	
	return (task_t *) btree_search(&tasks_btree, (btree_key_t) id, &leaf);
}

/** Get accounting data of given task.
 *
 * Note that the task lock of 't' must already be held and interrupts must
 * already be disabled.
 *
 * @param t Pointer to the task.
 *
 * @return Number of cycles used by the task and all its counted threads.
 *
 */
uint64_t task_get_accounting(task_t *t)
{
	/* Accumulated value of task */
	uint64_t ret = t->cycles;
	
	/* Current values of threads */
	link_t *cur;
	for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
		thread_t *thr = list_get_instance(cur, thread_t, th_link);
		
		spinlock_lock(&thr->lock);
		/* Process only counted threads */
		if (!thr->uncounted) {
			if (thr == THREAD) {
				/* Update accounting of current thread */
				thread_update_accounting();
			}
			ret += thr->cycles;
		}
		spinlock_unlock(&thr->lock);
	}
	
	return ret;
}
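/*
 * Usage sketch (illustrative, not part of the original code): calling
 * task_get_accounting() under its documented preconditions (task lock held,
 * interrupts disabled), then formatting the result with order() the same way
 * task_print_list() below does. The helper name print_task_cycles is
 * hypothetical.
 */
#if 0
static void print_task_cycles(task_t *t)
{
	ipl_t ipl = interrupts_disable();
	spinlock_lock(&t->lock);
	
	uint64_t cycles;
	char suffix;
	order(task_get_accounting(t), &cycles, &suffix);
	
	spinlock_unlock(&t->lock);
	interrupts_restore(ipl);
	
	printf("task %llu: %llu%c cycles\n", t->taskid, cycles, suffix);
}
#endif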
/** Kill task.
 *
 * This function is idempotent.
 * It signals all the task's threads to bail out.
 *
 * @param id ID of the task to be killed.
 *
 * @return 0 on success or an error code from errno.h.
 */
int task_kill(task_id_t id)
{
	ipl_t ipl;
	task_t *ta;
	link_t *cur;

	if (id == 1)
		return EPERM;
	
	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);
	if (!(ta = task_find_by_id(id))) {
		spinlock_unlock(&tasks_lock);
		interrupts_restore(ipl);
		return ENOENT;
	}
	spinlock_unlock(&tasks_lock);
	
	/*
	 * Interrupt all threads of the task.
	 */
	spinlock_lock(&ta->lock);
	for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
		thread_t *thr;
		bool sleeping = false;
		
		thr = list_get_instance(cur, thread_t, th_link);
		
		spinlock_lock(&thr->lock);
		thr->interrupted = true;
		if (thr->state == Sleeping)
			sleeping = true;
		spinlock_unlock(&thr->lock);
		
		if (sleeping)
			waitq_interrupt_sleep(thr);
	}
	spinlock_unlock(&ta->lock);
	interrupts_restore(ipl);
	
	return 0;
}

/** Print task list */
void task_print_list(void)
{
	link_t *cur;
	ipl_t ipl;
	
	/* Messing with task structures, avoid deadlock */
	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);

	printf("taskid name       ctx address    as         cycles     threads "
	    "calls  callee\n");
	printf("------ ---------- --- ---------- ---------- ---------- ------- "
	    "------ ------>\n");

	for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head;
	    cur = cur->next) {
		btree_node_t *node;
		unsigned int i;
		
		node = list_get_instance(cur, btree_node_t, leaf_link);
		
		for (i = 0; i < node->keys; i++) {
			task_t *t;
			int j;
			
			t = (task_t *) node->value[i];
			
			spinlock_lock(&t->lock);
			
			uint64_t cycles;
			char suffix;
			order(task_get_accounting(t), &cycles, &suffix);
			
			printf("%-6llu %-10s %-3ld %#10zx %#10zx %9llu%c %7zd "
			    "%6zd", t->taskid, t->name, t->context, t, t->as,
			    cycles, suffix, t->refcount,
			    atomic_get(&t->active_calls));
			for (j = 0; j < IPC_MAX_PHONES; j++) {
				if (t->phones[j].callee)
					printf(" %zd:%#zx", j,
					    t->phones[j].callee);
			}
			printf("\n");
			
			spinlock_unlock(&t->lock);
		}
	}

	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);
}

/** @} */