Changes in kernel/generic/src/proc/task.c [7e752b2:41df2827] in mainline
File: kernel/generic/src/proc/task.c (1 edited)
Legend:
  ' '  Unmodified
  '+'  Added (present in r41df2827)
  '-'  Removed (present only in r7e752b2)
kernel/generic/src/proc/task.c
--- kernel/generic/src/proc/task.c (r7e752b2)
+++ kernel/generic/src/proc/task.c (r41df2827)

 /*
- * Copyright (c) 2010 Jakub Jermar
+ * Copyright (c) 2001-2004 Jakub Jermar
  * All rights reserved.
  *
…
 /**
  * @file
- * @brief
+ * @brief Task management.
  */
 
…
 #include <errno.h>
 #include <func.h>
-#include <str.h>
+#include <string.h>
 #include <memstr.h>
 #include <syscall/copy.h>
…
 
 /** Spinlock protecting the tasks_tree AVL tree. */
-IRQ_SPINLOCK_INITIALIZE(tasks_lock);
+SPINLOCK_INITIALIZE(tasks_lock);
 
 /** AVL tree of active tasks.
…
  * The task is guaranteed to exist after it was found in the tasks_tree as
  * long as:
- *
  * @li the tasks_lock is held,
  * @li the task's lock is held when task's lock is acquired before releasing
…
 /* Forward declarations. */
 static void task_kill_internal(task_t *);
-static int tsk_constructor(void *, unsigned int);
-
-/** Initialize kernel tasks support.
- *
- */
+static int tsk_constructor(void *, int);
+
+/** Initialize kernel tasks support. */
 void task_init(void)
 {
…
 }
 
-/** Task finish walker.
- *
+/*
  * The idea behind this walker is to kill and count all tasks different from
  * TASK.
- *
  */
 static bool task_done_walker(avltree_node_t *node, void *arg)
 {
-    task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);
-    size_t *cnt = (size_t *) arg;
-
-    if (task != TASK) {
+    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
+    unsigned *cnt = (unsigned *) arg;
+
+    if (t != TASK) {
         (*cnt)++;
-
 #ifdef CONFIG_DEBUG
-        printf("[%" PRIu64 "] ", task->taskid);
-#endif
-
-        task_kill_internal(task);
+        printf("[%" PRIu64 "] ", t->taskid);
+#endif
+        task_kill_internal(t);
     }
-
-    /* Continue the walk */
-    return true;
-}
-
-/** Kill all tasks except the current task.
- *
- */
+
+    return true;    /* continue the walk */
+}
+
+/** Kill all tasks except the current task. */
 void task_done(void)
 {
-    size_t tasks_left;
-
-    /* Repeat until there are any tasks except TASK */
-    do {
+    unsigned tasks_left;
+
+    do { /* Repeat until there are any tasks except TASK */
+        /* Messing with task structures, avoid deadlock */
 #ifdef CONFIG_DEBUG
         printf("Killing tasks... ");
 #endif
-
-        irq_spinlock_lock(&tasks_lock, true);
+        ipl_t ipl = interrupts_disable();
+        spinlock_lock(&tasks_lock);
         tasks_left = 0;
         avltree_walk(&tasks_tree, task_done_walker, &tasks_left);
-        irq_spinlock_unlock(&tasks_lock, true);
-
+        spinlock_unlock(&tasks_lock);
+        interrupts_restore(ipl);
         thread_sleep(1);
-
 #ifdef CONFIG_DEBUG
         printf("\n");
 #endif
-    } while (tasks_left > 0);
-}
-
-int tsk_constructor(void *obj, unsigned int kmflags)
-{
-    task_t *task = (task_t *) obj;
-
-    atomic_set(&task->refcount, 0);
-    atomic_set(&task->lifecount, 0);
-    atomic_set(&task->active_calls, 0);
-
-    irq_spinlock_initialize(&task->lock, "task_t_lock");
-    mutex_initialize(&task->futexes_lock, MUTEX_PASSIVE);
-
-    list_initialize(&task->th_head);
-    list_initialize(&task->sync_box_head);
-
-    ipc_answerbox_init(&task->answerbox, task);
-
-    size_t i;
+    } while (tasks_left);
+}
+
+int tsk_constructor(void *obj, int kmflags)
+{
+    task_t *ta = obj;
+    int i;
+
+    atomic_set(&ta->refcount, 0);
+    atomic_set(&ta->lifecount, 0);
+    atomic_set(&ta->active_calls, 0);
+
+    spinlock_initialize(&ta->lock, "task_ta_lock");
+    mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE);
+
+    list_initialize(&ta->th_head);
+    list_initialize(&ta->sync_box_head);
+
+    ipc_answerbox_init(&ta->answerbox, ta);
     for (i = 0; i < IPC_MAX_PHONES; i++)
-        ipc_phone_init(&task->phones[i]);
-
+        ipc_phone_init(&ta->phones[i]);
+
 #ifdef CONFIG_UDEBUG
     /* Init kbox stuff */
-    task->kb.thread = NULL;
-    ipc_answerbox_init(&task->kb.box, task);
-    mutex_initialize(&task->kb.cleanup_lock, MUTEX_PASSIVE);
-#endif
-
+    ta->kb.thread = NULL;
+    ipc_answerbox_init(&ta->kb.box, ta);
+    mutex_initialize(&ta->kb.cleanup_lock, MUTEX_PASSIVE);
+#endif
+
     return 0;
 }
…
 /** Create new task with no threads.
  *
- * @param as   Task's address space.
- * @param name Symbolic name (a copy is made).
- *
- * @return New task's structure.
- *
- */
-task_t *task_create(as_t *as, const char *name)
-{
-    task_t *task = (task_t *) slab_alloc(task_slab, 0);
-    task_create_arch(task);
-
-    task->as = as;
-    str_cpy(task->name, TASK_NAME_BUFLEN, name);
-
-    task->context = CONTEXT;
-    task->capabilities = 0;
-    task->ucycles = 0;
-    task->kcycles = 0;
-
-    task->ipc_info.call_sent = 0;
-    task->ipc_info.call_received = 0;
-    task->ipc_info.answer_sent = 0;
-    task->ipc_info.answer_received = 0;
-    task->ipc_info.irq_notif_received = 0;
-    task->ipc_info.forwarded = 0;
-
+ * @param as   Task's address space.
+ * @param name Symbolic name (a copy is made).
+ *
+ * @return New task's structure.
+ *
+ */
+task_t *task_create(as_t *as, char *name)
+{
+    ipl_t ipl;
+    task_t *ta;
+
+    ta = (task_t *) slab_alloc(task_slab, 0);
+    task_create_arch(ta);
+    ta->as = as;
+    memcpy(ta->name, name, TASK_NAME_BUFLEN);
+    ta->name[TASK_NAME_BUFLEN - 1] = 0;
+
+    ta->context = CONTEXT;
+    ta->capabilities = 0;
+    ta->cycles = 0;
+
 #ifdef CONFIG_UDEBUG
     /* Init debugging stuff */
-    udebug_task_init(&task->udebug);
-
+    udebug_task_init(&ta->udebug);
+
     /* Init kbox stuff */
-    task->kb.finished = false;
-#endif
-
+    ta->kb.finished = false;
+#endif
+
     if ((ipc_phone_0) &&
-        (context_check(ipc_phone_0->task->context, task->context)))
-        ipc_phone_connect(&task->phones[0], ipc_phone_0);
-
-    btree_create(&task->futexes);
-
-    /*
-     * Get a reference to the address space.
-     */
-    as_hold(task->as);
-
-    irq_spinlock_lock(&tasks_lock, true);
-
-    task->taskid = ++task_counter;
-    avltree_node_initialize(&task->tasks_tree_node);
-    task->tasks_tree_node.key = task->taskid;
-    avltree_insert(&tasks_tree, &task->tasks_tree_node);
-
-    irq_spinlock_unlock(&tasks_lock, true);
-
-    return task;
+        (context_check(ipc_phone_0->task->context, ta->context)))
+        ipc_phone_connect(&ta->phones[0], ipc_phone_0);
+
+    btree_create(&ta->futexes);
+
+    ipl = interrupts_disable();
+    atomic_inc(&as->refcount);
+    spinlock_lock(&tasks_lock);
+    ta->taskid = ++task_counter;
+    avltree_node_initialize(&ta->tasks_tree_node);
+    ta->tasks_tree_node.key = ta->taskid;
+    avltree_insert(&tasks_tree, &ta->tasks_tree_node);
+    spinlock_unlock(&tasks_lock);
+    interrupts_restore(ipl);
+
+    return ta;
 }
 
 /** Destroy task.
  *
- * @param task Task to be destroyed.
- *
- */
-void task_destroy(task_t *task)
+ * @param t Task to be destroyed.
+ */
+void task_destroy(task_t *t)
 {
     /*
      * Remove the task from the task B+tree.
      */
-    irq_spinlock_lock(&tasks_lock, true);
-    avltree_delete(&tasks_tree, &task->tasks_tree_node);
-    irq_spinlock_unlock(&tasks_lock, true);
-
+    spinlock_lock(&tasks_lock);
+    avltree_delete(&tasks_tree, &t->tasks_tree_node);
+    spinlock_unlock(&tasks_lock);
+
     /*
      * Perform architecture specific task destruction.
      */
-    task_destroy_arch(task);
-
+    task_destroy_arch(t);
+
     /*
      * Free up dynamically allocated state.
      */
-    btree_destroy(&task->futexes);
-
+    btree_destroy(&t->futexes);
+
     /*
      * Drop our reference to the address space.
      */
-    as_release(task->as);
-
-    slab_free(task_slab, task);
-}
-
-/** Hold a reference to a task.
- *
- * Holding a reference to a task prevents destruction of that task.
- *
- * @param task Task to be held.
- *
- */
-void task_hold(task_t *task)
-{
-    atomic_inc(&task->refcount);
-}
-
-/** Release a reference to a task.
- *
- * The last one to release a reference to a task destroys the task.
- *
- * @param task Task to be released.
- *
- */
-void task_release(task_t *task)
-{
-    if ((atomic_predec(&task->refcount)) == 0)
-        task_destroy(task);
+    if (atomic_predec(&t->as->refcount) == 0)
+        as_destroy(t->as);
+
+    slab_free(task_slab, t);
+    TASK = NULL;
 }
 
 /** Syscall for reading task ID from userspace.
  *
- * @param uspace_task_id Userspace address of 8-byte buffer
- *                       where to store current task ID.
- *
- * @return Zero on success or an error code from @ref errno.h.
- *
+ * @param uspace_task_id userspace address of 8-byte buffer
+ *                       where to store current task ID.
+ *
+ * @return Zero on success or an error code from @ref errno.h.
  */
 unative_t sys_task_get_id(task_id_t *uspace_task_id)
…
  * The name simplifies identifying the task in the task list.
  *
- * @param name
- *
+ * @param name The new name for the task. (typically the same
+ *             as the command used to execute it).
  *
  * @return 0 on success or an error code from @ref errno.h.
- *
  */
 unative_t sys_task_set_name(const char *uspace_name, size_t name_len)
…
     int rc;
     char namebuf[TASK_NAME_BUFLEN];
 
     /* Cap length of name and copy it from userspace. */
 
     if (name_len > TASK_NAME_BUFLEN - 1)
         name_len = TASK_NAME_BUFLEN - 1;
 
     rc = copy_from_uspace(namebuf, uspace_name, name_len);
     if (rc != 0)
         return (unative_t) rc;
 
     namebuf[name_len] = '\0';
     str_cpy(TASK->name, TASK_NAME_BUFLEN, namebuf);
 
     return EOK;
 }
…
  * interrupts must be disabled.
  *
- * @param id Task ID.
- *
- * @return Task structure address or NULL if there is no such task ID.
- *
- */
-task_t *task_find_by_id(task_id_t id)
-{
-    ASSERT(interrupts_disabled());
-    ASSERT(irq_spinlock_locked(&tasks_lock));
-
-    avltree_node_t *node =
-        avltree_search(&tasks_tree, (avltree_key_t) id);
-
+ * @param id Task ID.
+ *
+ * @return Task structure address or NULL if there is no such task
+ *         ID.
+ */
+task_t *task_find_by_id(task_id_t id) { avltree_node_t *node;
+
+    node = avltree_search(&tasks_tree, (avltree_key_t) id);
+
     if (node)
-        return avltree_get_instance(node, task_t, tasks_tree_node);
-
+        return avltree_get_instance(node, task_t, tasks_tree_node);
     return NULL;
 }
…
 /** Get accounting data of given task.
  *
- * Note that task lock of 'task' must be already held and interrupts must be
+ * Note that task lock of 't' must be already held and interrupts must be
  * already disabled.
  *
- * @param task    Pointer to the task.
- * @param ucycles Out pointer to sum of all user cycles.
- * @param kcycles Out pointer to sum of all kernel cycles.
- *
- */
-void task_get_accounting(task_t *task, uint64_t *ucycles, uint64_t *kcycles)
-{
-    ASSERT(interrupts_disabled());
-    ASSERT(irq_spinlock_locked(&task->lock));
-
-    /* Accumulated values of task */
-    uint64_t uret = task->ucycles;
-    uint64_t kret = task->kcycles;
+ * @param t Pointer to thread.
+ *
+ * @return Number of cycles used by the task and all its threads
+ *         so far.
+ */
+uint64_t task_get_accounting(task_t *t)
+{
+    /* Accumulated value of task */
+    uint64_t ret = t->cycles;
 
     /* Current values of threads */
     link_t *cur;
-    for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
-        thread_t *thread = list_get_instance(cur, thread_t, th_link);
+    for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
+        thread_t *thr = list_get_instance(cur, thread_t, th_link);
 
-        irq_spinlock_lock(&thread->lock, false);
-
+        spinlock_lock(&thr->lock);
         /* Process only counted threads */
-        if (!thread->uncounted) {
-            if (thread == THREAD) {
+        if (!thr->uncounted) {
+            if (thr == THREAD) {
                 /* Update accounting of current thread */
-                thread_update_accounting(false);
-            }
-
-            uret += thread->ucycles;
-            kret += thread->kcycles;
+                thread_update_accounting();
+            }
+            ret += thr->cycles;
         }
-
-        irq_spinlock_unlock(&thread->lock, false);
+        spinlock_unlock(&thr->lock);
     }
 
-    *ucycles = uret;
-    *kcycles = kret;
-}
-
-static void task_kill_internal(task_t *task)
+    return ret;
+}
+
+static void task_kill_internal(task_t *ta)
 {
     link_t *cur;
 
     /*
      * Interrupt all threads.
      */
-    irq_spinlock_lock(&task->lock, false);
-    for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
-        thread_t *thread = list_get_instance(cur, thread_t, th_link);
+    spinlock_lock(&ta->lock);
+    for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
+        thread_t *thr;
         bool sleeping = false;
 
-        irq_spinlock_lock(&thread->lock, false);
+        thr = list_get_instance(cur, thread_t, th_link);
 
-        thread->interrupted = true;
-        if (thread->state == Sleeping)
+        spinlock_lock(&thr->lock);
+        thr->interrupted = true;
+        if (thr->state == Sleeping)
             sleeping = true;
-
-        irq_spinlock_unlock(&thread->lock, false);
+        spinlock_unlock(&thr->lock);
 
         if (sleeping)
-            waitq_interrupt_sleep(thread);
+            waitq_interrupt_sleep(thr);
     }
-
-    irq_spinlock_unlock(&task->lock, false);
+    spinlock_unlock(&ta->lock);
 }
…
  * It signals all the task's threads to bail it out.
  *
- * @param id ID of the task to be killed.
- *
- * @return Zero on success or an error code from errno.h.
- *
+ * @param id ID of the task to be killed.
+ *
+ * @return Zero on success or an error code from errno.h.
  */
 int task_kill(task_id_t id)
 {
+    ipl_t ipl;
+    task_t *ta;
+
     if (id == 1)
         return EPERM;
 
-    irq_spinlock_lock(&tasks_lock, true);
-
-    task_t *task = task_find_by_id(id);
-    if (!task) {
-        irq_spinlock_unlock(&tasks_lock, true);
+    ipl = interrupts_disable();
+    spinlock_lock(&tasks_lock);
+    if (!(ta = task_find_by_id(id))) {
+        spinlock_unlock(&tasks_lock);
+        interrupts_restore(ipl);
         return ENOENT;
     }
-
-    task_kill_internal(task);
-    irq_spinlock_unlock(&tasks_lock, true);
-
-    return EOK;
+    task_kill_internal(ta);
+    spinlock_unlock(&tasks_lock);
+    interrupts_restore(ipl);
+    return 0;
 }
 
 static bool task_print_walker(avltree_node_t *node, void *arg)
 {
-    bool *additional = (bool *) arg;
-    task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);
-    irq_spinlock_lock(&task->lock, false);
-
-    uint64_t ucycles;
-    uint64_t kcycles;
-    char usuffix, ksuffix;
-    task_get_accounting(task, &ucycles, &kcycles);
-    order_suffix(ucycles, &ucycles, &usuffix);
-    order_suffix(kcycles, &kcycles, &ksuffix);
-
-#ifdef __32_BITS__
-    if (*additional)
-        printf("%-8" PRIu64 " %9" PRIua " %7" PRIua, task->taskid,
-            atomic_get(&task->refcount), atomic_get(&task->active_calls));
-    else
-        printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %10p %10p"
-            " %9" PRIu64 "%c %9" PRIu64 "%c\n", task->taskid,
-            task->name, task->context, task, task->as,
-            ucycles, usuffix, kcycles, ksuffix);
-#endif
-
+    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
+    int j;
+
+    spinlock_lock(&t->lock);
+
+    uint64_t cycles;
+    char suffix;
+    order(task_get_accounting(t), &cycles, &suffix);
+
+#ifdef __32_BITS__
+    printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %10p %10p %9" PRIu64
+        "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
+        suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
+#endif
+
 #ifdef __64_BITS__
-    if (*additional)
-        printf("%-8" PRIu64 " %9" PRIu64 "%c %9" PRIu64 "%c "
-            "%9" PRIua " %7" PRIua,
-            task->taskid, ucycles, usuffix, kcycles, ksuffix,
-            atomic_get(&task->refcount), atomic_get(&task->active_calls));
-    else
-        printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %18p %18p\n",
-            task->taskid, task->name, task->context, task, task->as);
-#endif
-
-    if (*additional) {
-        size_t i;
-        for (i = 0; i < IPC_MAX_PHONES; i++) {
-            if (task->phones[i].callee)
-                printf(" %zu:%p", i, task->phones[i].callee);
-        }
-        printf("\n");
+    printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %18p %18p %9" PRIu64
+        "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
+        suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
+#endif
+
+    for (j = 0; j < IPC_MAX_PHONES; j++) {
+        if (t->phones[j].callee)
+            printf(" %d:%p", j, t->phones[j].callee);
     }
-
-    irq_spinlock_unlock(&task->lock, false);
+    printf("\n");
+
+    spinlock_unlock(&t->lock);
     return true;
 }
 
-/** Print task list
- *
- * @param additional Print additional information.
- *
- */
-void task_print_list(bool additional)
-{
+/** Print task list */
+void task_print_list(void)
+{
+    ipl_t ipl;
+
     /* Messing with task structures, avoid deadlock */
-    irq_spinlock_lock(&tasks_lock, true);
-
-#ifdef __32_BITS__
-    if (additional)
-        printf("[id ] [threads] [calls] [callee\n");
-    else
-        printf("[id ] [name ] [ctx] [address ] [as ]"
-            " [ucycles ] [kcycles ]\n");
-#endif
-
+    ipl = interrupts_disable();
+    spinlock_lock(&tasks_lock);
+
+#ifdef __32_BITS__
+    printf("taskid name         ctx address    as         "
+        "cycles     threads calls  callee\n");
+    printf("------ ------------ --- ---------- ---------- "
+        "---------- ------- ------ ------>\n");
+#endif
+
 #ifdef __64_BITS__
-    if (additional)
-        printf("[id ] [ucycles ] [kcycles ] [threads] [calls]"
-            " [callee\n");
-    else
-        printf("[id ] [name ] [ctx] [address ]"
-            " [as ]\n");
-#endif
-
-    avltree_walk(&tasks_tree, task_print_walker, &additional);
-
-    irq_spinlock_unlock(&tasks_lock, true);
+    printf("taskid name         ctx address            as                 "
+        "cycles     threads calls  callee\n");
+    printf("------ ------------ --- ------------------ ------------------ "
+        "---------- ------- ------ ------>\n");
+#endif
+
+    avltree_walk(&tasks_tree, task_print_walker, NULL);
+
+    spinlock_unlock(&tasks_lock);
+    interrupts_restore(ipl);
 }
 
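Most of this changeset is a mechanical translation between two locking idioms. On the r41df2827 side, every critical section brackets a plain spinlock with an explicit interrupts_disable()/interrupts_restore() pair and an ipl_t local at the call site; on the r7e752b2 side the same sections go through irq_spinlock wrappers whose boolean argument appears (judging from the replacements above) to select whether the wrapper itself disables and restores interrupts: true at the outermost lock such as tasks_lock, false where interrupts are already off (task_kill_internal, the print and accounting walkers). The sketch below is a minimal, self-contained userspace mock of the two shapes; the struct layout, stub bodies and main() driver are illustrative assumptions, not the HelenOS definitions.

/*
 * Mock of the two locking shapes seen in the diff above.
 * Build: cc -Wall lock_shapes.c -o lock_shapes (hypothetical file name).
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long ipl_t;            /* stand-in for the kernel's ipl_t */

typedef struct {
    bool locked;
    const char *name;
} spinlock_t;

typedef struct {
    spinlock_t lock;
    ipl_t ipl;                          /* saved interrupt level (assumption) */
} irq_spinlock_t;

/* Stubs; the real kernel primitives do actual work. */
static ipl_t interrupts_disable(void) { return 0; }
static void interrupts_restore(ipl_t ipl) { (void) ipl; }
static void spinlock_lock(spinlock_t *l) { l->locked = true; }
static void spinlock_unlock(spinlock_t *l) { l->locked = false; }

/* r41df2827 shape: caller manages the interrupt level itself. */
static void plain_spinlock_section(spinlock_t *tasks_lock)
{
    ipl_t ipl = interrupts_disable();
    spinlock_lock(tasks_lock);
    /* ... walk tasks_tree ... */
    spinlock_unlock(tasks_lock);
    interrupts_restore(ipl);
}

/* r7e752b2 shape: the wrapper saves/restores the level when irq_dis is
 * true; false presumably means the caller already disabled interrupts. */
static void irq_spinlock_lock(irq_spinlock_t *l, bool irq_dis)
{
    if (irq_dis)
        l->ipl = interrupts_disable();
    spinlock_lock(&l->lock);
}

static void irq_spinlock_unlock(irq_spinlock_t *l, bool irq_dis)
{
    spinlock_unlock(&l->lock);
    if (irq_dis)
        interrupts_restore(l->ipl);
}

static void irq_spinlock_section(irq_spinlock_t *tasks_lock)
{
    irq_spinlock_lock(tasks_lock, true);
    /* ... walk tasks_tree ... */
    irq_spinlock_unlock(tasks_lock, true);
}

int main(void)
{
    spinlock_t plain = { false, "tasks_lock" };
    irq_spinlock_t wrapped = { { false, "tasks_lock" }, 0 };

    plain_spinlock_section(&plain);
    irq_spinlock_section(&wrapped);
    puts("both locking shapes executed");
    return 0;
}

The visible effect at the call sites is the one the diff shows: with the wrapper the saved interrupt level travels with the lock (in this mock, inside irq_spinlock_t), so the r7e752b2 versions of task_create(), task_kill() and task_print_list() need no ipl_t locals.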