Changeset c47e1a8 in mainline for kernel/generic/src/proc/task.c
- Timestamp: 2010-05-21T07:50:04Z (15 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: d51ee2b
- Parents: cf8cc36 (diff), 15b592b (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.

- File: 1 edited
Legend:
- Unmodified (no prefix)
- Added (prefixed with +)
- Removed (prefixed with -)
kernel/generic/src/proc/task.c
--- kernel/generic/src/proc/task.c (rcf8cc36)
+++ kernel/generic/src/proc/task.c (rc47e1a8)

 /*
- * Copyright (c) 2001-2004 Jakub Jermar
+ * Copyright (c) 2010 Jakub Jermar
  * All rights reserved.
  *
…
 /**
  * @file
- * @brief
+ * @brief Task management.
  */

…
 #include <errno.h>
 #include <func.h>
-#include <string.h>
+#include <str.h>
 #include <memstr.h>
 #include <syscall/copy.h>
…
  * The task is guaranteed to exist after it was found in the tasks_tree as
  * long as:
+ *
  * @li the tasks_lock is held,
  * @li the task's lock is held when task's lock is acquired before releasing
…
     task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
     unsigned *cnt = (unsigned *) arg;

     if (t != TASK) {
         (*cnt)++;
…
         task_kill_internal(t);
     }
-    return true;    /* continue the walk */
+
+    /* Continue the walk */
+    return true;
 }
…
 {
     unsigned tasks_left;

     do { /* Repeat until there are any tasks except TASK */
         /* Messing with task structures, avoid deadlock */
…
     task_t *ta = obj;
     int i;

     atomic_set(&ta->refcount, 0);
     atomic_set(&ta->lifecount, 0);
     atomic_set(&ta->active_calls, 0);

     spinlock_initialize(&ta->lock, "task_ta_lock");
     mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE);

     list_initialize(&ta->th_head);
     list_initialize(&ta->sync_box_head);

     ipc_answerbox_init(&ta->answerbox, ta);
     for (i = 0; i < IPC_MAX_PHONES; i++)
         ipc_phone_init(&ta->phones[i]);

 #ifdef CONFIG_UDEBUG
     /* Init kbox stuff */
…
     mutex_initialize(&ta->kb.cleanup_lock, MUTEX_PASSIVE);
 #endif

     return 0;
 }
…
 /** Create new task with no threads.
  *
- * @param as
- * @param name
- *
- * @return
- *
- */
-task_t *task_create(as_t *as, char *name)
+ * @param as   Task's address space.
+ * @param name Symbolic name (a copy is made).
+ *
+ * @return New task's structure.
+ *
+ */
+task_t *task_create(as_t *as, const char *name)
 {
     ipl_t ipl;
…
     memcpy(ta->name, name, TASK_NAME_BUFLEN);
     ta->name[TASK_NAME_BUFLEN - 1] = 0;

     ta->context = CONTEXT;
     ta->capabilities = 0;
-    ta->cycles = 0;
+    ta->ucycles = 0;
+    ta->kcycles = 0;
+
+    ta->ipc_info.call_sent = 0;
+    ta->ipc_info.call_recieved = 0;
+    ta->ipc_info.answer_sent = 0;
+    ta->ipc_info.answer_recieved = 0;
+    ta->ipc_info.irq_notif_recieved = 0;
+    ta->ipc_info.forwarded = 0;

 #ifdef CONFIG_UDEBUG
     /* Init debugging stuff */
     udebug_task_init(&ta->udebug);

     /* Init kbox stuff */
     ta->kb.finished = false;
 #endif

     if ((ipc_phone_0) &&
         (context_check(ipc_phone_0->task->context, ta->context)))
         ipc_phone_connect(&ta->phones[0], ipc_phone_0);

     btree_create(&ta->futexes);

+    /*
+     * Get a reference to the address space.
+     */
+    as_hold(ta->as);
+
     ipl = interrupts_disable();
-    atomic_inc(&as->refcount);
     spinlock_lock(&tasks_lock);
     ta->taskid = ++task_counter;
…
 /** Destroy task.
  *
  * @param t Task to be destroyed.
+ *
  */
 void task_destroy(task_t *t)
…
     avltree_delete(&tasks_tree, &t->tasks_tree_node);
     spinlock_unlock(&tasks_lock);

     /*
      * Perform architecture specific task destruction.
      */
     task_destroy_arch(t);

     /*
      * Free up dynamically allocated state.
      */
     btree_destroy(&t->futexes);

     /*
      * Drop our reference to the address space.
      */
-    if (atomic_predec(&t->as->refcount) == 0)
-        as_destroy(t->as);
+    as_release(t->as);

     slab_free(task_slab, t);
-    TASK = NULL;
+}
+
+/** Hold a reference to a task.
+ *
+ * Holding a reference to a task prevents destruction of that task.
+ *
+ * @param t Task to be held.
+ */
+void task_hold(task_t *t)
+{
+    atomic_inc(&t->refcount);
+}
+
+/** Release a reference to a task.
+ *
+ * The last one to release a reference to a task destroys the task.
+ *
+ * @param t Task to be released.
+ */
+void task_release(task_t *t)
+{
+    if ((atomic_predec(&t->refcount)) == 0)
+        task_destroy(t);
 }

 /** Syscall for reading task ID from userspace.
  *
- * @param uspace_task_id userspace address of 8-byte buffer
- *                       where to store current task ID.
+ * @param uspace_task_id Userspace address of 8-byte buffer
+ *                       where to store current task ID.
  *
  * @return Zero on success or an error code from @ref errno.h.
+ *
  */
 unative_t sys_task_get_id(task_id_t *uspace_task_id)
…
  * The name simplifies identifying the task in the task list.
  *
- * @param name
- *
+ * @param name The new name for the task. (typically the same
+ *             as the command used to execute it).
  *
  * @return 0 on success or an error code from @ref errno.h.
+ *
  */
 unative_t sys_task_set_name(const char *uspace_name, size_t name_len)
…
     int rc;
     char namebuf[TASK_NAME_BUFLEN];

     /* Cap length of name and copy it from userspace. */

     if (name_len > TASK_NAME_BUFLEN - 1)
         name_len = TASK_NAME_BUFLEN - 1;

     rc = copy_from_uspace(namebuf, uspace_name, name_len);
     if (rc != 0)
         return (unative_t) rc;

     namebuf[name_len] = '\0';
     str_cpy(TASK->name, TASK_NAME_BUFLEN, namebuf);

     return EOK;
 }
…
  * interrupts must be disabled.
  *
- * @param id Task ID.
- *
- * @return Task structure address or NULL if there is no such task
- *         ID.
- */
-task_t *task_find_by_id(task_id_t id) { avltree_node_t *node;
-
-    node = avltree_search(&tasks_tree, (avltree_key_t) id);
-
+ * @param id Task ID.
+ *
+ * @return Task structure address or NULL if there is no such task ID.
+ *
+ */
+task_t *task_find_by_id(task_id_t id)
+{
+    avltree_node_t *node =
+        avltree_search(&tasks_tree, (avltree_key_t) id);
+
     if (node)
         return avltree_get_instance(node, task_t, tasks_tree_node);
+
     return NULL;
 }
…
  * already disabled.
  *
- * @param t Pointer to thread.
- *
- * @return Number of cycles used by the task and all its threads
- *         so far.
- */
-uint64_t task_get_accounting(task_t *t)
-{
-    /* Accumulated value of task */
-    uint64_t ret = t->cycles;
+ * @param t       Pointer to thread.
+ * @param ucycles Out pointer to sum of all user cycles.
+ * @param kcycles Out pointer to sum of all kernel cycles.
+ *
+ */
+void task_get_accounting(task_t *t, uint64_t *ucycles, uint64_t *kcycles)
+{
+    /* Accumulated values of task */
+    uint64_t uret = t->ucycles;
+    uint64_t kret = t->kcycles;

     /* Current values of threads */
…
         if (thr == THREAD) {
             /* Update accounting of current thread */
-            thread_update_accounting();
+            thread_update_accounting(false);
         }
-        uret += thr->ucycles;
-        ret += thr->cycles;
+        uret += thr->ucycles;
+        kret += thr->kcycles;
     }
         spinlock_unlock(&thr->lock);
     }

-    return ret;
+    *ucycles = uret;
+    *kcycles = kret;
 }

…
 {
     link_t *cur;

     /*
      * Interrupt all threads.
…
  * It signals all the task's threads to bail it out.
  *
  * @param id ID of the task to be killed.
  *
  * @return Zero on success or an error code from errno.h.
+ *
  */
 int task_kill(task_id_t id)
…
     task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
     int j;

     spinlock_lock(&t->lock);

-    uint64_t cycles;
-    char suffix;
-    order(task_get_accounting(t), &cycles, &suffix);
+    uint64_t ucycles;
+    uint64_t kcycles;
+    char usuffix, ksuffix;
+    task_get_accounting(t, &ucycles, &kcycles);
+    order_suffix(ucycles, &ucycles, &usuffix);
+    order_suffix(kcycles, &kcycles, &ksuffix);

 #ifdef __32_BITS__
-    printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %10p %10p %9" PRIu64
-        "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
-        suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
+    printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %10p %10p %9" PRIu64 "%c %9"
+        PRIu64 "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as,
+        ucycles, usuffix, kcycles, ksuffix, atomic_get(&t->refcount),
+        atomic_get(&t->active_calls));
 #endif

 #ifdef __64_BITS__
-    printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %18p %18p %9" PRIu64
-        "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
-        suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
+    printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %18p %18p %9" PRIu64 "%c %9"
+        PRIu64 "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as,
+        ucycles, usuffix, kcycles, ksuffix, atomic_get(&t->refcount),
+        atomic_get(&t->active_calls));
 #endif

     for (j = 0; j < IPC_MAX_PHONES; j++) {
         if (t->phones[j].callee)
…
     }
     printf("\n");

     spinlock_unlock(&t->lock);
     return true;
…
     ipl = interrupts_disable();
     spinlock_lock(&tasks_lock);

 #ifdef __32_BITS__
-    printf("taskid name         ctx address    as        "
-        " cycles     threads calls  callee\n");
-    printf("------ ------------ --- ---------- ----------"
-        " ---------- ------- ------ ------>\n");
+    printf("taskid name         ctx address    as        "
+        " ucycles    kcycles    threads calls  callee\n");
+    printf("------ ------------ --- ---------- ----------"
+        " ---------- ---------- ------- ------ ------>\n");
 #endif

 #ifdef __64_BITS__
-    printf("taskid name         ctx address            as                "
-        " cycles     threads calls  callee\n");
-    printf("------ ------------ --- ------------------ ------------------"
-        " ---------- ------- ------ ------>\n");
+    printf("taskid name         ctx address            as                "
+        " ucycles    kcycles    threads calls  callee\n");
+    printf("------ ------------ --- ------------------ ------------------"
+        " ---------- ---------- ---------- ------- ------ ------>\n");
 #endif

     avltree_walk(&tasks_tree, task_print_walker, NULL);

     spinlock_unlock(&tasks_lock);
     interrupts_restore(ipl);