Changeset f4f866c in mainline for kernel/generic/src/proc
- Timestamp:
- 2010-04-23T21:42:26Z (16 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 6c39a907
- Parents:
- 38aaacc2 (diff), 80badbe (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Location:
- kernel/generic/src/proc
- Files:
-
- 3 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/proc/scheduler.c
r38aaacc2 rf4f866c 202 202 */ 203 203 204 spinlock_lock(&CPU->lock); 205 CPU->idle = true; 206 spinlock_unlock(&CPU->lock); 204 207 cpu_sleep(); 205 208 goto loop; … … 313 316 spinlock_lock(&THREAD->lock); 314 317 315 /* Update thread accounting */316 THREAD-> cycles += get_cycle() - THREAD->last_cycle;318 /* Update thread kernel accounting */ 319 THREAD->kcycles += get_cycle() - THREAD->last_cycle; 317 320 318 321 #ifndef CONFIG_FPU_LAZY -
kernel/generic/src/proc/task.c
r38aaacc2 rf4f866c 33 33 /** 34 34 * @file 35 * @brief Task management.35 * @brief Task management. 36 36 */ 37 37 … … 66 66 * The task is guaranteed to exist after it was found in the tasks_tree as 67 67 * long as: 68 * 68 69 * @li the tasks_lock is held, 69 70 * @li the task's lock is held when task's lock is acquired before releasing … … 99 100 task_t *t = avltree_get_instance(node, task_t, tasks_tree_node); 100 101 unsigned *cnt = (unsigned *) arg; 101 102 102 103 if (t != TASK) { 103 104 (*cnt)++; … … 107 108 task_kill_internal(t); 108 109 } 109 110 return true; /* continue the walk */ 110 111 /* Continue the walk */ 112 return true; 111 113 } 112 114 … … 115 117 { 116 118 unsigned tasks_left; 117 119 118 120 do { /* Repeat until there are any tasks except TASK */ 119 121 /* Messing with task structures, avoid deadlock */ … … 138 140 task_t *ta = obj; 139 141 int i; 140 142 141 143 atomic_set(&ta->refcount, 0); 142 144 atomic_set(&ta->lifecount, 0); 143 145 atomic_set(&ta->active_calls, 0); 144 146 145 147 spinlock_initialize(&ta->lock, "task_ta_lock"); 146 148 mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE); 147 149 148 150 list_initialize(&ta->th_head); 149 151 list_initialize(&ta->sync_box_head); 150 152 151 153 ipc_answerbox_init(&ta->answerbox, ta); 152 154 for (i = 0; i < IPC_MAX_PHONES; i++) 153 155 ipc_phone_init(&ta->phones[i]); 154 156 155 157 #ifdef CONFIG_UDEBUG 156 158 /* Init kbox stuff */ … … 159 161 mutex_initialize(&ta->kb.cleanup_lock, MUTEX_PASSIVE); 160 162 #endif 161 163 162 164 return 0; 163 165 } … … 165 167 /** Create new task with no threads. 166 168 * 167 * @param as Task's address space.168 * @param name Symbolic name (a copy is made).169 * 170 * @return New task's structure.169 * @param as Task's address space. 170 * @param name Symbolic name (a copy is made). 171 * 172 * @return New task's structure. 
171 173 * 172 174 */ … … 181 183 memcpy(ta->name, name, TASK_NAME_BUFLEN); 182 184 ta->name[TASK_NAME_BUFLEN - 1] = 0; 183 185 184 186 ta->context = CONTEXT; 185 187 ta->capabilities = 0; 186 ta->cycles = 0; 188 ta->ucycles = 0; 189 ta->kcycles = 0; 190 191 ta->ipc_info.call_sent = 0; 192 ta->ipc_info.call_recieved = 0; 193 ta->ipc_info.answer_sent = 0; 194 ta->ipc_info.answer_recieved = 0; 195 ta->ipc_info.irq_notif_recieved = 0; 196 ta->ipc_info.forwarded = 0; 187 197 188 198 #ifdef CONFIG_UDEBUG 189 199 /* Init debugging stuff */ 190 200 udebug_task_init(&ta->udebug); 191 201 192 202 /* Init kbox stuff */ 193 203 ta->kb.finished = false; 194 204 #endif 195 205 196 206 if ((ipc_phone_0) && 197 207 (context_check(ipc_phone_0->task->context, ta->context))) 198 208 ipc_phone_connect(&ta->phones[0], ipc_phone_0); 199 209 200 210 btree_create(&ta->futexes); 201 211 … … 215 225 /** Destroy task. 216 226 * 217 * @param t Task to be destroyed. 227 * @param t Task to be destroyed. 228 * 218 229 */ 219 230 void task_destroy(task_t *t) … … 225 236 avltree_delete(&tasks_tree, &t->tasks_tree_node); 226 237 spinlock_unlock(&tasks_lock); 227 238 228 239 /* 229 240 * Perform architecture specific task destruction. 230 241 */ 231 242 task_destroy_arch(t); 232 243 233 244 /* 234 245 * Free up dynamically allocated state. 235 246 */ 236 247 btree_destroy(&t->futexes); 237 248 238 249 /* 239 250 * Drop our reference to the address space. … … 248 259 /** Syscall for reading task ID from userspace. 249 260 * 250 * @param uspace_task_id userspace address of 8-byte buffer 251 * where to store current task ID. 252 * 253 * @return Zero on success or an error code from @ref errno.h. 261 * @param uspace_task_id Userspace address of 8-byte buffer 262 * where to store current task ID. 263 * 264 * @return Zero on success or an error code from @ref errno.h. 
265 * 254 266 */ 255 267 unative_t sys_task_get_id(task_id_t *uspace_task_id) … … 267 279 * The name simplifies identifying the task in the task list. 268 280 * 269 * @param name The new name for the task. (typically the same270 * as the command used to execute it).281 * @param name The new name for the task. (typically the same 282 * as the command used to execute it). 271 283 * 272 284 * @return 0 on success or an error code from @ref errno.h. 285 * 273 286 */ 274 287 unative_t sys_task_set_name(const char *uspace_name, size_t name_len) … … 276 289 int rc; 277 290 char namebuf[TASK_NAME_BUFLEN]; 278 291 279 292 /* Cap length of name and copy it from userspace. */ 280 293 281 294 if (name_len > TASK_NAME_BUFLEN - 1) 282 295 name_len = TASK_NAME_BUFLEN - 1; 283 296 284 297 rc = copy_from_uspace(namebuf, uspace_name, name_len); 285 298 if (rc != 0) 286 299 return (unative_t) rc; 287 300 288 301 namebuf[name_len] = '\0'; 289 302 str_cpy(TASK->name, TASK_NAME_BUFLEN, namebuf); 290 303 291 304 return EOK; 292 305 } … … 297 310 * interrupts must be disabled. 298 311 * 299 * @param id Task ID. 300 * 301 * @return Task structure address or NULL if there is no such task 302 * ID. 303 */ 304 task_t *task_find_by_id(task_id_t id) { avltree_node_t *node; 305 306 node = avltree_search(&tasks_tree, (avltree_key_t) id); 307 312 * @param id Task ID. 313 * 314 * @return Task structure address or NULL if there is no such task ID. 315 * 316 */ 317 task_t *task_find_by_id(task_id_t id) 318 { 319 avltree_node_t *node = 320 avltree_search(&tasks_tree, (avltree_key_t) id); 321 308 322 if (node) 309 323 return avltree_get_instance(node, task_t, tasks_tree_node); 324 310 325 return NULL; 311 326 } … … 316 331 * already disabled. 317 332 * 318 * @param t Pointer to thread. 319 * 320 * @return Number of cycles used by the task and all its threads 321 * so far. 
322 */ 323 uint64_t task_get_accounting(task_t *t) 324 { 325 /* Accumulated value of task */ 326 uint64_t ret = t->cycles; 333 * @param t Pointer to thread. 334 * @param ucycles Out pointer to sum of all user cycles. 335 * @param kcycles Out pointer to sum of all kernel cycles. 336 * 337 */ 338 void task_get_accounting(task_t *t, uint64_t *ucycles, uint64_t *kcycles) 339 { 340 /* Accumulated values of task */ 341 uint64_t uret = t->ucycles; 342 uint64_t kret = t->kcycles; 327 343 328 344 /* Current values of threads */ … … 336 352 if (thr == THREAD) { 337 353 /* Update accounting of current thread */ 338 thread_update_accounting( );354 thread_update_accounting(false); 339 355 } 340 ret += thr->cycles; 356 uret += thr->ucycles; 357 kret += thr->kcycles; 341 358 } 342 359 spinlock_unlock(&thr->lock); 343 360 } 344 361 345 return ret; 362 *ucycles = uret; 363 *kcycles = kret; 346 364 } 347 365 … … 349 367 { 350 368 link_t *cur; 351 369 352 370 /* 353 371 * Interrupt all threads. … … 377 395 * It signals all the task's threads to bail it out. 378 396 * 379 * @param id ID of the task to be killed. 380 * 381 * @return Zero on success or an error code from errno.h. 397 * @param id ID of the task to be killed. 398 * 399 * @return Zero on success or an error code from errno.h. 
400 * 382 401 */ 383 402 int task_kill(task_id_t id) … … 406 425 task_t *t = avltree_get_instance(node, task_t, tasks_tree_node); 407 426 int j; 408 427 409 428 spinlock_lock(&t->lock); 410 411 uint64_t cycles; 412 char suffix; 413 order(task_get_accounting(t), &cycles, &suffix); 414 429 430 uint64_t ucycles; 431 uint64_t kcycles; 432 char usuffix, ksuffix; 433 task_get_accounting(t, &ucycles, &kcycles); 434 order_suffix(ucycles, &ucycles, &usuffix); 435 order_suffix(kcycles, &kcycles, &ksuffix); 436 415 437 #ifdef __32_BITS__ 416 printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %10p %10p %9" PRIu64 417 "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles, 418 suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls)); 419 #endif 420 438 printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %10p %10p %9" PRIu64 "%c %9" 439 PRIu64 "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, 440 ucycles, usuffix, kcycles, ksuffix, atomic_get(&t->refcount), 441 atomic_get(&t->active_calls)); 442 #endif 443 421 444 #ifdef __64_BITS__ 422 printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %18p %18p %9" PRIu64 423 "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles, 424 suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls)); 425 #endif 426 445 printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %18p %18p %9" PRIu64 "%c %9" 446 PRIu64 "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, 447 ucycles, usuffix, kcycles, ksuffix, atomic_get(&t->refcount), 448 atomic_get(&t->active_calls)); 449 #endif 450 427 451 for (j = 0; j < IPC_MAX_PHONES; j++) { 428 452 if (t->phones[j].callee) … … 430 454 } 431 455 printf("\n"); 432 456 433 457 spinlock_unlock(&t->lock); 434 458 return true; … … 443 467 ipl = interrupts_disable(); 444 468 spinlock_lock(&tasks_lock); 445 446 #ifdef __32_BITS__ 447 printf("taskid name ctx address as "448 " cyclesthreads calls callee\n");449 printf("------ ------------ --- ---------- ---------- "450 " ---------- ------- ------ ------>\n");451 #endif 452 
469 470 #ifdef __32_BITS__ 471 printf("taskid name ctx address as " 472 " ucycles kcycles threads calls callee\n"); 473 printf("------ ------------ --- ---------- ----------" 474 " ---------- ---------- ------- ------ ------>\n"); 475 #endif 476 453 477 #ifdef __64_BITS__ 454 printf("taskid name ctx address as "455 " cyclesthreads calls callee\n");456 printf("------ ------------ --- ------------------ ------------------ "457 " ---------- ------- ------ ------>\n");458 #endif 459 478 printf("taskid name ctx address as " 479 " ucycles kcycles threads calls callee\n"); 480 printf("------ ------------ --- ------------------ ------------------" 481 " ---------- ---------- ---------- ------- ------ ------>\n"); 482 #endif 483 460 484 avltree_walk(&tasks_tree, task_print_walker, NULL); 461 485 462 486 spinlock_unlock(&tasks_lock); 463 487 interrupts_restore(ipl); -
kernel/generic/src/proc/thread.c
r38aaacc2 rf4f866c 50 50 #include <synch/rwlock.h> 51 51 #include <cpu.h> 52 #include < func.h>52 #include <str.h> 53 53 #include <context.h> 54 54 #include <adt/avl.h> … … 84 84 "Exiting", 85 85 "Lingering" 86 }; 86 }; 87 88 typedef struct { 89 thread_id_t thread_id; 90 thread_t *thread; 91 } thread_iterator_t; 87 92 88 93 /** Lock protecting the threads_tree AVL tree. … … 132 137 spinlock_lock(&THREAD->lock); 133 138 if (!THREAD->uncounted) { 134 thread_update_accounting(); 135 uint64_t cycles = THREAD->cycles; 136 THREAD->cycles = 0; 139 thread_update_accounting(true); 140 uint64_t ucycles = THREAD->ucycles; 141 THREAD->ucycles = 0; 142 uint64_t kcycles = THREAD->kcycles; 143 THREAD->kcycles = 0; 144 137 145 spinlock_unlock(&THREAD->lock); 138 146 139 147 spinlock_lock(&TASK->lock); 140 TASK->cycles += cycles; 148 TASK->ucycles += ucycles; 149 TASK->kcycles += kcycles; 141 150 spinlock_unlock(&TASK->lock); 142 151 } else … … 323 332 t->thread_arg = arg; 324 333 t->ticks = -1; 325 t->cycles = 0; 334 t->ucycles = 0; 335 t->kcycles = 0; 326 336 t->uncounted = uncounted; 327 337 t->priority = -1; /* start in rq[0] */ … … 614 624 thread_t *t = avltree_get_instance(node, thread_t, threads_tree_node); 615 625 616 uint64_t cycles; 617 char suffix; 618 order(t->cycles, &cycles, &suffix); 626 uint64_t ucycles, kcycles; 627 char usuffix, ksuffix; 628 order_suffix(t->ucycles, &ucycles, &usuffix); 629 order_suffix(t->kcycles, &kcycles, &ksuffix); 619 630 620 631 #ifdef __32_BITS__ 621 printf("%-6" PRIu64" %-10s %10p %-8s %10p %-3" PRIu32 " %10p %10p %9" PRIu64 "%c ", 622 t->tid, t->name, t, thread_states[t->state], t->task, 623 t->task->context, t->thread_code, t->kstack, cycles, suffix); 632 printf("%-6" PRIu64" %-10s %10p %-8s %10p %-3" PRIu32 " %10p %10p %9" 633 PRIu64 "%c %9" PRIu64 "%c ", t->tid, t->name, t, 634 thread_states[t->state], t->task, t->task->context, t->thread_code, 635 t->kstack, ucycles, usuffix, kcycles, ksuffix); 624 636 #endif 625 637 626 638 #ifdef 
__64_BITS__ 627 printf("%-6" PRIu64" %-10s %18p %-8s %18p %-3" PRIu32 " %18p %18p %9" PRIu64 "%c ", 628 t->tid, t->name, t, thread_states[t->state], t->task, 629 t->task->context, t->thread_code, t->kstack, cycles, suffix); 639 printf("%-6" PRIu64" %-10s %18p %-8s %18p %-3" PRIu32 " %18p %18p %9" 640 PRIu64 "%c %9" PRIu64 "%c ", t->tid, t->name, t, 641 thread_states[t->state], t->task, t->task->context, t->thread_code, 642 t->kstack, ucycles, usuffix, kcycles, ksuffix); 630 643 #endif 631 644 … … 661 674 #ifdef __32_BITS__ 662 675 printf("tid name address state task " 663 "ctx code stack cyclescpu "676 "ctx code stack ucycles kcycles cpu " 664 677 "waitqueue\n"); 665 678 printf("------ ---------- ---------- -------- ---------- " 666 "--- ---------- ---------- ---------- ---- "679 "--- ---------- ---------- ---------- ---------- ---- " 667 680 "----------\n"); 668 681 #endif … … 670 683 #ifdef __64_BITS__ 671 684 printf("tid name address state task " 672 "ctx code stack cyclescpu "685 "ctx code stack ucycles kcycles cpu " 673 686 "waitqueue\n"); 674 687 printf("------ ---------- ------------------ -------- ------------------ " 675 "--- ------------------ ------------------ ---------- ---- "688 "--- ------------------ ------------------ ---------- ---------- ---- " 676 689 "------------------\n"); 677 690 #endif … … 706 719 * interrupts must be already disabled. 707 720 * 708 */ 709 void thread_update_accounting(void) 721 * @param user True to update user accounting, false for kernel. 
722 */ 723 void thread_update_accounting(bool user) 710 724 { 711 725 uint64_t time = get_cycle(); 712 THREAD->cycles += time - THREAD->last_cycle; 726 if (user) { 727 THREAD->ucycles += time - THREAD->last_cycle; 728 } else { 729 THREAD->kcycles += time - THREAD->last_cycle; 730 } 713 731 THREAD->last_cycle = time; 714 732 } 733 734 static bool thread_search_walker(avltree_node_t *node, void *arg) 735 { 736 thread_t *thread = 737 (thread_t *) avltree_get_instance(node, thread_t, threads_tree_node); 738 thread_iterator_t *iterator = (thread_iterator_t *) arg; 739 740 if (thread->tid == iterator->thread_id) { 741 iterator->thread = thread; 742 return false; 743 } 744 745 return true; 746 } 747 748 /** Find thread structure corresponding to thread ID. 749 * 750 * The threads_lock must be already held by the caller of this function and 751 * interrupts must be disabled. 752 * 753 * @param id Thread ID. 754 * 755 * @return Thread structure address or NULL if there is no such thread ID. 756 * 757 */ 758 thread_t *thread_find_by_id(thread_id_t thread_id) 759 { 760 thread_iterator_t iterator; 761 762 iterator.thread_id = thread_id; 763 iterator.thread = NULL; 764 765 avltree_walk(&threads_tree, thread_search_walker, (void *) &iterator); 766 767 return iterator.thread; 768 } 769 715 770 716 771 /** Process syscall to create new thread.
Note:
See TracChangeset
for help on using the changeset viewer.
