Changeset a35b458 in mainline for kernel/generic/src/sysinfo/stats.c
- Timestamp:
- 2018-03-02T20:10:49Z (7 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- f1380b7
- Parents:
- 3061bc1
- git-author:
- Jiří Zárevúcky <zarevucky.jiri@…> (2018-02-28 17:38:31)
- git-committer:
- Jiří Zárevúcky <zarevucky.jiri@…> (2018-03-02 20:10:49)
- Files:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/sysinfo/stats.c
r3061bc1 ra35b458 99 99 if (dry_run) 100 100 return NULL; 101 101 102 102 /* Assumption: config.cpu_count is constant */ 103 103 stats_cpu_t *stats_cpus = (stats_cpu_t *) malloc(*size, FRAME_ATOMIC); … … 106 106 return NULL; 107 107 } 108 108 109 109 size_t i; 110 110 for (i = 0; i < config.cpu_count; i++) { 111 111 irq_spinlock_lock(&cpus[i].lock, true); 112 112 113 113 stats_cpus[i].id = cpus[i].id; 114 114 stats_cpus[i].active = cpus[i].active; … … 116 116 stats_cpus[i].busy_cycles = cpus[i].busy_cycles; 117 117 stats_cpus[i].idle_cycles = cpus[i].idle_cycles; 118 118 119 119 irq_spinlock_unlock(&cpus[i].lock, true); 120 120 } 121 121 122 122 return ((void *) stats_cpus); 123 123 } … … 137 137 size_t *count = (size_t *) arg; 138 138 (*count)++; 139 139 140 140 return true; 141 141 } … … 156 156 * object, return inexact statistics by skipping the respective object. 157 157 */ 158 158 159 159 if (mutex_trylock(&as->lock) != EOK) 160 160 return 0; 161 161 162 162 size_t pages = 0; 163 163 164 164 /* Walk the B+ tree and count pages */ 165 165 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, … … 168 168 for (i = 0; i < node->keys; i++) { 169 169 as_area_t *area = node->value[i]; 170 170 171 171 if (mutex_trylock(&area->lock) != EOK) 172 172 continue; 173 173 174 174 pages += area->pages; 175 175 mutex_unlock(&area->lock); 176 176 } 177 177 } 178 178 179 179 mutex_unlock(&as->lock); 180 180 181 181 return (pages << PAGE_WIDTH); 182 182 } … … 197 197 * object, return inexact statistics by skipping the respective object. 
198 198 */ 199 199 200 200 if (mutex_trylock(&as->lock) != EOK) 201 201 return 0; 202 202 203 203 size_t pages = 0; 204 204 205 205 /* Walk the B+ tree and count pages */ 206 206 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, node) { … … 208 208 for (i = 0; i < node->keys; i++) { 209 209 as_area_t *area = node->value[i]; 210 210 211 211 if (mutex_trylock(&area->lock) != EOK) 212 212 continue; 213 213 214 214 pages += area->resident; 215 215 mutex_unlock(&area->lock); 216 216 } 217 217 } 218 218 219 219 mutex_unlock(&as->lock); 220 220 221 221 return (pages << PAGE_WIDTH); 222 222 } … … 234 234 assert(interrupts_disabled()); 235 235 assert(irq_spinlock_locked(&task->lock)); 236 236 237 237 stats_task->task_id = task->taskid; 238 238 str_cpy(stats_task->name, TASK_NAME_BUFLEN, task->name); … … 260 260 stats_task_t **iterator = (stats_task_t **) arg; 261 261 task_t *task = avltree_get_instance(node, task_t, tasks_tree_node); 262 262 263 263 /* Interrupts are already disabled */ 264 264 irq_spinlock_lock(&(task->lock), false); 265 265 266 266 /* Record the statistics and increment the iterator */ 267 267 produce_stats_task(task, *iterator); 268 268 (*iterator)++; 269 269 270 270 irq_spinlock_unlock(&(task->lock), false); 271 271 272 272 return true; 273 273 } … … 289 289 /* Messing with task structures, avoid deadlock */ 290 290 irq_spinlock_lock(&tasks_lock, true); 291 291 292 292 /* First walk the task tree to count the tasks */ 293 293 size_t count = 0; 294 294 avltree_walk(&tasks_tree, avl_count_walker, (void *) &count); 295 295 296 296 if (count == 0) { 297 297 /* No tasks found (strange) */ … … 300 300 return NULL; 301 301 } 302 302 303 303 *size = sizeof(stats_task_t) * count; 304 304 if (dry_run) { … … 306 306 return NULL; 307 307 } 308 308 309 309 stats_task_t *stats_tasks = (stats_task_t *) malloc(*size, FRAME_ATOMIC); 310 310 if (stats_tasks == NULL) { … … 314 314 return NULL; 315 315 } 316 316 317 317 /* Walk tha task tree again to 
gather the statistics */ 318 318 stats_task_t *iterator = stats_tasks; 319 319 avltree_walk(&tasks_tree, task_serialize_walker, (void *) &iterator); 320 320 321 321 irq_spinlock_unlock(&tasks_lock, true); 322 322 323 323 return ((void *) stats_tasks); 324 324 } … … 336 336 assert(interrupts_disabled()); 337 337 assert(irq_spinlock_locked(&thread->lock)); 338 338 339 339 stats_thread->thread_id = thread->tid; 340 340 stats_thread->task_id = thread->task->taskid; … … 343 343 stats_thread->ucycles = thread->ucycles; 344 344 stats_thread->kcycles = thread->kcycles; 345 345 346 346 if (thread->cpu != NULL) { 347 347 stats_thread->on_cpu = true; … … 366 366 stats_thread_t **iterator = (stats_thread_t **) arg; 367 367 thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node); 368 368 369 369 /* Interrupts are already disabled */ 370 370 irq_spinlock_lock(&thread->lock, false); 371 371 372 372 /* Record the statistics and increment the iterator */ 373 373 produce_stats_thread(thread, *iterator); 374 374 (*iterator)++; 375 375 376 376 irq_spinlock_unlock(&thread->lock, false); 377 377 378 378 return true; 379 379 } … … 395 395 /* Messing with threads structures, avoid deadlock */ 396 396 irq_spinlock_lock(&threads_lock, true); 397 397 398 398 /* First walk the thread tree to count the threads */ 399 399 size_t count = 0; 400 400 avltree_walk(&threads_tree, avl_count_walker, (void *) &count); 401 401 402 402 if (count == 0) { 403 403 /* No threads found (strange) */ … … 406 406 return NULL; 407 407 } 408 408 409 409 *size = sizeof(stats_thread_t) * count; 410 410 if (dry_run) { … … 412 412 return NULL; 413 413 } 414 414 415 415 stats_thread_t *stats_threads = (stats_thread_t *) malloc(*size, FRAME_ATOMIC); 416 416 if (stats_threads == NULL) { … … 420 420 return NULL; 421 421 } 422 422 423 423 /* Walk tha thread tree again to gather the statistics */ 424 424 stats_thread_t *iterator = stats_threads; 425 425 avltree_walk(&threads_tree, thread_serialize_walker, 
(void *) &iterator); 426 426 427 427 irq_spinlock_unlock(&threads_lock, true); 428 428 429 429 return ((void *) stats_threads); 430 430 } … … 454 454 sysinfo_return_t ret; 455 455 ret.tag = SYSINFO_VAL_UNDEFINED; 456 456 457 457 /* Parse the task ID */ 458 458 task_id_t task_id; 459 459 if (str_uint64_t(name, NULL, 0, true, &task_id) != EOK) 460 460 return ret; 461 461 462 462 /* Messing with task structures, avoid deadlock */ 463 463 irq_spinlock_lock(&tasks_lock, true); 464 464 465 465 task_t *task = task_find_by_id(task_id); 466 466 if (task == NULL) { … … 469 469 return ret; 470 470 } 471 471 472 472 if (dry_run) { 473 473 ret.tag = SYSINFO_VAL_FUNCTION_DATA; 474 474 ret.data.data = NULL; 475 475 ret.data.size = sizeof(stats_task_t); 476 476 477 477 irq_spinlock_unlock(&tasks_lock, true); 478 478 } else { … … 484 484 return ret; 485 485 } 486 486 487 487 /* Correct return value */ 488 488 ret.tag = SYSINFO_VAL_FUNCTION_DATA; 489 489 ret.data.data = (void *) stats_task; 490 490 ret.data.size = sizeof(stats_task_t); 491 491 492 492 /* Hand-over-hand locking */ 493 493 irq_spinlock_exchange(&tasks_lock, &task->lock); 494 494 495 495 produce_stats_task(task, stats_task); 496 496 497 497 irq_spinlock_unlock(&task->lock, true); 498 498 } 499 499 500 500 return ret; 501 501 } … … 525 525 sysinfo_return_t ret; 526 526 ret.tag = SYSINFO_VAL_UNDEFINED; 527 527 528 528 /* Parse the thread ID */ 529 529 thread_id_t thread_id; 530 530 if (str_uint64_t(name, NULL, 0, true, &thread_id) != EOK) 531 531 return ret; 532 532 533 533 /* Messing with threads structures, avoid deadlock */ 534 534 irq_spinlock_lock(&threads_lock, true); 535 535 536 536 thread_t *thread = thread_find_by_id(thread_id); 537 537 if (thread == NULL) { … … 540 540 return ret; 541 541 } 542 542 543 543 if (dry_run) { 544 544 ret.tag = SYSINFO_VAL_FUNCTION_DATA; 545 545 ret.data.data = NULL; 546 546 ret.data.size = sizeof(stats_thread_t); 547 547 548 548 irq_spinlock_unlock(&threads_lock, true); 549 549 } 
else { … … 555 555 return ret; 556 556 } 557 557 558 558 /* Correct return value */ 559 559 ret.tag = SYSINFO_VAL_FUNCTION_DATA; 560 560 ret.data.data = (void *) stats_thread; 561 561 ret.data.size = sizeof(stats_thread_t); 562 562 563 563 /* Hand-over-hand locking */ 564 564 irq_spinlock_exchange(&threads_lock, &thread->lock); 565 565 566 566 produce_stats_thread(thread, stats_thread); 567 567 568 568 irq_spinlock_unlock(&thread->lock, true); 569 569 } 570 570 571 571 return ret; 572 572 } … … 587 587 { 588 588 *size = sizeof(stats_exc_t) * IVT_ITEMS; 589 589 590 590 if ((dry_run) || (IVT_ITEMS == 0)) 591 591 return NULL; 592 592 593 593 stats_exc_t *stats_exceptions = 594 594 (stats_exc_t *) malloc(*size, FRAME_ATOMIC); … … 598 598 return NULL; 599 599 } 600 600 601 601 #if (IVT_ITEMS > 0) 602 602 /* Messing with exception table, avoid deadlock */ 603 603 irq_spinlock_lock(&exctbl_lock, true); 604 604 605 605 unsigned int i; 606 606 for (i = 0; i < IVT_ITEMS; i++) { … … 611 611 stats_exceptions[i].count = exc_table[i].count; 612 612 } 613 613 614 614 irq_spinlock_unlock(&exctbl_lock, true); 615 615 #endif 616 616 617 617 return ((void *) stats_exceptions); 618 618 } … … 642 642 sysinfo_return_t ret; 643 643 ret.tag = SYSINFO_VAL_UNDEFINED; 644 644 645 645 /* Parse the exception number */ 646 646 uint64_t excn; 647 647 if (str_uint64_t(name, NULL, 0, true, &excn) != EOK) 648 648 return ret; 649 649 650 650 #if (IVT_FIRST > 0) 651 651 if (excn < IVT_FIRST) 652 652 return ret; 653 653 #endif 654 654 655 655 #if (IVT_ITEMS + IVT_FIRST == 0) 656 656 return ret; … … 659 659 return ret; 660 660 #endif 661 661 662 662 if (dry_run) { 663 663 ret.tag = SYSINFO_VAL_FUNCTION_DATA; … … 667 667 /* Update excn index for accessing exc_table */ 668 668 excn -= IVT_FIRST; 669 669 670 670 /* Allocate stats_exc_t structure */ 671 671 stats_exc_t *stats_exception = … … 673 673 if (stats_exception == NULL) 674 674 return ret; 675 675 676 676 /* Messing with exception table, avoid 
deadlock */ 677 677 irq_spinlock_lock(&exctbl_lock, true); 678 678 679 679 /* Correct return value */ 680 680 ret.tag = SYSINFO_VAL_FUNCTION_DATA; 681 681 ret.data.data = (void *) stats_exception; 682 682 ret.data.size = sizeof(stats_exc_t); 683 683 684 684 stats_exception->id = excn; 685 685 str_cpy(stats_exception->desc, EXC_NAME_BUFLEN, exc_table[excn].name); … … 687 687 stats_exception->cycles = exc_table[excn].cycles; 688 688 stats_exception->count = exc_table[excn].count; 689 689 690 690 irq_spinlock_unlock(&exctbl_lock, true); 691 691 } 692 692 693 693 return ret; 694 694 } … … 711 711 if (dry_run) 712 712 return NULL; 713 713 714 714 stats_physmem_t *stats_physmem = 715 715 (stats_physmem_t *) malloc(*size, FRAME_ATOMIC); … … 718 718 return NULL; 719 719 } 720 720 721 721 zones_stats(&(stats_physmem->total), &(stats_physmem->unavail), 722 722 &(stats_physmem->used), &(stats_physmem->free)); 723 723 724 724 return ((void *) stats_physmem); 725 725 } … … 742 742 if (dry_run) 743 743 return NULL; 744 744 745 745 load_t *stats_load = (load_t *) malloc(*size, FRAME_ATOMIC); 746 746 if (stats_load == NULL) { … … 748 748 return NULL; 749 749 } 750 750 751 751 /* To always get consistent values acquire the mutex */ 752 752 mutex_lock(&load_lock); 753 753 754 754 unsigned int i; 755 755 for (i = 0; i < LOAD_STEPS; i++) 756 756 stats_load[i] = avenrdy[i] << LOAD_KERNEL_SHIFT; 757 757 758 758 mutex_unlock(&load_lock); 759 759 760 760 return ((void *) stats_load); 761 761 } … … 768 768 load *= exp; 769 769 load += (ready << LOAD_FIXED_SHIFT) * (LOAD_FIXED_1 - exp); 770 770 771 771 return (load >> LOAD_FIXED_SHIFT); 772 772 } … … 782 782 { 783 783 thread_detach(THREAD); 784 784 785 785 while (true) { 786 786 atomic_count_t ready = atomic_get(&nrdy); 787 787 788 788 /* Mutually exclude with get_stats_load() */ 789 789 mutex_lock(&load_lock); 790 790 791 791 unsigned int i; 792 792 for (i = 0; i < LOAD_STEPS; i++) 793 793 avenrdy[i] = load_calc(avenrdy[i], load_exp[i], 
ready); 794 794 795 795 mutex_unlock(&load_lock); 796 796 797 797 thread_sleep(LOAD_INTERVAL); 798 798 } … … 805 805 { 806 806 mutex_initialize(&load_lock, MUTEX_PASSIVE); 807 807 808 808 sysinfo_set_item_gen_data("system.cpus", NULL, get_stats_cpus, NULL); 809 809 sysinfo_set_item_gen_data("system.physmem", NULL, get_stats_physmem, NULL);
Note:
See TracChangeset
for help on using the changeset viewer.