source: mainline/kernel/generic/src/sysinfo/stats.c@ a35b458

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since a35b458 was a35b458, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

style: Remove trailing whitespace on _all_ lines, including empty ones, for particular file types.

Command used: tools/srepl '\s\+$' '' -- *.c *.h *.py *.sh *.s *.S *.ag

Currently, whitespace on empty lines is very inconsistent.
There are two basic choices: Either remove the whitespace, or keep empty lines
indented to the level of surrounding code. The former is AFAICT more common,
and also much easier to do automatically.

Alternatively, we could write script for automatic indentation, and use that
instead. However, if such a script exists, it's possible to use the indented
style locally, by having the editor apply relevant conversions on load/save,
without affecting remote repository. IMO, it makes more sense to adopt
the simpler rule.

  • Property mode set to 100644
File size: 22.0 KB
Line 
1/*
2 * Copyright (c) 2010 Stanislav Kozina
3 * Copyright (c) 2010 Martin Decky
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup generic
31 * @{
32 */
33/** @file
34 */
35
36#include <assert.h>
37#include <typedefs.h>
38#include <abi/sysinfo.h>
39#include <sysinfo/stats.h>
40#include <sysinfo/sysinfo.h>
41#include <synch/spinlock.h>
42#include <synch/mutex.h>
43#include <time/clock.h>
44#include <mm/frame.h>
45#include <proc/task.h>
46#include <proc/thread.h>
47#include <interrupt.h>
48#include <stdbool.h>
49#include <str.h>
50#include <errno.h>
51#include <cpu.h>
52#include <arch.h>
53
/** Bits of fixed-point precision for load */
#define LOAD_FIXED_SHIFT 11

/** Uspace load fixed-point precision */
#define LOAD_USPACE_SHIFT 6

/** Kernel load shift (converts kernel fixed-point to uspace fixed-point) */
#define LOAD_KERNEL_SHIFT (LOAD_FIXED_SHIFT - LOAD_USPACE_SHIFT)

/** 1.0 as fixed-point for load */
#define LOAD_FIXED_1 (1 << LOAD_FIXED_SHIFT)

/** Compute load in 5 second intervals */
#define LOAD_INTERVAL 5

/** Fixed-point representation of
 *
 * 1 / exp(5 sec / 1 min)
 * 1 / exp(5 sec / 5 min)
 * 1 / exp(5 sec / 15 min)
 *
 * i.e. the exponential-decay coefficients for the 1, 5 and 15 minute
 * load averages, scaled by LOAD_FIXED_1.
 *
 */
static load_t load_exp[LOAD_STEPS] = {1884, 2014, 2037};

/** Running average of the number of ready threads (one slot per interval) */
static load_t avenrdy[LOAD_STEPS] = {0, 0, 0};

/** Load calculation lock (protects avenrdy against concurrent access) */
static mutex_t load_lock;
83
84/** Get statistics of all CPUs
85 *
86 * @param item Sysinfo item (unused).
87 * @param size Size of the returned data.
88 * @param dry_run Do not get the data, just calculate the size.
89 * @param data Unused.
90 *
91 * @return Data containing several stats_cpu_t structures.
92 * If the return value is not NULL, it should be freed
93 * in the context of the sysinfo request.
94 */
95static void *get_stats_cpus(struct sysinfo_item *item, size_t *size,
96 bool dry_run, void *data)
97{
98 *size = sizeof(stats_cpu_t) * config.cpu_count;
99 if (dry_run)
100 return NULL;
101
102 /* Assumption: config.cpu_count is constant */
103 stats_cpu_t *stats_cpus = (stats_cpu_t *) malloc(*size, FRAME_ATOMIC);
104 if (stats_cpus == NULL) {
105 *size = 0;
106 return NULL;
107 }
108
109 size_t i;
110 for (i = 0; i < config.cpu_count; i++) {
111 irq_spinlock_lock(&cpus[i].lock, true);
112
113 stats_cpus[i].id = cpus[i].id;
114 stats_cpus[i].active = cpus[i].active;
115 stats_cpus[i].frequency_mhz = cpus[i].frequency_mhz;
116 stats_cpus[i].busy_cycles = cpus[i].busy_cycles;
117 stats_cpus[i].idle_cycles = cpus[i].idle_cycles;
118
119 irq_spinlock_unlock(&cpus[i].lock, true);
120 }
121
122 return ((void *) stats_cpus);
123}
124
125/** Count number of nodes in an AVL tree
126 *
127 * AVL tree walker for counting nodes.
128 *
129 * @param node AVL tree node (unused).
130 * @param arg Pointer to the counter (size_t).
131 *
132 * @param Always true (continue the walk).
133 *
134 */
135static bool avl_count_walker(avltree_node_t *node, void *arg)
136{
137 size_t *count = (size_t *) arg;
138 (*count)++;
139
140 return true;
141}
142
143/** Get the size of a virtual address space
144 *
145 * @param as Address space.
146 *
147 * @return Size of the mapped virtual address space (bytes).
148 *
149 */
150static size_t get_task_virtmem(as_t *as)
151{
152 /*
153 * We are holding spinlocks here and therefore are not allowed to
154 * block. Only attempt to lock the address space and address space
155 * area mutexes conditionally. If it is not possible to lock either
156 * object, return inexact statistics by skipping the respective object.
157 */
158
159 if (mutex_trylock(&as->lock) != EOK)
160 return 0;
161
162 size_t pages = 0;
163
164 /* Walk the B+ tree and count pages */
165 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
166 node) {
167 unsigned int i;
168 for (i = 0; i < node->keys; i++) {
169 as_area_t *area = node->value[i];
170
171 if (mutex_trylock(&area->lock) != EOK)
172 continue;
173
174 pages += area->pages;
175 mutex_unlock(&area->lock);
176 }
177 }
178
179 mutex_unlock(&as->lock);
180
181 return (pages << PAGE_WIDTH);
182}
183
184/** Get the resident (used) size of a virtual address space
185 *
186 * @param as Address space.
187 *
188 * @return Size of the resident (used) virtual address space (bytes).
189 *
190 */
191static size_t get_task_resmem(as_t *as)
192{
193 /*
194 * We are holding spinlocks here and therefore are not allowed to
195 * block. Only attempt to lock the address space and address space
196 * area mutexes conditionally. If it is not possible to lock either
197 * object, return inexact statistics by skipping the respective object.
198 */
199
200 if (mutex_trylock(&as->lock) != EOK)
201 return 0;
202
203 size_t pages = 0;
204
205 /* Walk the B+ tree and count pages */
206 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, node) {
207 unsigned int i;
208 for (i = 0; i < node->keys; i++) {
209 as_area_t *area = node->value[i];
210
211 if (mutex_trylock(&area->lock) != EOK)
212 continue;
213
214 pages += area->resident;
215 mutex_unlock(&area->lock);
216 }
217 }
218
219 mutex_unlock(&as->lock);
220
221 return (pages << PAGE_WIDTH);
222}
223
/** Produce task statistics
 *
 * Summarize task information into task statistics.
 * Interrupts must be disabled and the task lock must be held
 * (both are asserted below).
 *
 * @param task       Task.
 * @param stats_task Task statistics.
 *
 */
static void produce_stats_task(task_t *task, stats_task_t *stats_task)
{
	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&task->lock));

	stats_task->task_id = task->taskid;
	str_cpy(stats_task->name, TASK_NAME_BUFLEN, task->name);
	stats_task->virtmem = get_task_virtmem(task->as);
	stats_task->resmem = get_task_resmem(task->as);
	/* NOTE(review): task->refcount is reported as the thread count here —
	   confirm this approximation matches task_t's reference semantics. */
	stats_task->threads = atomic_get(&task->refcount);
	task_get_accounting(task, &(stats_task->ucycles),
	    &(stats_task->kcycles));
	stats_task->ipc_info = task->ipc_info;
}
246
/** Gather statistics of all tasks
 *
 * AVL task tree walker for gathering task statistics. Interrupts should
 * be already disabled while walking the tree.
 *
 * @param node AVL task tree node.
 * @param arg  Pointer to the iterator into the array of stats_task_t.
 *
 * @return Always true (continue the walk).
 *
 */
static bool task_serialize_walker(avltree_node_t *node, void *arg)
{
	stats_task_t **iterator = (stats_task_t **) arg;
	task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);

	/* Interrupts are already disabled */
	irq_spinlock_lock(&(task->lock), false);

	/* Record the statistics and increment the iterator */
	produce_stats_task(task, *iterator);
	(*iterator)++;

	irq_spinlock_unlock(&(task->lock), false);

	return true;
}
274
275/** Get task statistics
276 *
277 * @param item Sysinfo item (unused).
278 * @param size Size of the returned data.
279 * @param dry_run Do not get the data, just calculate the size.
280 * @param data Unused.
281 *
282 * @return Data containing several stats_task_t structures.
283 * If the return value is not NULL, it should be freed
284 * in the context of the sysinfo request.
285 */
286static void *get_stats_tasks(struct sysinfo_item *item, size_t *size,
287 bool dry_run, void *data)
288{
289 /* Messing with task structures, avoid deadlock */
290 irq_spinlock_lock(&tasks_lock, true);
291
292 /* First walk the task tree to count the tasks */
293 size_t count = 0;
294 avltree_walk(&tasks_tree, avl_count_walker, (void *) &count);
295
296 if (count == 0) {
297 /* No tasks found (strange) */
298 irq_spinlock_unlock(&tasks_lock, true);
299 *size = 0;
300 return NULL;
301 }
302
303 *size = sizeof(stats_task_t) * count;
304 if (dry_run) {
305 irq_spinlock_unlock(&tasks_lock, true);
306 return NULL;
307 }
308
309 stats_task_t *stats_tasks = (stats_task_t *) malloc(*size, FRAME_ATOMIC);
310 if (stats_tasks == NULL) {
311 /* No free space for allocation */
312 irq_spinlock_unlock(&tasks_lock, true);
313 *size = 0;
314 return NULL;
315 }
316
317 /* Walk tha task tree again to gather the statistics */
318 stats_task_t *iterator = stats_tasks;
319 avltree_walk(&tasks_tree, task_serialize_walker, (void *) &iterator);
320
321 irq_spinlock_unlock(&tasks_lock, true);
322
323 return ((void *) stats_tasks);
324}
325
/** Produce thread statistics
 *
 * Summarize thread information into thread statistics.
 * Interrupts must be disabled and the thread lock must be held
 * (both are asserted below).
 *
 * @param thread       Thread.
 * @param stats_thread Thread statistics.
 *
 */
static void produce_stats_thread(thread_t *thread, stats_thread_t *stats_thread)
{
	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&thread->lock));

	stats_thread->thread_id = thread->tid;
	stats_thread->task_id = thread->task->taskid;
	stats_thread->state = thread->state;
	stats_thread->priority = thread->priority;
	stats_thread->ucycles = thread->ucycles;
	stats_thread->kcycles = thread->kcycles;

	/* Report the CPU the thread runs on, if any */
	if (thread->cpu != NULL) {
		stats_thread->on_cpu = true;
		stats_thread->cpu = thread->cpu->id;
	} else
		stats_thread->on_cpu = false;
}
352
/** Gather statistics of all threads
 *
 * AVL thread tree walker for gathering thread statistics. Interrupts should
 * be already disabled while walking the tree.
 *
 * @param node AVL thread tree node.
 * @param arg  Pointer to the iterator into the array of thread statistics.
 *
 * @return Always true (continue the walk).
 *
 */
static bool thread_serialize_walker(avltree_node_t *node, void *arg)
{
	stats_thread_t **iterator = (stats_thread_t **) arg;
	thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);

	/* Interrupts are already disabled */
	irq_spinlock_lock(&thread->lock, false);

	/* Record the statistics and increment the iterator */
	produce_stats_thread(thread, *iterator);
	(*iterator)++;

	irq_spinlock_unlock(&thread->lock, false);

	return true;
}
380
/** Get thread statistics
 *
 * @param item    Sysinfo item (unused).
 * @param size    Size of the returned data.
 * @param dry_run Do not get the data, just calculate the size.
 * @param data    Unused.
 *
 * @return Data containing several stats_thread_t structures.
 *         If the return value is not NULL, it should be freed
 *         in the context of the sysinfo request.
 */
static void *get_stats_threads(struct sysinfo_item *item, size_t *size,
    bool dry_run, void *data)
{
	/* Messing with threads structures, avoid deadlock */
	irq_spinlock_lock(&threads_lock, true);

	/* First walk the thread tree to count the threads */
	size_t count = 0;
	avltree_walk(&threads_tree, avl_count_walker, (void *) &count);

	if (count == 0) {
		/* No threads found (strange) */
		irq_spinlock_unlock(&threads_lock, true);
		*size = 0;
		return NULL;
	}

	*size = sizeof(stats_thread_t) * count;
	if (dry_run) {
		irq_spinlock_unlock(&threads_lock, true);
		return NULL;
	}

	stats_thread_t *stats_threads = (stats_thread_t *) malloc(*size, FRAME_ATOMIC);
	if (stats_threads == NULL) {
		/* No free space for allocation */
		irq_spinlock_unlock(&threads_lock, true);
		*size = 0;
		return NULL;
	}

	/* Walk the thread tree again to gather the statistics */
	stats_thread_t *iterator = stats_threads;
	avltree_walk(&threads_tree, thread_serialize_walker, (void *) &iterator);

	irq_spinlock_unlock(&threads_lock, true);

	return ((void *) stats_threads);
}
431
/** Get a single task statistics
 *
 * Get statistics of a given task. The task ID is passed
 * as a string (current limitation of the sysinfo interface,
 * but it is still reasonable for the given purpose).
 *
 * @param name    Task ID (string-encoded number).
 * @param dry_run Do not get the data, just calculate the size.
 * @param data    Unused.
 *
 * @return Sysinfo return holder. The type of the returned
 *         data is either SYSINFO_VAL_UNDEFINED (unknown
 *         task ID or memory allocation error) or
 *         SYSINFO_VAL_FUNCTION_DATA (in that case the
 *         generated data should be freed within the
 *         sysinfo request context).
 *
 */
static sysinfo_return_t get_stats_task(const char *name, bool dry_run,
    void *data)
{
	/* Initially no return value */
	sysinfo_return_t ret;
	ret.tag = SYSINFO_VAL_UNDEFINED;

	/* Parse the task ID */
	task_id_t task_id;
	if (str_uint64_t(name, NULL, 0, true, &task_id) != EOK)
		return ret;

	/* Messing with task structures, avoid deadlock */
	irq_spinlock_lock(&tasks_lock, true);

	task_t *task = task_find_by_id(task_id);
	if (task == NULL) {
		/* No task with this ID */
		irq_spinlock_unlock(&tasks_lock, true);
		return ret;
	}

	if (dry_run) {
		/* Dry run: only report the size of the data */
		ret.tag = SYSINFO_VAL_FUNCTION_DATA;
		ret.data.data = NULL;
		ret.data.size = sizeof(stats_task_t);

		irq_spinlock_unlock(&tasks_lock, true);
	} else {
		/* Allocate stats_task_t structure */
		stats_task_t *stats_task =
		    (stats_task_t *) malloc(sizeof(stats_task_t), FRAME_ATOMIC);
		if (stats_task == NULL) {
			irq_spinlock_unlock(&tasks_lock, true);
			return ret;
		}

		/* Correct return value */
		ret.tag = SYSINFO_VAL_FUNCTION_DATA;
		ret.data.data = (void *) stats_task;
		ret.data.size = sizeof(stats_task_t);

		/*
		 * Hand-over-hand locking: atomically drop tasks_lock and
		 * acquire task->lock so the task cannot disappear in between.
		 */
		irq_spinlock_exchange(&tasks_lock, &task->lock);

		produce_stats_task(task, stats_task);

		irq_spinlock_unlock(&task->lock, true);
	}

	return ret;
}
502
/** Get a single thread statistics
 *
 * Get statistics of a given thread. The thread ID is passed
 * as a string (current limitation of the sysinfo interface,
 * but it is still reasonable for the given purpose).
 *
 * @param name    Thread ID (string-encoded number).
 * @param dry_run Do not get the data, just calculate the size.
 * @param data    Unused.
 *
 * @return Sysinfo return holder. The type of the returned
 *         data is either SYSINFO_VAL_UNDEFINED (unknown
 *         thread ID or memory allocation error) or
 *         SYSINFO_VAL_FUNCTION_DATA (in that case the
 *         generated data should be freed within the
 *         sysinfo request context).
 *
 */
static sysinfo_return_t get_stats_thread(const char *name, bool dry_run,
    void *data)
{
	/* Initially no return value */
	sysinfo_return_t ret;
	ret.tag = SYSINFO_VAL_UNDEFINED;

	/* Parse the thread ID */
	thread_id_t thread_id;
	if (str_uint64_t(name, NULL, 0, true, &thread_id) != EOK)
		return ret;

	/* Messing with threads structures, avoid deadlock */
	irq_spinlock_lock(&threads_lock, true);

	thread_t *thread = thread_find_by_id(thread_id);
	if (thread == NULL) {
		/* No thread with this ID */
		irq_spinlock_unlock(&threads_lock, true);
		return ret;
	}

	if (dry_run) {
		/* Dry run: only report the size of the data */
		ret.tag = SYSINFO_VAL_FUNCTION_DATA;
		ret.data.data = NULL;
		ret.data.size = sizeof(stats_thread_t);

		irq_spinlock_unlock(&threads_lock, true);
	} else {
		/* Allocate stats_thread_t structure */
		stats_thread_t *stats_thread =
		    (stats_thread_t *) malloc(sizeof(stats_thread_t), FRAME_ATOMIC);
		if (stats_thread == NULL) {
			irq_spinlock_unlock(&threads_lock, true);
			return ret;
		}

		/* Correct return value */
		ret.tag = SYSINFO_VAL_FUNCTION_DATA;
		ret.data.data = (void *) stats_thread;
		ret.data.size = sizeof(stats_thread_t);

		/*
		 * Hand-over-hand locking: atomically drop threads_lock and
		 * acquire thread->lock so the thread cannot disappear in between.
		 */
		irq_spinlock_exchange(&threads_lock, &thread->lock);

		produce_stats_thread(thread, stats_thread);

		irq_spinlock_unlock(&thread->lock, true);
	}

	return ret;
}
573
/** Get exceptions statistics
 *
 * @param item    Sysinfo item (unused).
 * @param size    Size of the returned data.
 * @param dry_run Do not get the data, just calculate the size.
 * @param data    Unused.
 *
 * @return Data containing several stats_exc_t structures.
 *         If the return value is not NULL, it should be freed
 *         in the context of the sysinfo request.
 */
static void *get_stats_exceptions(struct sysinfo_item *item, size_t *size,
    bool dry_run, void *data)
{
	/* With IVT_ITEMS == 0 this also sets *size to 0 */
	*size = sizeof(stats_exc_t) * IVT_ITEMS;

	if ((dry_run) || (IVT_ITEMS == 0))
		return NULL;

	stats_exc_t *stats_exceptions =
	    (stats_exc_t *) malloc(*size, FRAME_ATOMIC);
	if (stats_exceptions == NULL) {
		/* No free space for allocation */
		*size = 0;
		return NULL;
	}

#if (IVT_ITEMS > 0)
	/* Messing with exception table, avoid deadlock */
	irq_spinlock_lock(&exctbl_lock, true);

	unsigned int i;
	for (i = 0; i < IVT_ITEMS; i++) {
		stats_exceptions[i].id = i + IVT_FIRST;
		str_cpy(stats_exceptions[i].desc, EXC_NAME_BUFLEN, exc_table[i].name);
		stats_exceptions[i].hot = exc_table[i].hot;
		stats_exceptions[i].cycles = exc_table[i].cycles;
		stats_exceptions[i].count = exc_table[i].count;
	}

	irq_spinlock_unlock(&exctbl_lock, true);
#endif

	return ((void *) stats_exceptions);
}
619
620/** Get exception statistics
621 *
622 * Get statistics of a given exception. The exception number
623 * is passed as a string (current limitation of the sysinfo
624 * interface, but it is still reasonable for the given purpose).
625 *
626 * @param name Exception number (string-encoded number).
627 * @param dry_run Do not get the data, just calculate the size.
628 * @param data Unused.
629 *
630 * @return Sysinfo return holder. The type of the returned
631 * data is either SYSINFO_VAL_UNDEFINED (unknown
632 * exception number or memory allocation error) or
633 * SYSINFO_VAL_FUNCTION_DATA (in that case the
634 * generated data should be freed within the
635 * sysinfo request context).
636 *
637 */
638static sysinfo_return_t get_stats_exception(const char *name, bool dry_run,
639 void *data)
640{
641 /* Initially no return value */
642 sysinfo_return_t ret;
643 ret.tag = SYSINFO_VAL_UNDEFINED;
644
645 /* Parse the exception number */
646 uint64_t excn;
647 if (str_uint64_t(name, NULL, 0, true, &excn) != EOK)
648 return ret;
649
650#if (IVT_FIRST > 0)
651 if (excn < IVT_FIRST)
652 return ret;
653#endif
654
655#if (IVT_ITEMS + IVT_FIRST == 0)
656 return ret;
657#else
658 if (excn >= IVT_ITEMS + IVT_FIRST)
659 return ret;
660#endif
661
662 if (dry_run) {
663 ret.tag = SYSINFO_VAL_FUNCTION_DATA;
664 ret.data.data = NULL;
665 ret.data.size = sizeof(stats_thread_t);
666 } else {
667 /* Update excn index for accessing exc_table */
668 excn -= IVT_FIRST;
669
670 /* Allocate stats_exc_t structure */
671 stats_exc_t *stats_exception =
672 (stats_exc_t *) malloc(sizeof(stats_exc_t), FRAME_ATOMIC);
673 if (stats_exception == NULL)
674 return ret;
675
676 /* Messing with exception table, avoid deadlock */
677 irq_spinlock_lock(&exctbl_lock, true);
678
679 /* Correct return value */
680 ret.tag = SYSINFO_VAL_FUNCTION_DATA;
681 ret.data.data = (void *) stats_exception;
682 ret.data.size = sizeof(stats_exc_t);
683
684 stats_exception->id = excn;
685 str_cpy(stats_exception->desc, EXC_NAME_BUFLEN, exc_table[excn].name);
686 stats_exception->hot = exc_table[excn].hot;
687 stats_exception->cycles = exc_table[excn].cycles;
688 stats_exception->count = exc_table[excn].count;
689
690 irq_spinlock_unlock(&exctbl_lock, true);
691 }
692
693 return ret;
694}
695
696/** Get physical memory statistics
697 *
698 * @param item Sysinfo item (unused).
699 * @param size Size of the returned data.
700 * @param dry_run Do not get the data, just calculate the size.
701 * @param data Unused.
702 *
703 * @return Data containing stats_physmem_t.
704 * If the return value is not NULL, it should be freed
705 * in the context of the sysinfo request.
706 */
707static void *get_stats_physmem(struct sysinfo_item *item, size_t *size,
708 bool dry_run, void *data)
709{
710 *size = sizeof(stats_physmem_t);
711 if (dry_run)
712 return NULL;
713
714 stats_physmem_t *stats_physmem =
715 (stats_physmem_t *) malloc(*size, FRAME_ATOMIC);
716 if (stats_physmem == NULL) {
717 *size = 0;
718 return NULL;
719 }
720
721 zones_stats(&(stats_physmem->total), &(stats_physmem->unavail),
722 &(stats_physmem->used), &(stats_physmem->free));
723
724 return ((void *) stats_physmem);
725}
726
727/** Get system load
728 *
729 * @param item Sysinfo item (unused).
730 * @param size Size of the returned data.
731 * @param dry_run Do not get the data, just calculate the size.
732 * @param data Unused.
733 *
734 * @return Data several load_t values.
735 * If the return value is not NULL, it should be freed
736 * in the context of the sysinfo request.
737 */
738static void *get_stats_load(struct sysinfo_item *item, size_t *size,
739 bool dry_run, void *data)
740{
741 *size = sizeof(load_t) * LOAD_STEPS;
742 if (dry_run)
743 return NULL;
744
745 load_t *stats_load = (load_t *) malloc(*size, FRAME_ATOMIC);
746 if (stats_load == NULL) {
747 *size = 0;
748 return NULL;
749 }
750
751 /* To always get consistent values acquire the mutex */
752 mutex_lock(&load_lock);
753
754 unsigned int i;
755 for (i = 0; i < LOAD_STEPS; i++)
756 stats_load[i] = avenrdy[i] << LOAD_KERNEL_SHIFT;
757
758 mutex_unlock(&load_lock);
759
760 return ((void *) stats_load);
761}
762
763/** Calculate load
764 *
765 */
766static inline load_t load_calc(load_t load, load_t exp, atomic_count_t ready)
767{
768 load *= exp;
769 load += (ready << LOAD_FIXED_SHIFT) * (LOAD_FIXED_1 - exp);
770
771 return (load >> LOAD_FIXED_SHIFT);
772}
773
774/** Load computation thread.
775 *
776 * Compute system load every few seconds.
777 *
778 * @param arg Unused.
779 *
780 */
781void kload(void *arg)
782{
783 thread_detach(THREAD);
784
785 while (true) {
786 atomic_count_t ready = atomic_get(&nrdy);
787
788 /* Mutually exclude with get_stats_load() */
789 mutex_lock(&load_lock);
790
791 unsigned int i;
792 for (i = 0; i < LOAD_STEPS; i++)
793 avenrdy[i] = load_calc(avenrdy[i], load_exp[i], ready);
794
795 mutex_unlock(&load_lock);
796
797 thread_sleep(LOAD_INTERVAL);
798 }
799}
800
/** Register sysinfo statistical items
 *
 * Initialize the load lock and register all statistics
 * generators with the sysinfo subsystem.
 *
 */
void stats_init(void)
{
	mutex_initialize(&load_lock, MUTEX_PASSIVE);

	/* Whole-table generators */
	sysinfo_set_item_gen_data("system.cpus", NULL, get_stats_cpus, NULL);
	sysinfo_set_item_gen_data("system.physmem", NULL, get_stats_physmem, NULL);
	sysinfo_set_item_gen_data("system.load", NULL, get_stats_load, NULL);
	sysinfo_set_item_gen_data("system.tasks", NULL, get_stats_tasks, NULL);
	sysinfo_set_item_gen_data("system.threads", NULL, get_stats_threads, NULL);
	sysinfo_set_item_gen_data("system.exceptions", NULL, get_stats_exceptions, NULL);
	/* Per-ID subtree lookups (ID is string-encoded, see the getters) */
	sysinfo_set_subtree_fn("system.tasks", NULL, get_stats_task, NULL);
	sysinfo_set_subtree_fn("system.threads", NULL, get_stats_thread, NULL);
	sysinfo_set_subtree_fn("system.exceptions", NULL, get_stats_exception, NULL);
}
818
819/** @}
820 */
Note: See TracBrowser for help on using the repository browser.