source: mainline/kernel/generic/src/sysinfo/stats.c@ 170332d

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 170332d was d69f959, checked in by Jakub Jermar <jakub@…>, 15 years ago

Take the address space and address space area mutexes conditionally in
get_task_virtmem() so that we never block while holding the tasks and TASK
spinlocks.

  • Property mode set to 100644
File size: 18.2 KB
Line 
1/*
2 * Copyright (c) 2010 Stanislav Kozina
3 * Copyright (c) 2010 Martin Decky
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup generic
31 * @{
32 */
33/** @file
34 */
35
36#include <typedefs.h>
37#include <sysinfo/abi.h>
38#include <sysinfo/stats.h>
39#include <sysinfo/sysinfo.h>
40#include <time/clock.h>
41#include <mm/frame.h>
42#include <proc/task.h>
43#include <proc/thread.h>
44#include <str.h>
45#include <errno.h>
46#include <cpu.h>
47#include <arch.h>
48
49/** Bits of fixed-point precision for load */
50#define LOAD_FIXED_SHIFT 11
51
52/** 1.0 as fixed-point for load */
53#define LOAD_FIXED_1 (1 << LOAD_FIXED_SHIFT)
54
55/** Compute load in 5 second intervals */
56#define LOAD_INTERVAL 5
57
58/** Fixed-point representation of
59 *
60 * 1 / exp(5 sec / 1 min)
61 * 1 / exp(5 sec / 5 min)
62 * 1 / exp(5 sec / 15 min)
63 *
64 */
65static load_t load_exp[LOAD_STEPS] = {1884, 2014, 2037};
66
67/** Running average of the number of ready threads */
68static load_t avenrdy[LOAD_STEPS] = {0, 0, 0};
69
70/** Load calculation spinlock */
71SPINLOCK_STATIC_INITIALIZE_NAME(load_lock, "load_lock");
72
/** Get system uptime
 *
 * @param item Sysinfo item (unused).
 *
 * @return System uptime (in seconds).
 *
 */
static unative_t get_stats_uptime(struct sysinfo_item *item)
{
	/*
	 * This doesn't have to be very accurate: seconds1 is read without
	 * any synchronization, so a slightly stale value is acceptable here.
	 */
	return uptime->seconds1;
}
85
86/** Get statistics of all CPUs
87 *
88 * @param item Sysinfo item (unused).
89 * @param size Size of the returned data.
90 * @param dry_run Do not get the data, just calculate the size.
91 *
92 * @return Data containing several stats_cpu_t structures.
93 * If the return value is not NULL, it should be freed
94 * in the context of the sysinfo request.
95 */
96static void *get_stats_cpus(struct sysinfo_item *item, size_t *size,
97 bool dry_run)
98{
99 *size = sizeof(stats_cpu_t) * config.cpu_count;
100 if (dry_run)
101 return NULL;
102
103 /* Assumption: config.cpu_count is constant */
104 stats_cpu_t *stats_cpus = (stats_cpu_t *) malloc(*size, FRAME_ATOMIC);
105 if (stats_cpus == NULL) {
106 *size = 0;
107 return NULL;
108 }
109
110 /* Each CPU structure is locked separatelly */
111 ipl_t ipl = interrupts_disable();
112
113 size_t i;
114 for (i = 0; i < config.cpu_count; i++) {
115 spinlock_lock(&cpus[i].lock);
116
117 stats_cpus[i].id = cpus[i].id;
118 stats_cpus[i].active = cpus[i].active;
119 stats_cpus[i].frequency_mhz = cpus[i].frequency_mhz;
120 stats_cpus[i].busy_ticks = cpus[i].busy_ticks;
121 stats_cpus[i].idle_ticks = cpus[i].idle_ticks;
122
123 spinlock_unlock(&cpus[i].lock);
124 }
125
126 interrupts_restore(ipl);
127
128 return ((void *) stats_cpus);
129}
130
131/** Count number of nodes in an AVL tree
132 *
133 * AVL tree walker for counting nodes.
134 *
135 * @param node AVL tree node (unused).
136 * @param arg Pointer to the counter (size_t).
137 *
138 * @param Always true (continue the walk).
139 *
140 */
141static bool avl_count_walker(avltree_node_t *node, void *arg)
142{
143 size_t *count = (size_t *) arg;
144 (*count)++;
145
146 return true;
147}
148
/** Get the size of a virtual address space
 *
 * @param as Address space.
 *
 * @return Size of the mapped virtual address space (bytes).
 *         The value may be inexact (even zero) if the necessary
 *         mutexes could not be acquired without blocking.
 *
 */
static size_t get_task_virtmem(as_t *as)
{
	size_t result = 0;
	
	/*
	 * We are holding some spinlocks here and therefore are not allowed to
	 * block. Only attempt to lock the address space and address space area
	 * mutexes conditionally. If it is not possible to lock either object,
	 * allow the statistics to be inexact by skipping the respective object.
	 *
	 * Note that it may be infinitely better to let the address space
	 * management code compute these statistics as it proceeds instead of
	 * having them calculated over and over again here.
	 */
	
	if (!mutex_trylock(&as->lock))
		return result * PAGE_SIZE;
	
	/* Walk the B+ tree of address space areas and sum up their page counts */
	link_t *cur;
	for (cur = as->as_area_btree.leaf_head.next;
	    cur != &as->as_area_btree.leaf_head; cur = cur->next) {
		btree_node_t *node =
		    list_get_instance(cur, btree_node_t, leaf_link);
		
		unsigned int i;
		for (i = 0; i < node->keys; i++) {
			as_area_t *area = node->value[i];
			
			/* Skip (undercount) areas that are currently locked */
			if (!mutex_trylock(&area->lock))
				continue;
			result += area->pages;
			mutex_unlock(&area->lock);
		}
	}
	
	mutex_unlock(&as->lock);
	
	return result * PAGE_SIZE;
}
196
/** Produce task statistics
 *
 * Summarize task information into task statistics.
 * Task lock should be held and interrupts disabled
 * before executing this function.
 *
 * @param task       Task.
 * @param stats_task Task statistics.
 *
 */
static void produce_stats_task(task_t *task, stats_task_t *stats_task)
{
	stats_task->task_id = task->taskid;
	str_cpy(stats_task->name, TASK_NAME_BUFLEN, task->name);
	stats_task->virtmem = get_task_virtmem(task->as);
	/*
	 * NOTE(review): the thread count is taken from task->refcount —
	 * presumably each thread holds one task reference; confirm against
	 * the task/thread life cycle code.
	 */
	stats_task->threads = atomic_get(&task->refcount);
	task_get_accounting(task, &(stats_task->ucycles),
	    &(stats_task->kcycles));
	stats_task->ipc_info = task->ipc_info;
}
217
218/** Gather statistics of all tasks
219 *
220 * AVL task tree walker for gathering task statistics. Interrupts should
221 * be already disabled while walking the tree.
222 *
223 * @param node AVL task tree node.
224 * @param arg Pointer to the iterator into the array of stats_task_t.
225 *
226 * @param Always true (continue the walk).
227 *
228 */
229static bool task_serialize_walker(avltree_node_t *node, void *arg)
230{
231 stats_task_t **iterator = (stats_task_t **) arg;
232 task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);
233
234 /* Interrupts are already disabled */
235 spinlock_lock(&(task->lock));
236
237 /* Record the statistics and increment the iterator */
238 produce_stats_task(task, *iterator);
239 (*iterator)++;
240
241 spinlock_unlock(&(task->lock));
242
243 return true;
244}
245
/** Get task statistics
 *
 * @param item    Sysinfo item (unused).
 * @param size    Size of the returned data.
 * @param dry_run Do not get the data, just calculate the size.
 *
 * @return Data containing several stats_task_t structures.
 *         If the return value is not NULL, it should be freed
 *         in the context of the sysinfo request.
 */
static void *get_stats_tasks(struct sysinfo_item *item, size_t *size,
    bool dry_run)
{
	/* Messing with task structures, avoid deadlock */
	ipl_t ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);
	
	/* First walk the task tree to count the tasks */
	size_t count = 0;
	avltree_walk(&tasks_tree, avl_count_walker, (void *) &count);
	
	if (count == 0) {
		/* No tasks found (strange) */
		spinlock_unlock(&tasks_lock);
		interrupts_restore(ipl);
		
		*size = 0;
		return NULL;
	}
	
	*size = sizeof(stats_task_t) * count;
	if (dry_run) {
		spinlock_unlock(&tasks_lock);
		interrupts_restore(ipl);
		return NULL;
	}
	
	/* FRAME_ATOMIC: we must not block while holding tasks_lock */
	stats_task_t *stats_tasks = (stats_task_t *) malloc(*size, FRAME_ATOMIC);
	if (stats_tasks == NULL) {
		/* No free space for allocation */
		spinlock_unlock(&tasks_lock);
		interrupts_restore(ipl);
		
		*size = 0;
		return NULL;
	}
	
	/* Walk the task tree again to gather the statistics */
	stats_task_t *iterator = stats_tasks;
	avltree_walk(&tasks_tree, task_serialize_walker, (void *) &iterator);
	
	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);
	
	return ((void *) stats_tasks);
}
302
303/* Produce thread statistics
304 *
305 * Summarize thread information into thread statistics.
306 * Thread lock should be held and interrupts disabled
307 * before executing this function.
308 *
309 * @param thread Thread.
310 * @param stats_thread Thread statistics.
311 *
312 */
313static void produce_stats_thread(thread_t *thread, stats_thread_t *stats_thread)
314{
315 stats_thread->thread_id = thread->tid;
316 stats_thread->task_id = thread->task->taskid;
317 stats_thread->state = thread->state;
318 stats_thread->priority = thread->priority;
319 stats_thread->ucycles = thread->ucycles;
320 stats_thread->kcycles = thread->kcycles;
321
322 if (thread->cpu != NULL) {
323 stats_thread->on_cpu = true;
324 stats_thread->cpu = thread->cpu->id;
325 } else
326 stats_thread->on_cpu = false;
327}
328
329/** Gather statistics of all threads
330 *
331 * AVL three tree walker for gathering thread statistics. Interrupts should
332 * be already disabled while walking the tree.
333 *
334 * @param node AVL thread tree node.
335 * @param arg Pointer to the iterator into the array of thread statistics.
336 *
337 * @param Always true (continue the walk).
338 *
339 */
340static bool thread_serialize_walker(avltree_node_t *node, void *arg)
341{
342 stats_thread_t **iterator = (stats_thread_t **) arg;
343 thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);
344
345 /* Interrupts are already disabled */
346 spinlock_lock(&(thread->lock));
347
348 /* Record the statistics and increment the iterator */
349 produce_stats_thread(thread, *iterator);
350 (*iterator)++;
351
352 spinlock_unlock(&(thread->lock));
353
354 return true;
355}
356
/** Get thread statistics
 *
 * @param item    Sysinfo item (unused).
 * @param size    Size of the returned data.
 * @param dry_run Do not get the data, just calculate the size.
 *
 * @return Data containing several stats_thread_t structures.
 *         If the return value is not NULL, it should be freed
 *         in the context of the sysinfo request.
 */
static void *get_stats_threads(struct sysinfo_item *item, size_t *size,
    bool dry_run)
{
	/* Messing with threads structures, avoid deadlock */
	ipl_t ipl = interrupts_disable();
	spinlock_lock(&threads_lock);
	
	/* First walk the thread tree to count the threads */
	size_t count = 0;
	avltree_walk(&threads_tree, avl_count_walker, (void *) &count);
	
	if (count == 0) {
		/* No threads found (strange) */
		spinlock_unlock(&threads_lock);
		interrupts_restore(ipl);
		
		*size = 0;
		return NULL;
	}
	
	*size = sizeof(stats_thread_t) * count;
	if (dry_run) {
		spinlock_unlock(&threads_lock);
		interrupts_restore(ipl);
		return NULL;
	}
	
	/* FRAME_ATOMIC: we must not block while holding threads_lock */
	stats_thread_t *stats_threads = (stats_thread_t *) malloc(*size, FRAME_ATOMIC);
	if (stats_threads == NULL) {
		/* No free space for allocation */
		spinlock_unlock(&threads_lock);
		interrupts_restore(ipl);
		
		*size = 0;
		return NULL;
	}
	
	/* Walk the thread tree again to gather the statistics */
	stats_thread_t *iterator = stats_threads;
	avltree_walk(&threads_tree, thread_serialize_walker, (void *) &iterator);
	
	spinlock_unlock(&threads_lock);
	interrupts_restore(ipl);
	
	return ((void *) stats_threads);
}
413
/** Get a single task statistics
 *
 * Get statistics of a given task. The task ID is passed
 * as a string (current limitation of the sysinfo interface,
 * but it is still reasonable for the given purpose).
 *
 * @param name    Task ID (string-encoded number).
 * @param dry_run Do not get the data, just calculate the size.
 *
 * @return Sysinfo return holder. The type of the returned
 *         data is either SYSINFO_VAL_UNDEFINED (unknown
 *         task ID or memory allocation error) or
 *         SYSINFO_VAL_FUNCTION_DATA (in that case the
 *         generated data should be freed within the
 *         sysinfo request context).
 *
 */
static sysinfo_return_t get_stats_task(const char *name, bool dry_run)
{
	/* Initially no return value */
	sysinfo_return_t ret;
	ret.tag = SYSINFO_VAL_UNDEFINED;
	
	/* Parse the task ID */
	task_id_t task_id;
	if (str_uint64(name, NULL, 0, true, &task_id) != EOK)
		return ret;
	
	/* Messing with task structures, avoid deadlock */
	ipl_t ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);
	
	task_t *task = task_find_by_id(task_id);
	if (task == NULL) {
		/* No task with this ID */
		spinlock_unlock(&tasks_lock);
		interrupts_restore(ipl);
		return ret;
	}
	
	if (dry_run) {
		/* Dry run: only report the size, no data is produced */
		ret.tag = SYSINFO_VAL_FUNCTION_DATA;
		ret.data.data = NULL;
		ret.data.size = sizeof(stats_task_t);
		
		spinlock_unlock(&tasks_lock);
	} else {
		/* Allocate stats_task_t structure (FRAME_ATOMIC: lock is held) */
		stats_task_t *stats_task =
		    (stats_task_t *) malloc(sizeof(stats_task_t), FRAME_ATOMIC);
		if (stats_task == NULL) {
			spinlock_unlock(&tasks_lock);
			interrupts_restore(ipl);
			return ret;
		}
		
		/* Correct return value */
		ret.tag = SYSINFO_VAL_FUNCTION_DATA;
		ret.data.data = (void *) stats_task;
		ret.data.size = sizeof(stats_task_t);
		
		/*
		 * Hand-over-hand locking: take the task lock before
		 * releasing tasks_lock.
		 */
		spinlock_lock(&task->lock);
		spinlock_unlock(&tasks_lock);
		
		produce_stats_task(task, stats_task);
		
		spinlock_unlock(&task->lock);
	}
	
	interrupts_restore(ipl);
	
	return ret;
}
488
/** Get a single thread statistics
 *
 * Get statistics of a given thread. The thread ID is passed
 * as a string (current limitation of the sysinfo interface,
 * but it is still reasonable for the given purpose).
 *
 * @param name    Thread ID (string-encoded number).
 * @param dry_run Do not get the data, just calculate the size.
 *
 * @return Sysinfo return holder. The type of the returned
 *         data is either SYSINFO_VAL_UNDEFINED (unknown
 *         thread ID or memory allocation error) or
 *         SYSINFO_VAL_FUNCTION_DATA (in that case the
 *         generated data should be freed within the
 *         sysinfo request context).
 *
 */
static sysinfo_return_t get_stats_thread(const char *name, bool dry_run)
{
	/* Initially no return value */
	sysinfo_return_t ret;
	ret.tag = SYSINFO_VAL_UNDEFINED;
	
	/* Parse the thread ID */
	thread_id_t thread_id;
	if (str_uint64(name, NULL, 0, true, &thread_id) != EOK)
		return ret;
	
	/* Messing with threads structures, avoid deadlock */
	ipl_t ipl = interrupts_disable();
	spinlock_lock(&threads_lock);
	
	thread_t *thread = thread_find_by_id(thread_id);
	if (thread == NULL) {
		/* No thread with this ID */
		spinlock_unlock(&threads_lock);
		interrupts_restore(ipl);
		return ret;
	}
	
	if (dry_run) {
		/* Dry run: only report the size, no data is produced */
		ret.tag = SYSINFO_VAL_FUNCTION_DATA;
		ret.data.data = NULL;
		ret.data.size = sizeof(stats_thread_t);
		
		spinlock_unlock(&threads_lock);
	} else {
		/* Allocate stats_thread_t structure (FRAME_ATOMIC: lock is held) */
		stats_thread_t *stats_thread =
		    (stats_thread_t *) malloc(sizeof(stats_thread_t), FRAME_ATOMIC);
		if (stats_thread == NULL) {
			spinlock_unlock(&threads_lock);
			interrupts_restore(ipl);
			return ret;
		}
		
		/* Correct return value */
		ret.tag = SYSINFO_VAL_FUNCTION_DATA;
		ret.data.data = (void *) stats_thread;
		ret.data.size = sizeof(stats_thread_t);
		
		/*
		 * Hand-over-hand locking: take the thread lock before
		 * releasing threads_lock.
		 */
		spinlock_lock(&thread->lock);
		spinlock_unlock(&threads_lock);
		
		produce_stats_thread(thread, stats_thread);
		
		spinlock_unlock(&thread->lock);
	}
	
	interrupts_restore(ipl);
	
	return ret;
}
563
564/** Get physical memory statistics
565 *
566 * @param item Sysinfo item (unused).
567 * @param size Size of the returned data.
568 * @param dry_run Do not get the data, just calculate the size.
569 *
570 * @return Data containing stats_physmem_t.
571 * If the return value is not NULL, it should be freed
572 * in the context of the sysinfo request.
573 */
574static void *get_stats_physmem(struct sysinfo_item *item, size_t *size,
575 bool dry_run)
576{
577 *size = sizeof(stats_physmem_t);
578 if (dry_run)
579 return NULL;
580
581 stats_physmem_t *stats_physmem =
582 (stats_physmem_t *) malloc(*size, FRAME_ATOMIC);
583 if (stats_physmem == NULL) {
584 *size = 0;
585 return NULL;
586 }
587
588 zones_stats(&(stats_physmem->total), &(stats_physmem->unavail),
589 &(stats_physmem->used), &(stats_physmem->free));
590
591 return ((void *) stats_physmem);
592}
593
/** Get system load
 *
 * @param item    Sysinfo item (unused).
 * @param size    Size of the returned data.
 * @param dry_run Do not get the data, just calculate the size.
 *
 * @return Data containing several load_t values.
 *         If the return value is not NULL, it should be freed
 *         in the context of the sysinfo request.
 */
static void *get_stats_load(struct sysinfo_item *item, size_t *size,
    bool dry_run)
{
	*size = sizeof(load_t) * LOAD_STEPS;
	if (dry_run)
		return NULL;
	
	load_t *stats_load = (load_t *) malloc(*size, FRAME_ATOMIC);
	if (stats_load == NULL) {
		*size = 0;
		return NULL;
	}
	
	/* To always get consistent values acquire the spinlock */
	ipl_t ipl = interrupts_disable();
	spinlock_lock(&load_lock);
	
	/*
	 * NOTE(review): avenrdy[] is already in fixed point (see load_calc),
	 * so the shift below re-scales the exported value by another
	 * LOAD_FIXED_1 factor — presumably the userspace consumer expects
	 * this scale; confirm against the reader of "system.load".
	 */
	unsigned int i;
	for (i = 0; i < LOAD_STEPS; i++)
		stats_load[i] = avenrdy[i] << LOAD_FIXED_SHIFT;
	
	spinlock_unlock(&load_lock);
	interrupts_restore(ipl);
	
	return ((void *) stats_load);
}
630
631/** Calculate load
632 *
633 */
634static inline load_t load_calc(load_t load, load_t exp, atomic_count_t ready)
635{
636 load *= exp;
637 load += (ready << LOAD_FIXED_SHIFT) * (LOAD_FIXED_1 - exp);
638
639 return (load >> LOAD_FIXED_SHIFT);
640}
641
642/** Load computation thread.
643 *
644 * Compute system load every few seconds.
645 *
646 * @param arg Unused.
647 *
648 */
649void kload(void *arg)
650{
651 thread_detach(THREAD);
652
653 while (true) {
654 atomic_count_t ready = atomic_get(&nrdy);
655
656 /* Mutually exclude with get_stats_load() */
657 ipl_t ipl = interrupts_disable();
658 spinlock_lock(&load_lock);
659
660 unsigned int i;
661 for (i = 0; i < LOAD_STEPS; i++)
662 avenrdy[i] = load_calc(avenrdy[i], load_exp[i], ready);
663
664 spinlock_unlock(&load_lock);
665 interrupts_restore(ipl);
666
667 thread_sleep(LOAD_INTERVAL);
668 }
669}
670
/** Register sysinfo statistical items
 *
 */
void stats_init(void)
{
	/* Plain value and generated-data items */
	sysinfo_set_item_fn_val("system.uptime", NULL, get_stats_uptime);
	sysinfo_set_item_fn_data("system.cpus", NULL, get_stats_cpus);
	sysinfo_set_item_fn_data("system.physmem", NULL, get_stats_physmem);
	sysinfo_set_item_fn_data("system.load", NULL, get_stats_load);
	sysinfo_set_item_fn_data("system.tasks", NULL, get_stats_tasks);
	sysinfo_set_item_fn_data("system.threads", NULL, get_stats_threads);
	/* Subtrees resolve individual string-encoded task/thread IDs */
	sysinfo_set_subtree_fn("system.tasks", NULL, get_stats_task);
	sysinfo_set_subtree_fn("system.threads", NULL, get_stats_thread);
}
685
686/** @}
687 */
Note: See TracBrowser for help on using the repository browser.