source: mainline/kernel/generic/src/sysinfo/stats.c@ b2ec5cf

ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since b2ec5cf was b2ec5cf, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 3 years ago

Implement atomic_time_stat_t for lockless timekeeping

We keep monotonically increasing temporal statistics in several places.
They are frequently written from the thread that owns them, and rarely
read from other threads in certain syscalls. This new code serves the
purpose of avoiding the need for synchronization on the writer side.
On 64-bit systems, we can simply assume that 64-bit writes are indivisible,
and relaxed atomic reads/writes merely serve to formally prevent C
undefined behavior from data races (they translate to regular memory
reads/writes in assembly).

On 32-bit systems, we use the same algorithm that has been used for
userspace clock access: three fields plus memory barriers that maintain
consistency of reads when the upper half changes. Only readers always
synchronize, though; for writers, barriers are avoided in the common case
where the upper half remains unchanged.

  • Property mode set to 100644
File size: 23.1 KB
Line 
1/*
2 * Copyright (c) 2010 Stanislav Kozina
3 * Copyright (c) 2010 Martin Decky
4 * Copyright (c) 2018 Jiri Svoboda
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * - Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * - Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * - The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/** @addtogroup kernel_generic
32 * @{
33 */
34/** @file
35 */
36
37#include <assert.h>
38#include <typedefs.h>
39#include <abi/sysinfo.h>
40#include <sysinfo/stats.h>
41#include <sysinfo/sysinfo.h>
42#include <synch/spinlock.h>
43#include <synch/mutex.h>
44#include <time/clock.h>
45#include <mm/frame.h>
46#include <proc/task.h>
47#include <proc/thread.h>
48#include <interrupt.h>
49#include <stdbool.h>
50#include <str.h>
51#include <errno.h>
52#include <cpu.h>
53#include <arch.h>
54#include <stdlib.h>
55
/** Bits of fixed-point precision for load */
#define LOAD_FIXED_SHIFT 11

/** Uspace load fixed-point precision */
#define LOAD_USPACE_SHIFT 6

/** Kernel load shift (kernel precision minus uspace precision) */
#define LOAD_KERNEL_SHIFT (LOAD_FIXED_SHIFT - LOAD_USPACE_SHIFT)

/** 1.0 as fixed-point for load */
#define LOAD_FIXED_1 (1 << LOAD_FIXED_SHIFT)

/** Compute load in 5 second intervals */
#define LOAD_INTERVAL 5

/** IPC connections statistics state
 *
 * Shared between the counting pass and the gathering pass of
 * get_stats_ipccs(), consumed by produce_stats_ipcc_cb().
 */
typedef struct {
	bool counting;       /**< True: counting pass; false: gathering pass */
	size_t count;        /**< Number of connections counted */
	size_t i;            /**< Next free index in data */
	stats_ipcc_t *data;  /**< Output buffer (NULL during a dry run) */
} ipccs_state_t;

/** Fixed-point representation of
 *
 * 1 / exp(5 sec / 1 min)
 * 1 / exp(5 sec / 5 min)
 * 1 / exp(5 sec / 15 min)
 *
 */
static load_t load_exp[LOAD_STEPS] = { 1884, 2014, 2037 };

/** Running average of the number of ready threads (guarded by load_lock) */
static load_t avenrdy[LOAD_STEPS] = { 0, 0, 0 };

/** Load calculation lock (serializes kload() vs. get_stats_load()) */
static mutex_t load_lock;
93
94/** Get statistics of all CPUs
95 *
96 * @param item Sysinfo item (unused).
97 * @param size Size of the returned data.
98 * @param dry_run Do not get the data, just calculate the size.
99 * @param data Unused.
100 *
101 * @return Data containing several stats_cpu_t structures.
102 * If the return value is not NULL, it should be freed
103 * in the context of the sysinfo request.
104 */
105static void *get_stats_cpus(struct sysinfo_item *item, size_t *size,
106 bool dry_run, void *data)
107{
108 *size = sizeof(stats_cpu_t) * config.cpu_count;
109 if (dry_run)
110 return NULL;
111
112 /* Assumption: config.cpu_count is constant */
113 stats_cpu_t *stats_cpus = (stats_cpu_t *) malloc(*size);
114 if (stats_cpus == NULL) {
115 *size = 0;
116 return NULL;
117 }
118
119 size_t i;
120 for (i = 0; i < config.cpu_count; i++) {
121 irq_spinlock_lock(&cpus[i].lock, true);
122
123 stats_cpus[i].id = cpus[i].id;
124 stats_cpus[i].active = cpus[i].active;
125 stats_cpus[i].frequency_mhz = cpus[i].frequency_mhz;
126
127 stats_cpus[i].busy_cycles = atomic_time_read(&cpus[i].busy_cycles);
128 stats_cpus[i].idle_cycles = atomic_time_read(&cpus[i].idle_cycles);
129
130 irq_spinlock_unlock(&cpus[i].lock, true);
131 }
132
133 return ((void *) stats_cpus);
134}
135
136/** Get the size of a virtual address space
137 *
138 * @param as Address space.
139 *
140 * @return Size of the mapped virtual address space (bytes).
141 *
142 */
143static size_t get_task_virtmem(as_t *as)
144{
145 /*
146 * We are holding spinlocks here and therefore are not allowed to
147 * block. Only attempt to lock the address space and address space
148 * area mutexes conditionally. If it is not possible to lock either
149 * object, return inexact statistics by skipping the respective object.
150 */
151
152 if (mutex_trylock(&as->lock) != EOK)
153 return 0;
154
155 size_t pages = 0;
156
157 /* Walk areas in the address space and count pages */
158 as_area_t *area = as_area_first(as);
159 while (area != NULL) {
160 if (mutex_trylock(&area->lock) != EOK)
161 continue;
162
163 pages += area->pages;
164 mutex_unlock(&area->lock);
165 area = as_area_next(area);
166 }
167
168 mutex_unlock(&as->lock);
169
170 return (pages << PAGE_WIDTH);
171}
172
173/** Get the resident (used) size of a virtual address space
174 *
175 * @param as Address space.
176 *
177 * @return Size of the resident (used) virtual address space (bytes).
178 *
179 */
180static size_t get_task_resmem(as_t *as)
181{
182 /*
183 * We are holding spinlocks here and therefore are not allowed to
184 * block. Only attempt to lock the address space and address space
185 * area mutexes conditionally. If it is not possible to lock either
186 * object, return inexact statistics by skipping the respective object.
187 */
188
189 if (mutex_trylock(&as->lock) != EOK)
190 return 0;
191
192 size_t pages = 0;
193
194 /* Walk areas in the address space and count pages */
195 as_area_t *area = as_area_first(as);
196 while (area != NULL) {
197 if (mutex_trylock(&area->lock) != EOK)
198 continue;
199
200 pages += area->used_space.pages;
201 mutex_unlock(&area->lock);
202 area = as_area_next(area);
203 }
204
205 mutex_unlock(&as->lock);
206
207 return (pages << PAGE_WIDTH);
208}
209
/** Produce task statistics
 *
 * Summarize task information into task statistics.
 *
 * The caller must hold task->lock with interrupts disabled (asserted
 * below), which keeps the copied fields stable during the snapshot.
 *
 * @param task       Task.
 * @param stats_task Task statistics structure to fill in.
 *
 */
static void produce_stats_task(task_t *task, stats_task_t *stats_task)
{
	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&task->lock));

	stats_task->task_id = task->taskid;
	str_cpy(stats_task->name, TASK_NAME_BUFLEN, task->name);
	/* Both helpers use trylock only, so they are safe under the held spinlock */
	stats_task->virtmem = get_task_virtmem(task->as);
	stats_task->resmem = get_task_resmem(task->as);
	/*
	 * NOTE(review): the thread count is taken from the task reference
	 * count — confirm that refcount indeed tracks the number of threads.
	 */
	stats_task->threads = atomic_load(&task->refcount);
	task_get_accounting(task, &(stats_task->ucycles),
	    &(stats_task->kcycles));
	stats_task->ipc_info = task->ipc_info;
}
232
233/** Get task statistics
234 *
235 * @param item Sysinfo item (unused).
236 * @param size Size of the returned data.
237 * @param dry_run Do not get the data, just calculate the size.
238 * @param data Unused.
239 *
240 * @return Data containing several stats_task_t structures.
241 * If the return value is not NULL, it should be freed
242 * in the context of the sysinfo request.
243 */
244static void *get_stats_tasks(struct sysinfo_item *item, size_t *size,
245 bool dry_run, void *data)
246{
247 /* Messing with task structures, avoid deadlock */
248 irq_spinlock_lock(&tasks_lock, true);
249
250 /* Count the tasks */
251 size_t count = task_count();
252
253 if (count == 0) {
254 /* No tasks found (strange) */
255 irq_spinlock_unlock(&tasks_lock, true);
256 *size = 0;
257 return NULL;
258 }
259
260 *size = sizeof(stats_task_t) * count;
261 if (dry_run) {
262 irq_spinlock_unlock(&tasks_lock, true);
263 return NULL;
264 }
265
266 stats_task_t *stats_tasks = (stats_task_t *) malloc(*size);
267 if (stats_tasks == NULL) {
268 /* No free space for allocation */
269 irq_spinlock_unlock(&tasks_lock, true);
270 *size = 0;
271 return NULL;
272 }
273
274 /* Gather the statistics for each task */
275 size_t i = 0;
276 task_t *task = task_first();
277 while (task != NULL) {
278 /* Interrupts are already disabled */
279 irq_spinlock_lock(&(task->lock), false);
280
281 /* Record the statistics and increment the index */
282 produce_stats_task(task, &stats_tasks[i]);
283 i++;
284
285 irq_spinlock_unlock(&(task->lock), false);
286 task = task_next(task);
287 }
288
289 irq_spinlock_unlock(&tasks_lock, true);
290
291 return ((void *) stats_tasks);
292}
293
/** Produce thread statistics
 *
 * Summarize thread information into thread statistics.
 *
 * The caller must hold thread->lock with interrupts disabled (asserted
 * below), which keeps the copied fields stable during the snapshot.
 *
 * @param thread       Thread.
 * @param stats_thread Thread statistics structure to fill in.
 *
 */
static void produce_stats_thread(thread_t *thread, stats_thread_t *stats_thread)
{
	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&thread->lock));

	stats_thread->thread_id = thread->tid;
	stats_thread->task_id = thread->task->taskid;
	stats_thread->state = thread->state;
	stats_thread->priority = thread->priority;
	stats_thread->ucycles = thread->ucycles;
	stats_thread->kcycles = thread->kcycles;

	/* A thread not currently assigned to any CPU reports on_cpu == false */
	if (thread->cpu != NULL) {
		stats_thread->on_cpu = true;
		stats_thread->cpu = thread->cpu->id;
	} else
		stats_thread->on_cpu = false;
}
320
321/** Get thread statistics
322 *
323 * @param item Sysinfo item (unused).
324 * @param size Size of the returned data.
325 * @param dry_run Do not get the data, just calculate the size.
326 * @param data Unused.
327 *
328 * @return Data containing several stats_task_t structures.
329 * If the return value is not NULL, it should be freed
330 * in the context of the sysinfo request.
331 */
332static void *get_stats_threads(struct sysinfo_item *item, size_t *size,
333 bool dry_run, void *data)
334{
335 /* Messing with threads structures */
336 irq_spinlock_lock(&threads_lock, true);
337
338 /* Count the threads */
339 size_t count = thread_count();
340
341 if (count == 0) {
342 /* No threads found (strange) */
343 irq_spinlock_unlock(&threads_lock, true);
344 *size = 0;
345 return NULL;
346 }
347
348 *size = sizeof(stats_thread_t) * count;
349 if (dry_run) {
350 irq_spinlock_unlock(&threads_lock, true);
351 return NULL;
352 }
353
354 stats_thread_t *stats_threads = (stats_thread_t *) malloc(*size);
355 if (stats_threads == NULL) {
356 /* No free space for allocation */
357 irq_spinlock_unlock(&threads_lock, true);
358 *size = 0;
359 return NULL;
360 }
361
362 /* Walk tha thread tree again to gather the statistics */
363 size_t i = 0;
364
365 thread_t *thread = thread_first();
366 while (thread != NULL) {
367 /* Interrupts are already disabled */
368 irq_spinlock_lock(&thread->lock, false);
369
370 /* Record the statistics and increment the index */
371 produce_stats_thread(thread, &stats_threads[i]);
372 i++;
373
374 irq_spinlock_unlock(&thread->lock, false);
375
376 thread = thread_next(thread);
377 }
378
379 irq_spinlock_unlock(&threads_lock, true);
380
381 return ((void *) stats_threads);
382}
383
384/** Produce IPC connection statistics
385 *
386 * Summarize IPC connection information into IPC connection statistics.
387 *
388 * @param cap Phone capability.
389 * @param arg State variable.
390 *
391 */
392static bool produce_stats_ipcc_cb(cap_t *cap, void *arg)
393{
394 phone_t *phone = cap->kobject->phone;
395 ipccs_state_t *state = (ipccs_state_t *) arg;
396
397 if (state->counting) {
398 /*
399 * Simply update the number of entries
400 * in case we are in the counting mode.
401 */
402
403 state->count++;
404 return true;
405 }
406
407 /* We are in the gathering mode */
408
409 if ((state->data == NULL) || (state->i >= state->count)) {
410 /*
411 * Do nothing if we have no buffer
412 * to store the data to (meaning we are
413 * in a dry run) or the buffer is already
414 * full.
415 */
416
417 return true;
418 }
419
420 mutex_lock(&phone->lock);
421
422 if (phone->state == IPC_PHONE_CONNECTED) {
423 state->data[state->i].caller = phone->caller->taskid;
424 state->data[state->i].callee = phone->callee->task->taskid;
425 state->i++;
426 }
427
428 mutex_unlock(&phone->lock);
429
430 return true;
431}
432
433/** Get IPC connections statistics
434 *
435 * @param item Sysinfo item (unused).
436 * @param size Size of the returned data.
437 * @param dry_run Do not get the data, just calculate the size.
438 * @param data Unused.
439 *
440 * @return Data containing several stats_ipccs_t structures.
441 * If the return value is not NULL, it should be freed
442 * in the context of the sysinfo request.
443 *
444 */
445static void *get_stats_ipccs(struct sysinfo_item *item, size_t *size,
446 bool dry_run, void *data)
447{
448 /* Messing with tasks structures, avoid deadlock */
449 irq_spinlock_lock(&tasks_lock, true);
450
451 ipccs_state_t state = {
452 .counting = true,
453 .count = 0,
454 .i = 0,
455 .data = NULL
456 };
457
458 /* Compute the number of IPC connections */
459 task_t *task = task_first();
460 while (task != NULL) {
461 task_hold(task);
462 irq_spinlock_unlock(&tasks_lock, true);
463
464 caps_apply_to_kobject_type(task, KOBJECT_TYPE_PHONE,
465 produce_stats_ipcc_cb, &state);
466
467 irq_spinlock_lock(&tasks_lock, true);
468
469 task = task_next(task);
470 }
471
472 state.counting = false;
473 *size = sizeof(stats_ipcc_t) * state.count;
474
475 if (!dry_run)
476 state.data = (stats_ipcc_t *) malloc(*size);
477
478 /* Gather the statistics for each task */
479 task = task_first();
480 while (task != NULL) {
481 /* We already hold a reference to the task */
482 irq_spinlock_unlock(&tasks_lock, true);
483
484 caps_apply_to_kobject_type(task, KOBJECT_TYPE_PHONE,
485 produce_stats_ipcc_cb, &state);
486
487 irq_spinlock_lock(&tasks_lock, true);
488
489 task_t *prev_task = task;
490 task = task_next(prev_task);
491 task_release(prev_task);
492 }
493
494 irq_spinlock_unlock(&tasks_lock, true);
495
496 return ((void *) state.data);
497}
498
499/** Get a single task statistics
500 *
501 * Get statistics of a given task. The task ID is passed
502 * as a string (current limitation of the sysinfo interface,
503 * but it is still reasonable for the given purpose).
504 *
505 * @param name Task ID (string-encoded number).
506 * @param dry_run Do not get the data, just calculate the size.
507 * @param data Unused.
508 *
509 * @return Sysinfo return holder. The type of the returned
510 * data is either SYSINFO_VAL_UNDEFINED (unknown
511 * task ID or memory allocation error) or
512 * SYSINFO_VAL_FUNCTION_DATA (in that case the
513 * generated data should be freed within the
514 * sysinfo request context).
515 *
516 */
517static sysinfo_return_t get_stats_task(const char *name, bool dry_run,
518 void *data)
519{
520 /* Initially no return value */
521 sysinfo_return_t ret;
522 ret.tag = SYSINFO_VAL_UNDEFINED;
523
524 /* Parse the task ID */
525 task_id_t task_id;
526 if (str_uint64_t(name, NULL, 0, true, &task_id) != EOK)
527 return ret;
528
529 /* Messing with task structures, avoid deadlock */
530 irq_spinlock_lock(&tasks_lock, true);
531
532 task_t *task = task_find_by_id(task_id);
533 if (task == NULL) {
534 /* No task with this ID */
535 irq_spinlock_unlock(&tasks_lock, true);
536 return ret;
537 }
538
539 if (dry_run) {
540 ret.tag = SYSINFO_VAL_FUNCTION_DATA;
541 ret.data.data = NULL;
542 ret.data.size = sizeof(stats_task_t);
543
544 irq_spinlock_unlock(&tasks_lock, true);
545 } else {
546 /* Allocate stats_task_t structure */
547 stats_task_t *stats_task =
548 (stats_task_t *) malloc(sizeof(stats_task_t));
549 if (stats_task == NULL) {
550 irq_spinlock_unlock(&tasks_lock, true);
551 return ret;
552 }
553
554 /* Correct return value */
555 ret.tag = SYSINFO_VAL_FUNCTION_DATA;
556 ret.data.data = (void *) stats_task;
557 ret.data.size = sizeof(stats_task_t);
558
559 /* Hand-over-hand locking */
560 irq_spinlock_exchange(&tasks_lock, &task->lock);
561
562 produce_stats_task(task, stats_task);
563
564 irq_spinlock_unlock(&task->lock, true);
565 }
566
567 return ret;
568}
569
570/** Get thread statistics
571 *
572 * Get statistics of a given thread. The thread ID is passed
573 * as a string (current limitation of the sysinfo interface,
574 * but it is still reasonable for the given purpose).
575 *
576 * @param name Thread ID (string-encoded number).
577 * @param dry_run Do not get the data, just calculate the size.
578 * @param data Unused.
579 *
580 * @return Sysinfo return holder. The type of the returned
581 * data is either SYSINFO_VAL_UNDEFINED (unknown
582 * thread ID or memory allocation error) or
583 * SYSINFO_VAL_FUNCTION_DATA (in that case the
584 * generated data should be freed within the
585 * sysinfo request context).
586 *
587 */
588static sysinfo_return_t get_stats_thread(const char *name, bool dry_run,
589 void *data)
590{
591 /* Initially no return value */
592 sysinfo_return_t ret;
593 ret.tag = SYSINFO_VAL_UNDEFINED;
594
595 /* Parse the thread ID */
596 thread_id_t thread_id;
597 if (str_uint64_t(name, NULL, 0, true, &thread_id) != EOK)
598 return ret;
599
600 /* Messing with threads structures */
601 irq_spinlock_lock(&threads_lock, true);
602
603 thread_t *thread = thread_find_by_id(thread_id);
604 if (thread == NULL) {
605 /* No thread with this ID */
606 irq_spinlock_unlock(&threads_lock, true);
607 return ret;
608 }
609
610 if (dry_run) {
611 ret.tag = SYSINFO_VAL_FUNCTION_DATA;
612 ret.data.data = NULL;
613 ret.data.size = sizeof(stats_thread_t);
614
615 irq_spinlock_unlock(&threads_lock, true);
616 } else {
617 /* Allocate stats_thread_t structure */
618 stats_thread_t *stats_thread =
619 (stats_thread_t *) malloc(sizeof(stats_thread_t));
620 if (stats_thread == NULL) {
621 irq_spinlock_unlock(&threads_lock, true);
622 return ret;
623 }
624
625 /* Correct return value */
626 ret.tag = SYSINFO_VAL_FUNCTION_DATA;
627 ret.data.data = (void *) stats_thread;
628 ret.data.size = sizeof(stats_thread_t);
629
630 /*
631 * Replaced hand-over-hand locking with regular nested sections
632 * to avoid weak reference leak issues.
633 */
634 irq_spinlock_lock(&thread->lock, false);
635 produce_stats_thread(thread, stats_thread);
636 irq_spinlock_unlock(&thread->lock, false);
637
638 irq_spinlock_unlock(&threads_lock, true);
639 }
640
641 return ret;
642}
643
644/** Get exceptions statistics
645 *
646 * @param item Sysinfo item (unused).
647 * @param size Size of the returned data.
648 * @param dry_run Do not get the data, just calculate the size.
649 * @param data Unused.
650 *
651 * @return Data containing several stats_exc_t structures.
652 * If the return value is not NULL, it should be freed
653 * in the context of the sysinfo request.
654 */
655static void *get_stats_exceptions(struct sysinfo_item *item, size_t *size,
656 bool dry_run, void *data)
657{
658 *size = sizeof(stats_exc_t) * IVT_ITEMS;
659
660 if ((dry_run) || (IVT_ITEMS == 0))
661 return NULL;
662
663 stats_exc_t *stats_exceptions =
664 (stats_exc_t *) malloc(*size);
665 if (stats_exceptions == NULL) {
666 /* No free space for allocation */
667 *size = 0;
668 return NULL;
669 }
670
671#if (IVT_ITEMS > 0)
672 /* Messing with exception table, avoid deadlock */
673 irq_spinlock_lock(&exctbl_lock, true);
674
675 unsigned int i;
676 for (i = 0; i < IVT_ITEMS; i++) {
677 stats_exceptions[i].id = i + IVT_FIRST;
678 str_cpy(stats_exceptions[i].desc, EXC_NAME_BUFLEN, exc_table[i].name);
679 stats_exceptions[i].hot = exc_table[i].hot;
680 stats_exceptions[i].cycles = exc_table[i].cycles;
681 stats_exceptions[i].count = exc_table[i].count;
682 }
683
684 irq_spinlock_unlock(&exctbl_lock, true);
685#endif
686
687 return ((void *) stats_exceptions);
688}
689
690/** Get exception statistics
691 *
692 * Get statistics of a given exception. The exception number
693 * is passed as a string (current limitation of the sysinfo
694 * interface, but it is still reasonable for the given purpose).
695 *
696 * @param name Exception number (string-encoded number).
697 * @param dry_run Do not get the data, just calculate the size.
698 * @param data Unused.
699 *
700 * @return Sysinfo return holder. The type of the returned
701 * data is either SYSINFO_VAL_UNDEFINED (unknown
702 * exception number or memory allocation error) or
703 * SYSINFO_VAL_FUNCTION_DATA (in that case the
704 * generated data should be freed within the
705 * sysinfo request context).
706 *
707 */
708static sysinfo_return_t get_stats_exception(const char *name, bool dry_run,
709 void *data)
710{
711 /* Initially no return value */
712 sysinfo_return_t ret;
713 ret.tag = SYSINFO_VAL_UNDEFINED;
714
715 /* Parse the exception number */
716 uint64_t excn;
717 if (str_uint64_t(name, NULL, 0, true, &excn) != EOK)
718 return ret;
719
720#if (IVT_FIRST > 0)
721 if (excn < IVT_FIRST)
722 return ret;
723#endif
724
725#if (IVT_ITEMS + IVT_FIRST == 0)
726 return ret;
727#else
728 if (excn >= IVT_ITEMS + IVT_FIRST)
729 return ret;
730#endif
731
732 if (dry_run) {
733 ret.tag = SYSINFO_VAL_FUNCTION_DATA;
734 ret.data.data = NULL;
735 ret.data.size = sizeof(stats_thread_t);
736 } else {
737 /* Update excn index for accessing exc_table */
738 excn -= IVT_FIRST;
739
740 /* Allocate stats_exc_t structure */
741 stats_exc_t *stats_exception =
742 (stats_exc_t *) malloc(sizeof(stats_exc_t));
743 if (stats_exception == NULL)
744 return ret;
745
746 /* Messing with exception table, avoid deadlock */
747 irq_spinlock_lock(&exctbl_lock, true);
748
749 /* Correct return value */
750 ret.tag = SYSINFO_VAL_FUNCTION_DATA;
751 ret.data.data = (void *) stats_exception;
752 ret.data.size = sizeof(stats_exc_t);
753
754 stats_exception->id = excn;
755 str_cpy(stats_exception->desc, EXC_NAME_BUFLEN, exc_table[excn].name);
756 stats_exception->hot = exc_table[excn].hot;
757 stats_exception->cycles = exc_table[excn].cycles;
758 stats_exception->count = exc_table[excn].count;
759
760 irq_spinlock_unlock(&exctbl_lock, true);
761 }
762
763 return ret;
764}
765
766/** Get physical memory statistics
767 *
768 * @param item Sysinfo item (unused).
769 * @param size Size of the returned data.
770 * @param dry_run Do not get the data, just calculate the size.
771 * @param data Unused.
772 *
773 * @return Data containing stats_physmem_t.
774 * If the return value is not NULL, it should be freed
775 * in the context of the sysinfo request.
776 */
777static void *get_stats_physmem(struct sysinfo_item *item, size_t *size,
778 bool dry_run, void *data)
779{
780 *size = sizeof(stats_physmem_t);
781 if (dry_run)
782 return NULL;
783
784 stats_physmem_t *stats_physmem =
785 (stats_physmem_t *) malloc(*size);
786 if (stats_physmem == NULL) {
787 *size = 0;
788 return NULL;
789 }
790
791 zones_stats(&(stats_physmem->total), &(stats_physmem->unavail),
792 &(stats_physmem->used), &(stats_physmem->free));
793
794 return ((void *) stats_physmem);
795}
796
797/** Get system load
798 *
799 * @param item Sysinfo item (unused).
800 * @param size Size of the returned data.
801 * @param dry_run Do not get the data, just calculate the size.
802 * @param data Unused.
803 *
804 * @return Data several load_t values.
805 * If the return value is not NULL, it should be freed
806 * in the context of the sysinfo request.
807 */
808static void *get_stats_load(struct sysinfo_item *item, size_t *size,
809 bool dry_run, void *data)
810{
811 *size = sizeof(load_t) * LOAD_STEPS;
812 if (dry_run)
813 return NULL;
814
815 load_t *stats_load = (load_t *) malloc(*size);
816 if (stats_load == NULL) {
817 *size = 0;
818 return NULL;
819 }
820
821 /* To always get consistent values acquire the mutex */
822 mutex_lock(&load_lock);
823
824 unsigned int i;
825 for (i = 0; i < LOAD_STEPS; i++)
826 stats_load[i] = avenrdy[i] << LOAD_KERNEL_SHIFT;
827
828 mutex_unlock(&load_lock);
829
830 return ((void *) stats_load);
831}
832
833/** Calculate load
834 *
835 */
836static inline load_t load_calc(load_t load, load_t exp, size_t ready)
837{
838 load *= exp;
839 load += (ready << LOAD_FIXED_SHIFT) * (LOAD_FIXED_1 - exp);
840
841 return (load >> LOAD_FIXED_SHIFT);
842}
843
844/** Load computation thread.
845 *
846 * Compute system load every few seconds.
847 *
848 * @param arg Unused.
849 *
850 */
851void kload(void *arg)
852{
853 while (true) {
854 size_t ready = atomic_load(&nrdy);
855
856 /* Mutually exclude with get_stats_load() */
857 mutex_lock(&load_lock);
858
859 unsigned int i;
860 for (i = 0; i < LOAD_STEPS; i++)
861 avenrdy[i] = load_calc(avenrdy[i], load_exp[i], ready);
862
863 mutex_unlock(&load_lock);
864
865 thread_sleep(LOAD_INTERVAL);
866 }
867}
868
/** Register sysinfo statistical items
 *
 * Called once during kernel initialization. The registered generators
 * run lazily, in the context of each sysinfo request.
 *
 */
void stats_init(void)
{
	mutex_initialize(&load_lock, MUTEX_PASSIVE);

	/* Whole-system tables (one record per CPU/task/thread/...) */
	sysinfo_set_item_gen_data("system.cpus", NULL, get_stats_cpus, NULL);
	sysinfo_set_item_gen_data("system.physmem", NULL, get_stats_physmem, NULL);
	sysinfo_set_item_gen_data("system.load", NULL, get_stats_load, NULL);
	sysinfo_set_item_gen_data("system.tasks", NULL, get_stats_tasks, NULL);
	sysinfo_set_item_gen_data("system.threads", NULL, get_stats_threads, NULL);
	sysinfo_set_item_gen_data("system.ipccs", NULL, get_stats_ipccs, NULL);
	sysinfo_set_item_gen_data("system.exceptions", NULL, get_stats_exceptions, NULL);
	/* Per-object lookups: the object ID is string-encoded in the item name */
	sysinfo_set_subtree_fn("system.tasks", NULL, get_stats_task, NULL);
	sysinfo_set_subtree_fn("system.threads", NULL, get_stats_thread, NULL);
	sysinfo_set_subtree_fn("system.exceptions", NULL, get_stats_exception, NULL);
}
887
888/** @}
889 */
Note: See TracBrowser for help on using the repository browser.