source: mainline/kernel/generic/src/sysinfo/stats.c@ aafed15

/*
 * Copyright (c) 2010 Stanislav Kozina
 * Copyright (c) 2010 Martin Decky
 * Copyright (c) 2018 Jiri Svoboda
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_generic
 * @{
 */
/** @file
 */

#include <assert.h>
#include <typedefs.h>
#include <abi/sysinfo.h>
#include <sysinfo/stats.h>
#include <sysinfo/sysinfo.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <time/clock.h>
#include <mm/frame.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <interrupt.h>
#include <stdbool.h>
#include <str.h>
#include <errno.h>
#include <cpu.h>
#include <arch.h>
#include <stdlib.h>

/** Bits of fixed-point precision for load */
#define LOAD_FIXED_SHIFT 11

/** Uspace load fixed-point precision */
#define LOAD_USPACE_SHIFT 6

/** Kernel load shift */
#define LOAD_KERNEL_SHIFT (LOAD_FIXED_SHIFT - LOAD_USPACE_SHIFT)

/** 1.0 as fixed-point for load */
#define LOAD_FIXED_1 (1 << LOAD_FIXED_SHIFT)

/** Compute load in 5 second intervals */
#define LOAD_INTERVAL 5

/** Fixed-point representation of
 *
 * 1 / exp(5 sec / 1 min)
 * 1 / exp(5 sec / 5 min)
 * 1 / exp(5 sec / 15 min)
 *
 */
static load_t load_exp[LOAD_STEPS] = { 1884, 2014, 2037 };
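
/*
 * The constants above follow from the fixed-point scale: each entry is
 * LOAD_FIXED_1 / exp(LOAD_INTERVAL / T), with LOAD_FIXED_1 = 2048:
 *
 *   2048 / exp(5 / 60)  ~= 1884   (T = 1 min)
 *   2048 / exp(5 / 300) ~= 2014   (T = 5 min)
 *   2048 / exp(5 / 900) ~= 2037   (T = 15 min)
 */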

/** Running average of the number of ready threads */
static load_t avenrdy[LOAD_STEPS] = { 0, 0, 0 };

/** Load calculation lock */
static mutex_t load_lock;

/** Get statistics of all CPUs
 *
 * @param item    Sysinfo item (unused).
 * @param size    Size of the returned data.
 * @param dry_run Do not get the data, just calculate the size.
 * @param data    Unused.
 *
 * @return Data containing several stats_cpu_t structures.
 *         If the return value is not NULL, it should be freed
 *         in the context of the sysinfo request.
 */
static void *get_stats_cpus(struct sysinfo_item *item, size_t *size,
    bool dry_run, void *data)
{
    *size = sizeof(stats_cpu_t) * config.cpu_count;
    if (dry_run)
        return NULL;

    /* Assumption: config.cpu_count is constant */
    stats_cpu_t *stats_cpus = (stats_cpu_t *) malloc(*size);
    if (stats_cpus == NULL) {
        *size = 0;
        return NULL;
    }

    size_t i;
    for (i = 0; i < config.cpu_count; i++) {
        irq_spinlock_lock(&cpus[i].lock, true);

        stats_cpus[i].id = cpus[i].id;
        stats_cpus[i].active = cpus[i].active;
        stats_cpus[i].frequency_mhz = cpus[i].frequency_mhz;
        stats_cpus[i].busy_cycles = cpus[i].busy_cycles;
        stats_cpus[i].idle_cycles = cpus[i].idle_cycles;

        irq_spinlock_unlock(&cpus[i].lock, true);
    }

    return ((void *) stats_cpus);
}

/** Get the size of a virtual address space
 *
 * @param as Address space.
 *
 * @return Size of the mapped virtual address space (bytes).
 *
 */
static size_t get_task_virtmem(as_t *as)
{
    /*
     * We are holding spinlocks here and therefore are not allowed to
     * block. Only attempt to lock the address space and address space
     * area mutexes conditionally. If it is not possible to lock either
     * object, return inexact statistics by skipping the respective object.
     */

    if (mutex_trylock(&as->lock) != EOK)
        return 0;

    size_t pages = 0;

    /* Walk areas in the address space and count pages */
    as_area_t *area = as_area_first(as);
    while (area != NULL) {
        if (mutex_trylock(&area->lock) != EOK) {
            /* Skip the busy area, leaving its pages uncounted. */
            area = as_area_next(area);
            continue;
        }

        pages += area->pages;
        mutex_unlock(&area->lock);
        area = as_area_next(area);
    }

    mutex_unlock(&as->lock);

    return (pages << PAGE_WIDTH);
}

/** Get the resident (used) size of a virtual address space
 *
 * @param as Address space.
 *
 * @return Size of the resident (used) virtual address space (bytes).
 *
 */
static size_t get_task_resmem(as_t *as)
{
    /*
     * We are holding spinlocks here and therefore are not allowed to
     * block. Only attempt to lock the address space and address space
     * area mutexes conditionally. If it is not possible to lock either
     * object, return inexact statistics by skipping the respective object.
     */

    if (mutex_trylock(&as->lock) != EOK)
        return 0;

    size_t pages = 0;

    /* Walk areas in the address space and count pages */
    as_area_t *area = as_area_first(as);
    while (area != NULL) {
        if (mutex_trylock(&area->lock) != EOK) {
            /* Skip the busy area, leaving its pages uncounted. */
            area = as_area_next(area);
            continue;
        }

        pages += area->resident;
        mutex_unlock(&area->lock);
        area = as_area_next(area);
    }

    mutex_unlock(&as->lock);

    return (pages << PAGE_WIDTH);
}

/** Produce task statistics
 *
 * Summarize task information into task statistics.
 *
 * @param task       Task.
 * @param stats_task Task statistics.
 *
 */
static void produce_stats_task(task_t *task, stats_task_t *stats_task)
{
    assert(interrupts_disabled());
    assert(irq_spinlock_locked(&task->lock));

    stats_task->task_id = task->taskid;
    str_cpy(stats_task->name, TASK_NAME_BUFLEN, task->name);
    stats_task->virtmem = get_task_virtmem(task->as);
    stats_task->resmem = get_task_resmem(task->as);
    stats_task->threads = atomic_load(&task->refcount);
    task_get_accounting(task, &(stats_task->ucycles),
        &(stats_task->kcycles));
    stats_task->ipc_info = task->ipc_info;
}

/** Get task statistics
 *
 * @param item    Sysinfo item (unused).
 * @param size    Size of the returned data.
 * @param dry_run Do not get the data, just calculate the size.
 * @param data    Unused.
 *
 * @return Data containing several stats_task_t structures.
 *         If the return value is not NULL, it should be freed
 *         in the context of the sysinfo request.
 */
static void *get_stats_tasks(struct sysinfo_item *item, size_t *size,
    bool dry_run, void *data)
{
    /* Messing with task structures, avoid deadlock */
    irq_spinlock_lock(&tasks_lock, true);

    /* Count the tasks */
    size_t count = task_count();

    if (count == 0) {
        /* No tasks found (strange) */
        irq_spinlock_unlock(&tasks_lock, true);
        *size = 0;
        return NULL;
    }

    *size = sizeof(stats_task_t) * count;
    if (dry_run) {
        irq_spinlock_unlock(&tasks_lock, true);
        return NULL;
    }

    stats_task_t *stats_tasks = (stats_task_t *) malloc(*size);
    if (stats_tasks == NULL) {
        /* No free space for allocation */
        irq_spinlock_unlock(&tasks_lock, true);
        *size = 0;
        return NULL;
    }

    /* Gather the statistics for each task */
    size_t i = 0;
    task_t *task = task_first();
    while (task != NULL) {
        /* Interrupts are already disabled */
        irq_spinlock_lock(&(task->lock), false);

        /* Record the statistics and increment the index */
        produce_stats_task(task, &stats_tasks[i]);
        i++;

        irq_spinlock_unlock(&(task->lock), false);
        task = task_next(task);
    }

    irq_spinlock_unlock(&tasks_lock, true);

    return ((void *) stats_tasks);
}

/** Produce thread statistics
 *
 * Summarize thread information into thread statistics.
 *
 * @param thread       Thread.
 * @param stats_thread Thread statistics.
 *
 */
static void produce_stats_thread(thread_t *thread, stats_thread_t *stats_thread)
{
    assert(interrupts_disabled());
    assert(irq_spinlock_locked(&thread->lock));

    stats_thread->thread_id = thread->tid;
    stats_thread->task_id = thread->task->taskid;
    stats_thread->state = thread->state;
    stats_thread->priority = thread->priority;
    stats_thread->ucycles = thread->ucycles;
    stats_thread->kcycles = thread->kcycles;

    if (thread->cpu != NULL) {
        stats_thread->on_cpu = true;
        stats_thread->cpu = thread->cpu->id;
    } else
        stats_thread->on_cpu = false;
}

/** Get thread statistics
 *
 * @param item    Sysinfo item (unused).
 * @param size    Size of the returned data.
 * @param dry_run Do not get the data, just calculate the size.
 * @param data    Unused.
 *
 * @return Data containing several stats_thread_t structures.
 *         If the return value is not NULL, it should be freed
 *         in the context of the sysinfo request.
 */
static void *get_stats_threads(struct sysinfo_item *item, size_t *size,
    bool dry_run, void *data)
{
    /* Messing with thread structures, avoid deadlock */
    irq_spinlock_lock(&threads_lock, true);

    /* Count the threads */
    size_t count = thread_count();

    if (count == 0) {
        /* No threads found (strange) */
        irq_spinlock_unlock(&threads_lock, true);
        *size = 0;
        return NULL;
    }

    *size = sizeof(stats_thread_t) * count;
    if (dry_run) {
        irq_spinlock_unlock(&threads_lock, true);
        return NULL;
    }

    stats_thread_t *stats_threads = (stats_thread_t *) malloc(*size);
    if (stats_threads == NULL) {
        /* No free space for allocation */
        irq_spinlock_unlock(&threads_lock, true);
        *size = 0;
        return NULL;
    }

    /* Walk the threads again to gather the statistics */
    size_t i = 0;

    thread_t *thread = thread_first();
    while (thread != NULL) {
        /* Interrupts are already disabled */
        irq_spinlock_lock(&thread->lock, false);

        /* Record the statistics and increment the index */
        produce_stats_thread(thread, &stats_threads[i]);
        i++;

        irq_spinlock_unlock(&thread->lock, false);

        thread = thread_next(thread);
    }

    irq_spinlock_unlock(&threads_lock, true);

    return ((void *) stats_threads);
}

/** Get statistics of a single task
 *
 * Get statistics of a given task. The task ID is passed
 * as a string (current limitation of the sysinfo interface,
 * but it is still reasonable for the given purpose).
 *
 * @param name    Task ID (string-encoded number).
 * @param dry_run Do not get the data, just calculate the size.
 * @param data    Unused.
 *
 * @return Sysinfo return holder. The type of the returned
 *         data is either SYSINFO_VAL_UNDEFINED (unknown
 *         task ID or memory allocation error) or
 *         SYSINFO_VAL_FUNCTION_DATA (in that case the
 *         generated data should be freed within the
 *         sysinfo request context).
 *
 */
static sysinfo_return_t get_stats_task(const char *name, bool dry_run,
    void *data)
{
    /* Initially no return value */
    sysinfo_return_t ret;
    ret.tag = SYSINFO_VAL_UNDEFINED;

    /* Parse the task ID */
    task_id_t task_id;
    if (str_uint64_t(name, NULL, 0, true, &task_id) != EOK)
        return ret;

    /* Messing with task structures, avoid deadlock */
    irq_spinlock_lock(&tasks_lock, true);

    task_t *task = task_find_by_id(task_id);
    if (task == NULL) {
        /* No task with this ID */
        irq_spinlock_unlock(&tasks_lock, true);
        return ret;
    }

    if (dry_run) {
        ret.tag = SYSINFO_VAL_FUNCTION_DATA;
        ret.data.data = NULL;
        ret.data.size = sizeof(stats_task_t);

        irq_spinlock_unlock(&tasks_lock, true);
    } else {
        /* Allocate stats_task_t structure */
        stats_task_t *stats_task =
            (stats_task_t *) malloc(sizeof(stats_task_t));
        if (stats_task == NULL) {
            irq_spinlock_unlock(&tasks_lock, true);
            return ret;
        }

        /* Correct return value */
        ret.tag = SYSINFO_VAL_FUNCTION_DATA;
        ret.data.data = (void *) stats_task;
        ret.data.size = sizeof(stats_task_t);

        /* Hand-over-hand locking */
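        /*
         * (The exchange below is expected to take task->lock before
         * releasing tasks_lock, so the task cannot be destroyed in the
         * window between the two locks.)
         */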
        irq_spinlock_exchange(&tasks_lock, &task->lock);

        produce_stats_task(task, stats_task);

        irq_spinlock_unlock(&task->lock, true);
    }

    return ret;
}

/** Get statistics of a single thread
 *
 * Get statistics of a given thread. The thread ID is passed
 * as a string (current limitation of the sysinfo interface,
 * but it is still reasonable for the given purpose).
 *
 * @param name    Thread ID (string-encoded number).
 * @param dry_run Do not get the data, just calculate the size.
 * @param data    Unused.
 *
 * @return Sysinfo return holder. The type of the returned
 *         data is either SYSINFO_VAL_UNDEFINED (unknown
 *         thread ID or memory allocation error) or
 *         SYSINFO_VAL_FUNCTION_DATA (in that case the
 *         generated data should be freed within the
 *         sysinfo request context).
 *
 */
static sysinfo_return_t get_stats_thread(const char *name, bool dry_run,
    void *data)
{
    /* Initially no return value */
    sysinfo_return_t ret;
    ret.tag = SYSINFO_VAL_UNDEFINED;

    /* Parse the thread ID */
    thread_id_t thread_id;
    if (str_uint64_t(name, NULL, 0, true, &thread_id) != EOK)
        return ret;

    /* Messing with thread structures, avoid deadlock */
    irq_spinlock_lock(&threads_lock, true);

    thread_t *thread = thread_find_by_id(thread_id);
    if (thread == NULL) {
        /* No thread with this ID */
        irq_spinlock_unlock(&threads_lock, true);
        return ret;
    }

    if (dry_run) {
        ret.tag = SYSINFO_VAL_FUNCTION_DATA;
        ret.data.data = NULL;
        ret.data.size = sizeof(stats_thread_t);

        irq_spinlock_unlock(&threads_lock, true);
    } else {
        /* Allocate stats_thread_t structure */
        stats_thread_t *stats_thread =
            (stats_thread_t *) malloc(sizeof(stats_thread_t));
        if (stats_thread == NULL) {
            irq_spinlock_unlock(&threads_lock, true);
            return ret;
        }

        /* Correct return value */
        ret.tag = SYSINFO_VAL_FUNCTION_DATA;
        ret.data.data = (void *) stats_thread;
        ret.data.size = sizeof(stats_thread_t);

        /* Hand-over-hand locking */
        irq_spinlock_exchange(&threads_lock, &thread->lock);

        produce_stats_thread(thread, stats_thread);

        irq_spinlock_unlock(&thread->lock, true);
    }

    return ret;
}

/** Get exceptions statistics
 *
 * @param item    Sysinfo item (unused).
 * @param size    Size of the returned data.
 * @param dry_run Do not get the data, just calculate the size.
 * @param data    Unused.
 *
 * @return Data containing several stats_exc_t structures.
 *         If the return value is not NULL, it should be freed
 *         in the context of the sysinfo request.
 */
static void *get_stats_exceptions(struct sysinfo_item *item, size_t *size,
    bool dry_run, void *data)
{
    *size = sizeof(stats_exc_t) * IVT_ITEMS;

    if ((dry_run) || (IVT_ITEMS == 0))
        return NULL;

    stats_exc_t *stats_exceptions =
        (stats_exc_t *) malloc(*size);
    if (stats_exceptions == NULL) {
        /* No free space for allocation */
        *size = 0;
        return NULL;
    }

#if (IVT_ITEMS > 0)
    /* Messing with exception table, avoid deadlock */
    irq_spinlock_lock(&exctbl_lock, true);

    unsigned int i;
    for (i = 0; i < IVT_ITEMS; i++) {
        stats_exceptions[i].id = i + IVT_FIRST;
        str_cpy(stats_exceptions[i].desc, EXC_NAME_BUFLEN, exc_table[i].name);
        stats_exceptions[i].hot = exc_table[i].hot;
        stats_exceptions[i].cycles = exc_table[i].cycles;
        stats_exceptions[i].count = exc_table[i].count;
    }

    irq_spinlock_unlock(&exctbl_lock, true);
#endif

    return ((void *) stats_exceptions);
}

/** Get statistics of a single exception
 *
 * Get statistics of a given exception. The exception number
 * is passed as a string (current limitation of the sysinfo
 * interface, but it is still reasonable for the given purpose).
 *
 * @param name    Exception number (string-encoded number).
 * @param dry_run Do not get the data, just calculate the size.
 * @param data    Unused.
 *
 * @return Sysinfo return holder. The type of the returned
 *         data is either SYSINFO_VAL_UNDEFINED (unknown
 *         exception number or memory allocation error) or
 *         SYSINFO_VAL_FUNCTION_DATA (in that case the
 *         generated data should be freed within the
 *         sysinfo request context).
 *
 */
static sysinfo_return_t get_stats_exception(const char *name, bool dry_run,
    void *data)
{
    /* Initially no return value */
    sysinfo_return_t ret;
    ret.tag = SYSINFO_VAL_UNDEFINED;

    /* Parse the exception number */
    uint64_t excn;
    if (str_uint64_t(name, NULL, 0, true, &excn) != EOK)
        return ret;

#if (IVT_FIRST > 0)
    if (excn < IVT_FIRST)
        return ret;
#endif

#if (IVT_ITEMS + IVT_FIRST == 0)
    return ret;
#else
    if (excn >= IVT_ITEMS + IVT_FIRST)
        return ret;
#endif

    if (dry_run) {
        ret.tag = SYSINFO_VAL_FUNCTION_DATA;
        ret.data.data = NULL;
        ret.data.size = sizeof(stats_exc_t);
    } else {
        /* Update excn index for accessing exc_table */
        excn -= IVT_FIRST;

        /* Allocate stats_exc_t structure */
        stats_exc_t *stats_exception =
            (stats_exc_t *) malloc(sizeof(stats_exc_t));
        if (stats_exception == NULL)
            return ret;

        /* Messing with exception table, avoid deadlock */
        irq_spinlock_lock(&exctbl_lock, true);

        /* Correct return value */
        ret.tag = SYSINFO_VAL_FUNCTION_DATA;
        ret.data.data = (void *) stats_exception;
        ret.data.size = sizeof(stats_exc_t);

        stats_exception->id = excn;
        str_cpy(stats_exception->desc, EXC_NAME_BUFLEN, exc_table[excn].name);
        stats_exception->hot = exc_table[excn].hot;
        stats_exception->cycles = exc_table[excn].cycles;
        stats_exception->count = exc_table[excn].count;

        irq_spinlock_unlock(&exctbl_lock, true);
    }

    return ret;
}

/** Get physical memory statistics
 *
 * @param item    Sysinfo item (unused).
 * @param size    Size of the returned data.
 * @param dry_run Do not get the data, just calculate the size.
 * @param data    Unused.
 *
 * @return Data containing stats_physmem_t.
 *         If the return value is not NULL, it should be freed
 *         in the context of the sysinfo request.
 */
static void *get_stats_physmem(struct sysinfo_item *item, size_t *size,
    bool dry_run, void *data)
{
    *size = sizeof(stats_physmem_t);
    if (dry_run)
        return NULL;

    stats_physmem_t *stats_physmem =
        (stats_physmem_t *) malloc(*size);
    if (stats_physmem == NULL) {
        *size = 0;
        return NULL;
    }

    zones_stats(&(stats_physmem->total), &(stats_physmem->unavail),
        &(stats_physmem->used), &(stats_physmem->free));

    return ((void *) stats_physmem);
}

/** Get system load
 *
 * @param item    Sysinfo item (unused).
 * @param size    Size of the returned data.
 * @param dry_run Do not get the data, just calculate the size.
 * @param data    Unused.
 *
 * @return Data containing several load_t values.
 *         If the return value is not NULL, it should be freed
 *         in the context of the sysinfo request.
 */
static void *get_stats_load(struct sysinfo_item *item, size_t *size,
    bool dry_run, void *data)
{
    *size = sizeof(load_t) * LOAD_STEPS;
    if (dry_run)
        return NULL;

    load_t *stats_load = (load_t *) malloc(*size);
    if (stats_load == NULL) {
        *size = 0;
        return NULL;
    }

    /* To always get consistent values acquire the mutex */
    mutex_lock(&load_lock);

    unsigned int i;
    for (i = 0; i < LOAD_STEPS; i++)
        stats_load[i] = avenrdy[i] << LOAD_KERNEL_SHIFT;

    mutex_unlock(&load_lock);

    return ((void *) stats_load);
}

/** Calculate load
 *
 */
static inline load_t load_calc(load_t load, load_t exp, size_t ready)
{
    load *= exp;
    load += (ready << LOAD_FIXED_SHIFT) * (LOAD_FIXED_1 - exp);

    return (load >> LOAD_FIXED_SHIFT);
}
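
/*
 * load_calc() is an exponentially weighted moving average in fixed point:
 *
 *   load' = (load * exp + ready * LOAD_FIXED_1 * (LOAD_FIXED_1 - exp))
 *           >> LOAD_FIXED_SHIFT
 *
 * i.e. load' = load * e + ready * (1 - e) scaled by LOAD_FIXED_1, where
 * e = exp / LOAD_FIXED_1. For example, starting from load = 0 with one
 * ready thread and exp = 1884 (the 1-minute constant), a single step gives
 * (0 + 1 * 2048 * 164) >> 11 = 164, i.e. 164 / 2048 ~= 0.08, and repeated
 * steps converge towards 1 * LOAD_FIXED_1 = 2048.
 */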

/** Load computation thread.
 *
 * Compute system load every few seconds.
 *
 * @param arg Unused.
 *
 */
void kload(void *arg)
{
    thread_detach(THREAD);

    while (true) {
        size_t ready = atomic_load(&nrdy);

        /* Mutually exclude with get_stats_load() */
        mutex_lock(&load_lock);

        unsigned int i;
        for (i = 0; i < LOAD_STEPS; i++)
            avenrdy[i] = load_calc(avenrdy[i], load_exp[i], ready);

        mutex_unlock(&load_lock);

        thread_sleep(LOAD_INTERVAL);
    }
}

/** Register sysinfo statistical items
 *
 */
void stats_init(void)
{
    mutex_initialize(&load_lock, MUTEX_PASSIVE);

    sysinfo_set_item_gen_data("system.cpus", NULL, get_stats_cpus, NULL);
    sysinfo_set_item_gen_data("system.physmem", NULL, get_stats_physmem, NULL);
    sysinfo_set_item_gen_data("system.load", NULL, get_stats_load, NULL);
    sysinfo_set_item_gen_data("system.tasks", NULL, get_stats_tasks, NULL);
    sysinfo_set_item_gen_data("system.threads", NULL, get_stats_threads, NULL);
    sysinfo_set_item_gen_data("system.exceptions", NULL, get_stats_exceptions, NULL);
    sysinfo_set_subtree_fn("system.tasks", NULL, get_stats_task, NULL);
    sysinfo_set_subtree_fn("system.threads", NULL, get_stats_thread, NULL);
    sysinfo_set_subtree_fn("system.exceptions", NULL, get_stats_exception, NULL);
}
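
/*
 * User space reads the items registered above through the sysinfo
 * interface. Assuming libc's sysinfo_get_data() helper (an assumption,
 * not part of this file), fetching the load averages would look roughly
 * like:
 *
 *   size_t size;
 *   load_t *load = (load_t *) sysinfo_get_data("system.load", &size);
 *   // on success, size == LOAD_STEPS * sizeof(load_t)
 */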

/** @}
 */