Changeset 80bfb601 in mainline for kernel/generic/src/sysinfo/stats.c
- Timestamp:
- 2010-04-18T09:57:19Z (15 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- d869398
- Parents:
- fce3536
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/sysinfo/stats.c
rfce3536 r80bfb601 55 55 #define LOAD_INTERVAL 5 56 56 57 /** 58 * Fixed-point representation of: 57 /** Fixed-point representation of 59 58 * 60 59 * 1 / exp(5 sec / 1 min) … … 64 63 */ 65 64 static load_t load_exp[LOAD_STEPS] = {1884, 2014, 2037}; 65 66 /** Running average of the number of ready threads */ 66 67 static load_t avenrdy[LOAD_STEPS] = {0, 0, 0}; 68 69 /** Load calculation spinlock */ 67 70 SPINLOCK_STATIC_INITIALIZE_NAME(load_lock, "load_lock"); 68 71 72 /** Get system uptime 73 * 74 * @param item Sysinfo item (unused). 75 * 76 * @return System uptime (in seconds). 77 * 78 */ 69 79 static unative_t get_stats_uptime(struct sysinfo_item *item) 70 80 { … … 73 83 } 74 84 85 /** Get statistics of all CPUs 86 * 87 * @param item Sysinfo item (unused). 88 * @param size Size of the returned data. 89 * 90 * @return Data containing several stats_cpu_t structures. 91 * If the return value is not NULL, it should be freed 92 * in the context of the sysinfo request. 93 */ 75 94 static void *get_stats_cpus(struct sysinfo_item *item, size_t *size) 76 95 { 96 /* Assumption: config.cpu_count is constant */ 77 97 stats_cpu_t *stats_cpus = 78 98 (stats_cpu_t *) malloc(sizeof(stats_cpu_t) * config.cpu_count, … … 83 103 } 84 104 105 /* Each CPU structure is locked separately */ 85 106 ipl_t ipl = interrupts_disable(); 86 107 … … 103 124 } 104 125 126 /** Count number of tasks 127 * 128 * AVL task tree walker for counting tasks. 129 * 130 * @param node AVL task tree node (unused). 131 * @param arg Pointer to the counter. 132 * 133 * @return Always true (continue the walk). 134 * 135 */ 105 136 static bool task_count_walker(avltree_node_t *node, void *arg) 106 137 { … … 111 142 } 112 143 144 /** Gather tasks 145 * 146 * AVL task tree walker for gathering task IDs. Interrupts should 147 * be already disabled while walking the tree. 148 * 149 * @param node AVL task tree node. 150 * @param arg Pointer to the iterator into the array of task IDs. 
151 * 152 * @return Always true (continue the walk). 153 * 154 */ 113 155 static bool task_serialize_walker(avltree_node_t *node, void *arg) 114 156 { … … 116 158 task_t *task = avltree_get_instance(node, task_t, tasks_tree_node); 117 159 160 /* Interrupts are already disabled */ 118 161 spinlock_lock(&(task->lock)); 119 162 163 /* Record the ID and increment the iterator */ 120 164 **ids = task->taskid; 121 165 (*ids)++; … … 126 170 } 127 171 172 /** Get task IDs 173 * 174 * @param item Sysinfo item (unused). 175 * @param size Size of the returned data. 176 * 177 * @return Data containing task IDs of all tasks. 178 * If the return value is not NULL, it should be freed 179 * in the context of the sysinfo request. 180 */ 128 181 static void *get_stats_tasks(struct sysinfo_item *item, size_t *size) 129 182 { … … 132 185 spinlock_lock(&tasks_lock); 133 186 187 /* First walk the task tree to count the tasks */ 134 188 size_t count = 0; 135 189 avltree_walk(&tasks_tree, task_count_walker, (void *) &count); 136 190 137 191 if (count == 0) { 192 /* No tasks found (strange) */ 138 193 spinlock_unlock(&tasks_lock); 139 194 interrupts_restore(ipl); … … 146 201 (task_id_t *) malloc(sizeof(task_id_t) * count, FRAME_ATOMIC); 147 202 if (task_ids == NULL) { 203 /* No free space for allocation */ 148 204 spinlock_unlock(&tasks_lock); 149 205 interrupts_restore(ipl); … … 153 209 } 154 210 211 /* Walk the task tree again to gather the IDs */ 155 212 task_id_t *iterator = task_ids; 156 213 avltree_walk(&tasks_tree, task_serialize_walker, (void *) &iterator); … … 163 220 } 164 221 222 /** Get the size of a virtual address space 223 * 224 * @param as Address space. 225 * 226 * @return Size of the mapped virtual address space (bytes). 
227 * 228 */ 165 229 static size_t get_task_virtmem(as_t *as) 166 230 { … … 169 233 size_t result = 0; 170 234 235 /* Walk the B+ tree and count pages */ 171 236 link_t *cur; 172 237 for (cur = as->as_area_btree.leaf_head.next; … … 190 255 } 191 256 257 /** Get task statistics 258 * 259 * Get statistics of a given task. The task ID is passed 260 * as a string (current limitation of the sysinfo interface, 261 * but it is still reasonable for the given purpose). 262 * 263 * @param name Task ID (string-encoded number). 264 * 265 * @return Sysinfo return holder. The type of the returned 266 * data is either SYSINFO_VAL_UNDEFINED (unknown 267 * task ID or memory allocation error) or 268 * SYSINFO_VAL_FUNCTION_DATA (in that case the 269 * generated data should be freed within the 270 * sysinfo request context). 271 * 272 */ 192 273 static sysinfo_return_t get_stats_task(const char *name) 193 274 { 194 275 /* Initially no return value */ 195 276 sysinfo_return_t ret; 196 277 ret.tag = SYSINFO_VAL_UNDEFINED; 197 278 279 /* Parse the task ID */ 198 280 task_id_t task_id; 199 281 if (str_uint64(name, NULL, 0, true, &task_id) != EOK) 200 282 return ret; 201 283 284 /* Allocate stats_task_t structure */ 202 285 stats_task_t *stats_task = 203 286 (stats_task_t *) malloc(sizeof(stats_task_t), FRAME_ATOMIC); … … 205 288 return ret; 206 289 290 /* Messing with task structures, avoid deadlock */ 207 291 ipl_t ipl = interrupts_disable(); 208 292 spinlock_lock(&tasks_lock); … … 210 294 task_t *task = task_find_by_id(task_id); 211 295 if (task == NULL) { 296 /* No task with this ID */ 212 297 spinlock_unlock(&tasks_lock); 213 298 interrupts_restore(ipl); … … 216 301 } 217 302 303 /* Hand-over-hand locking */ 218 304 spinlock_lock(&task->lock); 219 305 spinlock_unlock(&tasks_lock); 220 306 307 /* Copy task's statistics */ 221 308 str_cpy(stats_task->name, TASK_NAME_BUFLEN, task->name); 222 309 stats_task->virtmem = get_task_virtmem(task->as); … … 229 316 interrupts_restore(ipl); 230 
317 318 /* Correct return value */ 231 319 ret.tag = SYSINFO_VAL_FUNCTION_DATA; 232 320 ret.data.data = (void *) stats_task; … … 236 324 } 237 325 326 /** Get physical memory statistics 327 * 328 * @param item Sysinfo item (unused). 329 * @param size Size of the returned data. 330 * 331 * @return Data containing stats_physmem_t. 332 * If the return value is not NULL, it should be freed 333 * in the context of the sysinfo request. 334 */ 238 335 static void *get_stats_physmem(struct sysinfo_item *item, size_t *size) 239 336 { … … 252 349 } 253 350 351 /** Get system load 352 * 353 * @param item Sysinfo item (unused). 354 * @param size Size of the returned data. 355 * 356 * @return Data containing several load_t values. 357 * If the return value is not NULL, it should be freed 358 * in the context of the sysinfo request. 359 */ 254 360 static void *get_stats_load(struct sysinfo_item *item, size_t *size) 255 361 { … … 261 367 } 262 368 369 /* To always get consistent values acquire the spinlock */ 263 370 ipl_t ipl = interrupts_disable(); 264 371 spinlock_lock(&load_lock); … … 324 431 325 432 while (true) { 433 /* Mutually exclude with get_stats_load() */ 326 434 ipl_t ipl = interrupts_disable(); 327 435 spinlock_lock(&load_lock); … … 340 448 } 341 449 450 /** Register sysinfo statistical items 451 * 452 */ 342 453 void stats_init(void) 343 454 {
Note:
See TracChangeset
for help on using the changeset viewer.