Changeset dec16a2 in mainline for kernel/generic/src/sysinfo/stats.c


Timestamp: 2010-04-18T16:52:47Z (14 years ago)
Author: Martin Decky <martin@…>
Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children: 5c45ca8
Parents: e535eeb
Message:
  • sysinfo items "system.tasks" and "system.threads" now return complete statistics of all tasks and threads (statistics of individual tasks and threads can still be acquired from "system.tasks.#" and "system.threads.#")
  • update the user space functions accordingly (see the usage sketch below)
  • clean up top; it is fully functional again
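
A minimal user-space sketch of consuming the new complete statistics. It assumes a hypothetical libc wrapper stats_get_tasks() that reads the "system.tasks" sysinfo item and returns an array of stats_task_t records together with their count; the wrapper name, header and signature are illustrative assumptions, not the exact interface touched by this changeset. The fields printed (task_id, name) are the ones filled in by produce_stats_task() in the diff below.

    /* Hypothetical usage sketch: stats_get_tasks() is an assumed wrapper
       around the "system.tasks" sysinfo item; the real user space
       interface updated by this changeset may differ. */
    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <stats.h>   /* assumed header providing stats_task_t */

    int main(void)
    {
        size_t count;
        stats_task_t *tasks = stats_get_tasks(&count);  /* assumed wrapper */
        if (tasks == NULL)
            return 1;

        /* Each record carries the statistics gathered by the kernel */
        for (size_t i = 0; i < count; i++)
            printf("%" PRIu64 " %s\n",
                (uint64_t) tasks[i].task_id, tasks[i].name);

        free(tasks);
        return 0;
    }
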
File:
1 edited

  • kernel/generic/src/sysinfo/stats.c

    re535eeb rdec16a2  
    146146}
    147147
    148 /** Gather tasks
    149  *
    150  * AVL task tree walker for gathering task IDs. Interrupts should
    151  * be already disabled while walking the tree.
    152  *
    153  * @param node AVL task tree node.
    154  * @param arg  Pointer to the iterator into the array of task IDs.
    155  *
    156  * @return Always true (continue the walk).
    157  *
    158  */
    159 static bool task_serialize_walker(avltree_node_t *node, void *arg)
    160 {
    161         task_id_t **ids = (task_id_t **) arg;
    162         task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);
    163        
    164         /* Interrupts are already disabled */
    165         spinlock_lock(&(task->lock));
    166        
    167         /* Record the ID and increment the iterator */
    168         **ids = task->taskid;
    169         (*ids)++;
    170        
    171         spinlock_unlock(&(task->lock));
    172        
    173         return true;
    174 }
    175 
    176 /** Get task IDs
    177  *
    178  * @param item    Sysinfo item (unused).
    179  * @param size    Size of the returned data.
    180  * @param dry_run Do not get the data, just calculate the size.
    181  *
    182  * @return Data containing task IDs of all tasks.
    183  *         If the return value is not NULL, it should be freed
    184  *         in the context of the sysinfo request.
    185  */
    186 static void *get_stats_tasks(struct sysinfo_item *item, size_t *size,
    187     bool dry_run)
    188 {
    189         /* Messing with task structures, avoid deadlock */
    190         ipl_t ipl = interrupts_disable();
    191         spinlock_lock(&tasks_lock);
    192        
    193         /* First walk the task tree to count the tasks */
    194         size_t count = 0;
    195         avltree_walk(&tasks_tree, avl_count_walker, (void *) &count);
    196        
    197         if (count == 0) {
    198                 /* No tasks found (strange) */
    199                 spinlock_unlock(&tasks_lock);
    200                 interrupts_restore(ipl);
    201                
    202                 *size = 0;
    203                 return NULL;
    204         }
    205        
    206         *size = sizeof(task_id_t) * count;
    207         if (dry_run) {
    208                 spinlock_unlock(&tasks_lock);
    209                 interrupts_restore(ipl);
    210                 return NULL;
    211         }
    212        
    213         task_id_t *task_ids = (task_id_t *) malloc(*size, FRAME_ATOMIC);
    214         if (task_ids == NULL) {
    215                 /* No free space for allocation */
    216                 spinlock_unlock(&tasks_lock);
    217                 interrupts_restore(ipl);
    218                
    219                 *size = 0;
    220                 return NULL;
    221         }
    222        
    223         /* Walk the task tree again to gather the IDs */
    224         task_id_t *iterator = task_ids;
    225         avltree_walk(&tasks_tree, task_serialize_walker, (void *) &iterator);
    226        
    227         spinlock_unlock(&tasks_lock);
    228         interrupts_restore(ipl);
    229        
    230         return ((void *) task_ids);
    231 }
    232 
    233 /** Gather threads
    234  *
    235  * AVL thread tree walker for gathering thread IDs. Interrupts should
    236  * be already disabled while walking the tree.
    237  *
    238  * @param node AVL thread tree node.
    239  * @param arg  Pointer to the iterator into the array of thread IDs.
    240  *
    241  * @return Always true (continue the walk).
    242  *
    243  */
    244 static bool thread_serialize_walker(avltree_node_t *node, void *arg)
    245 {
    246         thread_id_t **ids = (thread_id_t **) arg;
    247         thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);
    248        
    249         /* Interrupts are already disabled */
    250         spinlock_lock(&(thread->lock));
    251        
    252         /* Record the ID and increment the iterator */
    253         **ids = thread->tid;
    254         (*ids)++;
    255        
    256         spinlock_unlock(&(thread->lock));
    257        
    258         return true;
    259 }
    260 
    261 /** Get thread IDs
    262  *
    263  * @param item    Sysinfo item (unused).
    264  * @param size    Size of the returned data.
    265  * @param dry_run Do not get the data, just calculate the size.
    266  *
    267  * @return Data containing thread IDs of all threads.
    268  *         If the return value is not NULL, it should be freed
    269  *         in the context of the sysinfo request.
    270  */
    271 static void *get_stats_threads(struct sysinfo_item *item, size_t *size,
    272     bool dry_run)
    273 {
    274         /* Messing with thread structures, avoid deadlock */
    275         ipl_t ipl = interrupts_disable();
    276         spinlock_lock(&threads_lock);
    277        
    278         /* First walk the thread tree to count the threads */
    279         size_t count = 0;
    280         avltree_walk(&threads_tree, avl_count_walker, (void *) &count);
    281        
    282         if (count == 0) {
    283                 /* No threads found (strange) */
    284                 spinlock_unlock(&threads_lock);
    285                 interrupts_restore(ipl);
    286                
    287                 *size = 0;
    288                 return NULL;
    289         }
    290        
    291         *size = sizeof(thread_id_t) * count;
    292         if (dry_run) {
    293                 spinlock_unlock(&threads_lock);
    294                 interrupts_restore(ipl);
    295                 return NULL;
    296         }
    297        
    298         thread_id_t *thread_ids = (thread_id_t *) malloc(*size, FRAME_ATOMIC);
    299         if (thread_ids == NULL) {
    300                 /* No free space for allocation */
    301                 spinlock_unlock(&threads_lock);
    302                 interrupts_restore(ipl);
    303                
    304                 *size = 0;
    305                 return NULL;
    306         }
    307        
    308         /* Walk the thread tree again to gather the IDs */
    309         thread_id_t *iterator = thread_ids;
    310         avltree_walk(&threads_tree, thread_serialize_walker, (void *) &iterator);
    311        
    312         spinlock_unlock(&threads_lock);
    313         interrupts_restore(ipl);
    314        
    315         return ((void *) thread_ids);
    316 }
    317 
    318148/** Get the size of a virtual address space
    319149 *
     
    351181}
    352182
     183/** Produce task statistics
     184 *
     185 * Summarize task information into task statistics.
     186 * Task lock should be held and interrupts disabled
     187 * before executing this function.
     188 *
     189 * @param task       Task.
     190 * @param stats_task Task statistics.
     191 *
     192 */
     193static void produce_stats_task(task_t *task, stats_task_t *stats_task)
     194{
     195        stats_task->task_id = task->taskid;
     196        str_cpy(stats_task->name, TASK_NAME_BUFLEN, task->name);
     197        stats_task->virtmem = get_task_virtmem(task->as);
     198        stats_task->threads = atomic_get(&task->refcount);
     199        task_get_accounting(task, &(stats_task->ucycles),
     200            &(stats_task->kcycles));
     201        stats_task->ipc_info = task->ipc_info;
     202}
     203
     204/** Gather statistics of all tasks
     205 *
     206 * AVL task tree walker for gathering task statistics. Interrupts should
     207 * be already disabled while walking the tree.
     208 *
     209 * @param node AVL task tree node.
     210 * @param arg  Pointer to the iterator into the array of stats_task_t.
     211 *
      212 * @return Always true (continue the walk).
     213 *
     214 */
     215static bool task_serialize_walker(avltree_node_t *node, void *arg)
     216{
     217        stats_task_t **iterator = (stats_task_t **) arg;
     218        task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);
     219       
     220        /* Interrupts are already disabled */
     221        spinlock_lock(&(task->lock));
     222       
     223        /* Record the statistics and increment the iterator */
     224        produce_stats_task(task, *iterator);
     225        (*iterator)++;
     226       
     227        spinlock_unlock(&(task->lock));
     228       
     229        return true;
     230}
     231
    353232/** Get task statistics
     233 *
     234 * @param item    Sysinfo item (unused).
     235 * @param size    Size of the returned data.
     236 * @param dry_run Do not get the data, just calculate the size.
     237 *
     238 * @return Data containing several stats_task_t structures.
     239 *         If the return value is not NULL, it should be freed
     240 *         in the context of the sysinfo request.
     241 */
     242static void *get_stats_tasks(struct sysinfo_item *item, size_t *size,
     243    bool dry_run)
     244{
     245        /* Messing with task structures, avoid deadlock */
     246        ipl_t ipl = interrupts_disable();
     247        spinlock_lock(&tasks_lock);
     248       
     249        /* First walk the task tree to count the tasks */
     250        size_t count = 0;
     251        avltree_walk(&tasks_tree, avl_count_walker, (void *) &count);
     252       
     253        if (count == 0) {
     254                /* No tasks found (strange) */
     255                spinlock_unlock(&tasks_lock);
     256                interrupts_restore(ipl);
     257               
     258                *size = 0;
     259                return NULL;
     260        }
     261       
     262        *size = sizeof(stats_task_t) * count;
     263        if (dry_run) {
     264                spinlock_unlock(&tasks_lock);
     265                interrupts_restore(ipl);
     266                return NULL;
     267        }
     268       
     269        stats_task_t *stats_tasks = (stats_task_t *) malloc(*size, FRAME_ATOMIC);
     270        if (stats_tasks == NULL) {
     271                /* No free space for allocation */
     272                spinlock_unlock(&tasks_lock);
     273                interrupts_restore(ipl);
     274               
     275                *size = 0;
     276                return NULL;
     277        }
     278       
      279        /* Walk the task tree again to gather the statistics */
     280        stats_task_t *iterator = stats_tasks;
     281        avltree_walk(&tasks_tree, task_serialize_walker, (void *) &iterator);
     282       
     283        spinlock_unlock(&tasks_lock);
     284        interrupts_restore(ipl);
     285       
     286        return ((void *) stats_tasks);
     287}
     288
      289/** Produce thread statistics
     290 *
     291 * Summarize thread information into thread statistics.
     292 * Thread lock should be held and interrupts disabled
     293 * before executing this function.
     294 *
     295 * @param thread       Thread.
     296 * @param stats_thread Thread statistics.
     297 *
     298 */
     299static void produce_stats_thread(thread_t *thread, stats_thread_t *stats_thread)
     300{
     301        stats_thread->thread_id = thread->tid;
     302        stats_thread->task_id = thread->task->taskid;
     303        stats_thread->state = thread->state;
     304        stats_thread->priority = thread->priority;
     305        stats_thread->ucycles = thread->ucycles;
     306        stats_thread->kcycles = thread->kcycles;
     307       
     308        if (thread->cpu != NULL) {
     309                stats_thread->on_cpu = true;
     310                stats_thread->cpu = thread->cpu->id;
     311        } else
     312                stats_thread->on_cpu = false;
     313}
     314
     315/** Gather statistics of all threads
     316 *
      317 * AVL thread tree walker for gathering thread statistics. Interrupts should
     318 * be already disabled while walking the tree.
     319 *
     320 * @param node AVL thread tree node.
     321 * @param arg  Pointer to the iterator into the array of thread statistics.
     322 *
      323 * @return Always true (continue the walk).
     324 *
     325 */
     326static bool thread_serialize_walker(avltree_node_t *node, void *arg)
     327{
     328        stats_thread_t **iterator = (stats_thread_t **) arg;
     329        thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);
     330       
     331        /* Interrupts are already disabled */
     332        spinlock_lock(&(thread->lock));
     333       
     334        /* Record the statistics and increment the iterator */
     335        produce_stats_thread(thread, *iterator);
     336        (*iterator)++;
     337       
     338        spinlock_unlock(&(thread->lock));
     339       
     340        return true;
     341}
     342
     343/** Get thread statistics
     344 *
     345 * @param item    Sysinfo item (unused).
     346 * @param size    Size of the returned data.
     347 * @param dry_run Do not get the data, just calculate the size.
     348 *
      349 * @return Data containing several stats_thread_t structures.
     350 *         If the return value is not NULL, it should be freed
     351 *         in the context of the sysinfo request.
     352 */
     353static void *get_stats_threads(struct sysinfo_item *item, size_t *size,
     354    bool dry_run)
     355{
     356        /* Messing with threads structures, avoid deadlock */
     357        ipl_t ipl = interrupts_disable();
     358        spinlock_lock(&threads_lock);
     359       
     360        /* First walk the thread tree to count the threads */
     361        size_t count = 0;
     362        avltree_walk(&threads_tree, avl_count_walker, (void *) &count);
     363       
     364        if (count == 0) {
     365                /* No threads found (strange) */
     366                spinlock_unlock(&threads_lock);
     367                interrupts_restore(ipl);
     368               
     369                *size = 0;
     370                return NULL;
     371        }
     372       
     373        *size = sizeof(stats_thread_t) * count;
     374        if (dry_run) {
     375                spinlock_unlock(&threads_lock);
     376                interrupts_restore(ipl);
     377                return NULL;
     378        }
     379       
     380        stats_thread_t *stats_threads = (stats_thread_t *) malloc(*size, FRAME_ATOMIC);
     381        if (stats_threads == NULL) {
     382                /* No free space for allocation */
     383                spinlock_unlock(&threads_lock);
     384                interrupts_restore(ipl);
     385               
     386                *size = 0;
     387                return NULL;
     388        }
     389       
      390        /* Walk the thread tree again to gather the statistics */
     391        stats_thread_t *iterator = stats_threads;
     392        avltree_walk(&threads_tree, thread_serialize_walker, (void *) &iterator);
     393       
     394        spinlock_unlock(&threads_lock);
     395        interrupts_restore(ipl);
     396       
     397        return ((void *) stats_threads);
     398}
     399
     400/** Get a single task statistics
    354401 *
    355402 * Get statistics of a given task. The task ID is passed
     
    416463                spinlock_unlock(&tasks_lock);
    417464               
    418                 /* Copy task's statistics */
    419                 str_cpy(stats_task->name, TASK_NAME_BUFLEN, task->name);
    420                 stats_task->virtmem = get_task_virtmem(task->as);
    421                 stats_task->threads = atomic_get(&task->refcount);
    422                 task_get_accounting(task, &(stats_task->ucycles),
    423                     &(stats_task->kcycles));
    424                 stats_task->ipc_info = task->ipc_info;
     465                produce_stats_task(task, stats_task);
    425466               
    426467                spinlock_unlock(&task->lock);
     
    492533                ret.data.data = (void *) stats_thread;
    493534                ret.data.size = sizeof(stats_thread_t);
    494        
     535               
    495536                /* Hand-over-hand locking */
    496537                spinlock_lock(&thread->lock);
    497538                spinlock_unlock(&threads_lock);
    498539               
    499                 /* Copy thread's statistics */
    500                 stats_thread->task_id = thread->task->taskid;
    501                 stats_thread->state = thread->state;
    502                 stats_thread->priority = thread->priority;
    503                 stats_thread->ucycles = thread->ucycles;
    504                 stats_thread->kcycles = thread->kcycles;
    505                
    506                 if (thread->cpu != NULL) {
    507                         stats_thread->on_cpu = true;
    508                         stats_thread->cpu = thread->cpu->id;
    509                 } else
    510                         stats_thread->on_cpu = false;
     540                produce_stats_thread(thread, stats_thread);
    511541               
    512542                spinlock_unlock(&thread->lock);
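
The new getters above all follow the same two-pass shape: with interrupts disabled and the tree lock held, count the nodes, report or allocate the output buffer, then walk the tree again with a serializing walker that fills it. A condensed sketch of that pattern follows; serialize_tree() and its inlined walker parameter type are illustrative only and not part of this changeset, while avltree_walk(), avl_count_walker() and FRAME_ATOMIC mirror the kernel code shown above.

    /* Illustrative sketch of the two-pass gather pattern used by
       get_stats_tasks() and get_stats_threads(); serialize_tree() is a
       hypothetical helper, not a function introduced by this changeset. */
    static void *serialize_tree(avltree_t *tree, spinlock_t *lock,
        size_t item_size, bool (*walker)(avltree_node_t *, void *),
        size_t *size, bool dry_run)
    {
        /* Messing with the tree structures, avoid deadlock */
        ipl_t ipl = interrupts_disable();
        spinlock_lock(lock);

        /* Pass 1: count the nodes */
        size_t count = 0;
        avltree_walk(tree, avl_count_walker, (void *) &count);

        void *buffer = NULL;
        *size = item_size * count;

        if ((count > 0) && (!dry_run)) {
            /* FRAME_ATOMIC: do not block while the lock is held */
            buffer = malloc(*size, FRAME_ATOMIC);
            if (buffer != NULL) {
                /* Pass 2: the walker treats the iterator as a pointer
                   to its element type and advances it for every node */
                void *iterator = buffer;
                avltree_walk(tree, walker, (void *) &iterator);
            } else
                *size = 0;
        } else if (count == 0)
            *size = 0;

        spinlock_unlock(lock);
        interrupts_restore(ipl);

        return buffer;
    }

The changeset keeps the task and thread variants spelled out separately instead of factoring them like this, which keeps each walker strongly typed at the cost of some duplication.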