Changeset 599d6f5 in mainline
- Timestamp:
- 2008-06-22T14:35:21Z (17 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 69eac4aa
- Parents:
- 1a48bcd
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/mm/slab.c
r1a48bcd r599d6f5 795 795 void slab_print_list(void) 796 796 { 797 slab_cache_t *cache; 798 link_t *cur; 799 ipl_t ipl; 800 801 ipl = interrupts_disable(); 802 spinlock_lock(&slab_cache_lock); 797 int skip = 0; 798 803 799 printf("slab name size pages obj/pg slabs cached allocated" 804 800 " ctl\n"); 805 801 printf("---------------- -------- ------ ------ ------ ------ ---------" 806 802 " ---\n"); 807 808 for (cur = slab_cache_list.next; cur != &slab_cache_list; 809 cur = cur->next) { 803 804 while (true) { 805 slab_cache_t *cache; 806 link_t *cur; 807 ipl_t ipl; 808 int i; 809 810 /* 811 * We must not hold the slab_cache_lock spinlock when printing 812 * the statistics. Otherwise we can easily deadlock if the print 813 * needs to allocate memory. 814 * 815 * Therefore, we walk through the slab cache list, skipping some 816 * amount of already processed caches during each iteration and 817 * gathering statistics about the first unprocessed cache. For 818 * the sake of printing the statistics, we release the 819 * slab_cache_lock and reacquire it afterwards. Then the walk 820 * starts again. 821 * 822 * This limits both the efficiency and also accuracy of the 823 * obtained statistics. The efficiency is decreased because the 824 * time complexity of the algorithm is quadratic instead of 825 * linear. The accuracy is impacted because we drop the lock 826 * after processing one cache. If there is someone else 827 * manipulating the cache list, we might omit an arbitrary 828 * number of caches or process one cache multiple times. 829 * However, we don't bleed for this algorithm for it is only 830 * statistics. 
831 */ 832 833 ipl = interrupts_disable(); 834 spinlock_lock(&slab_cache_lock); 835 836 for (i = 0, cur = slab_cache_list.next; 837 i < skip && cur != &slab_cache_list; 838 i++, cur = cur->next) 839 ; 840 841 if (cur == &slab_cache_list) { 842 spinlock_unlock(&slab_cache_lock); 843 interrupts_restore(ipl); 844 break; 845 } 846 847 skip++; 848 810 849 cache = list_get_instance(cur, slab_cache_t, link); 850 851 char *name = cache->name; 852 uint8_t order = cache->order; 853 size_t size = cache->size; 854 unsigned int objects = cache->objects; 855 long allocated_slabs = atomic_get(&cache->allocated_slabs); 856 long cached_objs = atomic_get(&cache->cached_objs); 857 long allocated_objs = atomic_get(&cache->allocated_objs); 858 int flags = cache->flags; 859 860 spinlock_unlock(&slab_cache_lock); 861 interrupts_restore(ipl); 811 862 812 863 printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n", 813 cache->name, cache->size, (1 << cache->order), 814 cache->objects, atomic_get(&cache->allocated_slabs), 815 atomic_get(&cache->cached_objs), 816 atomic_get(&cache->allocated_objs), 817 cache->flags & SLAB_CACHE_SLINSIDE ? "in" : "out"); 818 } 819 spinlock_unlock(&slab_cache_lock); 820 interrupts_restore(ipl); 864 name, size, (1 << order), objects, allocated_slabs, 865 cached_objs, allocated_objs, 866 flags & SLAB_CACHE_SLINSIDE ? "in" : "out"); 867 } 821 868 } 822 869
Note:
See TracChangeset
for help on using the changeset viewer.