Changeset b0f00a9 in mainline for kernel/generic/src/mm
- Timestamp:
- 2011-11-06T22:21:05Z (14 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/fix-logger-deadlock, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 898e847
- Parents:
- 2bdf8313 (diff), 7b5f4c9 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Location:
- kernel/generic/src/mm
- Files:
-
- 7 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/mm/as.c
r2bdf8313 rb0f00a9 94 94 * 95 95 * This lock protects: 96 * - inactive_as_with_asid_ headlist96 * - inactive_as_with_asid_list 97 97 * - as->asid for each as of the as_t type 98 98 * - asids_allocated counter … … 105 105 * that have valid ASID. 106 106 */ 107 LIST_INITIALIZE(inactive_as_with_asid_ head);107 LIST_INITIALIZE(inactive_as_with_asid_list); 108 108 109 109 /** Kernel address space. */ … … 235 235 bool cond = true; 236 236 while (cond) { 237 ASSERT(!list_empty(&as->as_area_btree.leaf_ head));237 ASSERT(!list_empty(&as->as_area_btree.leaf_list)); 238 238 239 239 btree_node_t *node = 240 list_get_instance( as->as_area_btree.leaf_head.next,240 list_get_instance(list_first(&as->as_area_btree.leaf_list), 241 241 btree_node_t, leaf_link); 242 242 … … 602 602 bool cond = true; 603 603 while (cond) { 604 ASSERT(!list_empty(&area->used_space.leaf_ head));604 ASSERT(!list_empty(&area->used_space.leaf_list)); 605 605 606 606 btree_node_t *node = 607 list_get_instance( area->used_space.leaf_head.prev,607 list_get_instance(list_last(&area->used_space.leaf_list), 608 608 btree_node_t, leaf_link); 609 609 … … 675 675 676 676 /* 677 * Invalidate software translation caches (e.g. TSB on sparc64). 677 * Invalidate software translation caches 678 * (e.g. TSB on sparc64, PHT on ppc32). 678 679 */ 679 680 as_invalidate_translation_cache(as, area->base + P2SZ(pages), … … 726 727 if (--sh_info->refcount == 0) { 727 728 dealloc = true; 728 link_t *cur;729 729 730 730 /* … … 732 732 * reference from all frames found there. 733 733 */ 734 for (cur = sh_info->pagemap.leaf_head.next; 735 cur != &sh_info->pagemap.leaf_head; cur = cur->next) { 734 list_foreach(sh_info->pagemap.leaf_list, cur) { 736 735 btree_node_t *node 737 736 = list_get_instance(cur, btree_node_t, leaf_link); … … 785 784 * Visit only the pages mapped by used_space B+tree. 
786 785 */ 787 link_t *cur; 788 for (cur = area->used_space.leaf_head.next; 789 cur != &area->used_space.leaf_head; cur = cur->next) { 786 list_foreach(area->used_space.leaf_list, cur) { 790 787 btree_node_t *node; 791 788 btree_key_t i; … … 823 820 824 821 /* 825 * Invalidate potential software translation caches (e.g. TSB on826 * sparc64).822 * Invalidate potential software translation caches 823 * (e.g. TSB on sparc64, PHT on ppc32). 827 824 */ 828 825 as_invalidate_translation_cache(as, area->base, area->pages); … … 1064 1061 */ 1065 1062 size_t used_pages = 0; 1066 link_t *cur; 1067 1068 for (cur = area->used_space.leaf_head.next; 1069 cur != &area->used_space.leaf_head; cur = cur->next) { 1063 1064 list_foreach(area->used_space.leaf_list, cur) { 1070 1065 btree_node_t *node 1071 1066 = list_get_instance(cur, btree_node_t, leaf_link); … … 1093 1088 size_t frame_idx = 0; 1094 1089 1095 for (cur = area->used_space.leaf_head.next; 1096 cur != &area->used_space.leaf_head; cur = cur->next) { 1090 list_foreach(area->used_space.leaf_list, cur) { 1097 1091 btree_node_t *node = list_get_instance(cur, btree_node_t, 1098 1092 leaf_link); … … 1126 1120 1127 1121 /* 1128 * Invalidate potential software translation caches (e.g. TSB on1129 * sparc64).1122 * Invalidate potential software translation caches 1123 * (e.g. TSB on sparc64, PHT on ppc32). 1130 1124 */ 1131 1125 as_invalidate_translation_cache(as, area->base, area->pages); … … 1146 1140 frame_idx = 0; 1147 1141 1148 for (cur = area->used_space.leaf_head.next; 1149 cur != &area->used_space.leaf_head; cur = cur->next) { 1142 list_foreach(area->used_space.leaf_list, cur) { 1150 1143 btree_node_t *node 1151 1144 = list_get_instance(cur, btree_node_t, leaf_link); … … 1291 1284 * thing which is forbidden in this context is locking the address space. 1292 1285 * 1293 * When this function is en etered, no spinlocks may be held.1286 * When this function is entered, no spinlocks may be held. 
1294 1287 * 1295 1288 * @param old Old address space or NULL. … … 1333 1326 1334 1327 list_append(&old_as->inactive_as_with_asid_link, 1335 &inactive_as_with_asid_ head);1328 &inactive_as_with_asid_list); 1336 1329 } 1337 1330 … … 2026 2019 2027 2020 /* Eventually check the addresses behind each area */ 2028 li nk_t *cur;2029 for (cur = AS->as_area_btree.leaf_head.next;2030 (ret == 0) && (cur != &AS->as_area_btree.leaf_head);2031 cur = cur->next) { 2021 list_foreach(AS->as_area_btree.leaf_list, cur) { 2022 if (ret != 0) 2023 break; 2024 2032 2025 btree_node_t *node = 2033 2026 list_get_instance(cur, btree_node_t, leaf_link); … … 2071 2064 2072 2065 size_t area_cnt = 0; 2073 link_t *cur; 2074 2075 for (cur = as->as_area_btree.leaf_head.next; 2076 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 2066 2067 list_foreach(as->as_area_btree.leaf_list, cur) { 2077 2068 btree_node_t *node = 2078 2069 list_get_instance(cur, btree_node_t, leaf_link); … … 2087 2078 size_t area_idx = 0; 2088 2079 2089 for (cur = as->as_area_btree.leaf_head.next; 2090 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 2080 list_foreach(as->as_area_btree.leaf_list, cur) { 2091 2081 btree_node_t *node = 2092 2082 list_get_instance(cur, btree_node_t, leaf_link); … … 2124 2114 2125 2115 /* Print out info about address space areas */ 2126 link_t *cur; 2127 for (cur = as->as_area_btree.leaf_head.next; 2128 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 2116 list_foreach(as->as_area_btree.leaf_list, cur) { 2129 2117 btree_node_t *node 2130 2118 = list_get_instance(cur, btree_node_t, leaf_link); -
kernel/generic/src/mm/backend_anon.c
r2bdf8313 rb0f00a9 97 97 void anon_share(as_area_t *area) 98 98 { 99 link_t *cur;100 101 99 ASSERT(mutex_locked(&area->as->lock)); 102 100 ASSERT(mutex_locked(&area->lock)); … … 106 104 */ 107 105 mutex_lock(&area->sh_info->lock); 108 for (cur = area->used_space.leaf_head.next; 109 cur != &area->used_space.leaf_head; cur = cur->next) { 106 list_foreach(area->used_space.leaf_list, cur) { 110 107 btree_node_t *node; 111 108 unsigned int i; -
kernel/generic/src/mm/backend_elf.c
r2bdf8313 rb0f00a9 139 139 */ 140 140 if (area->flags & AS_AREA_WRITE) { 141 node = list_get_instance( area->used_space.leaf_head.next,141 node = list_get_instance(list_first(&area->used_space.leaf_list), 142 142 btree_node_t, leaf_link); 143 143 } else { … … 153 153 */ 154 154 mutex_lock(&area->sh_info->lock); 155 for (cur = &node->leaf_link; cur != &area->used_space.leaf_ head;155 for (cur = &node->leaf_link; cur != &area->used_space.leaf_list.head; 156 156 cur = cur->next) { 157 157 unsigned int i; -
kernel/generic/src/mm/buddy.c
r2bdf8313 rb0f00a9 82 82 * Use memory after our own structure. 83 83 */ 84 b->order = (li nk_t *) (&b[1]);84 b->order = (list_t *) (&b[1]); 85 85 86 86 for (i = 0; i <= max_order; i++) … … 176 176 * the request can be immediatelly satisfied. 177 177 */ 178 if (!list_empty(&b->order[i])) {179 res = b->order[i].next;178 res = list_first(&b->order[i]); 179 if (res != NULL) { 180 180 list_remove(res); 181 181 b->op->mark_busy(b, res); -
kernel/generic/src/mm/frame.c
r2bdf8313 rb0f00a9 1142 1142 size_t znum = find_zone(pfn, 1, 0); 1143 1143 1144 1145 1144 ASSERT(znum != (size_t) -1); 1146 1145 -
kernel/generic/src/mm/page.c
r2bdf8313 rb0f00a9 60 60 61 61 #include <mm/page.h> 62 #include <genarch/mm/page_ht.h> 63 #include <genarch/mm/page_pt.h> 62 64 #include <arch/mm/page.h> 63 65 #include <arch/mm/asid.h> … … 70 72 #include <debug.h> 71 73 #include <arch.h> 74 #include <syscall/copy.h> 75 #include <errno.h> 72 76 73 77 /** Virtual operations for page subsystem. */ … … 172 176 } 173 177 178 /** Syscall wrapper for getting mapping of a virtual page. 179 * 180 * @retval EOK Everything went find, @p uspace_frame and @p uspace_node 181 * contains correct values. 182 * @retval ENOENT Virtual address has no mapping. 183 */ 184 sysarg_t sys_page_find_mapping(uintptr_t virt_address, 185 uintptr_t *uspace_frame) 186 { 187 mutex_lock(&AS->lock); 188 189 pte_t *pte = page_mapping_find(AS, virt_address, false); 190 if (!PTE_VALID(pte) || !PTE_PRESENT(pte)) { 191 mutex_unlock(&AS->lock); 192 193 return (sysarg_t) ENOENT; 194 } 195 196 uintptr_t phys_address = PTE_GET_FRAME(pte); 197 198 mutex_unlock(&AS->lock); 199 200 int rc = copy_to_uspace(uspace_frame, 201 &phys_address, sizeof(phys_address)); 202 if (rc != EOK) { 203 return (sysarg_t) rc; 204 } 205 206 return EOK; 207 } 208 174 209 /** @} 175 210 */ -
kernel/generic/src/mm/slab.c
r2bdf8313 rb0f00a9 180 180 unsigned int flags) 181 181 { 182 183 184 182 size_t zone = 0; 185 183 … … 317 315 spinlock_lock(&cache->slablock); 318 316 } else { 319 slab = list_get_instance( cache->partial_slabs.next, slab_t,320 link);317 slab = list_get_instance(list_first(&cache->partial_slabs), 318 slab_t, link); 321 319 list_remove(&slab->link); 322 320 } … … 360 358 if (!list_empty(&cache->magazines)) { 361 359 if (first) 362 cur = cache->magazines.next;360 cur = list_first(&cache->magazines); 363 361 else 364 cur = cache->magazines.prev;362 cur = list_last(&cache->magazines); 365 363 366 364 mag = list_get_instance(cur, slab_magazine_t, link); … … 812 810 813 811 size_t frames = 0; 814 link_t *cur; 815 for (cur = slab_cache_list.next; cur != &slab_cache_list; 816 cur = cur->next) { 812 list_foreach(slab_cache_list, cur) { 817 813 slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link); 818 814 frames += _slab_reclaim(cache, flags); … … 861 857 link_t *cur; 862 858 size_t i; 863 for (i = 0, cur = slab_cache_list. next;864 (i < skip) && (cur != &slab_cache_list );859 for (i = 0, cur = slab_cache_list.head.next; 860 (i < skip) && (cur != &slab_cache_list.head); 865 861 i++, cur = cur->next); 866 862 867 if (cur == &slab_cache_list ) {863 if (cur == &slab_cache_list.head) { 868 864 irq_spinlock_unlock(&slab_cache_lock, true); 869 865 break; … … 940 936 irq_spinlock_lock(&slab_cache_lock, false); 941 937 942 link_t *cur; 943 for (cur = slab_cache_list.next; cur != &slab_cache_list; 944 cur = cur->next) { 938 list_foreach(slab_cache_list, cur) { 945 939 slab_cache_t *slab = list_get_instance(cur, slab_cache_t, link); 946 940 if ((slab->flags & SLAB_CACHE_MAGDEFERRED) !=
Note:
See TracChangeset
for help on using the changeset viewer.
