Changeset 63e27ef in mainline for kernel/generic/src/mm
- Timestamp:
- 2017-06-19T21:47:42Z (8 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- deacc58d
- Parents:
- 7354b5e
- Location:
- kernel/generic/src/mm
- Files:
- 11 edited
Legend:
- Unmodified
- Added
- Removed
kernel/generic/src/mm/as.c
r7354b5e r63e27ef 67 67 #include <arch/asm.h> 68 68 #include <panic.h> 69 #include < debug.h>69 #include <assert.h> 70 70 #include <print.h> 71 71 #include <mem.h> … … 189 189 DEADLOCK_PROBE_INIT(p_asidlock); 190 190 191 ASSERT(as != AS);192 ASSERT(atomic_get(&as->refcount) == 0);191 assert(as != AS); 192 assert(atomic_get(&as->refcount) == 0); 193 193 194 194 /* … … 236 236 bool cond = true; 237 237 while (cond) { 238 ASSERT(!list_empty(&as->as_area_btree.leaf_list));238 assert(!list_empty(&as->as_area_btree.leaf_list)); 239 239 240 240 btree_node_t *node = … … 298 298 size_t count, bool guarded, as_area_t *avoid) 299 299 { 300 ASSERT((addr % PAGE_SIZE) == 0);301 ASSERT(mutex_locked(&as->lock));300 assert((addr % PAGE_SIZE) == 0); 301 assert(mutex_locked(&as->lock)); 302 302 303 303 /* … … 455 455 size_t size, bool guarded) 456 456 { 457 ASSERT(mutex_locked(&as->lock));457 assert(mutex_locked(&as->lock)); 458 458 459 459 if (size == 0) … … 532 532 533 533 mutex_lock(&sh_info->lock); 534 ASSERT(sh_info->refcount);534 assert(sh_info->refcount); 535 535 536 536 if (--sh_info->refcount == 0) { … … 696 696 NO_TRACE static as_area_t *find_area_and_lock(as_t *as, uintptr_t va) 697 697 { 698 ASSERT(mutex_locked(&as->lock));698 assert(mutex_locked(&as->lock)); 699 699 700 700 btree_node_t *leaf; … … 827 827 bool cond = true; 828 828 while (cond) { 829 ASSERT(!list_empty(&area->used_space.leaf_list));829 assert(!list_empty(&area->used_space.leaf_list)); 830 830 831 831 btree_node_t *node = … … 893 893 ptr + P2SZ(i), false, &pte); 894 894 895 ASSERT(found);896 ASSERT(PTE_VALID(&pte));897 ASSERT(PTE_PRESENT(&pte));895 assert(found); 896 assert(PTE_VALID(&pte)); 897 assert(PTE_PRESENT(&pte)); 898 898 899 899 if ((area->backend) && … … 1008 1008 ptr + P2SZ(size), false, &pte); 1009 1009 1010 ASSERT(found);1011 ASSERT(PTE_VALID(&pte));1012 ASSERT(PTE_PRESENT(&pte));1010 assert(found); 1011 assert(PTE_VALID(&pte)); 1012 assert(PTE_PRESENT(&pte)); 1013 1013 1014 1014 if 
((area->backend) && … … 1194 1194 NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access) 1195 1195 { 1196 ASSERT(mutex_locked(&area->lock));1196 assert(mutex_locked(&area->lock)); 1197 1197 1198 1198 int flagmap[] = { … … 1321 1321 ptr + P2SZ(size), false, &pte); 1322 1322 1323 ASSERT(found);1324 ASSERT(PTE_VALID(&pte));1325 ASSERT(PTE_PRESENT(&pte));1323 assert(found); 1324 assert(PTE_VALID(&pte)); 1325 assert(PTE_PRESENT(&pte)); 1326 1326 1327 1327 old_frame[frame_idx++] = PTE_GET_FRAME(&pte); … … 1541 1541 */ 1542 1542 if (old_as) { 1543 ASSERT(old_as->cpu_refcount);1543 assert(old_as->cpu_refcount); 1544 1544 1545 1545 if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) { … … 1550 1550 * ASID. 1551 1551 */ 1552 ASSERT(old_as->asid != ASID_INVALID);1552 assert(old_as->asid != ASID_INVALID); 1553 1553 1554 1554 list_append(&old_as->inactive_as_with_asid_link, … … 1597 1597 NO_TRACE unsigned int as_area_get_flags(as_area_t *area) 1598 1598 { 1599 ASSERT(mutex_locked(&area->lock));1599 assert(mutex_locked(&area->lock)); 1600 1600 1601 1601 return area_flags_to_page_flags(area->flags); … … 1615 1615 NO_TRACE pte_t *page_table_create(unsigned int flags) 1616 1616 { 1617 ASSERT(as_operations);1618 ASSERT(as_operations->page_table_create);1617 assert(as_operations); 1618 assert(as_operations->page_table_create); 1619 1619 1620 1620 return as_operations->page_table_create(flags); … … 1630 1630 NO_TRACE void page_table_destroy(pte_t *page_table) 1631 1631 { 1632 ASSERT(as_operations);1633 ASSERT(as_operations->page_table_destroy);1632 assert(as_operations); 1633 assert(as_operations->page_table_destroy); 1634 1634 1635 1635 as_operations->page_table_destroy(page_table); … … 1651 1651 NO_TRACE void page_table_lock(as_t *as, bool lock) 1652 1652 { 1653 ASSERT(as_operations);1654 ASSERT(as_operations->page_table_lock);1653 assert(as_operations); 1654 assert(as_operations->page_table_lock); 1655 1655 1656 1656 as_operations->page_table_lock(as, 
lock); … … 1665 1665 NO_TRACE void page_table_unlock(as_t *as, bool unlock) 1666 1666 { 1667 ASSERT(as_operations);1668 ASSERT(as_operations->page_table_unlock);1667 assert(as_operations); 1668 assert(as_operations->page_table_unlock); 1669 1669 1670 1670 as_operations->page_table_unlock(as, unlock); … … 1680 1680 NO_TRACE bool page_table_locked(as_t *as) 1681 1681 { 1682 ASSERT(as_operations);1683 ASSERT(as_operations->page_table_locked);1682 assert(as_operations); 1683 assert(as_operations->page_table_locked); 1684 1684 1685 1685 return as_operations->page_table_locked(as); … … 1724 1724 bool used_space_insert(as_area_t *area, uintptr_t page, size_t count) 1725 1725 { 1726 ASSERT(mutex_locked(&area->lock));1727 ASSERT(IS_ALIGNED(page, PAGE_SIZE));1728 ASSERT(count);1726 assert(mutex_locked(&area->lock)); 1727 assert(IS_ALIGNED(page, PAGE_SIZE)); 1728 assert(count); 1729 1729 1730 1730 btree_node_t *leaf = NULL; … … 1737 1737 } 1738 1738 1739 ASSERT(leaf != NULL);1739 assert(leaf != NULL); 1740 1740 1741 1741 if (!leaf->keys) { … … 2010 2010 bool used_space_remove(as_area_t *area, uintptr_t page, size_t count) 2011 2011 { 2012 ASSERT(mutex_locked(&area->lock));2013 ASSERT(IS_ALIGNED(page, PAGE_SIZE));2014 ASSERT(count);2012 assert(mutex_locked(&area->lock)); 2013 assert(IS_ALIGNED(page, PAGE_SIZE)); 2014 assert(count); 2015 2015 2016 2016 btree_node_t *leaf; … … 2257 2257 as_area_t *area = node->value[i]; 2258 2258 2259 ASSERT(area_idx < area_cnt);2259 assert(area_idx < area_cnt); 2260 2260 mutex_lock(&area->lock); 2261 2261 -
kernel/generic/src/mm/backend_anon.c
r7354b5e r63e27ef 37 37 */ 38 38 39 #include <assert.h> 39 40 #include <mm/as.h> 40 41 #include <mm/page.h> … … 113 114 void anon_share(as_area_t *area) 114 115 { 115 ASSERT(mutex_locked(&area->as->lock));116 ASSERT(mutex_locked(&area->lock));117 ASSERT(!(area->flags & AS_AREA_LATE_RESERVE));116 assert(mutex_locked(&area->as->lock)); 117 assert(mutex_locked(&area->lock)); 118 assert(!(area->flags & AS_AREA_LATE_RESERVE)); 118 119 119 120 /* … … 138 139 base + P2SZ(j), false, &pte); 139 140 140 ASSERT(found);141 ASSERT(PTE_VALID(&pte));142 ASSERT(PTE_PRESENT(&pte));141 assert(found); 142 assert(PTE_VALID(&pte)); 143 assert(PTE_PRESENT(&pte)); 143 144 144 145 btree_insert(&area->sh_info->pagemap, … … 190 191 uintptr_t frame; 191 192 192 ASSERT(page_table_locked(AS));193 ASSERT(mutex_locked(&area->lock));194 ASSERT(IS_ALIGNED(upage, PAGE_SIZE));193 assert(page_table_locked(AS)); 194 assert(mutex_locked(&area->lock)); 195 assert(IS_ALIGNED(upage, PAGE_SIZE)); 195 196 196 197 if (!as_area_check_access(area, access)) … … 294 295 void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame) 295 296 { 296 ASSERT(page_table_locked(area->as));297 ASSERT(mutex_locked(&area->lock));297 assert(page_table_locked(area->as)); 298 assert(mutex_locked(&area->lock)); 298 299 299 300 if (area->flags & AS_AREA_LATE_RESERVE) { -
kernel/generic/src/mm/backend_elf.c
r7354b5e r63e27ef 37 37 38 38 #include <lib/elf.h> 39 #include < debug.h>39 #include <assert.h> 40 40 #include <typedefs.h> 41 41 #include <mm/as.h> … … 142 142 uintptr_t start_anon = entry->p_vaddr + entry->p_filesz; 143 143 144 ASSERT(mutex_locked(&area->as->lock));145 ASSERT(mutex_locked(&area->lock));144 assert(mutex_locked(&area->as->lock)); 145 assert(mutex_locked(&area->lock)); 146 146 147 147 /* … … 200 200 base + P2SZ(j), false, &pte); 201 201 202 ASSERT(found);203 ASSERT(PTE_VALID(&pte));204 ASSERT(PTE_PRESENT(&pte));202 assert(found); 203 assert(PTE_VALID(&pte)); 204 assert(PTE_PRESENT(&pte)); 205 205 206 206 btree_insert(&area->sh_info->pagemap, … … 261 261 bool dirty = false; 262 262 263 ASSERT(page_table_locked(AS));264 ASSERT(mutex_locked(&area->lock));265 ASSERT(IS_ALIGNED(upage, PAGE_SIZE));263 assert(page_table_locked(AS)); 264 assert(mutex_locked(&area->lock)); 265 assert(IS_ALIGNED(upage, PAGE_SIZE)); 266 266 267 267 if (!as_area_check_access(area, access)) … … 345 345 base + i * FRAME_SIZE, true, &pte); 346 346 347 ASSERT(found);348 ASSERT(PTE_PRESENT(&pte));347 assert(found); 348 assert(PTE_PRESENT(&pte)); 349 349 350 350 frame = PTE_GET_FRAME(&pte); … … 424 424 uintptr_t start_anon; 425 425 426 ASSERT(page_table_locked(area->as));427 ASSERT(mutex_locked(&area->lock));428 429 ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));430 ASSERT(page < entry->p_vaddr + entry->p_memsz);426 assert(page_table_locked(area->as)); 427 assert(mutex_locked(&area->lock)); 428 429 assert(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)); 430 assert(page < entry->p_vaddr + entry->p_memsz); 431 431 432 432 start_anon = entry->p_vaddr + entry->p_filesz; -
kernel/generic/src/mm/backend_phys.c
r7354b5e r63e27ef 37 37 */ 38 38 39 #include < debug.h>39 #include <assert.h> 40 40 #include <typedefs.h> 41 41 #include <mm/as.h> … … 95 95 void phys_share(as_area_t *area) 96 96 { 97 ASSERT(mutex_locked(&area->as->lock));98 ASSERT(mutex_locked(&area->lock));97 assert(mutex_locked(&area->as->lock)); 98 assert(mutex_locked(&area->lock)); 99 99 } 100 100 … … 135 135 uintptr_t base = area->backend_data.base; 136 136 137 ASSERT(page_table_locked(AS));138 ASSERT(mutex_locked(&area->lock));139 ASSERT(IS_ALIGNED(upage, PAGE_SIZE));137 assert(page_table_locked(AS)); 138 assert(mutex_locked(&area->lock)); 139 assert(IS_ALIGNED(upage, PAGE_SIZE)); 140 140 141 141 if (!as_area_check_access(area, access)) 142 142 return AS_PF_FAULT; 143 143 144 ASSERT(upage - area->base < area->backend_data.frames * FRAME_SIZE);144 assert(upage - area->base < area->backend_data.frames * FRAME_SIZE); 145 145 page_mapping_insert(AS, upage, base + (upage - area->base), 146 146 as_area_get_flags(area)); -
kernel/generic/src/mm/backend_user.c
r7354b5e r63e27ef 46 46 #include <typedefs.h> 47 47 #include <align.h> 48 #include < debug.h>48 #include <assert.h> 49 49 #include <errno.h> 50 50 #include <log.h> … … 108 108 int user_page_fault(as_area_t *area, uintptr_t upage, pf_access_t access) 109 109 { 110 ASSERT(page_table_locked(AS));111 ASSERT(mutex_locked(&area->lock));112 ASSERT(IS_ALIGNED(upage, PAGE_SIZE));110 assert(page_table_locked(AS)); 111 assert(mutex_locked(&area->lock)); 112 assert(IS_ALIGNED(upage, PAGE_SIZE)); 113 113 114 114 if (!as_area_check_access(area, access)) … … 162 162 void user_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame) 163 163 { 164 ASSERT(page_table_locked(area->as));165 ASSERT(mutex_locked(&area->lock));164 assert(page_table_locked(area->as)); 165 assert(mutex_locked(&area->lock)); 166 166 167 167 pfn_t pfn = ADDR2PFN(frame); -
kernel/generic/src/mm/frame.c
r7354b5e r63e27ef 47 47 #include <mm/as.h> 48 48 #include <panic.h> 49 #include < debug.h>49 #include <assert.h> 50 50 #include <adt/list.h> 51 51 #include <synch/mutex.h> … … 349 349 NO_TRACE static frame_t *zone_get_frame(zone_t *zone, size_t index) 350 350 { 351 ASSERT(index < zone->count);351 assert(index < zone->count); 352 352 353 353 return &zone->frames[index]; … … 370 370 pfn_t constraint) 371 371 { 372 ASSERT(zone->flags & ZONE_AVAILABLE);372 assert(zone->flags & ZONE_AVAILABLE); 373 373 374 374 /* Allocate frames from zone */ … … 377 377 FRAME_LOWPRIO, constraint, &index); 378 378 379 ASSERT(avail);380 ASSERT(index != (size_t) -1);379 assert(avail); 380 assert(index != (size_t) -1); 381 381 382 382 /* Update frame reference count */ … … 384 384 frame_t *frame = zone_get_frame(zone, index + i); 385 385 386 ASSERT(frame->refcount == 0);386 assert(frame->refcount == 0); 387 387 frame->refcount = 1; 388 388 } … … 407 407 NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t index) 408 408 { 409 ASSERT(zone->flags & ZONE_AVAILABLE);409 assert(zone->flags & ZONE_AVAILABLE); 410 410 411 411 frame_t *frame = zone_get_frame(zone, index); 412 412 413 ASSERT(frame->refcount > 0);413 assert(frame->refcount > 0); 414 414 415 415 if (!--frame->refcount) { … … 429 429 NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t index) 430 430 { 431 ASSERT(zone->flags & ZONE_AVAILABLE);431 assert(zone->flags & ZONE_AVAILABLE); 432 432 433 433 frame_t *frame = zone_get_frame(zone, index); … … 456 456 void *confdata) 457 457 { 458 ASSERT(zones.info[z1].flags & ZONE_AVAILABLE);459 ASSERT(zones.info[z2].flags & ZONE_AVAILABLE);460 ASSERT(zones.info[z1].flags == zones.info[z2].flags);461 ASSERT(zones.info[z1].base < zones.info[z2].base);462 ASSERT(!overlaps(zones.info[z1].base, zones.info[z1].count,458 assert(zones.info[z1].flags & ZONE_AVAILABLE); 459 assert(zones.info[z2].flags & ZONE_AVAILABLE); 460 assert(zones.info[z1].flags == zones.info[z2].flags); 461 
assert(zones.info[z1].base < zones.info[z2].base); 462 assert(!overlaps(zones.info[z1].base, zones.info[z1].count, 463 463 zones.info[z2].base, zones.info[z2].count)); 464 464 … … 509 509 NO_TRACE static void return_config_frames(size_t znum, pfn_t pfn, size_t count) 510 510 { 511 ASSERT(zones.info[znum].flags & ZONE_AVAILABLE);511 assert(zones.info[znum].flags & ZONE_AVAILABLE); 512 512 513 513 size_t cframes = SIZE2FRAMES(zone_conf_size(count)); … … 704 704 * the assert 705 705 */ 706 ASSERT(confframe != ADDR2PFN((uintptr_t ) NULL));706 assert(confframe != ADDR2PFN((uintptr_t ) NULL)); 707 707 708 708 /* Update the known end of physical memory. */ … … 792 792 size_t znum = find_zone(pfn, 1, hint); 793 793 794 ASSERT(znum != (size_t) -1);794 assert(znum != (size_t) -1); 795 795 796 796 zone_get_frame(&zones.info[znum], … … 806 806 size_t znum = find_zone(pfn, 1, hint); 807 807 808 ASSERT(znum != (size_t) -1);808 assert(znum != (size_t) -1); 809 809 810 810 void *res = zone_get_frame(&zones.info[znum], … … 830 830 uintptr_t constraint, size_t *pzone) 831 831 { 832 ASSERT(count > 0);832 assert(count > 0); 833 833 834 834 size_t hint = pzone ? (*pzone) : 0; … … 970 970 size_t znum = find_zone(pfn, 1, 0); 971 971 972 ASSERT(znum != (size_t) -1);972 assert(znum != (size_t) -1); 973 973 974 974 freed += zone_frame_free(&zones.info[znum], … … 1030 1030 size_t znum = find_zone(pfn, 1, 0); 1031 1031 1032 ASSERT(znum != (size_t) -1);1032 assert(znum != (size_t) -1); 1033 1033 1034 1034 zones.info[znum].frames[pfn - zones.info[znum].base].refcount++; … … 1153 1153 uint64_t *free) 1154 1154 { 1155 ASSERT(total != NULL);1156 ASSERT(unavail != NULL);1157 ASSERT(busy != NULL);1158 ASSERT(free != NULL);1155 assert(total != NULL); 1156 assert(unavail != NULL); 1157 assert(busy != NULL); 1158 assert(free != NULL); 1159 1159 1160 1160 irq_spinlock_lock(&zones.lock, true); -
kernel/generic/src/mm/km.c
r7354b5e r63e27ef 38 38 #include <mm/km.h> 39 39 #include <arch/mm/km.h> 40 #include <assert.h> 40 41 #include <mm/page.h> 41 42 #include <mm/frame.h> … … 44 45 #include <typedefs.h> 45 46 #include <lib/ra.h> 46 #include <debug.h>47 47 #include <arch.h> 48 48 #include <align.h> … … 95 95 { 96 96 km_ni_arena = ra_arena_create(); 97 ASSERT(km_ni_arena != NULL);97 assert(km_ni_arena != NULL); 98 98 km_non_identity_arch_init(); 99 99 config.non_identity_configured = true; … … 112 112 113 113 span_added = ra_span_add(km_ni_arena, base, size); 114 ASSERT(span_added);114 assert(span_added); 115 115 } 116 116 … … 132 132 uintptr_t offs; 133 133 134 ASSERT(ALIGN_DOWN(paddr, FRAME_SIZE) == paddr);135 ASSERT(ALIGN_UP(size, FRAME_SIZE) == size);134 assert(ALIGN_DOWN(paddr, FRAME_SIZE) == paddr); 135 assert(ALIGN_UP(size, FRAME_SIZE) == size); 136 136 137 137 /* Enforce natural or at least PAGE_SIZE alignment. */ … … 154 154 ipl_t ipl; 155 155 156 ASSERT(ALIGN_DOWN(vaddr, PAGE_SIZE) == vaddr);157 ASSERT(ALIGN_UP(size, PAGE_SIZE) == size);156 assert(ALIGN_DOWN(vaddr, PAGE_SIZE) == vaddr); 157 assert(ALIGN_UP(size, PAGE_SIZE) == size); 158 158 159 159 page_table_lock(AS_KERNEL, true); … … 240 240 uintptr_t km_temporary_page_get(uintptr_t *framep, frame_flags_t flags) 241 241 { 242 ASSERT(THREAD);243 ASSERT(framep);244 ASSERT(!(flags & ~(FRAME_NO_RESERVE | FRAME_ATOMIC)));242 assert(THREAD); 243 assert(framep); 244 assert(!(flags & ~(FRAME_NO_RESERVE | FRAME_ATOMIC))); 245 245 246 246 /* … … 281 281 void km_temporary_page_put(uintptr_t page) 282 282 { 283 ASSERT(THREAD);283 assert(THREAD); 284 284 285 285 if (km_is_non_identity(page)) -
kernel/generic/src/mm/page.c
r7354b5e r63e27ef 69 69 #include <typedefs.h> 70 70 #include <arch/asm.h> 71 #include <debug.h>72 71 #include <arch.h> 72 #include <assert.h> 73 73 #include <syscall/copy.h> 74 74 #include <errno.h> … … 98 98 unsigned int flags) 99 99 { 100 ASSERT(page_table_locked(as));101 102 ASSERT(page_mapping_operations);103 ASSERT(page_mapping_operations->mapping_insert);100 assert(page_table_locked(as)); 101 102 assert(page_mapping_operations); 103 assert(page_mapping_operations->mapping_insert); 104 104 105 105 page_mapping_operations->mapping_insert(as, ALIGN_DOWN(page, PAGE_SIZE), … … 122 122 NO_TRACE void page_mapping_remove(as_t *as, uintptr_t page) 123 123 { 124 ASSERT(page_table_locked(as));125 126 ASSERT(page_mapping_operations);127 ASSERT(page_mapping_operations->mapping_remove);124 assert(page_table_locked(as)); 125 126 assert(page_mapping_operations); 127 assert(page_mapping_operations->mapping_remove); 128 128 129 129 page_mapping_operations->mapping_remove(as, … … 147 147 pte_t *pte) 148 148 { 149 ASSERT(nolock || page_table_locked(as));150 151 ASSERT(page_mapping_operations);152 ASSERT(page_mapping_operations->mapping_find);149 assert(nolock || page_table_locked(as)); 150 151 assert(page_mapping_operations); 152 assert(page_mapping_operations->mapping_find); 153 153 154 154 return page_mapping_operations->mapping_find(as, … … 168 168 pte_t *pte) 169 169 { 170 ASSERT(nolock || page_table_locked(as));171 172 ASSERT(page_mapping_operations);173 ASSERT(page_mapping_operations->mapping_find);170 assert(nolock || page_table_locked(as)); 171 172 assert(page_mapping_operations); 173 assert(page_mapping_operations->mapping_find); 174 174 175 175 page_mapping_operations->mapping_update(as, … … 184 184 void page_mapping_make_global(uintptr_t base, size_t size) 185 185 { 186 ASSERT(page_mapping_operations);187 ASSERT(page_mapping_operations->mapping_make_global);186 assert(page_mapping_operations); 187 assert(page_mapping_operations->mapping_make_global); 188 188 189 189 
return page_mapping_operations->mapping_make_global(base, size); -
kernel/generic/src/mm/reserve.c
r7354b5e r63e27ef 36 36 */ 37 37 38 #include <assert.h> 38 39 #include <mm/reserve.h> 39 40 #include <mm/frame.h> … … 42 43 #include <typedefs.h> 43 44 #include <arch/types.h> 44 #include <debug.h>45 45 46 46 static bool reserve_initialized = false; … … 72 72 bool reserved = false; 73 73 74 ASSERT(reserve_initialized);74 assert(reserve_initialized); 75 75 76 76 irq_spinlock_lock(&reserve_lock, true); -
kernel/generic/src/mm/slab.c
r7354b5e r63e27ef 101 101 */ 102 102 103 #include <assert.h> 103 104 #include <synch/spinlock.h> 104 105 #include <mm/slab.h> … … 111 112 #include <arch.h> 112 113 #include <panic.h> 113 #include <debug.h>114 114 #include <bitops.h> 115 115 #include <macros.h> … … 260 260 slab = obj2slab(obj); 261 261 262 ASSERT(slab->cache == cache);262 assert(slab->cache == cache); 263 263 264 264 size_t freed = 0; … … 268 268 269 269 irq_spinlock_lock(&cache->slablock, true); 270 ASSERT(slab->available < cache->objects);270 assert(slab->available < cache->objects); 271 271 272 272 *((size_t *) obj) = slab->nextavail; … … 417 417 slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last; 418 418 419 ASSERT(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock));419 assert(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock)); 420 420 421 421 if (cmag) { /* First try local CPU magazines */ … … 484 484 slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last; 485 485 486 ASSERT(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock));486 assert(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock)); 487 487 488 488 if (cmag) { … … 586 586 NO_TRACE static bool make_magcache(slab_cache_t *cache) 587 587 { 588 ASSERT(_slab_initialized >= 2);588 assert(_slab_initialized >= 2); 589 589 590 590 cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count, … … 610 610 unsigned int kmflag), size_t (*destructor)(void *obj), unsigned int flags) 611 611 { 612 ASSERT(size > 0);612 assert(size > 0); 613 613 614 614 memsetb(cache, sizeof(*cache), 0); … … 948 948 void *malloc(size_t size, unsigned int flags) 949 949 { 950 ASSERT(_slab_initialized);951 ASSERT(size <= (1 << SLAB_MAX_MALLOC_W));950 assert(_slab_initialized); 951 assert(size <= (1 << SLAB_MAX_MALLOC_W)); 952 952 953 953 if (size < (1 << SLAB_MIN_MALLOC_W)) … … 961 961 void *realloc(void *ptr, size_t size, unsigned int flags) 962 962 { 963 ASSERT(_slab_initialized);964 ASSERT(size <= (1 << SLAB_MAX_MALLOC_W));963 
assert(_slab_initialized); 964 assert(size <= (1 << SLAB_MAX_MALLOC_W)); 965 965 966 966 void *new_ptr; -
kernel/generic/src/mm/tlb.c
r7354b5e r63e27ef 43 43 #include <mm/asid.h> 44 44 #include <arch/mm/tlb.h> 45 #include <assert.h> 45 46 #include <smp/ipi.h> 46 47 #include <synch/spinlock.h> … … 50 51 #include <arch.h> 51 52 #include <panic.h> 52 #include <debug.h>53 53 #include <cpu.h> 54 54 … … 152 152 void tlb_shootdown_ipi_recv(void) 153 153 { 154 ASSERT(CPU);154 assert(CPU); 155 155 156 156 CPU->tlb_active = false; … … 159 159 160 160 irq_spinlock_lock(&CPU->lock, false); 161 ASSERT(CPU->tlb_messages_count <= TLB_MESSAGE_QUEUE_LEN);161 assert(CPU->tlb_messages_count <= TLB_MESSAGE_QUEUE_LEN); 162 162 163 163 size_t i; … … 176 176 break; 177 177 case TLB_INVL_PAGES: 178 ASSERT(count);178 assert(count); 179 179 tlb_invalidate_pages(asid, page, count); 180 180 break;
Note:
See TracChangeset
for help on using the changeset viewer.