Changes in kernel/generic/src/mm/as.c [f97f1e51:826599a2] in mainline
File: kernel/generic/src/mm/as.c (1 edited)
Legend: unmodified lines are unmarked, added lines are prefixed with "+", removed lines with "-", and lines elided from the listing are marked with "…".
kernel/generic/src/mm/as.c
--- kernel/generic/src/mm/as.c (rf97f1e51)
+++ kernel/generic/src/mm/as.c (r826599a2)

  #include <syscall/copy.h>
  #include <arch/interrupt.h>
+ #include <interrupt.h>

  /**
…
  /** Check area conflicts with other areas.
   *
-  * @param as     Address space.
-  * @param addr   Starting virtual address of the area being tested.
-  * @param count  Number of pages in the area being tested.
-  * @param avoid  Do not touch this area.
+  * @param as      Address space.
+  * @param addr    Starting virtual address of the area being tested.
+  * @param count   Number of pages in the area being tested.
+  * @param guarded True if the area being tested is protected by guard pages.
+  * @param avoid   Do not touch this area.
   *
   * @return True if there is no conflict, false otherwise.
…
   */
  NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr,
-     size_t count, as_area_t *avoid)
+     size_t count, bool guarded, as_area_t *avoid)
  {
      ASSERT((addr % PAGE_SIZE) == 0);
      ASSERT(mutex_locked(&as->lock));
+
+     /*
+      * If the addition of the supposed area address and size overflows,
+      * report conflict.
+      */
+     if (overflows_into_positive(addr, P2SZ(count)))
+         return false;

      /*
…
      if (overlaps(addr, P2SZ(count), (uintptr_t) NULL, PAGE_SIZE))
          return false;

      /*
       * The leaf node is found in O(log n), where n is proportional to
…
          if (area != avoid) {
              mutex_lock(&area->lock);

+             /*
+              * If at least one of the two areas are protected
+              * by the AS_AREA_GUARD flag then we must be sure
+              * that they are separated by at least one unmapped
+              * page.
+              */
+             int const gp = (guarded ||
+                 (area->flags & AS_AREA_GUARD)) ? 1 : 0;
+
+             /*
+              * The area comes from the left neighbour node, which
+              * means that there already are some areas in the leaf
+              * node, which in turn means that adding gp is safe and
+              * will not cause an integer overflow.
+              */
              if (overlaps(addr, P2SZ(count), area->base,
-                 P2SZ(area->pages))) {
+                 P2SZ(area->pages + gp))) {
                  mutex_unlock(&area->lock);
                  return false;
              }

              mutex_unlock(&area->lock);
          }
      }

      node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
      if (node) {
          area = (as_area_t *) node->value[0];

          if (area != avoid) {
+             int gp;
+
              mutex_lock(&area->lock);

-             if (overlaps(addr, P2SZ(count), area->base,
+             gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0;
+             if (gp && overflows(addr, P2SZ(count))) {
+                 /*
+                  * Guard page not needed if the supposed area
+                  * is adjacent to the end of the address space.
+                  * We already know that the following test is
+                  * going to fail...
+                  */
+                 gp--;
+             }
+
+             if (overlaps(addr, P2SZ(count + gp), area->base,
                  P2SZ(area->pages))) {
                  mutex_unlock(&area->lock);
                  return false;
              }

              mutex_unlock(&area->lock);
          }
      }

      /* Second, check the leaf node. */
      btree_key_t i;
      for (i = 0; i < leaf->keys; i++) {
          area = (as_area_t *) leaf->value[i];
+         int agp;
+         int gp;

          if (area == avoid)
…

          mutex_lock(&area->lock);

-         if (overlaps(addr, P2SZ(count), area->base,
-             P2SZ(area->pages))) {
+         gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0;
+         agp = gp;
+
+         /*
+          * Sanitize the two possible unsigned integer overflows.
+          */
+         if (gp && overflows(addr, P2SZ(count)))
+             gp--;
+         if (agp && overflows(area->base, P2SZ(area->pages)))
+             agp--;
+
+         if (overlaps(addr, P2SZ(count + gp), area->base,
+             P2SZ(area->pages + agp))) {
              mutex_unlock(&area->lock);
              return false;
…
      /*
       * So far, the area does not conflict with other areas.
-      * Check if it doesn't conflict with kernel address space.
+      * Check if it is contained in the user address space.
       */
      if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
-         return !overlaps(addr, P2SZ(count), KERNEL_ADDRESS_SPACE_START,
-             KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
+         return iswithin(USER_ADDRESS_SPACE_START,
+             (USER_ADDRESS_SPACE_END - USER_ADDRESS_SPACE_START) + 1,
+             addr, P2SZ(count));
      }

…
   * this function.
   *
-  * @param as     Address space.
-  * @param bound  Lowest address bound.
-  * @param size   Requested size of the allocation.
+  * @param as      Address space.
+  * @param bound   Lowest address bound.
+  * @param size    Requested size of the allocation.
+  * @param guarded True if the allocation must be protected by guard pages.
   *
   * @return Address of the beginning of unmapped address space area.
…
   */
  NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound,
-     size_t size)
+     size_t size, bool guarded)
  {
      ASSERT(mutex_locked(&as->lock));
…
      /* First check the bound address itself */
      uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
-     if ((addr >= bound) &&
-         (check_area_conflicts(as, addr, pages, NULL)))
-         return addr;
+     if (addr >= bound) {
+         if (guarded) {
+             /* Leave an unmapped page between the lower
+              * bound and the area's start address.
+              */
+             addr += P2SZ(1);
+         }
+
+         if (check_area_conflicts(as, addr, pages, guarded, NULL))
+             return addr;
+     }

      /* Eventually check the addresses behind each area */
-     list_foreach(as->as_area_btree.leaf_list, cur) {
-         btree_node_t *node =
-             list_get_instance(cur, btree_node_t, leaf_link);
+     list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, node) {

          for (btree_key_t i = 0; i < node->keys; i++) {
…
              addr =
                  ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE);
+
+             if (guarded || area->flags & AS_AREA_GUARD) {
+                 /* We must leave an unmapped page
+                  * between the two areas.
+                  */
+                 addr += P2SZ(1);
+             }
+
              bool avail =
                  ((addr >= bound) && (addr >= area->base) &&
-                 (check_area_conflicts(as, addr, pages, area)));
+                 (check_area_conflicts(as, addr, pages, guarded, area)));

              mutex_unlock(&area->lock);
…
      return (uintptr_t) -1;
  }
+
+ /** Remove reference to address space area share info.
+  *
+  * If the reference count drops to 0, the sh_info is deallocated.
+  *
+  * @param sh_info Pointer to address space area share info.
+  *
+  */
+ NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info)
+ {
+     bool dealloc = false;
+
+     mutex_lock(&sh_info->lock);
+     ASSERT(sh_info->refcount);
+
+     if (--sh_info->refcount == 0) {
+         dealloc = true;
+
+         /*
+          * Now walk carefully the pagemap B+tree and free/remove
+          * reference from all frames found there.
+          */
+         list_foreach(sh_info->pagemap.leaf_list, leaf_link,
+             btree_node_t, node) {
+             btree_key_t i;
+
+             for (i = 0; i < node->keys; i++)
+                 frame_free((uintptr_t) node->value[i], 1);
+         }
+
+     }
+     mutex_unlock(&sh_info->lock);
+
+     if (dealloc) {
+         if (sh_info->backend && sh_info->backend->destroy_shared_data) {
+             sh_info->backend->destroy_shared_data(
+                 sh_info->backend_shared_data);
+         }
+         btree_destroy(&sh_info->pagemap);
+         free(sh_info);
+     }
+ }
+

  /** Create address space area of common attributes.
…
   * @param attrs        Attributes of the area.
   * @param backend      Address space area backend. NULL if no backend is used.
-  * @param backend_data NULL or a pointer to an array holding two void *.
+  * @param backend_data NULL or a pointer to custom backend data.
   * @param base         Starting virtual address of the area.
   *                     If set to -1, a suitable mappable area is found.
…
      mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound)
  {
-     if ((*base != (uintptr_t) -1) && ((*base % PAGE_SIZE) != 0))
+     if ((*base != (uintptr_t) -1) && !IS_ALIGNED(*base, PAGE_SIZE))
          return NULL;

      if (size == 0)
          return NULL;

      size_t pages = SIZE2FRAMES(size);

…
      if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
          return NULL;
+
+     bool const guarded = flags & AS_AREA_GUARD;

      mutex_lock(&as->lock);

      if (*base == (uintptr_t) -1) {
-         *base = as_get_unmapped_area(as, bound, size);
+         *base = as_get_unmapped_area(as, bound, size, guarded);
          if (*base == (uintptr_t) -1) {
              mutex_unlock(&as->lock);
…
          }
      }

-     if (!check_area_conflicts(as, *base, pages, NULL)) {
+     if (overflows_into_positive(*base, size)) {
+         mutex_unlock(&as->lock);
+         return NULL;
+     }
+
+     if (!check_area_conflicts(as, *base, pages, guarded, NULL)) {
          mutex_unlock(&as->lock);
          return NULL;
…
      area->resident = 0;
      area->base = *base;
+     area->backend = backend;
      area->sh_info = NULL;
-     area->backend = backend;

      if (backend_data)
…
      else
          memsetb(&area->backend_data, sizeof(area->backend_data), 0);

+     share_info_t *si = NULL;
+
+     /*
+      * Create the sharing info structure.
+      * We do this in advance for every new area, even if it is not going
+      * to be shared.
+      */
+     if (!(attrs & AS_AREA_ATTR_PARTIAL)) {
+         si = (share_info_t *) malloc(sizeof(share_info_t), 0);
+         mutex_initialize(&si->lock, MUTEX_PASSIVE);
+         si->refcount = 1;
+         si->shared = false;
+         si->backend_shared_data = NULL;
+         si->backend = backend;
+         btree_create(&si->pagemap);
+
+         area->sh_info = si;
+
+         if (area->backend && area->backend->create_shared_data) {
+             if (!area->backend->create_shared_data(area)) {
+                 free(area);
+                 mutex_unlock(&as->lock);
+                 sh_info_remove_reference(si);
+                 return NULL;
+             }
+         }
+     }
+
      if (area->backend && area->backend->create) {
          if (!area->backend->create(area)) {
              free(area);
              mutex_unlock(&as->lock);
+             if (!(attrs & AS_AREA_ATTR_PARTIAL))
+                 sh_info_remove_reference(si);
              return NULL;
          }
      }

      btree_create(&area->used_space);
      btree_insert(&as->as_area_btree, *base, (void *) area,
…
  int as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags)
  {
+     if (!IS_ALIGNED(address, PAGE_SIZE))
+         return EINVAL;
+
      mutex_lock(&as->lock);

…
          return ENOENT;
      }

-     if (area->backend == &phys_backend) {
-         /*
-          * Remapping of address space areas associated
-          * with memory mapped devices is not supported.
+     if (!area->backend->is_resizable(area)) {
+         /*
+          * The backend does not support resizing for this area.
           */
          mutex_unlock(&area->lock);
…
      }

-     if (area->sh_info) {
+     mutex_lock(&area->sh_info->lock);
+     if (area->sh_info->shared) {
          /*
           * Remapping of shared address space areas
           * is not supported.
           */
+         mutex_unlock(&area->sh_info->lock);
          mutex_unlock(&area->lock);
          mutex_unlock(&as->lock);
          return ENOTSUP;
      }
+     mutex_unlock(&area->sh_info->lock);

      size_t pages = SIZE2FRAMES((address - area->base) + size);
…

      page_table_lock(as, false);
-
-     /*
-      * Start TLB shootdown sequence.
-      */
-     ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid,
-         area->base + P2SZ(pages), area->pages - pages);

      /*
…
              }

+             /*
+              * Start TLB shootdown sequence.
+              *
+              * The sequence is rather short and can be
+              * repeated multiple times. The reason is that
+              * we don't want to have used_space_remove()
+              * inside the sequence as it may use a blocking
+              * memory allocation for its B+tree. Blocking
+              * while holding the tlblock spinlock is
+              * forbidden and would hit a kernel assertion.
+              */
+
+             ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES,
+                 as->asid, area->base + P2SZ(pages),
+                 area->pages - pages);
+
              for (; i < size; i++) {
                  pte_t *pte = page_mapping_find(as,
…
                  page_mapping_remove(as, ptr + P2SZ(i));
              }
+
+             /*
+              * Finish TLB shootdown sequence.
+              */
+
+             tlb_invalidate_pages(as->asid,
+                 area->base + P2SZ(pages),
+                 area->pages - pages);
+
+             /*
+              * Invalidate software translation caches
+              * (e.g. TSB on sparc64, PHT on ppc32).
+              */
+             as_invalidate_translation_cache(as,
+                 area->base + P2SZ(pages),
+                 area->pages - pages);
+             tlb_shootdown_finalize(ipl);
          }
      }
-
-     /*
-      * Finish TLB shootdown sequence.
-      */
-
-     tlb_invalidate_pages(as->asid, area->base + P2SZ(pages),
-         area->pages - pages);
-
-     /*
-      * Invalidate software translation caches
-      * (e.g. TSB on sparc64, PHT on ppc32).
-      */
-     as_invalidate_translation_cache(as, area->base + P2SZ(pages),
-         area->pages - pages);
-     tlb_shootdown_finalize(ipl);
-
      page_table_unlock(as, false);
  } else {
      /*
       * Growing the area.
+      */
+
+     if (overflows_into_positive(address, P2SZ(pages)))
+         return EINVAL;
+
+     /*
       * Check for overlaps with other address space areas.
       */
-     if (!check_area_conflicts(as, address, pages, area)) {
+     bool const guarded = area->flags & AS_AREA_GUARD;
+     if (!check_area_conflicts(as, address, pages, guarded, area)) {
          mutex_unlock(&area->lock);
          mutex_unlock(&as->lock);
…
  }

- /** Remove reference to address space area share info.
-  *
-  * If the reference count drops to 0, the sh_info is deallocated.
-  *
-  * @param sh_info Pointer to address space area share info.
-  *
-  */
- NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info)
- {
-     bool dealloc = false;
-
-     mutex_lock(&sh_info->lock);
-     ASSERT(sh_info->refcount);
-
-     if (--sh_info->refcount == 0) {
-         dealloc = true;
-
-         /*
-          * Now walk carefully the pagemap B+tree and free/remove
-          * reference from all frames found there.
-          */
-         list_foreach(sh_info->pagemap.leaf_list, cur) {
-             btree_node_t *node
-                 = list_get_instance(cur, btree_node_t, leaf_link);
-             btree_key_t i;
-
-             for (i = 0; i < node->keys; i++)
-                 frame_free((uintptr_t) node->value[i]);
-         }
-
-     }
-     mutex_unlock(&sh_info->lock);
-
-     if (dealloc) {
-         btree_destroy(&sh_info->pagemap);
-         free(sh_info);
-     }
- }
-
  /** Destroy address space area.
   *
…
       * Visit only the pages mapped by used_space B+tree.
       */
-     list_foreach(area->used_space.leaf_list, cur) {
-         btree_node_t *node;
+     list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
+         node) {
          btree_key_t i;

-         node = list_get_instance(cur, btree_node_t, leaf_link);
          for (i = 0; i < node->keys; i++) {
              uintptr_t ptr = node->key[i];
…
      area->attributes |= AS_AREA_ATTR_PARTIAL;

-     if (area->sh_info)
-         sh_info_remove_reference(area->sh_info);
+     sh_info_remove_reference(area->sh_info);

      mutex_unlock(&area->lock);
…
      }

-     if ((!src_area->backend) || (!src_area->backend->share)) {
-         /*
-          * There is no backend or the backend does not
-          * know how to share the area.
+     if (!src_area->backend->is_shareable(src_area)) {
+         /*
+          * The backend does not permit sharing of this area.
           */
          mutex_unlock(&src_area->lock);
…
       */
      share_info_t *sh_info = src_area->sh_info;
-     if (!sh_info) {
-         sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
-         mutex_initialize(&sh_info->lock, MUTEX_PASSIVE);
-         sh_info->refcount = 2;
-         btree_create(&sh_info->pagemap);
-         src_area->sh_info = sh_info;
-
+
+     mutex_lock(&sh_info->lock);
+     sh_info->refcount++;
+     bool shared = sh_info->shared;
+     sh_info->shared = true;
+     mutex_unlock(&sh_info->lock);
+
+     if (!shared) {
          /*
           * Call the backend to setup sharing.
+          * This only happens once for each sh_info.
           */
          src_area->backend->share(src_area);
-     } else {
-         mutex_lock(&sh_info->lock);
-         sh_info->refcount++;
-         mutex_unlock(&sh_info->lock);
      }

…
      }

-     if ((area->sh_info) || (area->backend != &anon_backend)) {
-         /* Copying shared areas not supported yet */
+     if (area->backend != &anon_backend) {
          /* Copying non-anonymous memory not supported yet */
          mutex_unlock(&area->lock);
…
          return ENOTSUP;
      }
+
+     mutex_lock(&area->sh_info->lock);
+     if (area->sh_info->shared) {
+         /* Copying shared areas not supported yet */
+         mutex_unlock(&area->sh_info->lock);
+         mutex_unlock(&area->lock);
+         mutex_unlock(&as->lock);
+         return ENOTSUP;
+     }
+     mutex_unlock(&area->sh_info->lock);

      /*
…
      size_t used_pages = 0;

-     list_foreach(area->used_space.leaf_list, cur) {
-         btree_node_t *node
-             = list_get_instance(cur, btree_node_t, leaf_link);
+     list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
+         node) {
          btree_key_t i;

…
      size_t frame_idx = 0;

-     list_foreach(area->used_space.leaf_list, cur) {
-         btree_node_t *node = list_get_instance(cur, btree_node_t,
-             leaf_link);
+     list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
+         node) {
          btree_key_t i;

…
      frame_idx = 0;

-     list_foreach(area->used_space.leaf_list, cur) {
-         btree_node_t *node
-             = list_get_instance(cur, btree_node_t, leaf_link);
+     list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
+         node) {
          btree_key_t i;

…
   * Interrupts are assumed disabled.
   *
-  * @param page    Faulting page.
-  * @param access  Access mode that caused the page fault (i.e.
-  *                read/write/exec).
-  * @param istate  Pointer to the interrupted state.
+  * @param address Faulting address.
+  * @param access  Access mode that caused the page fault (i.e.
+  *                read/write/exec).
+  * @param istate  Pointer to the interrupted state.
   *
   * @return AS_PF_FAULT on page fault.
…
   *
   */
- int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
- {
+ int as_page_fault(uintptr_t address, pf_access_t access, istate_t *istate)
+ {
+     uintptr_t page = ALIGN_DOWN(address, PAGE_SIZE);
+     int rc = AS_PF_FAULT;
+
      if (!THREAD)
-         return AS_PF_FAULT;
+         goto page_fault;

      if (!AS)
-         return AS_PF_FAULT;
+         goto page_fault;

      mutex_lock(&AS->lock);
…
       * Resort to the backend page fault handler.
1335 1469 */ 1336 if (area->backend->page_fault(area, page, access) != AS_PF_OK) { 1470 rc = area->backend->page_fault(area, page, access); 1471 if (rc != AS_PF_OK) { 1337 1472 page_table_unlock(AS, false); 1338 1473 mutex_unlock(&area->lock); … … 1355 1490 istate_set_retaddr(istate, 1356 1491 (uintptr_t) &memcpy_to_uspace_failover_address); 1492 } else if (rc == AS_PF_SILENT) { 1493 printf("Killing task %" PRIu64 " due to a " 1494 "failed late reservation request.\n", TASK->taskid); 1495 task_kill_self(true); 1357 1496 } else { 1358 return AS_PF_FAULT; 1497 fault_if_from_uspace(istate, "Page fault: %p.", (void *) address); 1498 panic_memtrap(istate, access, address, NULL); 1359 1499 } 1360 1500 … … 1582 1722 { 1583 1723 ASSERT(mutex_locked(&area->lock)); 1584 ASSERT( page == ALIGN_DOWN(page, PAGE_SIZE));1724 ASSERT(IS_ALIGNED(page, PAGE_SIZE)); 1585 1725 ASSERT(count); 1586 1726 1587 btree_node_t *leaf ;1727 btree_node_t *leaf = NULL; 1588 1728 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf); 1589 1729 if (pages) { … … 1593 1733 return false; 1594 1734 } 1735 1736 ASSERT(leaf != NULL); 1595 1737 1596 1738 if (!leaf->keys) { … … 1866 2008 { 1867 2009 ASSERT(mutex_locked(&area->lock)); 1868 ASSERT( page == ALIGN_DOWN(page, PAGE_SIZE));2010 ASSERT(IS_ALIGNED(page, PAGE_SIZE)); 1869 2011 ASSERT(count); 1870 2012 … … 2043 2185 { 2044 2186 uintptr_t virt = base; 2045 as_area_t *area = as_area_create(AS, flags | AS_AREA_CACHEABLE, size,2187 as_area_t *area = as_area_create(AS, flags, size, 2046 2188 AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, bound); 2047 2189 if (area == NULL) … … 2081 2223 size_t area_cnt = 0; 2082 2224 2083 list_foreach(as->as_area_btree.leaf_list, cur) { 2084 btree_node_t *node = 2085 list_get_instance(cur, btree_node_t, leaf_link); 2225 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, 2226 node) { 2086 2227 area_cnt += node->keys; 2087 2228 } … … 2094 2235 size_t area_idx = 0; 2095 2236 2096 list_foreach(as->as_area_btree.leaf_list, cur) { 2097 btree_node_t *node = 2098 list_get_instance(cur, btree_node_t, leaf_link); 2237 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, 2238 node) { 2099 2239 btree_key_t i; 2100 2240 … … 2130 2270 2131 2271 /* Print out info about address space areas */ 2132 list_foreach(as->as_area_btree.leaf_list, cur) { 2133 btree_node_t *node 2134 = list_get_instance(cur, btree_node_t, leaf_link); 2272 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, 2273 node) { 2135 2274 btree_key_t i; 2136 2275