Changes in kernel/generic/src/mm/as.c [826599a2:f97f1e51] in mainline
File: 1 edited
Legend:
- unmodified lines carry no prefix
- lines prefixed with "-" appear only in r826599a2 (removed)
- lines prefixed with "+" appear only in rf97f1e51 (added)
- "@@ -N +M @@" marks skipped context; N and M are the next line numbers shown from r826599a2 and rf97f1e51
kernel/generic/src/mm/as.c
--- kernel/generic/src/mm/as.c (r826599a2)
+++ kernel/generic/src/mm/as.c (rf97f1e51)
@@ -79 +79 @@
 #include <syscall/copy.h>
 #include <arch/interrupt.h>
-#include <interrupt.h>
 
 /**
@@ -286 +285 @@
 /** Check area conflicts with other areas.
  *
- * @param as Address space.
- * @param addr Starting virtual address of the area being tested.
- * @param count Number of pages in the area being tested.
- * @param guarded True if the area being tested is protected by guard pages.
- * @param avoid Do not touch this area.
+ * @param as Address space.
+ * @param addr Starting virtual address of the area being tested.
+ * @param count Number of pages in the area being tested.
+ * @param avoid Do not touch this area.
  *
  * @return True if there is no conflict, false otherwise.
@@ -296 +294 @@
  */
 NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr,
-    size_t count, bool guarded, as_area_t *avoid)
+    size_t count, as_area_t *avoid)
 {
     ASSERT((addr % PAGE_SIZE) == 0);
     ASSERT(mutex_locked(&as->lock));
-
-    /*
-     * If the addition of the supposed area address and size overflows,
-     * report conflict.
-     */
-    if (overflows_into_positive(addr, P2SZ(count)))
-        return false;
 
     /*
@@ -313 +304 @@
     if (overlaps(addr, P2SZ(count), (uintptr_t) NULL, PAGE_SIZE))
         return false;
 
     /*
      * The leaf node is found in O(log n), where n is proportional to
@@ -337 +328 @@
         if (area != avoid) {
             mutex_lock(&area->lock);
-
-            /*
-             * If at least one of the two areas are protected
-             * by the AS_AREA_GUARD flag then we must be sure
-             * that they are separated by at least one unmapped
-             * page.
-             */
-            int const gp = (guarded ||
-                (area->flags & AS_AREA_GUARD)) ? 1 : 0;
-
-            /*
-             * The area comes from the left neighbour node, which
-             * means that there already are some areas in the leaf
-             * node, which in turn means that adding gp is safe and
-             * will not cause an integer overflow.
-             */
+
             if (overlaps(addr, P2SZ(count), area->base,
-                P2SZ(area->pages + gp))) {
-                mutex_unlock(&area->lock);
-                return false;
-            }
-
-            mutex_unlock(&area->lock);
-        }
-    }
-
-    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
-    if (node) {
-        area = (as_area_t *) node->value[0];
-
-        if (area != avoid) {
-            int gp;
-
-            mutex_lock(&area->lock);
-
-            gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0;
-            if (gp && overflows(addr, P2SZ(count))) {
-                /*
-                 * Guard page not needed if the supposed area
-                 * is adjacent to the end of the address space.
-                 * We already know that the following test is
-                 * going to fail...
-                 */
-                gp--;
-            }
-
-            if (overlaps(addr, P2SZ(count + gp), area->base,
                 P2SZ(area->pages))) {
                 mutex_unlock(&area->lock);
@@ -393 +339 @@
         }
     }
 
+    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
+    if (node) {
+        area = (as_area_t *) node->value[0];
+
+        if (area != avoid) {
+            mutex_lock(&area->lock);
+
+            if (overlaps(addr, P2SZ(count), area->base,
+                P2SZ(area->pages))) {
+                mutex_unlock(&area->lock);
+                return false;
+            }
+
+            mutex_unlock(&area->lock);
+        }
+    }
+
     /* Second, check the leaf node. */
     btree_key_t i;
     for (i = 0; i < leaf->keys; i++) {
         area = (as_area_t *) leaf->value[i];
-        int agp;
-        int gp;
 
         if (area == avoid)
@@ -404 +365 @@
 
         mutex_lock(&area->lock);
-
-        gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0;
-        agp = gp;
-
-        /*
-         * Sanitize the two possible unsigned integer overflows.
-         */
-        if (gp && overflows(addr, P2SZ(count)))
-            gp--;
-        if (agp && overflows(area->base, P2SZ(area->pages)))
-            agp--;
-
-        if (overlaps(addr, P2SZ(count + gp), area->base,
-            P2SZ(area->pages + agp))) {
+
+        if (overlaps(addr, P2SZ(count), area->base,
+            P2SZ(area->pages))) {
             mutex_unlock(&area->lock);
             return false;
@@ -427 +377 @@
     /*
      * So far, the area does not conflict with other areas.
-     * Check if it is contained in the user address space.
+     * Check if it doesn't conflict with kernel address space.
      */
     if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
-        return iswithin(USER_ADDRESS_SPACE_START,
-            (USER_ADDRESS_SPACE_END - USER_ADDRESS_SPACE_START) + 1,
-            addr, P2SZ(count));
+        return !overlaps(addr, P2SZ(count), KERNEL_ADDRESS_SPACE_START,
+            KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
     }
 
@@ -443 +392 @@
  * this function.
  *
- * @param as Address space.
- * @param bound Lowest address bound.
- * @param size Requested size of the allocation.
- * @param guarded True if the allocation must be protected by guard pages.
+ * @param as Address space.
+ * @param bound Lowest address bound.
+ * @param size Requested size of the allocation.
  *
  * @return Address of the beginning of unmapped address space area.
@@ -453 +401 @@
  */
 NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound,
-    size_t size, bool guarded)
+    size_t size)
 {
     ASSERT(mutex_locked(&as->lock));
@@ -475 +423 @@
     /* First check the bound address itself */
     uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
-    if (addr >= bound) {
-        if (guarded) {
-            /* Leave an unmapped page between the lower
-             * bound and the area's start address.
-             */
-            addr += P2SZ(1);
-        }
-
-        if (check_area_conflicts(as, addr, pages, guarded, NULL))
-            return addr;
-    }
+    if ((addr >= bound) &&
+        (check_area_conflicts(as, addr, pages, NULL)))
+        return addr;
 
     /* Eventually check the addresses behind each area */
-    list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, node) {
+    list_foreach(as->as_area_btree.leaf_list, cur) {
+        btree_node_t *node =
+            list_get_instance(cur, btree_node_t, leaf_link);
 
         for (btree_key_t i = 0; i < node->keys; i++) {
@@ -497 +439 @@
             addr =
                 ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE);
-
-            if (guarded || area->flags & AS_AREA_GUARD) {
-                /* We must leave an unmapped page
-                 * between the two areas.
-                 */
-                addr += P2SZ(1);
-            }
-
             bool avail =
                 ((addr >= bound) && (addr >= area->base) &&
-                (check_area_conflicts(as, addr, pages, guarded, area)));
+                (check_area_conflicts(as, addr, pages, area)));
 
             mutex_unlock(&area->lock);
@@ -519 +453 @@
     return (uintptr_t) -1;
 }
-
-/** Remove reference to address space area share info.
- *
- * If the reference count drops to 0, the sh_info is deallocated.
- *
- * @param sh_info Pointer to address space area share info.
- *
- */
-NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info)
-{
-    bool dealloc = false;
-
-    mutex_lock(&sh_info->lock);
-    ASSERT(sh_info->refcount);
-
-    if (--sh_info->refcount == 0) {
-        dealloc = true;
-
-        /*
-         * Now walk carefully the pagemap B+tree and free/remove
-         * reference from all frames found there.
-         */
-        list_foreach(sh_info->pagemap.leaf_list, leaf_link,
-            btree_node_t, node) {
-            btree_key_t i;
-
-            for (i = 0; i < node->keys; i++)
-                frame_free((uintptr_t) node->value[i], 1);
-        }
-
-    }
-    mutex_unlock(&sh_info->lock);
-
-    if (dealloc) {
-        if (sh_info->backend && sh_info->backend->destroy_shared_data) {
-            sh_info->backend->destroy_shared_data(
-                sh_info->backend_shared_data);
-        }
-        btree_destroy(&sh_info->pagemap);
-        free(sh_info);
-    }
-}
-
 
 /** Create address space area of common attributes.
@@ -572 +463 @@
  * @param attrs Attributes of the area.
  * @param backend Address space area backend. NULL if no backend is used.
- * @param backend_data NULL or a pointer to custom backend data.
+ * @param backend_data NULL or a pointer to an array holding two void *.
  * @param base Starting virtual address of the area.
  *             If set to -1, a suitable mappable area is found.
@@ -585 +476 @@
     mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound)
 {
-    if ((*base != (uintptr_t) -1) && !IS_ALIGNED(*base, PAGE_SIZE))
+    if ((*base != (uintptr_t) -1) && ((*base % PAGE_SIZE) != 0))
         return NULL;
 
     if (size == 0)
         return NULL;
 
     size_t pages = SIZE2FRAMES(size);
 
@@ -596 +487 @@
     if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
         return NULL;
-
-    bool const guarded = flags & AS_AREA_GUARD;
 
     mutex_lock(&as->lock);
 
     if (*base == (uintptr_t) -1) {
-        *base = as_get_unmapped_area(as, bound, size, guarded);
+        *base = as_get_unmapped_area(as, bound, size);
         if (*base == (uintptr_t) -1) {
             mutex_unlock(&as->lock);
@@ -608 +497 @@
         }
     }
-
-    if (overflows_into_positive(*base, size)) {
-        mutex_unlock(&as->lock);
-        return NULL;
-    }
-
-    if (!check_area_conflicts(as, *base, pages, guarded, NULL)) {
+
+    if (!check_area_conflicts(as, *base, pages, NULL)) {
         mutex_unlock(&as->lock);
         return NULL;
@@ -629 +513 @@
     area->resident = 0;
     area->base = *base;
+    area->sh_info = NULL;
     area->backend = backend;
-    area->sh_info = NULL;
 
     if (backend_data)
@@ -636 +520 @@
     else
         memsetb(&area->backend_data, sizeof(area->backend_data), 0);
-
-    share_info_t *si = NULL;
-
-    /*
-     * Create the sharing info structure.
-     * We do this in advance for every new area, even if it is not going
-     * to be shared.
-     */
-    if (!(attrs & AS_AREA_ATTR_PARTIAL)) {
-        si = (share_info_t *) malloc(sizeof(share_info_t), 0);
-        mutex_initialize(&si->lock, MUTEX_PASSIVE);
-        si->refcount = 1;
-        si->shared = false;
-        si->backend_shared_data = NULL;
-        si->backend = backend;
-        btree_create(&si->pagemap);
-
-        area->sh_info = si;
-
-        if (area->backend && area->backend->create_shared_data) {
-            if (!area->backend->create_shared_data(area)) {
-                free(area);
-                mutex_unlock(&as->lock);
-                sh_info_remove_reference(si);
-                return NULL;
-            }
-        }
-    }
-
 
     if (area->backend && area->backend->create) {
         if (!area->backend->create(area)) {
             free(area);
             mutex_unlock(&as->lock);
-            if (!(attrs & AS_AREA_ATTR_PARTIAL))
-                sh_info_remove_reference(si);
             return NULL;
         }
     }
 
     btree_create(&area->used_space);
     btree_insert(&as->as_area_btree, *base, (void *) area,
@@ -761 +615 @@
 int as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags)
 {
-    if (!IS_ALIGNED(address, PAGE_SIZE))
-        return EINVAL;
-
     mutex_lock(&as->lock);
 
@@ -774 +625 @@
         return ENOENT;
     }
-
-    if (!area->backend->is_resizable(area)) {
-        /*
-         * The backend does not support resizing for this area.
+
+    if (area->backend == &phys_backend) {
+        /*
+         * Remapping of address space areas associated
+         * with memory mapped devices is not supported.
          */
         mutex_unlock(&area->lock);
@@ -784 +636 @@
     }
 
-    mutex_lock(&area->sh_info->lock);
-    if (area->sh_info->shared) {
+    if (area->sh_info) {
         /*
          * Remapping of shared address space areas
          * is not supported.
          */
-        mutex_unlock(&area->sh_info->lock);
         mutex_unlock(&area->lock);
         mutex_unlock(&as->lock);
         return ENOTSUP;
     }
-    mutex_unlock(&area->sh_info->lock);
 
     size_t pages = SIZE2FRAMES((address - area->base) + size);
@@ -816 +665 @@
 
         page_table_lock(as, false);
+
+        /*
+         * Start TLB shootdown sequence.
+         */
+        ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid,
+            area->base + P2SZ(pages), area->pages - pages);
 
         /*
@@ -871 +726 @@
                 }
 
-                /*
-                 * Start TLB shootdown sequence.
-                 *
-                 * The sequence is rather short and can be
-                 * repeated multiple times. The reason is that
-                 * we don't want to have used_space_remove()
-                 * inside the sequence as it may use a blocking
-                 * memory allocation for its B+tree. Blocking
-                 * while holding the tlblock spinlock is
-                 * forbidden and would hit a kernel assertion.
-                 */
-
-                ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES,
-                    as->asid, area->base + P2SZ(pages),
-                    area->pages - pages);
-
                 for (; i < size; i++) {
                     pte_t *pte = page_mapping_find(as,
@@ -904 +743 @@
                     page_mapping_remove(as, ptr + P2SZ(i));
                 }
-
-                /*
-                 * Finish TLB shootdown sequence.
-                 */
-
-                tlb_invalidate_pages(as->asid,
-                    area->base + P2SZ(pages),
-                    area->pages - pages);
-
-                /*
-                 * Invalidate software translation caches
-                 * (e.g. TSB on sparc64, PHT on ppc32).
-                 */
-                as_invalidate_translation_cache(as,
-                    area->base + P2SZ(pages),
-                    area->pages - pages);
-                tlb_shootdown_finalize(ipl);
             }
         }
 
+        /*
+         * Finish TLB shootdown sequence.
+         */
+
+        tlb_invalidate_pages(as->asid, area->base + P2SZ(pages),
+            area->pages - pages);
+
+        /*
+         * Invalidate software translation caches
+         * (e.g. TSB on sparc64, PHT on ppc32).
+         */
+        as_invalidate_translation_cache(as, area->base + P2SZ(pages),
+            area->pages - pages);
+        tlb_shootdown_finalize(ipl);
+
         page_table_unlock(as, false);
     } else {
         /*
          * Growing the area.
-         */
-
-        if (overflows_into_positive(address, P2SZ(pages)))
-            return EINVAL;
-
-        /*
          * Check for overlaps with other address space areas.
          */
-        bool const guarded = area->flags & AS_AREA_GUARD;
-        if (!check_area_conflicts(as, address, pages, guarded, area)) {
+        if (!check_area_conflicts(as, address, pages, area)) {
             mutex_unlock(&area->lock);
             mutex_unlock(&as->lock);
@@ -959 +790 @@
 }
 
+/** Remove reference to address space area share info.
+ *
+ * If the reference count drops to 0, the sh_info is deallocated.
+ *
+ * @param sh_info Pointer to address space area share info.
+ *
+ */
+NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info)
+{
+    bool dealloc = false;
+
+    mutex_lock(&sh_info->lock);
+    ASSERT(sh_info->refcount);
+
+    if (--sh_info->refcount == 0) {
+        dealloc = true;
+
+        /*
+         * Now walk carefully the pagemap B+tree and free/remove
+         * reference from all frames found there.
+         */
+        list_foreach(sh_info->pagemap.leaf_list, cur) {
+            btree_node_t *node
+                = list_get_instance(cur, btree_node_t, leaf_link);
+            btree_key_t i;
+
+            for (i = 0; i < node->keys; i++)
+                frame_free((uintptr_t) node->value[i]);
+        }
+
+    }
+    mutex_unlock(&sh_info->lock);
+
+    if (dealloc) {
+        btree_destroy(&sh_info->pagemap);
+        free(sh_info);
+    }
+}
+
 /** Destroy address space area.
@@ -993 +863 @@
      * Visit only the pages mapped by used_space B+tree.
      */
-    list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
-        node) {
+    list_foreach(area->used_space.leaf_list, cur) {
+        btree_node_t *node;
         btree_key_t i;
 
+        node = list_get_instance(cur, btree_node_t, leaf_link);
         for (i = 0; i < node->keys; i++) {
             uintptr_t ptr = node->key[i];
@@ -1040 +911 @@
     area->attributes |= AS_AREA_ATTR_PARTIAL;
 
-    sh_info_remove_reference(area->sh_info);
+    if (area->sh_info)
+        sh_info_remove_reference(area->sh_info);
 
     mutex_unlock(&area->lock);
@@ -1096 +968 @@
     }
 
-    if (!src_area->backend->is_shareable(src_area)) {
-        /*
-         * The backend does not permit sharing of this area.
+    if ((!src_area->backend) || (!src_area->backend->share)) {
+        /*
+         * There is no backend or the backend does not
+         * know how to share the area.
          */
         mutex_unlock(&src_area->lock);
@@ -1127 +1000 @@
      */
     share_info_t *sh_info = src_area->sh_info;
-
-    mutex_lock(&sh_info->lock);
-    sh_info->refcount++;
-    bool shared = sh_info->shared;
-    sh_info->shared = true;
-    mutex_unlock(&sh_info->lock);
-
-    if (!shared) {
+    if (!sh_info) {
+        sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
+        mutex_initialize(&sh_info->lock, MUTEX_PASSIVE);
+        sh_info->refcount = 2;
+        btree_create(&sh_info->pagemap);
+        src_area->sh_info = sh_info;
+
         /*
          * Call the backend to setup sharing.
-         * This only happens once for each sh_info.
          */
         src_area->backend->share(src_area);
+    } else {
+        mutex_lock(&sh_info->lock);
+        sh_info->refcount++;
+        mutex_unlock(&sh_info->lock);
     }
@@ -1258 +1133 @@
     }
 
-    if (area->backend != &anon_backend) {
+    if ((area->sh_info) || (area->backend != &anon_backend)) {
+        /* Copying shared areas not supported yet */
         /* Copying non-anonymous memory not supported yet */
         mutex_unlock(&area->lock);
@@ -1264 +1140 @@
         return ENOTSUP;
     }
-
-    mutex_lock(&area->sh_info->lock);
-    if (area->sh_info->shared) {
-        /* Copying shared areas not supported yet */
-        mutex_unlock(&area->sh_info->lock);
-        mutex_unlock(&area->lock);
-        mutex_unlock(&as->lock);
-        return ENOTSUP;
-    }
-    mutex_unlock(&area->sh_info->lock);
 
     /*
@@ -1280 +1146 @@
     size_t used_pages = 0;
 
-    list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
-        node) {
+    list_foreach(area->used_space.leaf_list, cur) {
+        btree_node_t *node
+            = list_get_instance(cur, btree_node_t, leaf_link);
         btree_key_t i;
 
@@ -1305 +1172 @@
     size_t frame_idx = 0;
 
-    list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
-        node) {
+    list_foreach(area->used_space.leaf_list, cur) {
+        btree_node_t *node = list_get_instance(cur, btree_node_t,
+            leaf_link);
         btree_key_t i;
 
@@ -1356 +1224 @@
     frame_idx = 0;
 
-    list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
-        node) {
+    list_foreach(area->used_space.leaf_list, cur) {
+        btree_node_t *node
+            = list_get_instance(cur, btree_node_t, leaf_link);
         btree_key_t i;
 
@@ -1392 +1261 @@
  * Interrupts are assumed disabled.
  *
- * @param address Faulting address.
- * @param access Access mode that caused the page fault (i.e.
- *               read/write/exec).
- * @param istate Pointer to the interrupted state.
+ * @param page Faulting page.
+ * @param access Access mode that caused the page fault (i.e.
+ *               read/write/exec).
+ * @param istate Pointer to the interrupted state.
  *
  * @return AS_PF_FAULT on page fault.
@@ -1403 +1272 @@
  *
  */
-int as_page_fault(uintptr_t address, pf_access_t access, istate_t *istate)
-{
-    uintptr_t page = ALIGN_DOWN(address, PAGE_SIZE);
-    int rc = AS_PF_FAULT;
-
+int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
+{
     if (!THREAD)
-        goto page_fault;
+        return AS_PF_FAULT;
 
     if (!AS)
-        goto page_fault;
+        return AS_PF_FAULT;
 
     mutex_lock(&AS->lock);
@@ -1468 +1334 @@
      * Resort to the backend page fault handler.
      */
-    rc = area->backend->page_fault(area, page, access);
-    if (rc != AS_PF_OK) {
+    if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
         page_table_unlock(AS, false);
         mutex_unlock(&area->lock);
@@ -1490 +1355 @@
         istate_set_retaddr(istate,
             (uintptr_t) &memcpy_to_uspace_failover_address);
-    } else if (rc == AS_PF_SILENT) {
-        printf("Killing task %" PRIu64 " due to a "
-            "failed late reservation request.\n", TASK->taskid);
-        task_kill_self(true);
     } else {
-        fault_if_from_uspace(istate, "Page fault: %p.", (void *) address);
-        panic_memtrap(istate, access, address, NULL);
+        return AS_PF_FAULT;
     }
 
@@ -1722 +1582 @@
 {
     ASSERT(mutex_locked(&area->lock));
-    ASSERT(IS_ALIGNED(page, PAGE_SIZE));
+    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
     ASSERT(count);
 
-    btree_node_t *leaf = NULL;
+    btree_node_t *leaf;
     size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
     if (pages) {
@@ -1733 +1593 @@
         return false;
     }
-
-    ASSERT(leaf != NULL);
 
     if (!leaf->keys) {
@@ -2008 +1866 @@
 {
     ASSERT(mutex_locked(&area->lock));
-    ASSERT(IS_ALIGNED(page, PAGE_SIZE));
+    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
     ASSERT(count);
@@ -2185 +2043 @@
 {
     uintptr_t virt = base;
-    as_area_t *area = as_area_create(AS, flags, size,
+    as_area_t *area = as_area_create(AS, flags | AS_AREA_CACHEABLE, size,
         AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, bound);
     if (area == NULL)
@@ -2223 +2081 @@
     size_t area_cnt = 0;
 
-    list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
-        node) {
+    list_foreach(as->as_area_btree.leaf_list, cur) {
+        btree_node_t *node =
+            list_get_instance(cur, btree_node_t, leaf_link);
         area_cnt += node->keys;
     }
@@ -2235 +2094 @@
     size_t area_idx = 0;
 
-    list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
-        node) {
+    list_foreach(as->as_area_btree.leaf_list, cur) {
+        btree_node_t *node =
+            list_get_instance(cur, btree_node_t, leaf_link);
         btree_key_t i;
 
@@ -2270 +2130 @@
 
     /* Print out info about address space areas */
-    list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
-        node) {
+    list_foreach(as->as_area_btree.leaf_list, cur) {
+        btree_node_t *node
+            = list_get_instance(cur, btree_node_t, leaf_link);
         btree_key_t i;
 
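The most substantive difference between the two revisions is the guard-page support that exists only on the r826599a2 side: check_area_conflicts() and as_get_unmapped_area() take an extra guarded flag, and whenever either the candidate area or an existing neighbour carries AS_AREA_GUARD, the overlap test is widened by one page so that the two areas can never become adjacent. The sketch below is a standalone illustration of that rule for the leaf-node case, not code taken from the kernel: PAGE_SIZE, the helpers and guarded_conflict() are simplified, hypothetical stand-ins (no B+tree walk, no locking); only the names P2SZ(), overlaps() and overflows() mirror the kernel macros.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096
/* Stand-in for the kernel's P2SZ(): convert a page count to a byte size. */
#define P2SZ(pages) ((size_t) (pages) * PAGE_SIZE)

/* True if [s1, s1 + sz1) and [s2, s2 + sz2) intersect. */
static bool overlaps(uintptr_t s1, size_t sz1, uintptr_t s2, size_t sz2)
{
    return (s1 < s2 + sz2) && (s2 < s1 + sz1);
}

/* True if base + size wraps past the end of the address space. */
static bool overflows(uintptr_t base, size_t size)
{
    return base + size < base;
}

/*
 * A candidate area of count pages at addr conflicts with an existing area of
 * pages pages at base if the two ranges overlap once each range belonging to
 * a guarded area has been grown by one extra (guard) page. The extra page is
 * dropped again when it would run past the end of the address space, which is
 * what the gp/agp "sanitize the two possible unsigned integer overflows"
 * block on the r826599a2 side does.
 */
static bool guarded_conflict(uintptr_t addr, size_t count, bool guarded,
    uintptr_t base, size_t pages, bool area_guarded)
{
    size_t gp = (guarded || area_guarded) ? 1 : 0;
    size_t agp = gp;

    if (gp && overflows(addr, P2SZ(count)))
        gp--;
    if (agp && overflows(base, P2SZ(pages)))
        agp--;

    return overlaps(addr, P2SZ(count + gp), base, P2SZ(pages + agp));
}

int main(void)
{
    /* Two immediately adjacent one-page areas: no conflict while both are
       unguarded, a conflict as soon as one of them requests a guard page. */
    uintptr_t a = 0x10000;
    uintptr_t b = 0x11000;

    bool plain = guarded_conflict(a, 1, false, b, 1, false);    /* false */
    bool guarded = guarded_conflict(a, 1, true, b, 1, false);   /* true */

    return (!plain && guarded) ? 0 : 1;
}

as_get_unmapped_area() applies the same idea when picking a fresh base address: on the r826599a2 side it additionally advances the candidate address by P2SZ(1) so that a guarded allocation never starts directly at the lower bound or directly behind an existing area.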