Changeset 8b655705 in mainline for kernel/generic/src/mm/as.c
- Timestamp: 2011-04-15T19:38:07Z (14 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 9dd730d1
- Parents: 6b9e85b (diff), b2fb47f (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.

- Files: 1 edited

Legend:
- Unmodified (shown below with a leading space)
- Added (shown below with a leading +)
- Removed (shown below with a leading -)
kernel/generic/src/mm/as.c
--- kernel/generic/src/mm/as.c (parent r6b9e85b)
+++ kernel/generic/src/mm/as.c (r8b655705)

 #include <memstr.h>
 #include <macros.h>
+#include <bitops.h>
 #include <arch.h>
 #include <errno.h>
…
  * Each architecture decides what functions will be used to carry out
  * address space operations such as creating or locking page tables.
- *
  */
 as_operations_t *as_operations = NULL;
 
-/**
- * Slab for as_t objects.
+/** Slab for as_t objects.
  *
  */
 static slab_cache_t *as_slab;
 
-/**
- * This lock serializes access to the ASID subsystem.
- * It protects:
+/** ASID subsystem lock.
+ *
+ * This lock protects:
  * - inactive_as_with_asid_head list
  * - as->asid for each as of the as_t type
…
 
 /**
- * This list contains address spaces that are not active on any
- * processor and that have valid ASID.
- *
+ * Inactive address spaces (on all processors)
+ * that have valid ASID.
  */
 LIST_INITIALIZE(inactive_as_with_asid_head);
…
     mutex_initialize(&as->lock, MUTEX_PASSIVE);
 
-    int rc = as_constructor_arch(as, flags);
-
-    return rc;
+    return as_constructor_arch(as, flags);
 }
 
 NO_TRACE static size_t as_destructor(void *obj)
 {
-    as_t *as = (as_t *) obj;
-    return as_destructor_arch(as);
+    return as_destructor_arch((as_t *) obj);
 }
 
…
         panic("Cannot create kernel address space.");
 
-    /* Make sure the kernel address space
+    /*
+     * Make sure the kernel address space
      * reference count never drops to zero.
      */
…
 {
     DEADLOCK_PROBE_INIT(p_asidlock);
 
     ASSERT(as != AS);
     ASSERT(atomic_get(&as->refcount) == 0);
 
…
      * lock its mutex.
      */
 
     /*
      * We need to avoid deadlock between TLB shootdown and asidlock.
…
      * disabled to prevent nested context switches. We also depend on the
      * fact that so far no spinlocks are held.
-     *
      */
     preemption_disable();
…
     spinlock_unlock(&asidlock);
     interrupts_restore(ipl);
 
 
     /*
      * Destroy address space areas of the address space.
      * The B+tree must be walked carefully because it is
      * also being destroyed.
-     *
      */
     bool cond = true;
…
 /** Hold a reference to an address space.
  *
- * Holding a reference to an address space prevents destruction of that address
- * space.
+ * Holding a reference to an address space prevents destruction
+ * of that address space.
  *
  * @param as Address space to be held.
…
 /** Release a reference to an address space.
  *
- * The last one to release a reference to an address space destroys the address
- * space.
+ * The last one to release a reference to an address space
+ * destroys the address space.
  *
  * @param as Address space to be released.
…
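The bulk of the mechanical changes in the hunks below replace byte-size arithmetic (count * PAGE_SIZE) with shift arithmetic (count << PAGE_WIDTH). A minimal standalone sketch of the identity being relied on, assuming PAGE_SIZE is defined as (1 << PAGE_WIDTH) as in the kernel headers (the constant values below are illustrative stand-ins, not the kernel's):

    #include <assert.h>
    #include <stddef.h>

    #define PAGE_WIDTH 12                  /* illustrative; arch-dependent */
    #define PAGE_SIZE  (1 << PAGE_WIDTH)   /* 4 KiB with the value above */

    int main(void)
    {
        size_t count = 42;      /* number of pages */

        /* Shifting by PAGE_WIDTH equals multiplying by PAGE_SIZE... */
        assert((count << PAGE_WIDTH) == count * PAGE_SIZE);

        /* ...and SIZE2FRAMES-style rounding maps a byte size back to pages. */
        size_t size = PAGE_SIZE + 1;
        size_t pages = (size + PAGE_SIZE - 1) >> PAGE_WIDTH;
        assert(pages == 2);

        return 0;
    }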
 /** Check area conflicts with other areas.
  *
- * @param as         Address space.
- * @param va         Starting virtual address of the area being tested.
- * @param size       Size of the area being tested.
- * @param avoid_area Do not touch this area.
+ * @param as    Address space.
+ * @param addr  Starting virtual address of the area being tested.
+ * @param count Number of pages in the area being tested.
+ * @param avoid Do not touch this area.
  *
  * @return True if there is no conflict, false otherwise.
  *
  */
-NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
-    as_area_t *avoid_area)
-{
+NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr,
+    size_t count, as_area_t *avoid)
+{
+    ASSERT((addr % PAGE_SIZE) == 0);
     ASSERT(mutex_locked(&as->lock));
 
     /*
      * We don't want any area to have conflicts with NULL page.
-     *
      */
-    if (overlaps(va, size, (uintptr_t) NULL, PAGE_SIZE))
+    if (overlaps(addr, count << PAGE_WIDTH, (uintptr_t) NULL, PAGE_SIZE))
         return false;
 
…
      * record in the left neighbour, the leftmost record in the right
      * neighbour and all records in the leaf node itself.
-     *
      */
     btree_node_t *leaf;
     as_area_t *area =
-        (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
+        (as_area_t *) btree_search(&as->as_area_btree, addr, &leaf);
     if (area) {
-        if (area != avoid_area)
+        if (area != avoid)
             return false;
     }
…
         area = (as_area_t *) node->value[node->keys - 1];
 
-        mutex_lock(&area->lock);
-
-        if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
+        if (area != avoid) {
+            mutex_lock(&area->lock);
+
+            if (overlaps(addr, count << PAGE_WIDTH,
+                area->base, area->pages << PAGE_WIDTH)) {
+                mutex_unlock(&area->lock);
+                return false;
+            }
+
             mutex_unlock(&area->lock);
-            return false;
-        }
-
-        mutex_unlock(&area->lock);
+        }
     }
 
…
         area = (as_area_t *) node->value[0];
 
-        mutex_lock(&area->lock);
-
-        if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
+        if (area != avoid) {
+            mutex_lock(&area->lock);
+
+            if (overlaps(addr, count << PAGE_WIDTH,
+                area->base, area->pages << PAGE_WIDTH)) {
+                mutex_unlock(&area->lock);
+                return false;
+            }
+
             mutex_unlock(&area->lock);
-            return false;
-        }
-
-        mutex_unlock(&area->lock);
+        }
     }
 
…
         area = (as_area_t *) leaf->value[i];
 
-        if (area == avoid_area)
+        if (area == avoid)
             continue;
 
         mutex_lock(&area->lock);
 
-        if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
+        if (overlaps(addr, count << PAGE_WIDTH,
+            area->base, area->pages << PAGE_WIDTH)) {
             mutex_unlock(&area->lock);
             return false;
…
      * So far, the area does not conflict with other areas.
      * Check if it doesn't conflict with kernel address space.
-     *
      */
     if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
-        return !overlaps(va, size,
+        return !overlaps(addr, count << PAGE_WIDTH,
             KERNEL_ADDRESS_SPACE_START,
             KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
…
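check_area_conflicts() now receives the candidate area as a page count and asserts page alignment; every test above reduces to an interval-overlap check. A standalone sketch of the predicate the code relies on (the kernel's own overlaps() lives in <macros.h>; this version assumes the usual half-open interval semantics):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Do [s1, s1 + sz1) and [s2, s2 + sz2) share at least one byte?
     * Assumed semantics of the kernel's overlaps() macro. */
    static bool overlaps_sketch(uintptr_t s1, size_t sz1,
        uintptr_t s2, size_t sz2)
    {
        return (s1 < s2 + sz2) && (s2 < s1 + sz1);
    }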
     mem_backend_data_t *backend_data)
 {
-    if (base % PAGE_SIZE)
+    if ((base % PAGE_SIZE) != 0)
         return NULL;
 
-    if (!size)
+    if (size == 0)
         return NULL;
+
+    size_t pages = SIZE2FRAMES(size);
 
     /* Writeable executable areas are not supported.
      */
…
     mutex_lock(&as->lock);
 
-    if (!check_area_conflicts(as, base, size, NULL)) {
+    if (!check_area_conflicts(as, base, pages, NULL)) {
         mutex_unlock(&as->lock);
         return NULL;
…
     area->flags = flags;
     area->attributes = attrs;
-    area->pages = SIZE2FRAMES(size);
+    area->pages = pages;
+    area->resident = 0;
     area->base = base;
     area->sh_info = NULL;
…
      * to find out whether this is a miss or va belongs to an address
      * space area found there.
-     *
      */
 
…
         mutex_lock(&area->lock);
 
-        if ((area->base <= va) && (va < area->base + area->pages * PAGE_SIZE))
+        if ((area->base <= va) &&
+            (va < area->base + (area->pages << PAGE_WIDTH)))
             return area;
 
…
      * Second, locate the left neighbour and test its last record.
      * Because of its position in the B+tree, it must have base < va.
-     *
      */
     btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
…
         mutex_lock(&area->lock);
 
-        if (va < area->base + area->pages * PAGE_SIZE)
+        if (va < area->base + (area->pages << PAGE_WIDTH))
             return area;
 
…
     /*
      * Locate the area.
-     *
      */
     as_area_t *area = find_area_and_lock(as, address);
…
          * Remapping of address space areas associated
          * with memory mapped devices is not supported.
-         *
          */
         mutex_unlock(&area->lock);
…
          * Remapping of shared address space areas
          * is not supported.
-         *
          */
         mutex_unlock(&area->lock);
…
         /*
          * Zero size address space areas are not allowed.
-         *
          */
         mutex_unlock(&area->lock);
…
 
     if (pages < area->pages) {
-        uintptr_t start_free = area->base + pages * PAGE_SIZE;
+        uintptr_t start_free = area->base + (pages << PAGE_WIDTH);
 
         /*
          * Shrinking the area.
          * No need to check for overlaps.
-         *
          */
 
…
         /*
          * Start TLB shootdown sequence.
-         *
          */
         ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid,
-            area->base + pages * PAGE_SIZE, area->pages - pages);
+            area->base + (pages << PAGE_WIDTH), area->pages - pages);
 
         /*
…
          * is also the right way to remove part of the used_space
          * B+tree leaf list.
-         *
          */
         bool cond = true;
…
                 size_t i = 0;
 
-                if (overlaps(ptr, size * PAGE_SIZE, area->base,
-                    pages * PAGE_SIZE)) {
+                if (overlaps(ptr, size << PAGE_WIDTH, area->base,
+                    pages << PAGE_WIDTH)) {
 
-                    if (ptr + size * PAGE_SIZE <= start_free) {
+                    if (ptr + (size << PAGE_WIDTH) <= start_free) {
                         /*
                          * The whole interval fits
                          * completely in the resized
                          * address space area.
-                         *
                          */
                         break;
…
                      * to b and c overlaps with the resized
                      * address space area.
-                     *
                      */
 
…
                 for (; i < size; i++) {
-                    pte_t *pte = page_mapping_find(as, ptr +
-                        i * PAGE_SIZE);
+                    pte_t *pte = page_mapping_find(as, ptr +
+                        (i << PAGE_WIDTH));
 
                     ASSERT(pte);
…
                         (area->backend->frame_free)) {
                         area->backend->frame_free(area,
-                            ptr + i * PAGE_SIZE,
+                            ptr + (i << PAGE_WIDTH),
                             PTE_GET_FRAME(pte));
                     }
 
                     page_mapping_remove(as, ptr +
-                        i * PAGE_SIZE);
+                        (i << PAGE_WIDTH));
                 }
             }
…
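The shrink path above brackets all page-table surgery in a TLB shootdown sequence. Schematically (not compilable in isolation; the calls and argument shapes are the ones visible in this diff):

    /* Schematic of the shootdown bracket used throughout as.c. */
    ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, base, count);

    /* ... unmap pages and free backing frames here ... */

    tlb_invalidate_pages(as->asid, base, count);       /* TLB entries */
    as_invalidate_translation_cache(as, base, count);  /* e.g. TSB on sparc64 */
    tlb_shootdown_finalize(ipl);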
         /*
          * Finish TLB shootdown sequence.
-         *
          */
 
-        tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
+        tlb_invalidate_pages(as->asid, area->base + (pages << PAGE_WIDTH),
             area->pages - pages);
 
         /*
          * Invalidate software translation caches (e.g. TSB on sparc64).
-         *
          */
         as_invalidate_translation_cache(as, area->base +
-            pages * PAGE_SIZE, area->pages - pages);
+            (pages << PAGE_WIDTH), area->pages - pages);
         tlb_shootdown_finalize(ipl);
 
…
          * Growing the area.
          * Check for overlaps with other address space areas.
-         *
          */
-        if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
-            area)) {
+        if (!check_area_conflicts(as, address, pages, area)) {
             mutex_unlock(&area->lock);
             mutex_unlock(&as->lock);
…
 
             for (size = 0; size < (size_t) node->value[i]; size++) {
-                pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE);
+                pte_t *pte =
+                    page_mapping_find(as, ptr + (size << PAGE_WIDTH));
 
                 ASSERT(pte);
…
                     (area->backend->frame_free)) {
                     area->backend->frame_free(area,
-                        ptr + size * PAGE_SIZE, PTE_GET_FRAME(pte));
+                        ptr + (size << PAGE_WIDTH), PTE_GET_FRAME(pte));
                 }
 
-                page_mapping_remove(as, ptr + size * PAGE_SIZE);
+                page_mapping_remove(as, ptr + (size << PAGE_WIDTH));
             }
         }
…
     /*
      * Finish TLB shootdown sequence.
-     *
      */
 
…
      * Invalidate potential software translation caches (e.g. TSB on
      * sparc64).
-     *
      */
     as_invalidate_translation_cache(as, area->base, area->pages);
…
     /*
      * Remove the empty area from address space.
-     *
      */
     btree_remove(&as->as_area_btree, base, NULL);
…
         /*
          * Could not find the source address space area.
-         *
          */
         mutex_unlock(&src_as->lock);
…
          * There is no backend or the backend does not
          * know how to share the area.
-         *
          */
         mutex_unlock(&src_area->lock);
         mutex_unlock(&src_as->lock);
         return ENOTSUP;
     }
 
-    size_t src_size = src_area->pages * PAGE_SIZE;
+    size_t src_size = src_area->pages << PAGE_WIDTH;
     unsigned int src_flags = src_area->flags;
     mem_backend_t *src_backend = src_area->backend;
…
      * First, prepare the area for sharing.
      * Then it will be safe to unlock it.
-     *
      */
     share_info_t *sh_info = src_area->sh_info;
…
     /*
      * Call the backend to setup sharing.
-     *
      */
     src_area->backend->share(src_area);
…
      * The flags of the source area are masked against dst_flags_mask
      * to support sharing in less privileged mode.
-     *
      */
     as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size,
…
      * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
      * attribute and set the sh_info.
-     *
      */
     mutex_lock(&dst_as->lock);
…
 NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access)
 {
+    ASSERT(mutex_locked(&area->lock));
+
     int flagmap[] = {
         [PF_ACCESS_READ] = AS_AREA_READ,
         [PF_ACCESS_WRITE] = AS_AREA_WRITE,
         [PF_ACCESS_EXEC] = AS_AREA_EXEC
     };
-
-    ASSERT(mutex_locked(&area->lock));
 
     if (!(area->flags & flagmap[access]))
…
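as_area_check_access() now asserts that the caller already holds the area lock before consulting the flag table. A hypothetical caller pattern consistent with that contract (schematic; AS_PF_FAULT is the fault status used elsewhere in this file):

    /* Hypothetical caller: validate a write access while holding the lock. */
    mutex_lock(&area->lock);

    if (!as_area_check_access(area, PF_ACCESS_WRITE)) {
        mutex_unlock(&area->lock);
        return AS_PF_FAULT;
    }

    /* ... proceed with the access, then unlock ... */
    mutex_unlock(&area->lock);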
     /*
      * Compute total number of used pages in the used_space B+tree
-     *
      */
     size_t used_pages = 0;
…
 
     /*
      * Start TLB shootdown sequence.
-     *
      */
     ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
…
      * Remove used pages from page tables and remember their frame
      * numbers.
-     *
      */
     size_t frame_idx = 0;
…
 
         for (size = 0; size < (size_t) node->value[i]; size++) {
-            pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE);
+            pte_t *pte =
+                page_mapping_find(as, ptr + (size << PAGE_WIDTH));
 
             ASSERT(pte);
…
 
             /* Remove old mapping */
-            page_mapping_remove(as, ptr + size * PAGE_SIZE);
+            page_mapping_remove(as, ptr + (size << PAGE_WIDTH));
         }
     }
…
     /*
      * Finish TLB shootdown sequence.
-     *
      */
 
…
      * Invalidate potential software translation caches (e.g. TSB on
      * sparc64).
-     *
      */
     as_invalidate_translation_cache(as, area->base, area->pages);
…
 
             /* Insert the new mapping */
-            page_mapping_insert(as, ptr + size * PAGE_SIZE,
+            page_mapping_insert(as, ptr + (size << PAGE_WIDTH),
                 old_frame[frame_idx++], page_flags);
 
…
          * No area contained mapping for 'page'.
          * Signal page fault to low-level handler.
-         *
          */
         mutex_unlock(&AS->lock);
…
          * The address space area is not backed by any backend
          * or the backend cannot handle page faults.
-         *
          */
         mutex_unlock(&area->lock);
…
      * To avoid race condition between two page faults on the same address,
      * we need to make sure the mapping has not been already inserted.
-     *
      */
     pte_t *pte;
…
     /*
      * Resort to the backend page fault handler.
-     *
      */
     if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
…
          * preemption is disabled. We should not be
          * holding any other lock.
-         *
          */
         (void) interrupts_enable();
…
          * list of inactive address spaces with assigned
          * ASID.
-         *
          */
         ASSERT(old_as->asid != ASID_INVALID);
…
          * Perform architecture-specific tasks when the address space
          * is being removed from the CPU.
-         *
          */
         as_deinstall_arch(old_as);
     }
 
     /*
      * Second, prepare the new address space.
-     *
      */
     if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
…
      * Perform architecture-specific steps.
      * (e.g. write ASID to hardware register etc.)
-     *
      */
     as_install_arch(new_as);
…
 {
     ASSERT(mutex_locked(&area->lock));
 
     return area_flags_to_page_flags(area->flags);
 }
…
 
     if (src_area) {
-        size = src_area->pages * PAGE_SIZE;
+        size = src_area->pages << PAGE_WIDTH;
         mutex_unlock(&src_area->lock);
     } else
…
  * @param count Number of page to be marked.
  *
- * @return Zero on failure and non-zero on success.
+ * @return False on failure or true on success.
  *
  */
-int used_space_insert(as_area_t *area, uintptr_t page, size_t count)
+bool used_space_insert(as_area_t *area, uintptr_t page, size_t count)
 {
     ASSERT(mutex_locked(&area->lock));
…
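used_space_insert() records used pages as (page, count) intervals in a B+tree and tries to merge a new interval with its neighbours; the long case analysis below covers left/right neighbours and both borders. The three merge outcomes, sketched on a pair of adjacent intervals (a standalone toy that works in page numbers instead of byte addresses; not the kernel's B+tree code):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct {
        uintptr_t page;  /* first page number of the interval */
        size_t count;    /* number of pages */
    } interval_t;

    /* Try to place [page, page + count) between two existing intervals;
     * mirrors the merge cases of used_space_insert() on a toy scale. */
    static bool insert_between(interval_t *left, interval_t *right,
        uintptr_t page, size_t count, bool *standalone)
    {
        /* Overlap with either neighbour means the insert must fail. */
        if ((page < left->page + left->count) ||
            (page + count > right->page))
            return false;

        *standalone = false;
        if ((page == left->page + left->count) &&
            (page + count == right->page)) {
            /* Bridges both neighbours: merge everything into the left. */
            left->count += count + right->count;
            right->count = 0;  /* the caller would remove the right node */
        } else if (page == left->page + left->count) {
            left->count += count;       /* grow the left interval */
        } else if (page + count == right->page) {
            right->page = page;         /* grow the right interval down */
            right->count += count;
        } else {
            *standalone = true;         /* the caller inserts a new node */
        }
        return true;
    }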
         /*
          * We hit the beginning of some used space.
-         *
          */
-        return 0;
+        return false;
     }
 
     if (!leaf->keys) {
         btree_insert(&area->used_space, page, (void *) count, leaf);
-        return 1;
+        goto success;
     }
 
…
          * somewhere between the rightmost interval of
          * the left neigbour and the first interval of the leaf.
-         *
          */
 
         if (page >= right_pg) {
             /* Do nothing. */
-        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
-            left_cnt * PAGE_SIZE)) {
+        } else if (overlaps(page, count << PAGE_WIDTH, left_pg,
+            left_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the left interval. */
-            return 0;
-        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
-            right_cnt * PAGE_SIZE)) {
+            return false;
+        } else if (overlaps(page, count << PAGE_WIDTH, right_pg,
+            right_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the right interval. */
-            return 0;
-        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
-            (page + count * PAGE_SIZE == right_pg)) {
+            return false;
+        } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
+            (page + (count << PAGE_WIDTH) == right_pg)) {
             /*
              * The interval can be added by merging the two already
              * present intervals.
-             *
              */
             node->value[node->keys - 1] += count + right_cnt;
             btree_remove(&area->used_space, right_pg, leaf);
-            return 1;
-        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
+            goto success;
+        } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
             /*
              * The interval can be added by simply growing the left
              * interval.
-             *
              */
             node->value[node->keys - 1] += count;
-            return 1;
-        } else if (page + count * PAGE_SIZE == right_pg) {
+            goto success;
+        } else if (page + (count << PAGE_WIDTH) == right_pg) {
             /*
              * The interval can be addded by simply moving base of
              * the right interval down and increasing its size
              * accordingly.
-             *
              */
             leaf->value[0] += count;
             leaf->key[0] = page;
-            return 1;
+            goto success;
         } else {
             /*
              * The interval is between both neigbouring intervals,
              * but cannot be merged with any of them.
-             *
              */
             btree_insert(&area->used_space, page, (void *) count,
                 leaf);
-            return 1;
+            goto success;
         }
     } else if (page < leaf->key[0]) {
…
          * Investigate the border case in which the left neighbour does
          * not exist but the interval fits from the left.
-         *
          */
 
-        if (overlaps(page, count * PAGE_SIZE, right_pg,
-            right_cnt * PAGE_SIZE)) {
+        if (overlaps(page, count << PAGE_WIDTH, right_pg,
+            right_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the right interval. */
-            return 0;
-        } else if (page + count * PAGE_SIZE == right_pg) {
+            return false;
+        } else if (page + (count << PAGE_WIDTH) == right_pg) {
             /*
              * The interval can be added by moving the base of the
              * right interval down and increasing its size
              * accordingly.
-             *
              */
             leaf->key[0] = page;
             leaf->value[0] += count;
-            return 1;
+            goto success;
         } else {
             /*
              * The interval doesn't adjoin with the right interval.
              * It must be added individually.
-             *
              */
             btree_insert(&area->used_space, page, (void *) count,
                 leaf);
-            return 1;
+            goto success;
         }
     }
…
          * somewhere between the leftmost interval of
          * the right neigbour and the last interval of the leaf.
-         *
          */
 
         if (page < left_pg) {
             /* Do nothing. */
-        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
-            left_cnt * PAGE_SIZE)) {
+        } else if (overlaps(page, count << PAGE_WIDTH, left_pg,
+            left_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the left interval. */
-            return 0;
-        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
-            right_cnt * PAGE_SIZE)) {
+            return false;
+        } else if (overlaps(page, count << PAGE_WIDTH, right_pg,
+            right_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the right interval. */
-            return 0;
-        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
-            (page + count * PAGE_SIZE == right_pg)) {
+            return false;
+        } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
+            (page + (count << PAGE_WIDTH) == right_pg)) {
             /*
              * The interval can be added by merging the two already
              * present intervals.
-             *
              */
             leaf->value[leaf->keys - 1] += count + right_cnt;
             btree_remove(&area->used_space, right_pg, node);
-            return 1;
-        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
+            goto success;
+        } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
             /*
              * The interval can be added by simply growing the left
              * interval.
-             *
              */
-            leaf->value[leaf->keys - 1] += count;
-            return 1;
-        } else if (page + count * PAGE_SIZE == right_pg) {
+            leaf->value[leaf->keys - 1] += count;
+            goto success;
+        } else if (page + (count << PAGE_WIDTH) == right_pg) {
             /*
              * The interval can be addded by simply moving base of
              * the right interval down and increasing its size
              * accordingly.
-             *
              */
             node->value[0] += count;
             node->key[0] = page;
-            return 1;
+            goto success;
         } else {
             /*
              * The interval is between both neigbouring intervals,
              * but cannot be merged with any of them.
-             *
              */
             btree_insert(&area->used_space, page, (void *) count,
                 leaf);
-            return 1;
+            goto success;
         }
     } else if (page >= leaf->key[leaf->keys - 1]) {
…
          * Investigate the border case in which the right neighbour
          * does not exist but the interval fits from the right.
-         *
          */
 
-        if (overlaps(page, count * PAGE_SIZE, left_pg,
-            left_cnt * PAGE_SIZE)) {
+        if (overlaps(page, count << PAGE_WIDTH, left_pg,
+            left_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the left interval. */
-            return 0;
-        } else if (left_pg + left_cnt * PAGE_SIZE == page) {
+            return false;
+        } else if (left_pg + (left_cnt << PAGE_WIDTH) == page) {
             /*
              * The interval can be added by growing the left
              * interval.
-             *
              */
             leaf->value[leaf->keys - 1] += count;
-            return 1;
+            goto success;
         } else {
             /*
              * The interval doesn't adjoin with the left interval.
              * It must be added individually.
-             *
              */
             btree_insert(&area->used_space, page, (void *) count,
                 leaf);
-            return 1;
+            goto success;
         }
     }
…
      * only between two other intervals of the leaf. The two border cases
      * were already resolved.
-     *
      */
     btree_key_t i;
…
             /*
              * The interval fits between left_pg and right_pg.
-             *
              */
 
-            if (overlaps(page, count * PAGE_SIZE, left_pg,
-                left_cnt * PAGE_SIZE)) {
+            if (overlaps(page, count << PAGE_WIDTH, left_pg,
+                left_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval intersects with the left
                  * interval.
-                 *
                  */
-                return 0;
-            } else if (overlaps(page, count * PAGE_SIZE, right_pg,
-                right_cnt * PAGE_SIZE)) {
+                return false;
+            } else if (overlaps(page, count << PAGE_WIDTH, right_pg,
+                right_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval intersects with the right
                  * interval.
-                 *
                  */
-                return 0;
-            } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
-                (page + count * PAGE_SIZE == right_pg)) {
+                return false;
+            } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
+                (page + (count << PAGE_WIDTH) == right_pg)) {
                 /*
                  * The interval can be added by merging the two
                  * already present intervals.
-                 *
                  */
                 leaf->value[i - 1] += count + right_cnt;
                 btree_remove(&area->used_space, right_pg, leaf);
-                return 1;
-            } else if (page == left_pg + left_cnt * PAGE_SIZE) {
+                goto success;
+            } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval can be added by simply growing
                  * the left interval.
-                 *
                  */
                 leaf->value[i - 1] += count;
-                return 1;
-            } else if (page + count * PAGE_SIZE == right_pg) {
+                goto success;
+            } else if (page + (count << PAGE_WIDTH) == right_pg) {
                 /*
                  * The interval can be addded by simply moving
                  * base of the right interval down and
                  * increasing its size accordingly.
-                 *
                  */
                 leaf->value[i] += count;
                 leaf->key[i] = page;
-                return 1;
+                goto success;
             } else {
                 /*
…
                  * intervals, but cannot be merged with any of
                  * them.
-                 *
                  */
                 btree_insert(&area->used_space, page,
                     (void *) count, leaf);
-                return 1;
+                goto success;
             }
         }
…
     panic("Inconsistency detected while adding %zu pages of used "
         "space at %p.", count, (void *) page);
+
+success:
+    area->resident += count;
+    return true;
 }
 
…
  * @param count Number of page to be marked.
  *
- * @return Zero on failure and non-zero on success.
+ * @return False on failure or true on success.
  *
  */
-int used_space_remove(as_area_t *area, uintptr_t page, size_t count)
+bool used_space_remove(as_area_t *area, uintptr_t page, size_t count)
 {
     ASSERT(mutex_locked(&area->lock));
…
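Both used_space_insert() and used_space_remove() now funnel their success paths through a success: label that adjusts the new area->resident counter, so the number of resident pages can be read off the area directly. A sketch of a consumer (hypothetical helper, not part of this changeset; locking discipline as in the rest of as.c):

    /* Hypothetical helper: resident memory of an area, in bytes. */
    static size_t area_resident_bytes(as_area_t *area)
    {
        ASSERT(mutex_locked(&area->lock));
        return area->resident << PAGE_WIDTH;  /* pages -> bytes */
    }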
         /*
          * We are lucky, page is the beginning of some interval.
-         *
          */
         if (count > pages) {
-            return 0;
+            return false;
         } else if (count == pages) {
             btree_remove(&area->used_space, page, leaf);
-            return 1;
+            goto success;
         } else {
             /*
              * Find the respective interval.
              * Decrease its size and relocate its start address.
-             *
              */
             btree_key_t i;
             for (i = 0; i < leaf->keys; i++) {
                 if (leaf->key[i] == page) {
-                    leaf->key[i] += count * PAGE_SIZE;
+                    leaf->key[i] += count << PAGE_WIDTH;
                     leaf->value[i] -= count;
-                    return 1;
+                    goto success;
                 }
             }
+
             goto error;
         }
…
         size_t left_cnt = (size_t) node->value[node->keys - 1];
 
-        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
-            count * PAGE_SIZE)) {
-            if (page + count * PAGE_SIZE ==
-                left_pg + left_cnt * PAGE_SIZE) {
+        if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
+            count << PAGE_WIDTH)) {
+            if (page + (count << PAGE_WIDTH) ==
+                left_pg + (left_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval is contained in the rightmost
…
                  * removed by updating the size of the bigger
                  * interval.
-                 *
                  */
                 node->value[node->keys - 1] -= count;
-                return 1;
-            } else if (page + count * PAGE_SIZE <
-                left_pg + left_cnt * PAGE_SIZE) {
+                goto success;
+            } else if (page + (count << PAGE_WIDTH) <
+                left_pg + (left_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval is contained in the rightmost
…
                  * the original interval and also inserting a
                  * new interval.
-                 *
                  */
-                size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
-                    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
+                size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -
+                    (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;
                 node->value[node->keys - 1] -= count + new_cnt;
                 btree_insert(&area->used_space, page +
-                    count * PAGE_SIZE, (void *) new_cnt, leaf);
-                return 1;
+                    (count << PAGE_WIDTH), (void *) new_cnt, leaf);
+                goto success;
             }
         }
-        return 0;
+
+        return false;
     } else if (page < leaf->key[0])
-        return 0;
+        return false;
 
     if (page > leaf->key[leaf->keys - 1]) {
…
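When only part of a removal overlaps an existing interval, the code above truncates the interval and, if pages survive past the removed range, inserts the remainder as a new interval; new_cnt is that remainder. The arithmetic in page numbers (a standalone toy, not kernel code):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Pages left over above [page, page + count) when it is carved out of
     * the tail of [left_pg, left_pg + left_cnt); mirrors the new_cnt math. */
    static size_t remainder_above(uintptr_t left_pg, size_t left_cnt,
        uintptr_t page, size_t count)
    {
        return (left_pg + left_cnt) - (page + count);
    }

    int main(void)
    {
        /* Removing 2 pages from the middle of an 8-page interval at 100
         * leaves 3 pages (105..107) to be re-inserted as a new interval. */
        assert(remainder_above(100, 8, 103, 2) == 3);
        return 0;
    }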
         size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
 
-        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
-            count * PAGE_SIZE)) {
-            if (page + count * PAGE_SIZE ==
-                left_pg + left_cnt * PAGE_SIZE) {
+        if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
+            count << PAGE_WIDTH)) {
+            if (page + (count << PAGE_WIDTH) ==
+                left_pg + (left_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval is contained in the rightmost
                  * interval of the leaf and can be removed by
                  * updating the size of the bigger interval.
-                 *
                  */
                 leaf->value[leaf->keys - 1] -= count;
-                return 1;
-            } else if (page + count * PAGE_SIZE < left_pg +
-                left_cnt * PAGE_SIZE) {
+                goto success;
+            } else if (page + (count << PAGE_WIDTH) < left_pg +
+                (left_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval is contained in the rightmost
…
                  * original interval and also inserting a new
                  * interval.
-                 *
                  */
-                size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
-                    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
+                size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -
+                    (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;
                 leaf->value[leaf->keys - 1] -= count + new_cnt;
                 btree_insert(&area->used_space, page +
-                    count * PAGE_SIZE, (void *) new_cnt, leaf);
-                return 1;
+                    (count << PAGE_WIDTH), (void *) new_cnt, leaf);
+                goto success;
             }
         }
-        return 0;
+
+        return false;
     }
 
     /*
      * The border cases have been already resolved.
      * Now the interval can be only between intervals of the leaf.
      */
     btree_key_t i;
…
              * to (i - 1) and i.
              */
-            if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
-                count * PAGE_SIZE)) {
-                if (page + count * PAGE_SIZE ==
-                    left_pg + left_cnt * PAGE_SIZE) {
+            if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
+                count << PAGE_WIDTH)) {
+                if (page + (count << PAGE_WIDTH) ==
+                    left_pg + (left_cnt << PAGE_WIDTH)) {
                     /*
                      * The interval is contained in the
…
                      * be removed by updating the size of
                      * the bigger interval.
-                     *
                      */
                     leaf->value[i - 1] -= count;
-                    return 1;
-                } else if (page + count * PAGE_SIZE <
-                    left_pg + left_cnt * PAGE_SIZE) {
+                    goto success;
+                } else if (page + (count << PAGE_WIDTH) <
+                    left_pg + (left_cnt << PAGE_WIDTH)) {
                     /*
                      * The interval is contained in the
…
                      */
                     size_t new_cnt = ((left_pg +
-                        left_cnt * PAGE_SIZE) -
-                        (page + count * PAGE_SIZE)) >>
+                        (left_cnt << PAGE_WIDTH)) -
+                        (page + (count << PAGE_WIDTH))) >>
                         PAGE_WIDTH;
                     leaf->value[i - 1] -= count + new_cnt;
                     btree_insert(&area->used_space, page +
-                        count * PAGE_SIZE, (void *) new_cnt,
+                        (count << PAGE_WIDTH), (void *) new_cnt,
                         leaf);
-                    return 1;
+                    goto success;
                 }
             }
-            return 0;
+
+            return false;
         }
     }
…
     panic("Inconsistency detected while removing %zu pages of used "
         "space from %p.", count, (void *) page);
+
+success:
+    area->resident -= count;
+    return true;
 }
 
…
 }
 
+/** Return pointer to unmapped address space area
+ *
+ * @param base Lowest address bound.
+ * @param size Requested size of the allocation.
+ *
+ * @return Pointer to the beginning of unmapped address space area.
+ *
+ */
+sysarg_t sys_as_get_unmapped_area(uintptr_t base, size_t size)
+{
+    if (size == 0)
+        return 0;
+
+    /*
+     * Make sure we allocate from page-aligned
+     * address. Check for possible overflow in
+     * each step.
+     */
+
+    size_t pages = SIZE2FRAMES(size);
+    uintptr_t ret = 0;
+
+    /*
+     * Find the lowest unmapped address aligned on the sz
+     * boundary, not smaller than base and of the required size.
+     */
+
+    mutex_lock(&AS->lock);
+
+    /* First check the base address itself */
+    uintptr_t addr = ALIGN_UP(base, PAGE_SIZE);
+    if ((addr >= base) &&
+        (check_area_conflicts(AS, addr, pages, NULL)))
+        ret = addr;
+
+    /* Eventually check the addresses behind each area */
+    link_t *cur;
+    for (cur = AS->as_area_btree.leaf_head.next;
+        (ret == 0) && (cur != &AS->as_area_btree.leaf_head);
+        cur = cur->next) {
+        btree_node_t *node =
+            list_get_instance(cur, btree_node_t, leaf_link);
+
+        btree_key_t i;
+        for (i = 0; (ret == 0) && (i < node->keys); i++) {
+            as_area_t *area = (as_area_t *) node->value[i];
+
+            mutex_lock(&area->lock);
+
+            uintptr_t addr =
+                ALIGN_UP(area->base + (area->pages << PAGE_WIDTH),
+                PAGE_SIZE);
+
+            if ((addr >= base) && (addr >= area->base) &&
+                (check_area_conflicts(AS, addr, pages, area)))
+                ret = addr;
+
+            mutex_unlock(&area->lock);
+        }
+    }
+
+    mutex_unlock(&AS->lock);
+
+    return (sysarg_t) ret;
+}
+
 /** Get list of adress space areas.
  *
…
     mutex_lock(&as->lock);
 
-    /* print out info about address space areas */
+    /* Print out info about address space areas */
     link_t *cur;
     for (cur = as->as_area_btree.leaf_head.next;
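The merge also introduces the sys_as_get_unmapped_area() syscall handler shown above. A hypothetical user-space wrapper (illustrative only: the header paths, the syscall number SYS_AS_GET_UNMAPPED_AREA, and the generic __SYSCALL2() stub are assumed to be wired up elsewhere in the merge):

    #include <sys/types.h>
    #include <libarch/syscall.h>  /* assumed location of __SYSCALLn() */
    #include <abi/syscall.h>      /* assumed location of SYS_* numbers */

    /* Ask the kernel for the lowest unmapped region of the given size
     * at or above base; returns 0 (NULL) when nothing suitable exists. */
    static void *as_get_unmapped_area(void *base, size_t size)
    {
        return (void *) __SYSCALL2(SYS_AS_GET_UNMAPPED_AREA,
            (sysarg_t) base, (sysarg_t) size);
    }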