Changeset fc47885 in mainline
- Timestamp: 2011-02-03T13:41:07Z (14 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: bd81386
- Parents: 86d7bfa
- Location: kernel/generic
- Files: 3 edited
kernel/generic/include/mm/as.h
--- r86d7bfa
+++ rfc47885

 	/**
-	 * Number of processors on wich is this address space active.
-	 * Protected by asidlock.
+	 * Number of processors on which this
+	 * address space is active. Protected by
+	 * asidlock.
 	 */
 	size_t cpu_refcount;
 
-	/**
-	 * Address space identifier.
-	 * Constant on architectures that do not support ASIDs.
-	 * Protected by asidlock.
+	/** Address space identifier.
+	 *
+	 * Constant on architectures that do not
+	 * support ASIDs. Protected by asidlock.
+	 *
 	 */
 	asid_t asid;
 
-	/** Number of references (i.e tasks that reference this as). */
+	/** Number of references (i.e. tasks that reference this as). */
 	atomic_t refcount;
…
 typedef struct {
 	mutex_t lock;
+
 	/** Containing address space. */
 	as_t *as;
 
-	/**
-	 * Flags related to the memory represented by the address space area.
-	 */
+	/** Memory flags. */
 	unsigned int flags;
 
-	/** Attributes related to the address space area itself. */
+	/** Address space area attributes. */
 	unsigned int attributes;
-	/** Size of this area in multiples of PAGE_SIZE. */
+
+	/** Number of pages in the area. */
 	size_t pages;
+
+	/** Number of resident pages in the area. */
+	size_t resident;
+
 	/** Base address of this area. */
 	uintptr_t base;
+
 	/** Map of used space. */
 	btree_t used_space;
 
 	/**
-	 * If the address space area has been shared, this pointer will
-	 * reference the share info structure.
+	 * If the address space area is shared. this is
+	 * a reference to the share info structure.
 	 */
 	share_info_t *sh_info;
…
 extern bool as_area_check_access(as_area_t *, pf_access_t);
 extern size_t as_area_get_size(uintptr_t);
-extern int used_space_insert(as_area_t *, uintptr_t, size_t);
-extern int used_space_remove(as_area_t *, uintptr_t, size_t);
-
+extern bool used_space_insert(as_area_t *, uintptr_t, size_t);
+extern bool used_space_remove(as_area_t *, uintptr_t, size_t);
 
 /* Interface to be implemented by architectures. */
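The substantive change in this header is the new resident counter kept next to pages in as_area_t: pages is the reserved size of the area, while resident counts only the pages actually backed by frames, and used_space_insert()/used_space_remove() now report success as a bool. A minimal sketch of how the two counters translate to byte totals (hypothetical helper names, not part of the changeset; assumes mm/as.h is included and PAGE_WIDTH is the page-size shift used by stats.c below):

	/* Sketch only -- hypothetical helpers, not part of the changeset.
	 * Invariant: 0 <= area->resident <= area->pages. */
	static inline size_t area_virt_bytes(as_area_t *area)
	{
		return area->pages << PAGE_WIDTH;     /* reserved size in bytes */
	}
	
	static inline size_t area_res_bytes(as_area_t *area)
	{
		return area->resident << PAGE_WIDTH;  /* mapped size in bytes */
	}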
kernel/generic/src/mm/as.c
--- r86d7bfa
+++ rfc47885

  * Each architecture decides what functions will be used to carry out
  * address space operations such as creating or locking page tables.
- *
  */
 as_operations_t *as_operations = NULL;
 
-/**
- * Slab for as_t objects.
+/** Slab for as_t objects.
  *
  */
 static slab_cache_t *as_slab;
 
-/**
- * This lock serializes access to the ASID subsystem.
- * It protects:
+/** ASID subsystem lock.
+ *
+ * This lock protects:
  * - inactive_as_with_asid_head list
  * - as->asid for each as of the as_t type
…
 /**
- * This list contains address spaces that are not active on any
- * processor and that have valid ASID.
- *
+ * Inactive address spaces (on all processors)
+ * that have valid ASID.
  */
 LIST_INITIALIZE(inactive_as_with_asid_head);
…
 	mutex_initialize(&as->lock, MUTEX_PASSIVE);
 
-	int rc = as_constructor_arch(as, flags);
-
-	return rc;
+	return as_constructor_arch(as, flags);
 }
 
 NO_TRACE static size_t as_destructor(void *obj)
 {
-	as_t *as = (as_t *) obj;
-	return as_destructor_arch(as);
+	return as_destructor_arch((as_t *) obj);
 }
…
 		panic("Cannot create kernel address space.");
 
-	/* Make sure the kernel address space
+	/*
+	 * Make sure the kernel address space
 	 * reference count never drops to zero.
 	 */
…
 {
 	DEADLOCK_PROBE_INIT(p_asidlock);
 
 	ASSERT(as != AS);
 	ASSERT(atomic_get(&as->refcount) == 0);
…
 	 * lock its mutex.
 	 */
 
 	/*
 	 * We need to avoid deadlock between TLB shootdown and asidlock.
…
 	 * disabled to prevent nested context switches. We also depend on the
 	 * fact that so far no spinlocks are held.
-	 *
 	 */
 	preemption_disable();
…
 	spinlock_unlock(&asidlock);
 	interrupts_restore(ipl);
 
 
 	/*
…
 	 * The B+tree must be walked carefully because it is
 	 * also being destroyed.
-	 *
 	 */
 	bool cond = true;
…
 /** Hold a reference to an address space.
  *
- * Holding a reference to an address space prevents destruction of that address
- * space.
+ * Holding a reference to an address space prevents destruction
+ * of that address space.
  *
  * @param as Address space to be held.
…
 /** Release a reference to an address space.
  *
- * The last one to release a reference to an address space destroys the address
- * space.
+ * The last one to release a reference to an address space
+ * destroys the address space.
  *
  * @param as Address space to be released.
…
 	/*
 	 * We don't want any area to have conflicts with NULL page.
-	 *
 	 */
 	if (overlaps(va, size, (uintptr_t) NULL, PAGE_SIZE))
…
 	 * record in the left neighbour, the leftmost record in the right
 	 * neighbour and all records in the leaf node itself.
-	 *
 	 */
 	btree_node_t *leaf;
…
 	 * So far, the area does not conflict with other areas.
 	 * Check if it doesn't conflict with kernel address space.
-	 *
 	 */
 	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
…
 	area->attributes = attrs;
 	area->pages = SIZE2FRAMES(size);
+	area->resident = 0;
 	area->base = base;
 	area->sh_info = NULL;
…
 	 * to find out whether this is a miss or va belongs to an address
 	 * space area found there.
-	 *
 	 */
…
 	 * Second, locate the left neighbour and test its last record.
 	 * Because of its position in the B+tree, it must have base < va.
-	 *
 	 */
 	btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
…
 	/*
 	 * Locate the area.
-	 *
 	 */
 	as_area_t *area = find_area_and_lock(as, address);
…
 		 * Remapping of address space areas associated
 		 * with memory mapped devices is not supported.
-		 *
 		 */
 		mutex_unlock(&area->lock);
…
 		 * Remapping of shared address space areas
 		 * is not supported.
-		 *
 		 */
 		mutex_unlock(&area->lock);
…
 		/*
 		 * Zero size address space areas are not allowed.
-		 *
 		 */
 		mutex_unlock(&area->lock);
…
 		 * Shrinking the area.
 		 * No need to check for overlaps.
-		 *
 		 */
 
…
 		/*
 		 * Start TLB shootdown sequence.
-		 *
 		 */
 		ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid,
…
 		 * is also the right way to remove part of the used_space
 		 * B+tree leaf list.
-		 *
 		 */
 		bool cond = true;
…
 					 * completely in the resized
 					 * address space area.
-					 *
 					 */
 					break;
…
 					 * to b and c overlaps with the resized
 					 * address space area.
-					 *
 					 */
 
…
 		/*
 		 * Finish TLB shootdown sequence.
-		 *
 		 */
 
…
 		/*
 		 * Invalidate software translation caches (e.g. TSB on sparc64).
-		 *
 		 */
 		as_invalidate_translation_cache(as, area->base +
…
 		 * Growing the area.
 		 * Check for overlaps with other address space areas.
-		 *
 		 */
 		if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
…
 	/*
 	 * Finish TLB shootdown sequence.
-	 *
 	 */
 
…
 	 * Invalidate potential software translation caches (e.g. TSB on
 	 * sparc64).
-	 *
 	 */
 	as_invalidate_translation_cache(as, area->base, area->pages);
…
 	/*
 	 * Remove the empty area from address space.
-	 *
 	 */
 	btree_remove(&as->as_area_btree, base, NULL);
…
 		/*
 		 * Could not find the source address space area.
-		 *
 		 */
 		mutex_unlock(&src_as->lock);
…
 		 * There is no backend or the backend does not
 		 * know how to share the area.
-		 *
 		 */
 		mutex_unlock(&src_area->lock);
…
 	 * First, prepare the area for sharing.
 	 * Then it will be safe to unlock it.
-	 *
 	 */
 	share_info_t *sh_info = src_area->sh_info;
…
 		/*
 		 * Call the backend to setup sharing.
-		 *
 		 */
 		src_area->backend->share(src_area);
…
 	 * The flags of the source area are masked against dst_flags_mask
 	 * to support sharing in less privileged mode.
-	 *
 	 */
 	as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size,
…
 	 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
 	 * attribute and set the sh_info.
-	 *
 	 */
 	mutex_lock(&dst_as->lock);
…
 NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access)
 {
+	ASSERT(mutex_locked(&area->lock));
+
 	int flagmap[] = {
 		[PF_ACCESS_READ] = AS_AREA_READ,
 		[PF_ACCESS_WRITE] = AS_AREA_WRITE,
 		[PF_ACCESS_EXEC] = AS_AREA_EXEC
 	};
-
-	ASSERT(mutex_locked(&area->lock));
 
 	if (!(area->flags & flagmap[access]))
…
 	/*
 	 * Compute total number of used pages in the used_space B+tree
-	 *
 	 */
 	size_t used_pages = 0;
…
 	/*
 	 * Start TLB shootdown sequence.
-	 *
 	 */
 	ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
…
 	 * Remove used pages from page tables and remember their frame
 	 * numbers.
-	 *
 	 */
 	size_t frame_idx = 0;
…
 	/*
 	 * Finish TLB shootdown sequence.
-	 *
 	 */
 
…
 	 * Invalidate potential software translation caches (e.g. TSB on
 	 * sparc64).
-	 *
 	 */
 	as_invalidate_translation_cache(as, area->base, area->pages);
…
 		 * No area contained mapping for 'page'.
 		 * Signal page fault to low-level handler.
-		 *
 		 */
 		mutex_unlock(&AS->lock);
…
 		 * The address space area is not backed by any backend
 		 * or the backend cannot handle page faults.
-		 *
 		 */
 		mutex_unlock(&area->lock);
…
 	 * To avoid race condition between two page faults on the same address,
 	 * we need to make sure the mapping has not been already inserted.
-	 *
 	 */
 	pte_t *pte;
…
 	/*
 	 * Resort to the backend page fault handler.
-	 *
 	 */
 	if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
…
 		 * preemption is disabled. We should not be
 		 * holding any other lock.
-		 *
 		 */
 		(void) interrupts_enable();
…
 		 * list of inactive address spaces with assigned
 		 * ASID.
-		 *
 		 */
 		ASSERT(old_as->asid != ASID_INVALID);
…
 		 * Perform architecture-specific tasks when the address space
 		 * is being removed from the CPU.
-		 *
 		 */
 		as_deinstall_arch(old_as);
…
 	/*
 	 * Second, prepare the new address space.
-	 *
 	 */
 	if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
…
 	 * Perform architecture-specific steps.
 	 * (e.g. write ASID to hardware register etc.)
-	 *
 	 */
 	as_install_arch(new_as);
…
 {
 	ASSERT(mutex_locked(&area->lock));
 
 	return area_flags_to_page_flags(area->flags);
 }
…
  * @param count Number of page to be marked.
  *
- * @return Zero on failure and non-zero on success.
- *
- */
-int used_space_insert(as_area_t *area, uintptr_t page, size_t count)
+ * @return False on failure or true on success.
+ *
+ */
+bool used_space_insert(as_area_t *area, uintptr_t page, size_t count)
 {
 	ASSERT(mutex_locked(&area->lock));
…
 		/*
 		 * We hit the beginning of some used space.
-		 *
-		 */
-		return 0;
+		 */
+		return false;
 	}
 
 	if (!leaf->keys) {
 		btree_insert(&area->used_space, page, (void *) count, leaf);
-		return 1;
+		goto success;
 	}
…
 		 * somewhere between the rightmost interval of
 		 * the left neigbour and the first interval of the leaf.
-		 *
 		 */
 
…
 		    left_cnt * PAGE_SIZE)) {
 			/* The interval intersects with the left interval. */
-			return 0;
+			return false;
 		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
 		    right_cnt * PAGE_SIZE)) {
 			/* The interval intersects with the right interval. */
-			return 0;
+			return false;
 		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
 		    (page + count * PAGE_SIZE == right_pg)) {
…
 			 * The interval can be added by merging the two already
 			 * present intervals.
-			 *
 			 */
 			node->value[node->keys - 1] += count + right_cnt;
 			btree_remove(&area->used_space, right_pg, leaf);
-			return 1;
+			goto success;
 		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
 			/*
 			 * The interval can be added by simply growing the left
 			 * interval.
-			 *
 			 */
 			node->value[node->keys - 1] += count;
-			return 1;
+			goto success;
 		} else if (page + count * PAGE_SIZE == right_pg) {
 			/*
…
 			 * the right interval down and increasing its size
 			 * accordingly.
-			 *
 			 */
 			leaf->value[0] += count;
 			leaf->key[0] = page;
-			return 1;
+			goto success;
 		} else {
 			/*
 			 * The interval is between both neigbouring intervals,
 			 * but cannot be merged with any of them.
-			 *
 			 */
 			btree_insert(&area->used_space, page, (void *) count,
 			    leaf);
-			return 1;
+			goto success;
 		}
 	} else if (page < leaf->key[0]) {
…
 		 * Investigate the border case in which the left neighbour does
 		 * not exist but the interval fits from the left.
-		 *
 		 */
 
…
 		    right_cnt * PAGE_SIZE)) {
 			/* The interval intersects with the right interval. */
-			return 0;
+			return false;
 		} else if (page + count * PAGE_SIZE == right_pg) {
 			/*
…
 			 * right interval down and increasing its size
 			 * accordingly.
-			 *
 			 */
 			leaf->key[0] = page;
 			leaf->value[0] += count;
-			return 1;
+			goto success;
 		} else {
 			/*
 			 * The interval doesn't adjoin with the right interval.
 			 * It must be added individually.
-			 *
 			 */
 			btree_insert(&area->used_space, page, (void *) count,
 			    leaf);
-			return 1;
+			goto success;
 		}
 	}
…
 		 * somewhere between the leftmost interval of
 		 * the right neigbour and the last interval of the leaf.
-		 *
 		 */
 
…
 		    left_cnt * PAGE_SIZE)) {
 			/* The interval intersects with the left interval. */
-			return 0;
+			return false;
 		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
 		    right_cnt * PAGE_SIZE)) {
 			/* The interval intersects with the right interval. */
-			return 0;
+			return false;
 		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
 		    (page + count * PAGE_SIZE == right_pg)) {
…
 			 * The interval can be added by merging the two already
 			 * present intervals.
-			 *
 			 */
 			leaf->value[leaf->keys - 1] += count + right_cnt;
 			btree_remove(&area->used_space, right_pg, node);
-			return 1;
+			goto success;
 		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
 			/*
 			 * The interval can be added by simply growing the left
 			 * interval.
-			 *
 			 */
 			leaf->value[leaf->keys - 1] += count;
-			return 1;
+			goto success;
 		} else if (page + count * PAGE_SIZE == right_pg) {
 			/*
…
 			 * the right interval down and increasing its size
 			 * accordingly.
-			 *
 			 */
 			node->value[0] += count;
 			node->key[0] = page;
-			return 1;
+			goto success;
 		} else {
 			/*
 			 * The interval is between both neigbouring intervals,
 			 * but cannot be merged with any of them.
-			 *
 			 */
 			btree_insert(&area->used_space, page, (void *) count,
 			    leaf);
-			return 1;
+			goto success;
 		}
 	} else if (page >= leaf->key[leaf->keys - 1]) {
…
 		 * Investigate the border case in which the right neighbour
 		 * does not exist but the interval fits from the right.
-		 *
 		 */
 
…
 		    left_cnt * PAGE_SIZE)) {
 			/* The interval intersects with the left interval. */
-			return 0;
+			return false;
 		} else if (left_pg + left_cnt * PAGE_SIZE == page) {
 			/*
 			 * The interval can be added by growing the left
 			 * interval.
-			 *
 			 */
 			leaf->value[leaf->keys - 1] += count;
-			return 1;
+			goto success;
 		} else {
 			/*
 			 * The interval doesn't adjoin with the left interval.
 			 * It must be added individually.
-			 *
 			 */
 			btree_insert(&area->used_space, page, (void *) count,
 			    leaf);
-			return 1;
+			goto success;
 		}
 	}
…
 	 * only between two other intervals of the leaf. The two border cases
 	 * were already resolved.
-	 *
 	 */
 	btree_key_t i;
…
 			/*
 			 * The interval fits between left_pg and right_pg.
-			 *
 			 */
 
…
 				 * The interval intersects with the left
 				 * interval.
-				 *
 				 */
-				return 0;
+				return false;
 			} else if (overlaps(page, count * PAGE_SIZE, right_pg,
 			    right_cnt * PAGE_SIZE)) {
…
 				 * The interval intersects with the right
 				 * interval.
-				 *
 				 */
-				return 0;
+				return false;
 			} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
 			    (page + count * PAGE_SIZE == right_pg)) {
…
 				 * The interval can be added by merging the two
 				 * already present intervals.
-				 *
 				 */
 				leaf->value[i - 1] += count + right_cnt;
 				btree_remove(&area->used_space, right_pg, leaf);
-				return 1;
+				goto success;
 			} else if (page == left_pg + left_cnt * PAGE_SIZE) {
 				/*
 				 * The interval can be added by simply growing
 				 * the left interval.
-				 *
 				 */
 				leaf->value[i - 1] += count;
-				return 1;
+				goto success;
 			} else if (page + count * PAGE_SIZE == right_pg) {
 				/*
…
 				 * base of the right interval down and
 				 * increasing its size accordingly.
-				 *
 				 */
 				leaf->value[i] += count;
 				leaf->key[i] = page;
-				return 1;
+				goto success;
 			} else {
 				/*
…
 				 * intervals, but cannot be merged with any of
 				 * them.
-				 *
 				 */
 				btree_insert(&area->used_space, page,
 				    (void *) count, leaf);
-				return 1;
+				goto success;
 			}
 		}
…
 	panic("Inconsistency detected while adding %zu pages of used "
 	    "space at %p.", count, (void *) page);
+
+success:
+	area->resident += count;
+	return true;
 }
…
  * @param count Number of page to be marked.
  *
- * @return Zero on failure and non-zero on success.
- *
- */
-int used_space_remove(as_area_t *area, uintptr_t page, size_t count)
+ * @return False on failure or true on success.
+ *
+ */
+bool used_space_remove(as_area_t *area, uintptr_t page, size_t count)
 {
 	ASSERT(mutex_locked(&area->lock));
…
 		/*
 		 * We are lucky, page is the beginning of some interval.
-		 *
 		 */
 		if (count > pages) {
-			return 0;
+			return false;
 		} else if (count == pages) {
 			btree_remove(&area->used_space, page, leaf);
-			return 1;
+			goto success;
 		} else {
 			/*
 			 * Find the respective interval.
 			 * Decrease its size and relocate its start address.
-			 *
 			 */
 			btree_key_t i;
…
 					leaf->key[i] += count * PAGE_SIZE;
 					leaf->value[i] -= count;
-					return 1;
+					goto success;
 				}
 			}
+
 			goto error;
 		}
 	}
…
 				 * removed by updating the size of the bigger
 				 * interval.
-				 *
 				 */
 				node->value[node->keys - 1] -= count;
-				return 1;
+				goto success;
 			} else if (page + count * PAGE_SIZE <
 			    left_pg + left_cnt*PAGE_SIZE) {
…
 				 * the original interval and also inserting a
 				 * new interval.
-				 *
 				 */
 				size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
…
 				btree_insert(&area->used_space, page +
 				    count * PAGE_SIZE, (void *) new_cnt, leaf);
-				return 1;
+				goto success;
 			}
 		}
-		return 0;
+
+		return false;
 	} else if (page < leaf->key[0])
-		return 0;
+		return false;
 
 	if (page > leaf->key[leaf->keys - 1]) {
…
 				 * interval of the leaf and can be removed by
 				 * updating the size of the bigger interval.
-				 *
 				 */
 				leaf->value[leaf->keys - 1] -= count;
-				return 1;
+				goto success;
 			} else if (page + count * PAGE_SIZE < left_pg +
 			    left_cnt * PAGE_SIZE) {
…
 				 * original interval and also inserting a new
 				 * interval.
-				 *
 				 */
 				size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
…
 				btree_insert(&area->used_space, page +
 				    count * PAGE_SIZE, (void *) new_cnt, leaf);
-				return 1;
+				goto success;
 			}
 		}
-		return 0;
+
+		return false;
 	}
 
 	/*
 	 * The border cases have been already resolved.
 	 * Now the interval can be only between intervals of the leaf.
 	 */
 	btree_key_t i;
…
 				 * be removed by updating the size of
 				 * the bigger interval.
-				 *
 				 */
 				leaf->value[i - 1] -= count;
-				return 1;
+				goto success;
 			} else if (page + count * PAGE_SIZE <
 			    left_pg + left_cnt * PAGE_SIZE) {
…
 					    count * PAGE_SIZE, (void *) new_cnt,
 					    leaf);
-					return 1;
+					goto success;
 				}
 			}
-			return 0;
+
+			return false;
 		}
 	}
…
 	panic("Inconsistency detected while removing %zu pages of used "
 	    "space from %p.", count, (void *) page);
+
+success:
+	area->resident -= count;
+	return true;
 }
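Note the pattern used throughout used_space_insert() and used_space_remove() above: every successful branch that previously did "return 1" now jumps to a single success: label, which is the only place the new resident counter is adjusted, so failure paths can never leave it out of sync with the used_space B+tree. A standalone, compilable sketch of that exit-label shape (toy types and a one-interval "used space" invented for illustration; not the kernel code):

	#include <assert.h>
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>
	
	/* Toy stand-in for as_area_t: one growing interval instead of a B+tree. */
	typedef struct {
		size_t resident;   /* mirrors as_area_t->resident */
		uintptr_t next;    /* first address past the tracked interval */
	} toy_area_t;
	
	static bool toy_used_space_insert(toy_area_t *area, uintptr_t page,
	    size_t count)
	{
		if (page < area->next)
			return false;   /* overlap: fail without touching resident */
	
		if (page == area->next) {
			area->next += count * 4096;   /* merge with the interval */
			goto success;
		}
	
		area->next = page + count * 4096;     /* start a new interval */
		goto success;
	
	success:
		/* Single exit point for every successful path above. */
		area->resident += count;
		return true;
	}
	
	int main(void)
	{
		toy_area_t area = { 0, 0 };
	
		assert(toy_used_space_insert(&area, 0, 4));
		assert(!toy_used_space_insert(&area, 0, 1));    /* overlaps */
		printf("resident pages: %zu\n", area.resident); /* prints 4 */
		return 0;
	}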
kernel/generic/src/sysinfo/stats.c
--- r86d7bfa
+++ rfc47885

 static size_t get_task_virtmem(as_t *as)
 {
-	size_t result = 0;
-
 	/*
-	 * We are holding some spinlocks here and therefore are not allowed to
-	 * block. Only attempt to lock the address space and address space area
-	 * mutexes conditionally. If it is not possible to lock either object,
-	 * allow the statistics to be inexact by skipping the respective object.
-	 *
-	 * Note that it may be infinitely better to let the address space
-	 * management code compute these statistics as it proceeds instead of
-	 * having them calculated over and over again here.
+	 * We are holding spinlocks here and therefore are not allowed to
+	 * block. Only attempt to lock the address space and address space
+	 * area mutexes conditionally. If it is not possible to lock either
+	 * object, return inexact statistics by skipping the respective object.
 	 */
 
 	if (SYNCH_FAILED(mutex_trylock(&as->lock)))
-		return result * PAGE_SIZE;
+		return 0;
+
+	size_t pages = 0;
 
 	/* Walk the B+ tree and count pages */
-	link_t *cur;
-	for (cur = as->as_area_btree.leaf_head.next;
-	    cur != &as->as_area_btree.leaf_head; cur = cur->next) {
-		btree_node_t *node =
-		    list_get_instance(cur, btree_node_t, leaf_link);
-
-		unsigned int i;
-		for (i = 0; i < node->keys; i++) {
-			as_area_t *area = node->value[i];
-
-			if (SYNCH_FAILED(mutex_trylock(&area->lock)))
-				continue;
-			result += area->pages;
-			mutex_unlock(&area->lock);
-		}
-	}
-
-	mutex_unlock(&as->lock);
-
-	return result * PAGE_SIZE;
-}
-
-/** Get the resident (used) size of a virtual address space
- *
- * @param as Address space.
- *
- * @return Size of the resident (used) virtual address space (bytes).
- *
- */
-static size_t get_task_resmem(as_t *as)
-{
-	size_t result = 0;
-
-	/*
-	 * We are holding some spinlocks here and therefore are not allowed to
-	 * block. Only attempt to lock the address space and address space area
-	 * mutexes conditionally. If it is not possible to lock either object,
-	 * allow the statistics to be inexact by skipping the respective object.
-	 *
-	 * Note that it may be infinitely better to let the address space
-	 * management code compute these statistics as it proceeds instead of
-	 * having them calculated over and over again here.
-	 */
-
-	if (SYNCH_FAILED(mutex_trylock(&as->lock)))
-		return result * PAGE_SIZE;
-
-	/* Walk the B+ tree of AS areas */
 	link_t *cur;
 	for (cur = as->as_area_btree.leaf_head.next;
…
 				continue;
 
-			/* Walk the B+ tree of resident pages */
-			link_t *rcur;
-			for (rcur = area->used_space.leaf_head.next;
-			    rcur != &area->used_space.leaf_head; rcur = rcur->next) {
-				btree_node_t *rnode =
-				    list_get_instance(rcur, btree_node_t, leaf_link);
-
-				unsigned int j;
-				for (j = 0; j < rnode->keys; j++)
-					result += (size_t) rnode->value[i];
-			}
-
+			pages += area->pages;
 			mutex_unlock(&area->lock);
 		}
…
 	mutex_unlock(&as->lock);
 
-	return result * PAGE_SIZE;
+	return (pages << PAGE_WIDTH);
+}
+
+/** Get the resident (used) size of a virtual address space
+ *
+ * @param as Address space.
+ *
+ * @return Size of the resident (used) virtual address space (bytes).
+ *
+ */
+static size_t get_task_resmem(as_t *as)
+{
+	/*
+	 * We are holding spinlocks here and therefore are not allowed to
+	 * block. Only attempt to lock the address space and address space
+	 * area mutexes conditionally. If it is not possible to lock either
+	 * object, return inexact statistics by skipping the respective object.
+	 */
+
+	if (SYNCH_FAILED(mutex_trylock(&as->lock)))
+		return 0;
+
+	size_t pages = 0;
+
+	/* Walk the B+ tree and count pages */
+	link_t *cur;
+	for (cur = as->as_area_btree.leaf_head.next;
+	    cur != &as->as_area_btree.leaf_head; cur = cur->next) {
+		btree_node_t *node =
+		    list_get_instance(cur, btree_node_t, leaf_link);
+
+		unsigned int i;
+		for (i = 0; i < node->keys; i++) {
+			as_area_t *area = node->value[i];
+
+			if (SYNCH_FAILED(mutex_trylock(&area->lock)))
+				continue;
+
+			pages += area->resident;
+			mutex_unlock(&area->lock);
+		}
+	}
+
+	mutex_unlock(&as->lock);
+
+	return (pages << PAGE_WIDTH);
 }
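With area->resident maintained incrementally by used_space_insert()/used_space_remove(), get_task_resmem() now only walks the areas themselves instead of every node of each area's used_space B+tree (this also retires the old inner loop, which indexed rnode->value with the outer counter i rather than j). Both getters now return bytes via a shift: since the page size is a power of two, pages << PAGE_WIDTH equals pages * PAGE_SIZE. A standalone sketch with an assumed 4 KiB page (values invented for illustration; the kernel takes these constants from its architecture headers):

	#include <stddef.h>
	#include <stdio.h>
	
	/* Assumed values for illustration only. */
	#define PAGE_WIDTH  12
	#define PAGE_SIZE   (1 << PAGE_WIDTH)
	
	int main(void)
	{
		size_t pages = 300;  /* invented resident page count */
	
		/* Both expressions yield 1228800 bytes (300 * 4096). */
		printf("%zu\n", (size_t) (pages << PAGE_WIDTH));
		printf("%zu\n", (size_t) (pages * PAGE_SIZE));
		return 0;
	}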