Changes in kernel/generic/src/mm/as.c [59fb782:560b81c] in mainline
Files: 1 edited
kernel/generic/src/mm/as.c (modified) (33 diffs)
Legend:
- Unmodified lines have no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
kernel/generic/src/mm/as.c
--- r59fb782
+++ r560b81c

@@ -488 +488 @@

 	/* Eventually check the addresses behind each area */
-	list_foreach(as->as_area_btree.leaf_list, cur) {
-		btree_node_t *node =
-		    list_get_instance(cur, btree_node_t, leaf_link);
+	list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, node) {

 		for (btree_key_t i = 0; i < node->keys; i++) {

@@ -522 +520 @@
 }

+/** Remove reference to address space area share info.
+ *
+ * If the reference count drops to 0, the sh_info is deallocated.
+ *
+ * @param sh_info Pointer to address space area share info.
+ *
+ */
+NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info)
+{
+	bool dealloc = false;
+
+	mutex_lock(&sh_info->lock);
+	ASSERT(sh_info->refcount);
+
+	if (--sh_info->refcount == 0) {
+		dealloc = true;
+
+		/*
+		 * Now walk carefully the pagemap B+tree and free/remove
+		 * reference from all frames found there.
+		 */
+		list_foreach(sh_info->pagemap.leaf_list, leaf_link,
+		    btree_node_t, node) {
+			btree_key_t i;
+
+			for (i = 0; i < node->keys; i++)
+				frame_free((uintptr_t) node->value[i], 1);
+		}
+
+	}
+	mutex_unlock(&sh_info->lock);
+
+	if (dealloc) {
+		if (sh_info->backend && sh_info->backend->destroy_shared_data) {
+			sh_info->backend->destroy_shared_data(
+			    sh_info->backend_shared_data);
+		}
+		btree_destroy(&sh_info->pagemap);
+		free(sh_info);
+	}
+}
+
+
 /** Create address space area of common attributes.
  *

@@ -531 +572 @@
  * @param attrs        Attributes of the area.
  * @param backend      Address space area backend. NULL if no backend is used.
- * @param backend_data NULL or a pointer to an array holding two void *.
+ * @param backend_data NULL or a pointer to custom backend data.
  * @param base         Starting virtual address of the area.
- *                     If set to -1, a suitable mappable area is found.
- * @param bound        Lowest address bound if base is set to -1.
+ *                     If set to AS_AREA_ANY, a suitable mappable area is
+ *                     found.
+ * @param bound        Lowest address bound if base is set to AS_AREA_ANY.
  *                     Otherwise ignored.
  *

@@ -544 +586 @@
     mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound)
 {
-	if ((*base != (uintptr_t) -1) && !IS_ALIGNED(*base, PAGE_SIZE))
+	if ((*base != (uintptr_t) AS_AREA_ANY) && !IS_ALIGNED(*base, PAGE_SIZE))
 		return NULL;


@@ -560 +602 @@
 	mutex_lock(&as->lock);

-	if (*base == (uintptr_t) -1) {
+	if (*base == (uintptr_t) AS_AREA_ANY) {
 		*base = as_get_unmapped_area(as, bound, size, guarded);
 		if (*base == (uintptr_t) -1) {

@@ -568 +610 @@
 	}

-	if (overflows_into_positive(*base, size))
+	if (overflows_into_positive(*base, size)) {
+		mutex_unlock(&as->lock);
 		return NULL;
+	}

 	if (!check_area_conflicts(as, *base, pages, guarded, NULL)) {

@@ -586 +630 @@
 	area->resident = 0;
 	area->base = *base;
+	area->backend = backend;
 	area->sh_info = NULL;
-	area->backend = backend;

 	if (backend_data)
 		area->backend_data = *backend_data;
 	else
 		memsetb(&area->backend_data, sizeof(area->backend_data), 0);

+	share_info_t *si = NULL;
+
+	/*
+	 * Create the sharing info structure.
+	 * We do this in advance for every new area, even if it is not going
+	 * to be shared.
+	 */
+	if (!(attrs & AS_AREA_ATTR_PARTIAL)) {
+		si = (share_info_t *) malloc(sizeof(share_info_t), 0);
+		mutex_initialize(&si->lock, MUTEX_PASSIVE);
+		si->refcount = 1;
+		si->shared = false;
+		si->backend_shared_data = NULL;
+		si->backend = backend;
+		btree_create(&si->pagemap);
+
+		area->sh_info = si;
+
+		if (area->backend && area->backend->create_shared_data) {
+			if (!area->backend->create_shared_data(area)) {
+				free(area);
+				mutex_unlock(&as->lock);
+				sh_info_remove_reference(si);
+				return NULL;
+			}
+		}
+	}
+
 	if (area->backend && area->backend->create) {
 		if (!area->backend->create(area)) {
 			free(area);
 			mutex_unlock(&as->lock);
+			if (!(attrs & AS_AREA_ATTR_PARTIAL))
+				sh_info_remove_reference(si);
 			return NULL;
 		}
 	}

 	btree_create(&area->used_space);
 	btree_insert(&as->as_area_btree, *base, (void *) area,
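The hunks above rework the lifetime of share_info_t: as_area_create() now allocates a sh_info with refcount 1 for every non-partial area, and the relocated sh_info_remove_reference() decides under sh_info->lock whether the count reached zero (releasing the pagemap frames there), but performs the final btree_destroy() and free() only after dropping the lock. Below is a minimal, self-contained sketch of that "decide under the lock, tear down after unlocking" refcount pattern; it uses pthreads and invented names (shared_obj_t, shared_obj_unref), not HelenOS APIs.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

/* Illustrative analogue of share_info_t: a refcounted shared object. */
typedef struct {
	pthread_mutex_t lock;
	unsigned int refcount;
} shared_obj_t;

static shared_obj_t *shared_obj_create(void)
{
	shared_obj_t *obj = malloc(sizeof(*obj));
	if (obj == NULL)
		return NULL;
	pthread_mutex_init(&obj->lock, NULL);
	obj->refcount = 1;  /* the creator holds the first reference */
	return obj;
}

static void shared_obj_ref(shared_obj_t *obj)
{
	pthread_mutex_lock(&obj->lock);
	obj->refcount++;
	pthread_mutex_unlock(&obj->lock);
}

/* Mirrors the shape of sh_info_remove_reference(): only the decision is
 * made while the lock is held; destroying the mutex and freeing the
 * object happen after the lock has been released. */
static void shared_obj_unref(shared_obj_t *obj)
{
	bool dealloc = false;

	pthread_mutex_lock(&obj->lock);
	if (--obj->refcount == 0)
		dealloc = true;
	pthread_mutex_unlock(&obj->lock);

	if (dealloc) {
		pthread_mutex_destroy(&obj->lock);
		free(obj);
	}
}

int main(void)
{
	shared_obj_t *obj = shared_obj_create();
	if (obj == NULL)
		return 1;
	shared_obj_ref(obj);    /* e.g. a second area starts sharing */
	shared_obj_unref(obj);  /* one holder goes away */
	shared_obj_unref(obj);  /* last reference: the object is freed */
	return 0;
}

Deferring the free matters because a lock must not be destroyed or deallocated while it is still held; in the kernel version the same split also keeps btree_destroy() and the backend's destroy_shared_data() callback out of the critical section.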
@@ -711 +785 @@
 	}

-	if (area->sh_info) {
+	mutex_lock(&area->sh_info->lock);
+	if (area->sh_info->shared) {
 		/*
 		 * Remapping of shared address space areas
 		 * is not supported.
 		 */
+		mutex_unlock(&area->sh_info->lock);
 		mutex_unlock(&area->lock);
 		mutex_unlock(&as->lock);
 		return ENOTSUP;
 	}
+	mutex_unlock(&area->sh_info->lock);

 	size_t pages = SIZE2FRAMES((address - area->base) + size);

@@ -758 +835 @@
 		if ((cond = (bool) node->keys)) {
 			uintptr_t ptr = node->key[node->keys - 1];
-			size_t size =
+			size_t node_size =
 			    (size_t) node->value[node->keys - 1];
 			size_t i = 0;

-			if (overlaps(ptr, P2SZ(size), area->base,
+			if (overlaps(ptr, P2SZ(node_size), area->base,
 			    P2SZ(pages))) {

-				if (ptr + P2SZ(size) <= start_free) {
+				if (ptr + P2SZ(node_size) <= start_free) {
 					/*
 					 * The whole interval fits

@@ -784 +861 @@
 					i = (start_free - ptr) >> PAGE_WIDTH;
 					if (!used_space_remove(area, start_free,
-					    size - i))
+					    node_size - i))
 						panic("Cannot remove used space.");
 				} else {

@@ -791 +868 @@
 					 * completely removed.
 					 */
-					if (!used_space_remove(area, ptr, size))
+					if (!used_space_remove(area, ptr, node_size))
 						panic("Cannot remove used space.");
 				}
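Because every area now owns a sh_info, the NULL test that used to guard as_area_resize() is gone; the first hunk above instead takes sh_info->lock, refuses the resize only when sh_info->shared is set, and on that error path releases the locks innermost first (sh_info, then area, then as). The remaining hunks rename the local size to node_size, presumably so the per-interval page count read from the B+tree node no longer shadows the function's own size argument. A small stand-alone sketch of the nested-lock error path follows, with pthread mutexes standing in for the kernel's mutex_t and all names invented:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

/* Illustrative stand-ins for as->lock, area->lock and area->sh_info. */
static pthread_mutex_t as_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t area_lock = PTHREAD_MUTEX_INITIALIZER;

typedef struct {
	pthread_mutex_t lock;
	bool shared;
} share_state_t;

static share_state_t share_state = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.shared = false,
};

/* Refuse the operation only when the area is actually shared, releasing
 * the innermost lock first on the error path. */
static int resize_area(void)
{
	pthread_mutex_lock(&as_lock);
	pthread_mutex_lock(&area_lock);

	pthread_mutex_lock(&share_state.lock);
	if (share_state.shared) {
		pthread_mutex_unlock(&share_state.lock);
		pthread_mutex_unlock(&area_lock);
		pthread_mutex_unlock(&as_lock);
		return ENOTSUP;
	}
	pthread_mutex_unlock(&share_state.lock);

	/* ... the actual resize work would go here ... */

	pthread_mutex_unlock(&area_lock);
	pthread_mutex_unlock(&as_lock);
	return 0;
}

int main(void)
{
	return resize_area();
}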
@@ -811 +888 @@
 				    area->pages - pages);

-				for (; i < size; i++) {
-					pte_t *pte = page_mapping_find(as,
-					    ptr + P2SZ(i), false);
+				for (; i < node_size; i++) {
+					pte_t pte;
+					bool found = page_mapping_find(as,
+					    ptr + P2SZ(i), false, &pte);

-					ASSERT(pte);
-					ASSERT(PTE_VALID(pte));
-					ASSERT(PTE_PRESENT(pte));
+					ASSERT(found);
+					ASSERT(PTE_VALID(&pte));
+					ASSERT(PTE_PRESENT(&pte));

 					if ((area->backend) &&

@@ -823 +901 @@
 						area->backend->frame_free(area,
 						    ptr + P2SZ(i),
-						    PTE_GET_FRAME(pte));
+						    PTE_GET_FRAME(&pte));
 					}


@@ -883 +961 @@
 }

-/** Remove reference to address space area share info.
- *
- * If the reference count drops to 0, the sh_info is deallocated.
- *
- * @param sh_info Pointer to address space area share info.
- *
- */
-NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info)
-{
-	bool dealloc = false;
-
-	mutex_lock(&sh_info->lock);
-	ASSERT(sh_info->refcount);
-
-	if (--sh_info->refcount == 0) {
-		dealloc = true;
-
-		/*
-		 * Now walk carefully the pagemap B+tree and free/remove
-		 * reference from all frames found there.
-		 */
-		list_foreach(sh_info->pagemap.leaf_list, cur) {
-			btree_node_t *node
-			    = list_get_instance(cur, btree_node_t, leaf_link);
-			btree_key_t i;
-
-			for (i = 0; i < node->keys; i++)
-				frame_free((uintptr_t) node->value[i]);
-		}
-
-	}
-	mutex_unlock(&sh_info->lock);
-
-	if (dealloc) {
-		btree_destroy(&sh_info->pagemap);
-		free(sh_info);
-	}
-}
-
 /** Destroy address space area.

@@ -956 +995 @@
 	 * Visit only the pages mapped by used_space B+tree.
 	 */
-	list_foreach(area->used_space.leaf_list, cur) {
-		btree_node_t *node;
+	list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
+	    node) {
 		btree_key_t i;

-		node = list_get_instance(cur, btree_node_t, leaf_link);
 		for (i = 0; i < node->keys; i++) {
 			uintptr_t ptr = node->key[i];

@@ -966 +1004 @@

 			for (size = 0; size < (size_t) node->value[i]; size++) {
-				pte_t *pte = page_mapping_find(as,
-				    ptr + P2SZ(size), false);
+				pte_t pte;
+				bool found = page_mapping_find(as,
+				    ptr + P2SZ(size), false, &pte);

-				ASSERT(pte);
-				ASSERT(PTE_VALID(pte));
-				ASSERT(PTE_PRESENT(pte));
+				ASSERT(found);
+				ASSERT(PTE_VALID(&pte));
+				ASSERT(PTE_PRESENT(&pte));

 				if ((area->backend) &&

@@ -977 +1016 @@
 					area->backend->frame_free(area,
 					    ptr + P2SZ(size),
-					    PTE_GET_FRAME(pte));
+					    PTE_GET_FRAME(&pte));
 				}


@@ -1004 +1043 @@
 	area->attributes |= AS_AREA_ATTR_PARTIAL;

-	if (area->sh_info)
-		sh_info_remove_reference(area->sh_info);
+	sh_info_remove_reference(area->sh_info);

 	mutex_unlock(&area->lock);

@@ -1092 +1130 @@
 	 */
 	share_info_t *sh_info = src_area->sh_info;
-	if (!sh_info) {
-		sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
-		mutex_initialize(&sh_info->lock, MUTEX_PASSIVE);
-		sh_info->refcount = 2;
-		btree_create(&sh_info->pagemap);
-		src_area->sh_info = sh_info;

+	mutex_lock(&sh_info->lock);
+	sh_info->refcount++;
+	bool shared = sh_info->shared;
+	sh_info->shared = true;
+	mutex_unlock(&sh_info->lock);
+
+	if (!shared) {
 		/*
 		 * Call the backend to setup sharing.
+		 * This only happens once for each sh_info.
 		 */
 		src_area->backend->share(src_area);
-	} else {
-		mutex_lock(&sh_info->lock);
-		sh_info->refcount++;
-		mutex_unlock(&sh_info->lock);
 	}

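Several hunks above switch callers of page_mapping_find() from receiving a pte_t * that points into the page table to passing in a pte_t of their own and getting back a bool; the entry is copied out, so the caller never dereferences page-table memory after the lookup returns, which is also why PTE_VALID(), PTE_PRESENT() and PTE_GET_FRAME() now take &pte. A self-contained sketch of the same out-parameter convention, using an invented fake_pte_t and a toy table instead of real paging structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative PTE and a toy "page table": an array indexed by page number. */
typedef struct {
	bool present;
	uintptr_t frame;
} fake_pte_t;

#define FAKE_TABLE_SIZE 16
static fake_pte_t fake_table[FAKE_TABLE_SIZE];

/* New-style lookup: on success copy the entry into *pte and return true,
 * instead of returning a pointer into the table itself. */
static bool fake_mapping_find(size_t page, fake_pte_t *pte)
{
	if (page >= FAKE_TABLE_SIZE || !fake_table[page].present)
		return false;

	*pte = fake_table[page];
	return true;
}

int main(void)
{
	fake_table[3] = (fake_pte_t) { .present = true, .frame = 0x1234000 };

	fake_pte_t pte;
	bool found = fake_mapping_find(3, &pte);
	if (found && pte.present)
		printf("frame: %#lx\n", (unsigned long) pte.frame);

	return 0;
}

The same calling convention appears again in as_area_change_flags() and the page fault path further down in this changeset.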
@@ -1225 +1261 @@
 	}

-	if ((area->sh_info) || (area->backend != &anon_backend)) {
-		/* Copying shared areas not supported yet */
+	if (area->backend != &anon_backend) {
 		/* Copying non-anonymous memory not supported yet */
 		mutex_unlock(&area->lock);

@@ -1232 +1267 @@
 		return ENOTSUP;
 	}
+
+	mutex_lock(&area->sh_info->lock);
+	if (area->sh_info->shared) {
+		/* Copying shared areas not supported yet */
+		mutex_unlock(&area->sh_info->lock);
+		mutex_unlock(&area->lock);
+		mutex_unlock(&as->lock);
+		return ENOTSUP;
+	}
+	mutex_unlock(&area->sh_info->lock);

 	/*

@@ -1238 +1283 @@
 	size_t used_pages = 0;

-	list_foreach(area->used_space.leaf_list, cur) {
-		btree_node_t *node
-		    = list_get_instance(cur, btree_node_t, leaf_link);
+	list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
+	    node) {
 		btree_key_t i;


@@ -1264 +1308 @@
 	size_t frame_idx = 0;

-	list_foreach(area->used_space.leaf_list, cur) {
-		btree_node_t *node = list_get_instance(cur, btree_node_t,
-		    leaf_link);
+	list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
+	    node) {
 		btree_key_t i;


@@ -1274 +1317 @@

 		for (size = 0; size < (size_t) node->value[i]; size++) {
-			pte_t *pte = page_mapping_find(as,
-			    ptr + P2SZ(size), false);
+			pte_t pte;
+			bool found = page_mapping_find(as,
+			    ptr + P2SZ(size), false, &pte);

-			ASSERT(pte);
-			ASSERT(PTE_VALID(pte));
-			ASSERT(PTE_PRESENT(pte));
+			ASSERT(found);
+			ASSERT(PTE_VALID(&pte));
+			ASSERT(PTE_PRESENT(&pte));

-			old_frame[frame_idx++] = PTE_GET_FRAME(pte);
+			old_frame[frame_idx++] = PTE_GET_FRAME(&pte);

 			/* Remove old mapping */

@@ -1316 +1360 @@
 	frame_idx = 0;

-	list_foreach(area->used_space.leaf_list, cur) {
-		btree_node_t *node
-		    = list_get_instance(cur, btree_node_t, leaf_link);
+	list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
+	    node) {
 		btree_key_t i;


@@ -1412 +1455 @@
 	 * we need to make sure the mapping has not been already inserted.
 	 */
-	pte_t *pte;
-	if ((pte = page_mapping_find(AS, page, false))) {
-		if (PTE_PRESENT(pte)) {
-			if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
-			    (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
-			    (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
-				page_table_unlock(AS, false);
-				mutex_unlock(&area->lock);
-				mutex_unlock(&AS->lock);
-				return AS_PF_OK;
-			}
+	pte_t pte;
+	bool found = page_mapping_find(AS, page, false, &pte);
+	if (found && PTE_PRESENT(&pte)) {
+		if (((access == PF_ACCESS_READ) && PTE_READABLE(&pte)) ||
+		    (access == PF_ACCESS_WRITE && PTE_WRITABLE(&pte)) ||
+		    (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(&pte))) {
+			page_table_unlock(AS, false);
+			mutex_unlock(&area->lock);
+			mutex_unlock(&AS->lock);
+			return AS_PF_OK;
 		}
 	}
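The last hunk above is the page-fault fast path: with the copied-out entry, the handler now tests found && PTE_PRESENT(&pte) and returns AS_PF_OK when the existing mapping already grants the faulting access. A compact stand-alone sketch of that "is the fault already satisfied?" test, with invented flag bits and access constants (the real PTE_READABLE(), PTE_WRITABLE() and PTE_EXECUTABLE() are architecture-specific macros):

#include <stdbool.h>
#include <stdint.h>

/* Invented access types and PTE flag bits, for illustration only. */
typedef enum { PF_READ, PF_WRITE, PF_EXEC } access_t;

enum {
	FLAG_PRESENT    = 1 << 0,
	FLAG_READABLE   = 1 << 1,
	FLAG_WRITABLE   = 1 << 2,
	FLAG_EXECUTABLE = 1 << 3,
};

/* Return true when an already-present mapping satisfies the faulting
 * access, i.e. the fault was spurious and no new mapping is needed. */
static bool fault_already_satisfied(uint32_t pte_flags, access_t access)
{
	if (!(pte_flags & FLAG_PRESENT))
		return false;

	return ((access == PF_READ) && (pte_flags & FLAG_READABLE)) ||
	    ((access == PF_WRITE) && (pte_flags & FLAG_WRITABLE)) ||
	    ((access == PF_EXEC) && (pte_flags & FLAG_EXECUTABLE));
}

int main(void)
{
	uint32_t flags = FLAG_PRESENT | FLAG_READABLE;
	return fault_already_satisfied(flags, PF_WRITE) ? 1 : 0;
}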
@@ -1686 +1728 @@
 	ASSERT(count);

-	btree_node_t *leaf;
+	btree_node_t *leaf = NULL;
 	size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
 	if (pages) {

@@ -1694 +1736 @@
 		return false;
 	}
+
+	ASSERT(leaf != NULL);

 	if (!leaf->keys) {

@@ -2141 +2185 @@

 sysarg_t sys_as_area_create(uintptr_t base, size_t size, unsigned int flags,
-    uintptr_t bound)
+    uintptr_t bound, as_area_pager_info_t *pager_info)
 {
 	uintptr_t virt = base;
+	mem_backend_t *backend;
+	mem_backend_data_t backend_data;
+
+	if (pager_info == AS_AREA_UNPAGED)
+		backend = &anon_backend;
+	else {
+		backend = &user_backend;
+		if (copy_from_uspace(&backend_data.pager_info, pager_info,
+		    sizeof(as_area_pager_info_t)) != EOK) {
+			return (sysarg_t) AS_MAP_FAILED;
+		}
+	}
 	as_area_t *area = as_area_create(AS, flags, size,
-	    AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, bound);
+	    AS_AREA_ATTR_NONE, backend, &backend_data, &virt, bound);
 	if (area == NULL)
-		return (sysarg_t) -1;
+		return (sysarg_t) AS_MAP_FAILED;

 	return (sysarg_t) virt;

@@ -2182 +2238 @@
 	size_t area_cnt = 0;

-	list_foreach(as->as_area_btree.leaf_list, cur) {
-		btree_node_t *node =
-		    list_get_instance(cur, btree_node_t, leaf_link);
+	list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
+	    node) {
 		area_cnt += node->keys;
 	}

@@ -2195 +2250 @@
 	size_t area_idx = 0;

-	list_foreach(as->as_area_btree.leaf_list, cur) {
-		btree_node_t *node =
-		    list_get_instance(cur, btree_node_t, leaf_link);
+	list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
+	    node) {
 		btree_key_t i;


@@ -2231 +2285 @@

 	/* Print out info about address space areas */
-	list_foreach(as->as_area_btree.leaf_list, cur) {
-		btree_node_t *node
-		    = list_get_instance(cur, btree_node_t, leaf_link);
+	list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
+	    node) {
 		btree_key_t i;

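Most of the mechanical churn in this changeset is the four-argument list_foreach(list, member, type, iterator), which declares a typed iterator itself instead of requiring an explicit list_get_instance() call in every loop body, as in the hunks above. The sketch below shows one way such a type-aware macro can be built over an intrusive linked list; the list types and the macro body are illustrative guesses, not the actual HelenOS definitions.

#include <stddef.h>
#include <stdio.h>

/* Minimal intrusive list, loosely modelled on the kernel's link_t/list_t. */
typedef struct link {
	struct link *next;
} link_t;

typedef struct {
	link_t head;   /* sentinel; head.next == &head means empty */
} list_t;

#define list_get_instance(link, type, member) \
	((type *) (((char *) (link)) - offsetof(type, member)))

/* A type-aware foreach in the spirit of list_foreach(list, member, type,
 * iterator): the outer loop walks the raw links, the inner single-pass
 * loop declares the typed iterator for the body. (Sketch only.) */
#define list_foreach(list, member, itype, iterator) \
	for (link_t *_l = (list).head.next; _l != &(list).head; _l = _l->next) \
		for (itype *iterator = list_get_instance(_l, itype, member), \
		    *_once = iterator; _once != NULL; _once = NULL)

typedef struct {
	int value;
	link_t link;
} item_t;

int main(void)
{
	item_t a = { .value = 1 }, b = { .value = 2 };
	list_t list = { .head = { .next = &a.link } };
	a.link.next = &b.link;
	b.link.next = &list.head;

	list_foreach(list, link, item_t, it)
		printf("%d\n", it->value);

	return 0;
}

One trade-off of the nested-loop shape used in this sketch is that break only leaves the inner single-pass loop rather than the whole iteration; a real implementation has to take that into account.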