Changeset a35b458 in mainline for kernel/generic/src/mm/as.c
- Timestamp: 2018-03-02T20:10:49Z (7 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: f1380b7
- Parents: 3061bc1
- git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2018-02-28 17:38:31)
- git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2018-03-02 20:10:49)
- File: kernel/generic/src/mm/as.c (1 edited)
kernel/generic/src/mm/as.c
Diff r3061bc1 → ra35b458. Both revisions show identical line numbering in every hunk, so the edits touch only whitespace (trailing whitespace removed); each hunk is therefore shown once below, with `…` marking lines elided by the changeset viewer.

```c
{
    as_t *as = (as_t *) obj;

    link_initialize(&as->inactive_as_with_asid_link);
    mutex_initialize(&as->lock, MUTEX_PASSIVE);

    return as_constructor_arch(as, flags);
}
…
{
    as_arch_init();

    as_cache = slab_cache_create("as_t", sizeof(as_t), 0,
        as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);

    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("Cannot create kernel address space.");

    /*
     * Make sure the kernel address space
…
    as_t *as = (as_t *) slab_alloc(as_cache, 0);
    (void) as_create_arch(as, 0);

    btree_create(&as->as_area_btree);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    atomic_set(&as->refcount, 0);
    as->cpu_refcount = 0;

#ifdef AS_PAGE_TABLE
    as->genarch.page_table = page_table_create(flags);
#else
    page_table_create(flags);
#endif

    return as;
}
…
{
    DEADLOCK_PROBE_INIT(p_asidlock);

    assert(as != AS);
    assert(atomic_get(&as->refcount) == 0);

    /*
     * Since there is no reference to this address space, it is safe not to
     * lock its mutex.
     */

    /*
     * We need to avoid deadlock between TLB shootdown and asidlock.
…
    preemption_disable();
    ipl_t ipl = interrupts_read();

retry:
    interrupts_disable();
…
        goto retry;
    }

    /* Interrupts disabled, enable preemption */
    preemption_enable();

    if ((as->asid != ASID_INVALID) && (as != AS_KERNEL)) {
        if (as->cpu_refcount == 0)
            list_remove(&as->inactive_as_with_asid_link);

        asid_put(as->asid);
    }

    spinlock_unlock(&asidlock);
    interrupts_restore(ipl);

    /*
     * Destroy address space areas of the address space.
…
    while (cond) {
        assert(!list_empty(&as->as_area_btree.leaf_list));

        btree_node_t *node =
            list_get_instance(list_first(&as->as_area_btree.leaf_list),
            btree_node_t, leaf_link);

        if ((cond = node->keys))
            as_area_destroy(as, node->key[0]);
    }

    btree_destroy(&as->as_area_btree);

#ifdef AS_PAGE_TABLE
    page_table_destroy(as->genarch.page_table);
#else
    page_table_destroy(NULL);
#endif

    slab_free(as_cache, as);
}
```
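The `as_t` lifecycle above follows the usual slab split: once-per-object setup (link and mutex initialization) lives in the cache constructor, while `as_create()` only fills in per-instance state. A minimal user-space sketch of that split, with invented names (`obj_cache_t`, `as_like_t`) and plain `malloc()` standing in for the kernel slab allocator:

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for slab_cache_create()/slab_alloc(). */
typedef struct {
    size_t size;
    int (*ctor)(void *);   /* runs when backing memory is first allocated */
} obj_cache_t;

typedef struct {
    int lock_initialized;  /* models mutex_initialize() in as_constructor() */
    int asid;              /* models per-instance state set in as_create() */
} as_like_t;

static int as_like_ctor(void *obj)
{
    ((as_like_t *) obj)->lock_initialized = 1;
    return 0;
}

static void *cache_alloc(obj_cache_t *cache)
{
    void *obj = malloc(cache->size);
    if (obj && cache->ctor)
        cache->ctor(obj);  /* the constructor, not the caller, preps the object */
    return obj;
}

int main(void)
{
    obj_cache_t cache = { sizeof(as_like_t), as_like_ctor };
    as_like_t *as = cache_alloc(&cache);
    as->asid = -1;         /* as_create() analog: instance state only */
    printf("lock ready: %d, asid: %d\n", as->lock_initialized, as->asid);
    free(as);
}
```

In the real slab allocator the constructor cost is amortized further, because freed objects return to the cache still constructed.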
```c
    if (overflows_into_positive(addr, P2SZ(count)))
        return false;

    /*
     * We don't want any area to have conflicts with NULL page.
…
        return false;
    }

    /* First, check the two border cases. */
    btree_node_t *node =
        btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
    if (node) {
        area = (as_area_t *) node->value[node->keys - 1];

        if (area != avoid) {
            mutex_lock(&area->lock);
…
            int const gp = (guarded ||
                (area->flags & AS_AREA_GUARD)) ? 1 : 0;

            /*
             * The area comes from the left neighbour node, which
…
                return false;
            }

            mutex_unlock(&area->lock);
        }
    }

    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
    if (node) {
        area = (as_area_t *) node->value[0];

        if (area != avoid) {
            int gp;
…
                gp--;
            }

            if (overlaps(addr, P2SZ(count + gp), area->base,
                P2SZ(area->pages))) {
…
                return false;
            }

            mutex_unlock(&area->lock);
        }
    }

    /* Second, check the leaf node. */
    btree_key_t i;
…
        int agp;
        int gp;

        if (area == avoid)
            continue;

        mutex_lock(&area->lock);
…
            return false;
        }

        mutex_unlock(&area->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
…
            addr, P2SZ(count));
    }

    return true;
}
…
{
    assert(mutex_locked(&as->lock));

    if (size == 0)
        return (uintptr_t) -1;

    /*
     * Make sure we allocate from page-aligned
…
     * each step.
     */

    size_t pages = SIZE2FRAMES(size);

    /*
     * Find the lowest unmapped address aligned on the size
     * boundary, not smaller than bound and of the required size.
     */

    /* First check the bound address itself */
    uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
…
        return addr;
    }

    /* Eventually check the addresses behind each area */
    list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, node) {

        for (btree_key_t i = 0; i < node->keys; i++) {
            as_area_t *area = (as_area_t *) node->value[i];

            mutex_lock(&area->lock);

            addr =
                ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE);
…
                ((addr >= bound) && (addr >= area->base) &&
                (check_area_conflicts(as, addr, pages, guarded, area)));

            mutex_unlock(&area->lock);

            if (avail)
                return addr;
        }
    }

    /* No suitable address space area found */
    return (uintptr_t) -1;
…
{
    bool dealloc = false;

    mutex_lock(&sh_info->lock);
    assert(sh_info->refcount);

    if (--sh_info->refcount == 0) {
        dealloc = true;

        /*
         * Now walk carefully the pagemap B+tree and free/remove
…
            btree_node_t, node) {
            btree_key_t i;

            for (i = 0; i < node->keys; i++)
                frame_free((uintptr_t) node->value[i], 1);
        }

    }
    mutex_unlock(&sh_info->lock);

    if (dealloc) {
        if (sh_info->backend && sh_info->backend->destroy_shared_data) {
```
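`check_area_conflicts()` reduces every test to an interval predicate, widening the candidate interval by one page (`gp`) when either side carries `AS_AREA_GUARD`, so guarded areas keep an unmapped page between neighbours. A self-contained sketch of that predicate; `PAGE_SIZE`, `P2SZ` and the direction of widening are defined locally here for illustration, while the kernel applies the guard page per neighbour direction:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096
#define P2SZ(pages) ((uintptr_t) (pages) * PAGE_SIZE)

/* Interval overlap test, as used for area conflict checks. */
static bool overlaps(uintptr_t s1, uintptr_t sz1, uintptr_t s2, uintptr_t sz2)
{
    return (s1 < s2 + sz2) && (s2 < s1 + sz1);
}

int main(void)
{
    uintptr_t area_base = 0x40000000;
    size_t area_pages = 4;
    bool guarded = true;

    /* Candidate mapping right behind the area: adjacent, not overlapping. */
    uintptr_t addr = area_base + P2SZ(area_pages);
    size_t count = 2;
    int gp = guarded ? 1 : 0;

    printf("no guard: conflict=%d\n",
        overlaps(addr, P2SZ(count), area_base, P2SZ(area_pages)));
    /* With a guard page the widened interval now collides. */
    printf("guarded:  conflict=%d\n",
        overlaps(addr - P2SZ(gp), P2SZ(count + gp), area_base,
        P2SZ(area_pages)));
}
```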
```c
    if ((*base != (uintptr_t) AS_AREA_ANY) && !IS_ALIGNED(*base, PAGE_SIZE))
        return NULL;

    if (size == 0)
        return NULL;

    size_t pages = SIZE2FRAMES(size);

    /* Writeable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
…

    bool const guarded = flags & AS_AREA_GUARD;

    mutex_lock(&as->lock);

    if (*base == (uintptr_t) AS_AREA_ANY) {
        *base = as_get_unmapped_area(as, bound, size, guarded);
…
        return NULL;
    }

    as_area_t *area = (as_area_t *) malloc(sizeof(as_area_t), 0);

    mutex_initialize(&area->lock, MUTEX_PASSIVE);

    area->as = as;
    area->flags = flags;
…
    area->backend = backend;
    area->sh_info = NULL;

    if (backend_data)
        area->backend_data = *backend_data;
…

    area->sh_info = si;

    if (area->backend && area->backend->create_shared_data) {
        if (!area->backend->create_shared_data(area)) {
…
    btree_insert(&as->as_area_btree, *base, (void *) area,
        NULL);

    mutex_unlock(&as->lock);

    return area;
}
…
{
    assert(mutex_locked(&as->lock));

    btree_node_t *leaf;
    as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va,
…
        return area;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
…
     * space area found there.
     */

    /* First, search the leaf node itself. */
    btree_key_t i;

    for (i = 0; i < leaf->keys; i++) {
        area = (as_area_t *) leaf->value[i];

        mutex_lock(&area->lock);
…
            (va <= area->base + (P2SZ(area->pages) - 1)))
            return area;

        mutex_unlock(&area->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
…
    if (lnode) {
        area = (as_area_t *) lnode->value[lnode->keys - 1];

        mutex_lock(&area->lock);

        if (va <= area->base + (P2SZ(area->pages) - 1))
            return area;

        mutex_unlock(&area->lock);
    }

    return NULL;
}
```
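`find_area_and_lock()` keys the B+tree by area base address, so resolving an arbitrary address means finding the greatest base not exceeding it and then range-checking against that area's size. The same idea over a sorted array, with a hypothetical `area_t`, as a sketch:

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096

typedef struct {
    uintptr_t base;   /* page-aligned start, the search key */
    size_t pages;     /* length in pages */
} area_t;

/* Find the area containing va: greatest base <= va, then a bounds check. */
static area_t *find_area(area_t *areas, size_t n, uintptr_t va)
{
    area_t *cand = NULL;
    for (size_t i = 0; i < n && areas[i].base <= va; i++)
        cand = &areas[i];  /* areas[] is sorted by base */

    if (cand && va <= cand->base + (uintptr_t) cand->pages * PAGE_SIZE - 1)
        return cand;
    return NULL;
}

int main(void)
{
    area_t areas[] = {
        { 0x10000, 4 },   /* covers 0x10000 - 0x13fff */
        { 0x20000, 2 },   /* covers 0x20000 - 0x21fff */
    };
    printf("%p\n", (void *) find_area(areas, 2, 0x13ab0)); /* first area */
    printf("%p\n", (void *) find_area(areas, 2, 0x15000)); /* NULL: hole */
}
```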
```c
    mutex_lock(&as->lock);

    /*
     * Locate the area.
…
        return ENOTSUP;
    }

    mutex_lock(&area->sh_info->lock);
    if (area->sh_info->shared) {
…
    }
    mutex_unlock(&area->sh_info->lock);

    size_t pages = SIZE2FRAMES((address - area->base) + size);
    if (!pages) {
…
        return EPERM;
    }

    if (pages < area->pages) {
        uintptr_t start_free = area->base + P2SZ(pages);

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */

        page_table_lock(as, false);

        /*
         * Remove frames belonging to used space starting from
…
        while (cond) {
            assert(!list_empty(&area->used_space.leaf_list));

            btree_node_t *node =
                list_get_instance(list_last(&area->used_space.leaf_list),
                btree_node_t, leaf_link);

            if ((cond = (node->keys != 0))) {
                uintptr_t ptr = node->key[node->keys - 1];
…
                    (size_t) node->value[node->keys - 1];
                size_t i = 0;

                if (overlaps(ptr, P2SZ(node_size), area->base,
                    P2SZ(pages))) {

                    if (ptr + P2SZ(node_size) <= start_free) {
                        /*
…
                        break;
                    }

                    /*
                     * Part of the interval corresponding
…
                     * address space area.
                     */

                    /* We are almost done */
                    cond = false;
…
                        panic("Cannot remove used space.");
                }

                /*
                 * Start TLB shootdown sequence.
…
                    as->asid, area->base + P2SZ(pages),
                    area->pages - pages);

                for (; i < node_size; i++) {
                    pte_t pte;
                    bool found = page_mapping_find(as,
                        ptr + P2SZ(i), false, &pte);

                    assert(found);
                    assert(PTE_VALID(&pte));
                    assert(PTE_PRESENT(&pte));

                    if ((area->backend) &&
                        (area->backend->frame_free)) {
…
                            PTE_GET_FRAME(&pte));
                    }

                    page_mapping_remove(as, ptr + P2SZ(i));
                }

                /*
                 * Finish TLB shootdown sequence.
                 */

                tlb_invalidate_pages(as->asid,
                    area->base + P2SZ(pages),
                    area->pages - pages);

                /*
                 * Invalidate software translation caches
…
            }
        }

    if (area->backend && area->backend->resize) {
        if (!area->backend->resize(area, pages)) {
…
        }
    }

    area->pages = pages;

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);

    return 0;
}
```
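Both the shrink path above and the destroy path below bracket page-table mutation in a `tlb_shootdown_start()` … `tlb_invalidate_pages()` … `tlb_shootdown_finalize()` window, so stale translations on other CPUs are flushed before frames are reused. A user-space caricature of that begin/mutate/finalize ordering, with invented names (`shootdown_begin` and friends), purely to show the discipline, not the kernel's IPI machinery:

```c
#include <stdio.h>

static int in_shootdown;  /* models "a shootdown sequence is open" */

static int shootdown_begin(void)
{
    in_shootdown = 1;
    printf("1. announce range to other CPUs, disable interrupts\n");
    return 0;  /* models the saved ipl_t returned by tlb_shootdown_start() */
}

static void mapping_remove(unsigned page)
{
    if (!in_shootdown)
        printf("BUG: page table mutated outside shootdown window\n");
    printf("2. remove mapping for page %u\n", page);
}

static void shootdown_finalize(int ipl)
{
    printf("3. invalidate local TLB, wait for other CPUs, restore ipl=%d\n",
        ipl);
    in_shootdown = 0;
}

int main(void)
{
    int ipl = shootdown_begin();  /* tlb_shootdown_start() analog */
    mapping_remove(7);            /* page_mapping_remove() analog */
    shootdown_finalize(ipl);      /* tlb_shootdown_finalize() analog */
}
```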
```c
…
{
    mutex_lock(&as->lock);

    as_area_t *area = find_area_and_lock(as, address);
    if (!area) {
…
    if (area->backend && area->backend->destroy)
        area->backend->destroy(area);

    uintptr_t base = area->base;

    page_table_lock(as, false);

    /*
     * Start TLB shootdown sequence.
…
    ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
        area->pages);

    /*
     * Visit only the pages mapped by used_space B+tree.
…
        node) {
        btree_key_t i;

        for (i = 0; i < node->keys; i++) {
            uintptr_t ptr = node->key[i];
            size_t size;

            for (size = 0; size < (size_t) node->value[i]; size++) {
                pte_t pte;
                bool found = page_mapping_find(as,
                    ptr + P2SZ(size), false, &pte);

                assert(found);
                assert(PTE_VALID(&pte));
                assert(PTE_PRESENT(&pte));

                if ((area->backend) &&
                    (area->backend->frame_free)) {
…
                        PTE_GET_FRAME(&pte));
                }

                page_mapping_remove(as, ptr + P2SZ(size));
            }
        }
    }

    /*
     * Finish TLB shootdown sequence.
     */

    tlb_invalidate_pages(as->asid, area->base, area->pages);

    /*
     * Invalidate potential software translation caches
…
    as_invalidate_translation_cache(as, area->base, area->pages);
    tlb_shootdown_finalize(ipl);

    page_table_unlock(as, false);

    btree_destroy(&area->used_space);

    area->attributes |= AS_AREA_ATTR_PARTIAL;

    sh_info_remove_reference(area->sh_info);

    mutex_unlock(&area->lock);

    /*
     * Remove the empty area from address space.
     */
    btree_remove(&as->as_area_btree, base, NULL);

    free(area);

    mutex_unlock(&as->lock);
    return 0;
}
…
        return ENOENT;
    }

    if (!src_area->backend->is_shareable(src_area)) {
        /*
…
        return ENOTSUP;
    }

    size_t src_size = P2SZ(src_area->pages);
    unsigned int src_flags = src_area->flags;
    mem_backend_t *src_backend = src_area->backend;
    mem_backend_data_t src_backend_data = src_area->backend_data;

    /* Share the cacheable flag from the original mapping */
    if (src_flags & AS_AREA_CACHEABLE)
        dst_flags_mask |= AS_AREA_CACHEABLE;

    if ((src_size != acc_size) ||
        ((src_flags & dst_flags_mask) != dst_flags_mask)) {
…
        return EPERM;
    }

    /*
     * Now we are committed to sharing the area.
…
     */
    share_info_t *sh_info = src_area->sh_info;

    mutex_lock(&sh_info->lock);
    sh_info->refcount++;
…
        src_area->backend->share(src_area);
    }

    mutex_unlock(&src_area->lock);
    mutex_unlock(&src_as->lock);

    /*
     * Create copy of the source address space area.
…
         */
        sh_info_remove_reference(sh_info);

        return ENOMEM;
    }

    /*
     * Now the destination address space area has been
…
    mutex_unlock(&dst_area->lock);
    mutex_unlock(&dst_as->lock);

    return 0;
}
…
{
    assert(mutex_locked(&area->lock));

    int flagmap[] = {
        [PF_ACCESS_READ] = AS_AREA_READ,
…
        [PF_ACCESS_EXEC] = AS_AREA_EXEC
    };

    if (!(area->flags & flagmap[access]))
        return false;

    return true;
}
…
{
    unsigned int flags = PAGE_USER | PAGE_PRESENT;

    if (aflags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (aflags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (aflags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    if (aflags & AS_AREA_CACHEABLE)
        flags |= PAGE_CACHEABLE;

    return flags;
}
```
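`area_flags_to_page_flags()` is a pure bit-mapping from area-level permissions to page-table entry flags, shown in full directly above. A standalone rendition with the constants defined locally (the values are illustrative, not the kernel's):

```c
#include <stdio.h>

/* Illustrative flag values; the kernel defines its own in its mm headers. */
enum {
    AS_AREA_READ = 1 << 0, AS_AREA_WRITE = 1 << 1,
    AS_AREA_EXEC = 1 << 2, AS_AREA_CACHEABLE = 1 << 3,
};
enum {
    PAGE_PRESENT = 1 << 0, PAGE_USER = 1 << 1, PAGE_READ = 1 << 2,
    PAGE_WRITE = 1 << 3, PAGE_EXEC = 1 << 4, PAGE_CACHEABLE = 1 << 5,
};

static unsigned int area_flags_to_page_flags(unsigned int aflags)
{
    unsigned int flags = PAGE_USER | PAGE_PRESENT;

    if (aflags & AS_AREA_READ)
        flags |= PAGE_READ;
    if (aflags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;
    if (aflags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;
    if (aflags & AS_AREA_CACHEABLE)
        flags |= PAGE_CACHEABLE;

    return flags;
}

int main(void)
{
    /* R+W cacheable data area: expect user|present|read|write|cacheable. */
    printf("0x%x\n", area_flags_to_page_flags(
        AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE));
}
```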
```c
    /* Flags for the new memory mapping */
    unsigned int page_flags = area_flags_to_page_flags(flags);

    mutex_lock(&as->lock);

    as_area_t *area = find_area_and_lock(as, address);
    if (!area) {
…
        return ENOENT;
    }

    if (area->backend != &anon_backend) {
        /* Copying non-anonymous memory not supported yet */
…
    }
    mutex_unlock(&area->sh_info->lock);

    /*
     * Compute total number of used pages in the used_space B+tree
     */
    size_t used_pages = 0;

    list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
        node) {
        btree_key_t i;

        for (i = 0; i < node->keys; i++)
            used_pages += (size_t) node->value[i];
    }

    /* An array for storing frame numbers */
    uintptr_t *old_frame = malloc(used_pages * sizeof(uintptr_t), 0);

    page_table_lock(as, false);

    /*
     * Start TLB shootdown sequence.
…
    ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
        area->pages);

    /*
     * Remove used pages from page tables and remember their frame
…
     */
    size_t frame_idx = 0;

    list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
        node) {
        btree_key_t i;

        for (i = 0; i < node->keys; i++) {
            uintptr_t ptr = node->key[i];
            size_t size;

            for (size = 0; size < (size_t) node->value[i]; size++) {
                pte_t pte;
                bool found = page_mapping_find(as,
                    ptr + P2SZ(size), false, &pte);

                assert(found);
                assert(PTE_VALID(&pte));
                assert(PTE_PRESENT(&pte));

                old_frame[frame_idx++] = PTE_GET_FRAME(&pte);

                /* Remove old mapping */
                page_mapping_remove(as, ptr + P2SZ(size));
            }
        }
    }

    /*
     * Finish TLB shootdown sequence.
     */

    tlb_invalidate_pages(as->asid, area->base, area->pages);

    /*
     * Invalidate potential software translation caches
…
    as_invalidate_translation_cache(as, area->base, area->pages);
    tlb_shootdown_finalize(ipl);

    page_table_unlock(as, false);

    /*
     * Set the new flags.
     */
    area->flags = flags;

    /*
     * Map pages back in with new flags. This step is kept separate
…
     */
    frame_idx = 0;

    list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
        node) {
        btree_key_t i;

        for (i = 0; i < node->keys; i++) {
            uintptr_t ptr = node->key[i];
            size_t size;

            for (size = 0; size < (size_t) node->value[i]; size++) {
                page_table_lock(as, false);

                /* Insert the new mapping */
                page_mapping_insert(as, ptr + P2SZ(size),
                    old_frame[frame_idx++], page_flags);

                page_table_unlock(as, false);
            }
        }
    }

    free(old_frame);

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);

    return 0;
}
```
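The change-flags path is a two-pass algorithm: first walk the resident set, stash each frame and tear down its mapping, then walk it again and reinsert the same frames under the new flags. A compact model of that pattern over plain arrays (the frame values and variable names are made up):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NPAGES 4

int main(void)
{
    /* mapping[i] == frame backing page i, 0 meaning "not mapped" */
    uintptr_t mapping[NPAGES] = { 0x1000, 0x2000, 0x3000, 0x4000 };
    unsigned flags = 0x3;            /* old flags, say read|write */

    uintptr_t *old_frame = malloc(NPAGES * sizeof(uintptr_t));
    size_t idx = 0;

    /* Pass 1: remember frames, remove mappings. */
    for (size_t i = 0; i < NPAGES; i++) {
        old_frame[idx++] = mapping[i];
        mapping[i] = 0;
    }

    flags = 0x1;                     /* new flags, say read-only */

    /* Pass 2: reinsert the very same frames under the new flags. */
    idx = 0;
    for (size_t i = 0; i < NPAGES; i++)
        mapping[i] = old_frame[idx++];

    printf("page 2 -> frame 0x%lx, flags 0x%x\n",
        (unsigned long) mapping[2], flags);
    free(old_frame);
}
```

Keeping removal and reinsertion separate means the page tables never hold a mixture of old-flag and new-flag entries for the area while the shootdown window is open.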
```c
    if (!THREAD)
        goto page_fault;

    if (!AS)
        goto page_fault;

    mutex_lock(&AS->lock);
    as_area_t *area = find_area_and_lock(AS, page);
…
        goto page_fault;
    }

    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
        /*
…
        goto page_fault;
    }

    if ((!area->backend) || (!area->backend->page_fault)) {
        /*
…
        goto page_fault;
    }

    page_table_lock(AS, false);

    /*
     * To avoid race condition between two page faults on the same address,
…
        }
    }

    /*
     * Resort to the backend page fault handler.
…
        goto page_fault;
    }

    page_table_unlock(AS, false);
    mutex_unlock(&area->lock);
    mutex_unlock(&AS->lock);
    return AS_PF_OK;

page_fault:
    if (THREAD->in_copy_from_uspace) {
…
        panic_memtrap(istate, access, address, NULL);
    }

    return AS_PF_DEFER;
}
…
    DEADLOCK_PROBE_INIT(p_asidlock);
    preemption_disable();

retry:
    (void) interrupts_disable();
…
    }
    preemption_enable();

    /*
     * First, take care of the old address space.
…
    if (old_as) {
        assert(old_as->cpu_refcount);

        if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
            /*
…
             */
            assert(old_as->asid != ASID_INVALID);

            list_append(&old_as->inactive_as_with_asid_link,
                &inactive_as_with_asid_list);
        }

        /*
         * Perform architecture-specific tasks when the address space
…
        as_deinstall_arch(old_as);
    }

    /*
     * Second, prepare the new address space.
…
        new_as->asid = asid_get();
    }

#ifdef AS_PAGE_TABLE
    SET_PTL0_ADDRESS(new_as->genarch.page_table);
#endif

    /*
     * Perform architecture-specific steps.
…
     */
    as_install_arch(new_as);

    spinlock_unlock(&asidlock);

    AS = new_as;
}
…
{
    assert(mutex_locked(&area->lock));

    return area_flags_to_page_flags(area->flags);
}
…
    assert(as_operations);
    assert(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
}
…
    assert(as_operations);
    assert(as_operations->page_table_destroy);

    as_operations->page_table_destroy(page_table);
}
…
    assert(as_operations);
    assert(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
}
…
    assert(as_operations);
    assert(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
}
…
{
    size_t size;

    page_table_lock(AS, true);
    as_area_t *src_area = find_area_and_lock(AS, base);

    if (src_area) {
        size = P2SZ(src_area->pages);
…
    } else
        size = 0;

    page_table_unlock(AS, true);
    return size;
```
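The `page_table_*` wrappers show the dispatch pattern used throughout this file: a global `as_operations` table of function pointers selected by the architecture, asserted non-NULL before every call. A toy version of that indirection (the ops names and types here are invented):

```c
#include <assert.h>
#include <stdio.h>

/* Architecture-specific operations, selected once at boot. */
typedef struct {
    void (*page_table_lock)(int as_id, int lock);
    void (*page_table_unlock)(int as_id, int unlock);
} as_ops_t;

static void pt_lock_4lvl(int as_id, int lock)
{
    printf("lock(as=%d, take_mutex=%d)\n", as_id, lock);
}

static void pt_unlock_4lvl(int as_id, int unlock)
{
    printf("unlock(as=%d, release_mutex=%d)\n", as_id, unlock);
}

static const as_ops_t pt_ops_4lvl = { pt_lock_4lvl, pt_unlock_4lvl };
static const as_ops_t *as_operations = &pt_ops_4lvl;  /* arch init picks this */

/* Generic wrapper: assert the hook exists, then forward to it. */
static void page_table_lock(int as_id, int lock)
{
    assert(as_operations);
    assert(as_operations->page_table_lock);

    as_operations->page_table_lock(as_id, lock);
}

int main(void)
{
    page_table_lock(1, 1);
    as_operations->page_table_unlock(1, 1);
}
```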
```c
    assert(IS_ALIGNED(page, PAGE_SIZE));
    assert(count);

    btree_node_t *leaf = NULL;
    size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
…

    assert(leaf != NULL);

    if (!leaf->keys) {
        btree_insert(&area->used_space, page, (void *) count, leaf);
        goto success;
    }

    btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf);
    if (node) {
…
        size_t left_cnt = (size_t) node->value[node->keys - 1];
        size_t right_cnt = (size_t) leaf->value[0];

        /*
         * Examine the possibility that the interval fits
…
         * the left neigbour and the first interval of the leaf.
         */

        if (page >= right_pg) {
            /* Do nothing. */
…
        uintptr_t right_pg = leaf->key[0];
        size_t right_cnt = (size_t) leaf->value[0];

        /*
         * Investigate the border case in which the left neighbour does
         * not exist but the interval fits from the left.
         */

        if (overlaps(page, P2SZ(count), right_pg, P2SZ(right_cnt))) {
            /* The interval intersects with the right interval. */
…
        }
    }

    node = btree_leaf_node_right_neighbour(&area->used_space, leaf);
    if (node) {
…
        size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
        size_t right_cnt = (size_t) node->value[0];

        /*
         * Examine the possibility that the interval fits
…
         * the right neigbour and the last interval of the leaf.
         */

        if (page < left_pg) {
            /* Do nothing. */
…
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
        size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];

        /*
         * Investigate the border case in which the right neighbour
         * does not exist but the interval fits from the right.
         */

        if (overlaps(page, P2SZ(count), left_pg, P2SZ(left_cnt))) {
            /* The interval intersects with the left interval. */
…
        }
    }

    /*
     * Note that if the algorithm made it thus far, the interval can fit
…
        size_t left_cnt = (size_t) leaf->value[i - 1];
        size_t right_cnt = (size_t) leaf->value[i];

        /*
         * The interval fits between left_pg and right_pg.
         */

        if (overlaps(page, P2SZ(count), left_pg,
            P2SZ(left_cnt))) {
…
        }
    }

    panic("Inconsistency detected while adding %zu pages of used "
        "space at %p.", count, (void *) page);

success:
    area->resident += count;
```
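`used_space_insert()` keeps the resident set as disjoint `[page, page + count)` runs and coalesces a new run with its left and right neighbours whenever they touch. The same bookkeeping on a small sorted array, which is easier to follow than the B+tree border cases; `interval_t` and the merging helper are invented for this sketch:

```c
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uintptr_t page;   /* first page of the run */
    size_t count;     /* run length in pages */
} interval_t;

/* Insert [page, page+count) into a sorted set, merging touching runs. */
static size_t insert_coalesce(interval_t *set, size_t n, uintptr_t page,
    size_t count)
{
    size_t i = 0;
    while (i < n && set[i].page + set[i].count < page)
        i++;

    if (i < n && set[i].page + set[i].count == page) {
        /* Touches the left neighbour: extend it... */
        set[i].count += count;
        /* ...and maybe swallow the right neighbour too. */
        if (i + 1 < n && set[i].page + set[i].count == set[i + 1].page) {
            set[i].count += set[i + 1].count;
            for (size_t j = i + 1; j + 1 < n; j++)
                set[j] = set[j + 1];
            n--;
        }
        return n;
    }

    if (i < n && page + count == set[i].page) {
        /* Touches only the right neighbour: grow it downwards. */
        set[i].page = page;
        set[i].count += count;
        return n;
    }

    /* No merge: shift and insert as a fresh interval. */
    for (size_t j = n; j > i; j--)
        set[j] = set[j - 1];
    set[i] = (interval_t) { page, count };
    return n + 1;
}

int main(void)
{
    interval_t set[8] = { { 10, 2 }, { 14, 2 } };  /* pages 10-11, 14-15 */
    size_t n = insert_coalesce(set, 2, 12, 2);     /* fills the gap */
    for (size_t i = 0; i < n; i++)
        printf("[%lu..%lu]\n", (unsigned long) set[i].page,
            (unsigned long) (set[i].page + set[i].count - 1));
}
```

The kernel's version additionally panics on overlap, since inserting pages that are already resident indicates corrupted bookkeeping.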
```c
    assert(IS_ALIGNED(page, PAGE_SIZE));
    assert(count);

    btree_node_t *leaf;
    size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
…
                }
            }

            goto error;
        }
    }

    btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space,
        leaf);
…
        uintptr_t left_pg = node->key[node->keys - 1];
        size_t left_cnt = (size_t) node->value[node->keys - 1];

        if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) {
            if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) {
…
            }
        }

        return false;
    } else if (page < leaf->key[0])
        return false;

    if (page > leaf->key[leaf->keys - 1]) {
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
        size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];

        if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) {
            if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) {
…
            }
        }

        return false;
    }

    /*
     * The border cases have been already resolved.
…
        uintptr_t left_pg = leaf->key[i - 1];
        size_t left_cnt = (size_t) leaf->value[i - 1];

        /*
         * Now the interval is between intervals corresponding
…
            }
        }

            return false;
        }
    }

error:
    panic("Inconsistency detected while removing %zu pages of used "
        "space from %p.", count, (void *) page);

success:
    area->resident -= count;
…
    if (area == NULL)
        return (sysarg_t) AS_MAP_FAILED;

    return (sysarg_t) virt;
}
…
{
    mutex_lock(&as->lock);

    /* First pass, count number of areas. */

    size_t area_cnt = 0;

    list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
        node) {
        area_cnt += node->keys;
    }

    size_t isize = area_cnt * sizeof(as_area_info_t);
    as_area_info_t *info = malloc(isize, 0);

    /* Second pass, record data. */

    size_t area_idx = 0;

    list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
        node) {
        btree_key_t i;

        for (i = 0; i < node->keys; i++) {
            as_area_t *area = node->value[i];

            assert(area_idx < area_cnt);
            mutex_lock(&area->lock);

            info[area_idx].start_addr = area->base;
            info[area_idx].size = P2SZ(area->pages);
            info[area_idx].flags = area->flags;
            ++area_idx;

            mutex_unlock(&area->lock);
        }
    }

    mutex_unlock(&as->lock);

    *obuf = info;
    *osize = isize;
…
{
    mutex_lock(&as->lock);

    /* Print out info about address space areas */
    list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
        node) {
        btree_key_t i;

        for (i = 0; i < node->keys; i++) {
            as_area_t *area = node->value[i];

            mutex_lock(&area->lock);
            printf("as_area: %p, base=%p, pages=%zu"
…
        }
    }

    mutex_unlock(&as->lock);
}
```
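`as_get_area_info()` snapshots the area list with a two-pass walk: count first, allocate once, then fill while holding the fine-grained locks. The shape of that pattern in isolation (`info_t` and the data source are invented for this sketch):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { uintptr_t start_addr; size_t size; } info_t;

int main(void)
{
    size_t sizes[] = { 4096, 8192, 16384 };  /* stand-in for the B+tree walk */
    size_t n = sizeof(sizes) / sizeof(sizes[0]);

    /* First pass, count number of entries (here trivially n). */
    size_t cnt = n;

    /* Allocate the output buffer once, between the two passes. */
    info_t *info = malloc(cnt * sizeof(info_t));
    if (!info)
        return 1;

    /* Second pass, record data. */
    uintptr_t base = 0x10000;
    for (size_t i = 0; i < cnt; i++) {
        info[i].start_addr = base;
        info[i].size = sizes[i];
        base += sizes[i];
    }

    for (size_t i = 0; i < cnt; i++)
        printf("area %zu: %#lx + %zu\n", i,
            (unsigned long) info[i].start_addr, info[i].size);
    free(info);
}
```

The count can be trusted across both passes only because the kernel holds `as->lock` for the whole walk, which is exactly what the surrounding `mutex_lock()`/`mutex_unlock()` pair provides.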