Changeset a35b458 in mainline for kernel/generic/src/mm

- Timestamp: 2018-03-02T20:10:49Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: f1380b7
- Parents: 3061bc1
- git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2018-02-28 17:38:31)
- git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2018-03-02 20:10:49)
- Location: kernel/generic/src/mm
- Files: 10 edited
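For local inspection, the same changes can be reproduced from a checkout of the HelenOS repository using the revision hashes listed above (the clone URL is not given on this page and is assumed to be the standard HelenOS mainline repository), for example:

    git diff 3061bc1 a35b458 -- kernel/generic/src/mm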
kernel/generic/src/mm/as.c
r3061bc1 → ra35b458: the side-by-side diff table did not survive extraction and was flattened into a single run of text. In every surviving row the two revisions render the same code (address space creation and destruction, area conflict checks, area resize/destroy/share, page-fault handling, the used_space B+tree bookkeeping, and the info/print helpers), so the visible changes are confined to whitespace, mostly on otherwise blank lines.
kernel/generic/src/mm/backend_anon.c
r3061bc1 → ra35b458: flattened in extraction like the table above; both revisions render identically in the anonymous-backend share and page-fault paths, i.e. whitespace-only differences.
kernel/generic/src/mm/backend_elf.c
r3061bc1 → ra35b458: flattened diff table; both revisions render identically in the ELF-backend reserve, share, and page-fault code, i.e. whitespace-only differences.
kernel/generic/src/mm/backend_phys.c
r3061bc1 → ra35b458: flattened diff table; whitespace-only differences in the physical-memory backend (the backend ops table and the page-fault handler).
kernel/generic/src/mm/backend_user.c
r3061bc1 → ra35b458: flattened diff table; a single short whitespace-only hunk around lines 172-176.
kernel/generic/src/mm/frame.c
r3061bc1 → ra35b458: flattened diff table; both revisions render identically across the zone management, frame allocation and freeing, out-of-memory sleep path, and zone reporting code, i.e. whitespace-only differences.
kernel/generic/src/mm/km.c
r3061bc1 → ra35b458: flattened diff table; whitespace-only differences in the kernel mapping helpers (hunks around lines 149-153 and 247-274).
kernel/generic/src/mm/page.c
r3061bc1 ra35b458 99 99 { 100 100 assert(page_table_locked(as)); 101 101 102 102 assert(page_mapping_operations); 103 103 assert(page_mapping_operations->mapping_insert); … … 105 105 page_mapping_operations->mapping_insert(as, ALIGN_DOWN(page, PAGE_SIZE), 106 106 ALIGN_DOWN(frame, FRAME_SIZE), flags); 107 107 108 108 /* Repel prefetched accesses to the old mapping. */ 109 109 memory_barrier(); … … 123 123 { 124 124 assert(page_table_locked(as)); 125 125 126 126 assert(page_mapping_operations); 127 127 assert(page_mapping_operations->mapping_remove); 128 128 129 129 page_mapping_operations->mapping_remove(as, 130 130 ALIGN_DOWN(page, PAGE_SIZE)); 131 131 132 132 /* Repel prefetched accesses to the old mapping. */ 133 133 memory_barrier(); … … 148 148 { 149 149 assert(nolock || page_table_locked(as)); 150 150 151 151 assert(page_mapping_operations); 152 152 assert(page_mapping_operations->mapping_find); 153 153 154 154 return page_mapping_operations->mapping_find(as, 155 155 ALIGN_DOWN(page, PAGE_SIZE), nolock, pte); … … 169 169 { 170 170 assert(nolock || page_table_locked(as)); 171 171 172 172 assert(page_mapping_operations); 173 173 assert(page_mapping_operations->mapping_find); 174 174 175 175 page_mapping_operations->mapping_update(as, 176 176 ALIGN_DOWN(page, PAGE_SIZE), nolock, pte); … … 186 186 assert(page_mapping_operations); 187 187 assert(page_mapping_operations->mapping_make_global); 188 188 189 189 return page_mapping_operations->mapping_make_global(base, size); 190 190 } … … 193 193 { 194 194 page_table_lock(AS, true); 195 195 196 196 pte_t pte; 197 197 bool found = page_mapping_find(AS, virt, false, &pte); … … 200 200 return ENOENT; 201 201 } 202 202 203 203 *phys = PTE_GET_FRAME(&pte) + 204 204 (virt - ALIGN_DOWN(virt, PAGE_SIZE)); 205 205 206 206 page_table_unlock(AS, true); 207 207 208 208 return EOK; 209 209 } … … 221 221 if (rc != EOK) 222 222 return rc; 223 223 224 224 rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys)); 225 225 return (sys_errno_t) rc; -
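The page.c wrappers above never walk page tables themselves: they assert that the table is locked, align the virtual and physical addresses down to page/frame boundaries, dispatch through the page_mapping_operations table, and finish with a memory barrier. A toy illustration of that operations-table indirection; the backend, types and names are stand-ins, not the kernel's pte_t machinery:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_SKETCH 4096u
#define ALIGN_DOWN_SKETCH(a, sz) ((a) & ~((uint64_t)(sz) - 1))

typedef struct {
    void (*mapping_insert)(uint64_t page, uint64_t frame, unsigned flags);
} mapping_ops_t;

/* Toy backend: a real one would update page tables or a hash table. */
static void toy_insert(uint64_t page, uint64_t frame, unsigned flags)
{
    printf("map page 0x%llx -> frame 0x%llx (flags %u)\n",
        (unsigned long long) page, (unsigned long long) frame, flags);
}

static const mapping_ops_t toy_ops = { .mapping_insert = toy_insert };
static const mapping_ops_t *mapping_ops = &toy_ops;

/* Generic layer: align, then dispatch to whatever backend is plugged in. */
static void generic_mapping_insert(uint64_t virt, uint64_t phys, unsigned flags)
{
    assert(mapping_ops && mapping_ops->mapping_insert);
    mapping_ops->mapping_insert(ALIGN_DOWN_SKETCH(virt, PAGE_SIZE_SKETCH),
        ALIGN_DOWN_SKETCH(phys, PAGE_SIZE_SKETCH), flags);
    /* A real kernel would issue a memory barrier here, as page.c does above. */
}

int main(void)
{
    generic_mapping_insert(0x12345, 0x9abcd, 1);
    return 0;
}

The indirection is what lets each architecture plug in its own backend (hierarchical page tables, a global hash table, ...) without changing the generic callers.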
kernel/generic/src/mm/slab.c
r3061bc1 ra35b458 186 186 { 187 187 size_t zone = 0; 188 188 189 189 uintptr_t data_phys = 190 190 frame_alloc_generic(cache->frames, flags, 0, &zone); 191 191 if (!data_phys) 192 192 return NULL; 193 193 194 194 void *data = (void *) PA2KA(data_phys); 195 195 196 196 slab_t *slab; 197 197 size_t fsize; 198 198 199 199 if (!(cache->flags & SLAB_CACHE_SLINSIDE)) { 200 200 slab = slab_alloc(slab_extern_cache, flags); … … 207 207 slab = data + fsize - sizeof(*slab); 208 208 } 209 209 210 210 /* Fill in slab structures */ 211 211 size_t i; 212 212 for (i = 0; i < cache->frames; i++) 213 213 frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone); 214 214 215 215 slab->start = data; 216 216 slab->available = cache->objects; 217 217 slab->nextavail = 0; 218 218 slab->cache = cache; 219 219 220 220 for (i = 0; i < cache->objects; i++) 221 221 *((size_t *) (slab->start + i * cache->size)) = i + 1; 222 222 223 223 atomic_inc(&cache->allocated_slabs); 224 224 return slab; … … 235 235 if (!(cache->flags & SLAB_CACHE_SLINSIDE)) 236 236 slab_free(slab_extern_cache, slab); 237 237 238 238 atomic_dec(&cache->allocated_slabs); 239 239 240 240 return cache->frames; 241 241 } … … 263 263 if (!slab) 264 264 slab = obj2slab(obj); 265 265 266 266 assert(slab->cache == cache); 267 267 268 268 size_t freed = 0; 269 269 270 270 if (cache->destructor) 271 271 freed = cache->destructor(obj); 272 272 273 273 irq_spinlock_lock(&cache->slablock, true); 274 274 assert(slab->available < cache->objects); 275 275 276 276 *((size_t *) obj) = slab->nextavail; 277 277 slab->nextavail = (obj - slab->start) / cache->size; 278 278 slab->available++; 279 279 280 280 /* Move it to correct list */ 281 281 if (slab->available == cache->objects) { … … 283 283 list_remove(&slab->link); 284 284 irq_spinlock_unlock(&cache->slablock, true); 285 285 286 286 return freed + slab_space_free(cache, slab); 287 287 } else if (slab->available == 1) { … … 290 290 list_prepend(&slab->link, &cache->partial_slabs); 291 291 } 292 292 293 293 irq_spinlock_unlock(&cache->slablock, true); 294 294 return freed; … … 303 303 { 304 304 irq_spinlock_lock(&cache->slablock, true); 305 305 306 306 slab_t *slab; 307 307 308 308 if (list_empty(&cache->partial_slabs)) { 309 309 /* … … 319 319 if (!slab) 320 320 return NULL; 321 321 322 322 irq_spinlock_lock(&cache->slablock, true); 323 323 } else { … … 326 326 list_remove(&slab->link); 327 327 } 328 328 329 329 void *obj = slab->start + slab->nextavail * cache->size; 330 330 slab->nextavail = *((size_t *) obj); 331 331 slab->available--; 332 332 333 333 if (!slab->available) 334 334 list_prepend(&slab->link, &cache->full_slabs); 335 335 else 336 336 list_prepend(&slab->link, &cache->partial_slabs); 337 337 338 338 irq_spinlock_unlock(&cache->slablock, true); 339 339 340 340 if ((cache->constructor) && (cache->constructor(obj, flags) != EOK)) { 341 341 /* Bad, bad, construction failed */ … … 343 343 return NULL; 344 344 } 345 345 346 346 return obj; 347 347 } … … 361 361 slab_magazine_t *mag = NULL; 362 362 link_t *cur; 363 363 364 364 irq_spinlock_lock(&cache->maglock, true); 365 365 if (!list_empty(&cache->magazines)) { … … 368 368 else 369 369 cur = list_last(&cache->magazines); 370 370 371 371 mag = list_get_instance(cur, slab_magazine_t, link); 372 372 list_remove(&mag->link); … … 385 385 { 386 386 irq_spinlock_lock(&cache->maglock, true); 387 387 388 388 list_prepend(&mag->link, &cache->magazines); 389 389 atomic_inc(&cache->magazine_counter); 390 390 391 391 irq_spinlock_unlock(&cache->maglock, true); 392 
392 } … … 402 402 size_t i; 403 403 size_t frames = 0; 404 404 405 405 for (i = 0; i < mag->busy; i++) { 406 406 frames += slab_obj_destroy(cache, mag->objs[i], NULL); 407 407 atomic_dec(&cache->cached_objs); 408 408 } 409 409 410 410 slab_free(&mag_cache, mag); 411 411 412 412 return frames; 413 413 } … … 420 420 slab_magazine_t *cmag = cache->mag_cache[CPU->id].current; 421 421 slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last; 422 422 423 423 assert(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock)); 424 424 425 425 if (cmag) { /* First try local CPU magazines */ 426 426 if (cmag->busy) 427 427 return cmag; 428 428 429 429 if ((lastmag) && (lastmag->busy)) { 430 430 cache->mag_cache[CPU->id].current = lastmag; … … 433 433 } 434 434 } 435 435 436 436 /* Local magazines are empty, import one from magazine list */ 437 437 slab_magazine_t *newmag = get_mag_from_cache(cache, 1); 438 438 if (!newmag) 439 439 return NULL; 440 440 441 441 if (lastmag) 442 442 magazine_destroy(cache, lastmag); 443 443 444 444 cache->mag_cache[CPU->id].last = cmag; 445 445 cache->mag_cache[CPU->id].current = newmag; 446 446 447 447 return newmag; 448 448 } … … 457 457 if (!CPU) 458 458 return NULL; 459 459 460 460 irq_spinlock_lock(&cache->mag_cache[CPU->id].lock, true); 461 461 462 462 slab_magazine_t *mag = get_full_current_mag(cache); 463 463 if (!mag) { … … 465 465 return NULL; 466 466 } 467 467 468 468 void *obj = mag->objs[--mag->busy]; 469 469 irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true); 470 470 471 471 atomic_dec(&cache->cached_objs); 472 472 473 473 return obj; 474 474 } … … 487 487 slab_magazine_t *cmag = cache->mag_cache[CPU->id].current; 488 488 slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last; 489 489 490 490 assert(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock)); 491 491 492 492 if (cmag) { 493 493 if (cmag->busy < cmag->size) 494 494 return cmag; 495 495 496 496 if ((lastmag) && (lastmag->busy < lastmag->size)) { 497 497 cache->mag_cache[CPU->id].last = cmag; … … 500 500 } 501 501 } 502 502 503 503 /* current | last are full | nonexistent, allocate new */ 504 504 505 505 /* 506 506 * We do not want to sleep just because of caching, … … 513 513 if (!newmag) 514 514 return NULL; 515 515 516 516 newmag->size = SLAB_MAG_SIZE; 517 517 newmag->busy = 0; 518 518 519 519 /* Flush last to magazine list */ 520 520 if (lastmag) 521 521 put_mag_to_cache(cache, lastmag); 522 522 523 523 /* Move current as last, save new as current */ 524 524 cache->mag_cache[CPU->id].last = cmag; 525 525 cache->mag_cache[CPU->id].current = newmag; 526 526 527 527 return newmag; 528 528 } … … 537 537 if (!CPU) 538 538 return -1; 539 539 540 540 irq_spinlock_lock(&cache->mag_cache[CPU->id].lock, true); 541 541 542 542 slab_magazine_t *mag = make_empty_current_mag(cache); 543 543 if (!mag) { … … 545 545 return -1; 546 546 } 547 547 548 548 mag->objs[mag->busy++] = obj; 549 549 550 550 irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true); 551 551 552 552 atomic_inc(&cache->cached_objs); 553 553 554 554 return 0; 555 555 } … … 578 578 size_t objects = comp_objects(cache); 579 579 size_t ssize = FRAMES2SIZE(cache->frames); 580 580 581 581 if (cache->flags & SLAB_CACHE_SLINSIDE) 582 582 ssize -= sizeof(slab_t); 583 583 584 584 return ssize - objects * cache->size; 585 585 } … … 591 591 { 592 592 assert(_slab_initialized >= 2); 593 593 594 594 cache->mag_cache = slab_alloc(&slab_mag_cache, FRAME_ATOMIC); 595 595 if (!cache->mag_cache) 596 596 return false; 597 597 598 598 size_t i; 599 599 
for (i = 0; i < config.cpu_count; i++) { … … 602 602 "slab.cache.mag_cache[].lock"); 603 603 } 604 604 605 605 return true; 606 606 } … … 614 614 { 615 615 assert(size > 0); 616 616 617 617 memsetb(cache, sizeof(*cache), 0); 618 618 cache->name = name; 619 619 620 620 if (align < sizeof(sysarg_t)) 621 621 align = sizeof(sysarg_t); 622 622 623 623 size = ALIGN_UP(size, align); 624 624 625 625 cache->size = size; 626 626 cache->constructor = constructor; 627 627 cache->destructor = destructor; 628 628 cache->flags = flags; 629 629 630 630 list_initialize(&cache->full_slabs); 631 631 list_initialize(&cache->partial_slabs); 632 632 list_initialize(&cache->magazines); 633 633 634 634 irq_spinlock_initialize(&cache->slablock, "slab.cache.slablock"); 635 635 irq_spinlock_initialize(&cache->maglock, "slab.cache.maglock"); 636 636 637 637 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) 638 638 (void) make_magcache(cache); 639 639 640 640 /* Compute slab sizes, object counts in slabs etc. */ 641 641 if (cache->size < SLAB_INSIDE_SIZE) 642 642 cache->flags |= SLAB_CACHE_SLINSIDE; 643 643 644 644 /* Minimum slab frames */ 645 645 cache->frames = SIZE2FRAMES(cache->size); 646 646 647 647 while (badness(cache) > SLAB_MAX_BADNESS(cache)) 648 648 cache->frames <<= 1; 649 649 650 650 cache->objects = comp_objects(cache); 651 651 652 652 /* If info fits in, put it inside */ 653 653 if (badness(cache) > sizeof(slab_t)) 654 654 cache->flags |= SLAB_CACHE_SLINSIDE; 655 655 656 656 /* Add cache to cache list */ 657 657 irq_spinlock_lock(&slab_cache_lock, true); … … 670 670 _slab_cache_create(cache, name, size, align, constructor, destructor, 671 671 flags); 672 672 673 673 return cache; 674 674 } … … 685 685 if (cache->flags & SLAB_CACHE_NOMAGAZINE) 686 686 return 0; /* Nothing to do */ 687 687 688 688 /* 689 689 * We count up to original magazine count to avoid … … 691 691 */ 692 692 atomic_count_t magcount = atomic_get(&cache->magazine_counter); 693 693 694 694 slab_magazine_t *mag; 695 695 size_t frames = 0; 696 696 697 697 while ((magcount--) && (mag = get_mag_from_cache(cache, 0))) { 698 698 frames += magazine_destroy(cache, mag); … … 700 700 break; 701 701 } 702 702 703 703 if (flags & SLAB_RECLAIM_ALL) { 704 704 /* Free cpu-bound magazines */ … … 707 707 for (i = 0; i < config.cpu_count; i++) { 708 708 irq_spinlock_lock(&cache->mag_cache[i].lock, true); 709 709 710 710 mag = cache->mag_cache[i].current; 711 711 if (mag) 712 712 frames += magazine_destroy(cache, mag); 713 713 cache->mag_cache[i].current = NULL; 714 714 715 715 mag = cache->mag_cache[i].last; 716 716 if (mag) 717 717 frames += magazine_destroy(cache, mag); 718 718 cache->mag_cache[i].last = NULL; 719 719 720 720 irq_spinlock_unlock(&cache->mag_cache[i].lock, true); 721 721 } 722 722 } 723 723 724 724 return frames; 725 725 } … … 731 731 { 732 732 ipl_t ipl = interrupts_disable(); 733 733 734 734 if ((cache->flags & SLAB_CACHE_NOMAGAZINE) || 735 735 (magazine_obj_put(cache, obj))) 736 736 slab_obj_destroy(cache, obj, slab); 737 737 738 738 interrupts_restore(ipl); 739 739 atomic_dec(&cache->allocated_objs); … … 753 753 list_remove(&cache->link); 754 754 irq_spinlock_unlock(&slab_cache_lock, true); 755 755 756 756 /* 757 757 * Do not lock anything, we assume the software is correct and … … 759 759 * 760 760 */ 761 761 762 762 /* Destroy all magazines */ 763 763 _slab_reclaim(cache, SLAB_RECLAIM_ALL); 764 764 765 765 /* All slabs must be empty */ 766 766 if ((!list_empty(&cache->full_slabs)) || 767 767 (!list_empty(&cache->partial_slabs))) 768 
768 panic("Destroying cache that is not empty."); 769 769 770 770 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) { 771 771 slab_t *mag_slab = obj2slab(cache->mag_cache); 772 772 _slab_free(mag_slab->cache, cache->mag_cache, mag_slab); 773 773 } 774 774 775 775 slab_free(&slab_cache_cache, cache); 776 776 } … … 783 783 /* Disable interrupts to avoid deadlocks with interrupt handlers */ 784 784 ipl_t ipl = interrupts_disable(); 785 785 786 786 void *result = NULL; 787 787 788 788 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) 789 789 result = magazine_obj_get(cache); 790 790 791 791 if (!result) 792 792 result = slab_obj_create(cache, flags); 793 793 794 794 interrupts_restore(ipl); 795 795 796 796 if (result) 797 797 atomic_inc(&cache->allocated_objs); 798 798 799 799 return result; 800 800 } … … 812 812 { 813 813 irq_spinlock_lock(&slab_cache_lock, true); 814 814 815 815 size_t frames = 0; 816 816 list_foreach(slab_cache_list, link, slab_cache_t, cache) { 817 817 frames += _slab_reclaim(cache, flags); 818 818 } 819 819 820 820 irq_spinlock_unlock(&slab_cache_lock, true); 821 821 822 822 return frames; 823 823 } … … 828 828 printf("[cache name ] [size ] [pages ] [obj/pg] [slabs ]" 829 829 " [cached] [alloc ] [ctl]\n"); 830 830 831 831 size_t skip = 0; 832 832 while (true) { … … 853 853 * statistics. 854 854 */ 855 855 856 856 irq_spinlock_lock(&slab_cache_lock, true); 857 857 858 858 link_t *cur; 859 859 size_t i; … … 861 861 (i < skip) && (cur != &slab_cache_list.head); 862 862 i++, cur = cur->next); 863 863 864 864 if (cur == &slab_cache_list.head) { 865 865 irq_spinlock_unlock(&slab_cache_lock, true); 866 866 break; 867 867 } 868 868 869 869 skip++; 870 870 871 871 slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link); 872 872 873 873 const char *name = cache->name; 874 874 size_t frames = cache->frames; … … 879 879 long allocated_objs = atomic_get(&cache->allocated_objs); 880 880 unsigned int flags = cache->flags; 881 881 882 882 irq_spinlock_unlock(&slab_cache_lock, true); 883 883 884 884 printf("%-18s %8zu %8zu %8zu %8ld %8ld %8ld %-5s\n", 885 885 name, size, frames, objects, allocated_slabs, … … 896 896 sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE | 897 897 SLAB_CACHE_SLINSIDE); 898 898 899 899 /* Initialize slab_cache cache */ 900 900 _slab_cache_create(&slab_cache_cache, "slab_cache_cache", 901 901 sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL, 902 902 SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE); 903 903 904 904 /* Initialize external slab cache */ 905 905 slab_extern_cache = slab_cache_create("slab_t", sizeof(slab_t), 0, 906 906 NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED); 907 907 908 908 /* Initialize structures for malloc */ 909 909 size_t i; 910 910 size_t size; 911 911 912 912 for (i = 0, size = (1 << SLAB_MIN_MALLOC_W); 913 913 i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1); … … 916 916 NULL, NULL, SLAB_CACHE_MAGDEFERRED); 917 917 } 918 918 919 919 #ifdef CONFIG_DEBUG 920 920 _slab_initialized = 1; … … 934 934 _slab_initialized = 2; 935 935 #endif 936 936 937 937 _slab_cache_create(&slab_mag_cache, "slab_mag_cache", 938 938 sizeof(slab_mag_cache_t) * config.cpu_count, sizeof(uintptr_t), 939 939 NULL, NULL, SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE); 940 940 941 941 irq_spinlock_lock(&slab_cache_lock, false); 942 942 943 943 list_foreach(slab_cache_list, link, slab_cache_t, slab) { 944 944 if ((slab->flags & SLAB_CACHE_MAGDEFERRED) != 945 945 SLAB_CACHE_MAGDEFERRED) 946 946 continue; 947 947 948 948 (void) make_magcache(slab); 949 949 
slab->flags &= ~SLAB_CACHE_MAGDEFERRED; 950 950 } 951 951 952 952 irq_spinlock_unlock(&slab_cache_lock, false); 953 953 } … … 957 957 assert(_slab_initialized); 958 958 assert(size <= (1 << SLAB_MAX_MALLOC_W)); 959 959 960 960 if (size < (1 << SLAB_MIN_MALLOC_W)) 961 961 size = (1 << SLAB_MIN_MALLOC_W); 962 962 963 963 uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1; 964 964 965 965 return slab_alloc(malloc_caches[idx], flags); 966 966 } … … 970 970 assert(_slab_initialized); 971 971 assert(size <= (1 << SLAB_MAX_MALLOC_W)); 972 972 973 973 void *new_ptr; 974 974 975 975 if (size > 0) { 976 976 if (size < (1 << SLAB_MIN_MALLOC_W)) 977 977 size = (1 << SLAB_MIN_MALLOC_W); 978 978 uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1; 979 979 980 980 new_ptr = slab_alloc(malloc_caches[idx], flags); 981 981 } else 982 982 new_ptr = NULL; 983 983 984 984 if ((new_ptr != NULL) && (ptr != NULL)) { 985 985 slab_t *slab = obj2slab(ptr); 986 986 memcpy(new_ptr, ptr, min(size, slab->cache->size)); 987 987 } 988 988 989 989 if (ptr != NULL) 990 990 free(ptr); 991 991 992 992 return new_ptr; 993 993 } … … 997 997 if (!ptr) 998 998 return; 999 999 1000 1000 slab_t *slab = obj2slab(ptr); 1001 1001 _slab_free(slab->cache, ptr, slab); -
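Most of the slab.c changes are again whitespace, but the surrounding code is the magazine layer: magazine_obj_get() and magazine_obj_put() serve requests from a per-CPU "current" magazine, swap in the per-CPU "last" magazine when one of them runs dry or fills up, and only then fall back to the shared slab lists. A deliberately single-threaded toy version of that idea; the sizes, types and fallback allocator are invented, and locking plus the cache-wide magazine list are omitted:

#include <stdio.h>
#include <stdlib.h>

#define MAG_SIZE 4

typedef struct {
    size_t busy;
    void *objs[MAG_SIZE];
} magazine_t;

typedef struct {
    magazine_t *current;
    magazine_t *last;
} cpu_mag_cache_t;

/* Pretend slow path: the real code would go to the slab lists. */
static void *slow_path_alloc(void)
{
    return malloc(16);
}

static void *mag_obj_get(cpu_mag_cache_t *cc)
{
    /* If the current magazine is empty but "last" still has objects, swap them. */
    if (cc->current && cc->current->busy == 0 &&
        cc->last && cc->last->busy > 0) {
        magazine_t *tmp = cc->current;
        cc->current = cc->last;
        cc->last = tmp;
    }

    if (cc->current && cc->current->busy > 0)
        return cc->current->objs[--cc->current->busy];

    return slow_path_alloc();
}

static void mag_obj_put(cpu_mag_cache_t *cc, void *obj)
{
    if (cc->current && cc->current->busy < MAG_SIZE) {
        cc->current->objs[cc->current->busy++] = obj;
        return;
    }
    /* The real code would flush the full magazine to the cache-wide list and
     * start a fresh one; the sketch simply frees the object instead. */
    free(obj);
}

int main(void)
{
    magazine_t cur = { 0 }, last = { 0 };
    cpu_mag_cache_t cc = { .current = &cur, .last = &last };

    void *a = mag_obj_get(&cc);   /* slow path: magazines start empty */
    mag_obj_put(&cc, a);          /* cached in the current magazine */
    void *b = mag_obj_get(&cc);   /* served from the magazine again */
    printf("reused cached object: %s\n", (a == b) ? "yes" : "no");
    free(b);
    return 0;
}

The payoff in the real allocator is that the common alloc/free path touches only per-CPU state and so avoids contention on the cache-wide slablock.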
kernel/generic/src/mm/tlb.c
r3061bc1 ra35b458 87 87 CPU->tlb_active = false; 88 88 irq_spinlock_lock(&tlblock, false); 89 89 90 90 size_t i; 91 91 for (i = 0; i < config.cpu_count; i++) { 92 92 if (i == CPU->id) 93 93 continue; 94 94 95 95 cpu_t *cpu = &cpus[i]; 96 96 97 97 irq_spinlock_lock(&cpu->lock, false); 98 98 if (cpu->tlb_messages_count == TLB_MESSAGE_QUEUE_LEN) { … … 118 118 irq_spinlock_unlock(&cpu->lock, false); 119 119 } 120 120 121 121 tlb_shootdown_ipi_send(); 122 122 123 123 busy_wait: 124 124 for (i = 0; i < config.cpu_count; i++) { … … 126 126 goto busy_wait; 127 127 } 128 128 129 129 return ipl; 130 130 } … … 153 153 { 154 154 assert(CPU); 155 155 156 156 CPU->tlb_active = false; 157 157 irq_spinlock_lock(&tlblock, false); 158 158 irq_spinlock_unlock(&tlblock, false); 159 159 160 160 irq_spinlock_lock(&CPU->lock, false); 161 161 assert(CPU->tlb_messages_count <= TLB_MESSAGE_QUEUE_LEN); 162 162 163 163 size_t i; 164 164 for (i = 0; i < CPU->tlb_messages_count; i++) { … … 167 167 uintptr_t page = CPU->tlb_messages[i].page; 168 168 size_t count = CPU->tlb_messages[i].count; 169 169 170 170 switch (type) { 171 171 case TLB_INVL_ALL: … … 183 183 break; 184 184 } 185 185 186 186 if (type == TLB_INVL_ALL) 187 187 break; 188 188 } 189 189 190 190 CPU->tlb_messages_count = 0; 191 191 irq_spinlock_unlock(&CPU->lock, false);
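The tlb.c hunks cover the TLB shootdown protocol: the initiating CPU queues an invalidation message on every other CPU, calls tlb_shootdown_ipi_send(), and busy-waits for the other CPUs; each receiver later drains its per-CPU queue and performs the requested invalidation (all, by address space, or by page range). A structural sketch of that message flow, with the IPI, the spinlocks and the busy-wait left out and all names invented for the example:

#include <stdio.h>

#define NCPUS     4
#define QUEUE_LEN 8

typedef enum { INVL_ALL, INVL_ASID, INVL_PAGES } invl_type_t;

typedef struct {
    invl_type_t type;
    unsigned long page;
    unsigned count;
} tlb_msg_t;

typedef struct {
    tlb_msg_t queue[QUEUE_LEN];
    unsigned pending;
} cpu_sketch_t;

static cpu_sketch_t cpus_sketch[NCPUS];

static void shootdown_start(unsigned self, invl_type_t type,
    unsigned long page, unsigned count)
{
    for (unsigned i = 0; i < NCPUS; i++) {
        if (i == self)
            continue;
        cpu_sketch_t *cpu = &cpus_sketch[i];
        if (cpu->pending == QUEUE_LEN) {
            /* One common strategy when the queue is full: collapse it into
             * a single "flush everything" message. */
            cpu->queue[0] = (tlb_msg_t) { INVL_ALL, 0, 0 };
            cpu->pending = 1;
            continue;
        }
        cpu->queue[cpu->pending++] = (tlb_msg_t) { type, page, count };
    }
    /* Here the kernel sends an IPI and busy-waits for the other CPUs;
     * the sketch just lets each CPU drain its queue in main() below. */
}

static void shootdown_ipi_recv(unsigned self)
{
    cpu_sketch_t *cpu = &cpus_sketch[self];
    for (unsigned i = 0; i < cpu->pending; i++) {
        tlb_msg_t *msg = &cpu->queue[i];
        switch (msg->type) {
        case INVL_ALL:
            printf("cpu%u: flush entire TLB\n", self);
            break;
        case INVL_ASID:
            printf("cpu%u: flush one address space\n", self);
            break;
        case INVL_PAGES:
            printf("cpu%u: flush %u page(s) at 0x%lx\n", self,
                msg->count, msg->page);
            break;
        }
        if (msg->type == INVL_ALL)
            break;
    }
    cpu->pending = 0;
}

int main(void)
{
    shootdown_start(0, INVL_PAGES, 0x1000, 2);
    for (unsigned i = 1; i < NCPUS; i++)
        shootdown_ipi_recv(i);
    return 0;
}

Collapsing an overflowing queue into a single flush-everything message is one way to bound the queue size, at the cost of occasionally invalidating more than strictly necessary.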