Changeset 89c57b6 in mainline for kernel/generic/src/mm
- Timestamp: 2011-04-13T14:45:41Z (14 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 88634420
- Parents: cefb126 (diff), 17279ead (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel/generic/src/mm
- Files: 6 edited
kernel/generic/src/mm/as.c
rcefb126 r89c57b6 71 71 #include <memstr.h> 72 72 #include <macros.h> 73 #include <bitops.h> 73 74 #include <arch.h> 74 75 #include <errno.h> … … 86 87 * Each architecture decides what functions will be used to carry out 87 88 * address space operations such as creating or locking page tables. 88 *89 89 */ 90 90 as_operations_t *as_operations = NULL; 91 91 92 /** 93 * Slab for as_t objects. 92 /** Slab for as_t objects. 94 93 * 95 94 */ 96 95 static slab_cache_t *as_slab; 97 96 98 /** 99 * This lock serializes access to the ASID subsystem.100 * Itprotects:97 /** ASID subsystem lock. 98 * 99 * This lock protects: 101 100 * - inactive_as_with_asid_head list 102 101 * - as->asid for each as of the as_t type … … 107 106 108 107 /** 109 * This list contains address spaces that are not active on any 110 * processor and that have valid ASID. 111 * 108 * Inactive address spaces (on all processors) 109 * that have valid ASID. 112 110 */ 113 111 LIST_INITIALIZE(inactive_as_with_asid_head); … … 116 114 as_t *AS_KERNEL = NULL; 117 115 118 static int as_constructor(void *obj, unsigned int flags)116 NO_TRACE static int as_constructor(void *obj, unsigned int flags) 119 117 { 120 118 as_t *as = (as_t *) obj; … … 123 121 mutex_initialize(&as->lock, MUTEX_PASSIVE); 124 122 125 int rc = as_constructor_arch(as, flags); 126 127 return rc; 128 } 129 130 static size_t as_destructor(void *obj) 131 { 132 as_t *as = (as_t *) obj; 133 return as_destructor_arch(as); 123 return as_constructor_arch(as, flags); 124 } 125 126 NO_TRACE static size_t as_destructor(void *obj) 127 { 128 return as_destructor_arch((as_t *) obj); 134 129 } 135 130 … … 146 141 panic("Cannot create kernel address space."); 147 142 148 /* Make sure the kernel address space 143 /* 144 * Make sure the kernel address space 149 145 * reference count never drops to zero. 150 146 */ … … 195 191 { 196 192 DEADLOCK_PROBE_INIT(p_asidlock); 197 193 198 194 ASSERT(as != AS); 199 195 ASSERT(atomic_get(&as->refcount) == 0); … … 203 199 * lock its mutex. 204 200 */ 205 201 206 202 /* 207 203 * We need to avoid deadlock between TLB shootdown and asidlock. … … 210 206 * disabled to prevent nested context switches. We also depend on the 211 207 * fact that so far no spinlocks are held. 212 *213 208 */ 214 209 preemption_disable(); … … 235 230 spinlock_unlock(&asidlock); 236 231 interrupts_restore(ipl); 237 232 238 233 239 234 /* … … 241 236 * The B+tree must be walked carefully because it is 242 237 * also being destroyed. 243 *244 238 */ 245 239 bool cond = true; … … 268 262 /** Hold a reference to an address space. 269 263 * 270 * Holding a reference to an address space prevents destruction of that address271 * space.264 * Holding a reference to an address space prevents destruction 265 * of that address space. 272 266 * 273 267 * @param as Address space to be held. 274 268 * 275 269 */ 276 void as_hold(as_t *as)270 NO_TRACE void as_hold(as_t *as) 277 271 { 278 272 atomic_inc(&as->refcount); … … 281 275 /** Release a reference to an address space. 282 276 * 283 * The last one to release a reference to an address space destroys the address284 * space.277 * The last one to release a reference to an address space 278 * destroys the address space. 285 279 * 286 280 * @param asAddress space to be released. 287 281 * 288 282 */ 289 void as_release(as_t *as)283 NO_TRACE void as_release(as_t *as) 290 284 { 291 285 if (atomic_predec(&as->refcount) == 0) … … 295 289 /** Check area conflicts with other areas. 
296 290 * 297 * @param as 298 * @param vaStarting virtual address of the area being tested.299 * @param size Size ofthe area being tested.300 * @param avoid _areaDo not touch this area.291 * @param as Address space. 292 * @param addr Starting virtual address of the area being tested. 293 * @param count Number of pages in the area being tested. 294 * @param avoid Do not touch this area. 301 295 * 302 296 * @return True if there is no conflict, false otherwise. 303 297 * 304 298 */ 305 static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, 306 as_area_t *avoid_area) 307 { 299 NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr, 300 size_t count, as_area_t *avoid) 301 { 302 ASSERT((addr % PAGE_SIZE) == 0); 308 303 ASSERT(mutex_locked(&as->lock)); 309 304 310 305 /* 311 306 * We don't want any area to have conflicts with NULL page. 312 * 313 */ 314 if (overlaps(va, size, NULL, PAGE_SIZE)) 307 */ 308 if (overlaps(addr, count << PAGE_WIDTH, (uintptr_t) NULL, PAGE_SIZE)) 315 309 return false; 316 310 … … 321 315 * record in the left neighbour, the leftmost record in the right 322 316 * neighbour and all records in the leaf node itself. 323 *324 317 */ 325 318 btree_node_t *leaf; 326 319 as_area_t *area = 327 (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);320 (as_area_t *) btree_search(&as->as_area_btree, addr, &leaf); 328 321 if (area) { 329 if (area != avoid _area)322 if (area != avoid) 330 323 return false; 331 324 } … … 337 330 area = (as_area_t *) node->value[node->keys - 1]; 338 331 339 mutex_lock(&area->lock); 340 341 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 332 if (area != avoid) { 333 mutex_lock(&area->lock); 334 335 if (overlaps(addr, count << PAGE_WIDTH, 336 area->base, area->pages << PAGE_WIDTH)) { 337 mutex_unlock(&area->lock); 338 return false; 339 } 340 342 341 mutex_unlock(&area->lock); 343 return false; 344 } 345 346 mutex_unlock(&area->lock); 342 } 347 343 } 348 344 … … 351 347 area = (as_area_t *) node->value[0]; 352 348 353 mutex_lock(&area->lock); 354 355 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 349 if (area != avoid) { 350 mutex_lock(&area->lock); 351 352 if (overlaps(addr, count << PAGE_WIDTH, 353 area->base, area->pages << PAGE_WIDTH)) { 354 mutex_unlock(&area->lock); 355 return false; 356 } 357 356 358 mutex_unlock(&area->lock); 357 return false; 358 } 359 360 mutex_unlock(&area->lock); 359 } 361 360 } 362 361 … … 366 365 area = (as_area_t *) leaf->value[i]; 367 366 368 if (area == avoid _area)367 if (area == avoid) 369 368 continue; 370 369 371 370 mutex_lock(&area->lock); 372 371 373 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 372 if (overlaps(addr, count << PAGE_WIDTH, 373 area->base, area->pages << PAGE_WIDTH)) { 374 374 mutex_unlock(&area->lock); 375 375 return false; … … 382 382 * So far, the area does not conflict with other areas. 383 383 * Check if it doesn't conflict with kernel address space. 384 *385 384 */ 386 385 if (!KERNEL_ADDRESS_SPACE_SHADOWED) { 387 return !overlaps( va, size,386 return !overlaps(addr, count << PAGE_WIDTH, 388 387 KERNEL_ADDRESS_SPACE_START, 389 388 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START); … … 412 411 mem_backend_data_t *backend_data) 413 412 { 414 if ( base % PAGE_SIZE)413 if ((base % PAGE_SIZE) != 0) 415 414 return NULL; 416 415 417 if ( !size)416 if (size == 0) 418 417 return NULL; 418 419 size_t pages = SIZE2FRAMES(size); 419 420 420 421 /* Writeable executable areas are not supported. 
*/ … … 424 425 mutex_lock(&as->lock); 425 426 426 if (!check_area_conflicts(as, base, size, NULL)) {427 if (!check_area_conflicts(as, base, pages, NULL)) { 427 428 mutex_unlock(&as->lock); 428 429 return NULL; … … 436 437 area->flags = flags; 437 438 area->attributes = attrs; 438 area->pages = SIZE2FRAMES(size); 439 area->pages = pages; 440 area->resident = 0; 439 441 area->base = base; 440 442 area->sh_info = NULL; … … 463 465 * 464 466 */ 465 static as_area_t *find_area_and_lock(as_t *as, uintptr_t va)467 NO_TRACE static as_area_t *find_area_and_lock(as_t *as, uintptr_t va) 466 468 { 467 469 ASSERT(mutex_locked(&as->lock)); … … 479 481 * to find out whether this is a miss or va belongs to an address 480 482 * space area found there. 481 *482 483 */ 483 484 … … 490 491 mutex_lock(&area->lock); 491 492 492 if ((area->base <= va) && (va < area->base + area->pages * PAGE_SIZE)) 493 if ((area->base <= va) && 494 (va < area->base + (area->pages << PAGE_WIDTH))) 493 495 return area; 494 496 … … 499 501 * Second, locate the left neighbour and test its last record. 500 502 * Because of its position in the B+tree, it must have base < va. 501 *502 503 */ 503 504 btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); … … 507 508 mutex_lock(&area->lock); 508 509 509 if (va < area->base + area->pages * PAGE_SIZE)510 if (va < area->base + (area->pages << PAGE_WIDTH)) 510 511 return area; 511 512 … … 534 535 /* 535 536 * Locate the area. 536 *537 537 */ 538 538 as_area_t *area = find_area_and_lock(as, address); … … 546 546 * Remapping of address space areas associated 547 547 * with memory mapped devices is not supported. 548 *549 548 */ 550 549 mutex_unlock(&area->lock); … … 557 556 * Remapping of shared address space areas 558 557 * is not supported. 559 *560 558 */ 561 559 mutex_unlock(&area->lock); … … 568 566 /* 569 567 * Zero size address space areas are not allowed. 570 *571 568 */ 572 569 mutex_unlock(&area->lock); … … 576 573 577 574 if (pages < area->pages) { 578 uintptr_t start_free = area->base + pages * PAGE_SIZE;575 uintptr_t start_free = area->base + (pages << PAGE_WIDTH); 579 576 580 577 /* 581 578 * Shrinking the area. 582 579 * No need to check for overlaps. 583 *584 580 */ 585 581 … … 588 584 /* 589 585 * Start TLB shootdown sequence. 590 *591 586 */ 592 587 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, 593 area->base + pages * PAGE_SIZE, area->pages - pages);588 area->base + (pages << PAGE_WIDTH), area->pages - pages); 594 589 595 590 /* … … 599 594 * is also the right way to remove part of the used_space 600 595 * B+tree leaf list. 601 *602 596 */ 603 597 bool cond = true; … … 615 609 size_t i = 0; 616 610 617 if (overlaps(ptr, size * PAGE_SIZE, area->base,618 pages * PAGE_SIZE)) {611 if (overlaps(ptr, size << PAGE_WIDTH, area->base, 612 pages << PAGE_WIDTH)) { 619 613 620 if (ptr + size * PAGE_SIZE<= start_free) {614 if (ptr + (size << PAGE_WIDTH) <= start_free) { 621 615 /* 622 616 * The whole interval fits 623 617 * completely in the resized 624 618 * address space area. 625 *626 619 */ 627 620 break; … … 632 625 * to b and c overlaps with the resized 633 626 * address space area. 
634 *635 627 */ 636 628 … … 652 644 for (; i < size; i++) { 653 645 pte_t *pte = page_mapping_find(as, ptr + 654 i * PAGE_SIZE);646 (i << PAGE_WIDTH)); 655 647 656 648 ASSERT(pte); … … 661 653 (area->backend->frame_free)) { 662 654 area->backend->frame_free(area, 663 ptr + i * PAGE_SIZE,655 ptr + (i << PAGE_WIDTH), 664 656 PTE_GET_FRAME(pte)); 665 657 } 666 658 667 659 page_mapping_remove(as, ptr + 668 i * PAGE_SIZE);660 (i << PAGE_WIDTH)); 669 661 } 670 662 } … … 673 665 /* 674 666 * Finish TLB shootdown sequence. 675 * 676 */ 677 678 tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE, 667 */ 668 669 tlb_invalidate_pages(as->asid, area->base + (pages << PAGE_WIDTH), 679 670 area->pages - pages); 680 671 681 672 /* 682 673 * Invalidate software translation caches (e.g. TSB on sparc64). 683 *684 674 */ 685 675 as_invalidate_translation_cache(as, area->base + 686 pages * PAGE_SIZE, area->pages - pages);676 (pages << PAGE_WIDTH), area->pages - pages); 687 677 tlb_shootdown_finalize(ipl); 688 678 … … 692 682 * Growing the area. 693 683 * Check for overlaps with other address space areas. 694 * 695 */ 696 if (!check_area_conflicts(as, address, pages * PAGE_SIZE, 697 area)) { 684 */ 685 if (!check_area_conflicts(as, address, pages, area)) { 698 686 mutex_unlock(&area->lock); 699 687 mutex_unlock(&as->lock); … … 717 705 * 718 706 */ 719 static void sh_info_remove_reference(share_info_t *sh_info)707 NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info) 720 708 { 721 709 bool dealloc = false; … … 794 782 795 783 for (size = 0; size < (size_t) node->value[i]; size++) { 796 pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE); 784 pte_t *pte = 785 page_mapping_find(as, ptr + (size << PAGE_WIDTH)); 797 786 798 787 ASSERT(pte); … … 803 792 (area->backend->frame_free)) { 804 793 area->backend->frame_free(area, 805 ptr + size * PAGE_SIZE, PTE_GET_FRAME(pte));794 ptr + (size << PAGE_WIDTH), PTE_GET_FRAME(pte)); 806 795 } 807 796 808 page_mapping_remove(as, ptr + size * PAGE_SIZE);797 page_mapping_remove(as, ptr + (size << PAGE_WIDTH)); 809 798 } 810 799 } … … 813 802 /* 814 803 * Finish TLB shootdown sequence. 815 *816 804 */ 817 805 … … 821 809 * Invalidate potential software translation caches (e.g. TSB on 822 810 * sparc64). 823 *824 811 */ 825 812 as_invalidate_translation_cache(as, area->base, area->pages); … … 839 826 /* 840 827 * Remove the empty area from address space. 841 *842 828 */ 843 829 btree_remove(&as->as_area_btree, base, NULL); … … 881 867 /* 882 868 * Could not find the source address space area. 883 *884 869 */ 885 870 mutex_unlock(&src_as->lock); … … 891 876 * There is no backend or the backend does not 892 877 * know how to share the area. 893 *894 878 */ 895 879 mutex_unlock(&src_area->lock); … … 898 882 } 899 883 900 size_t src_size = src_area->pages * PAGE_SIZE;884 size_t src_size = src_area->pages << PAGE_WIDTH; 901 885 unsigned int src_flags = src_area->flags; 902 886 mem_backend_t *src_backend = src_area->backend; … … 918 902 * First, prepare the area for sharing. 919 903 * Then it will be safe to unlock it. 920 *921 904 */ 922 905 share_info_t *sh_info = src_area->sh_info; … … 930 913 /* 931 914 * Call the backend to setup sharing. 932 *933 915 */ 934 916 src_area->backend->share(src_area); … … 949 931 * The flags of the source area are masked against dst_flags_mask 950 932 * to support sharing in less privileged mode. 951 *952 933 */ 953 934 as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size, … … 966 947 * fully initialized. 
Clear the AS_AREA_ATTR_PARTIAL 967 948 * attribute and set the sh_info. 968 *969 949 */ 970 950 mutex_lock(&dst_as->lock); … … 987 967 * 988 968 */ 989 bool as_area_check_access(as_area_t *area, pf_access_t access) 990 { 969 NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access) 970 { 971 ASSERT(mutex_locked(&area->lock)); 972 991 973 int flagmap[] = { 992 974 [PF_ACCESS_READ] = AS_AREA_READ, … … 994 976 [PF_ACCESS_EXEC] = AS_AREA_EXEC 995 977 }; 996 997 ASSERT(mutex_locked(&area->lock));998 978 999 979 if (!(area->flags & flagmap[access])) … … 1010 990 * 1011 991 */ 1012 static unsigned int area_flags_to_page_flags(unsigned int aflags)992 NO_TRACE static unsigned int area_flags_to_page_flags(unsigned int aflags) 1013 993 { 1014 994 unsigned int flags = PAGE_USER | PAGE_PRESENT; … … 1066 1046 /* 1067 1047 * Compute total number of used pages in the used_space B+tree 1068 *1069 1048 */ 1070 1049 size_t used_pages = 0; … … 1088 1067 /* 1089 1068 * Start TLB shootdown sequence. 1090 *1091 1069 */ 1092 1070 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, … … 1096 1074 * Remove used pages from page tables and remember their frame 1097 1075 * numbers. 1098 *1099 1076 */ 1100 1077 size_t frame_idx = 0; … … 1111 1088 1112 1089 for (size = 0; size < (size_t) node->value[i]; size++) { 1113 pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE); 1090 pte_t *pte = 1091 page_mapping_find(as, ptr + (size << PAGE_WIDTH)); 1114 1092 1115 1093 ASSERT(pte); … … 1120 1098 1121 1099 /* Remove old mapping */ 1122 page_mapping_remove(as, ptr + size * PAGE_SIZE);1100 page_mapping_remove(as, ptr + (size << PAGE_WIDTH)); 1123 1101 } 1124 1102 } … … 1127 1105 /* 1128 1106 * Finish TLB shootdown sequence. 1129 *1130 1107 */ 1131 1108 … … 1135 1112 * Invalidate potential software translation caches (e.g. TSB on 1136 1113 * sparc64). 1137 *1138 1114 */ 1139 1115 as_invalidate_translation_cache(as, area->base, area->pages); … … 1168 1144 1169 1145 /* Insert the new mapping */ 1170 page_mapping_insert(as, ptr + size * PAGE_SIZE,1146 page_mapping_insert(as, ptr + (size << PAGE_WIDTH), 1171 1147 old_frame[frame_idx++], page_flags); 1172 1148 … … 1217 1193 * No area contained mapping for 'page'. 1218 1194 * Signal page fault to low-level handler. 1219 *1220 1195 */ 1221 1196 mutex_unlock(&AS->lock); … … 1237 1212 * The address space area is not backed by any backend 1238 1213 * or the backend cannot handle page faults. 1239 *1240 1214 */ 1241 1215 mutex_unlock(&area->lock); … … 1249 1223 * To avoid race condition between two page faults on the same address, 1250 1224 * we need to make sure the mapping has not been already inserted. 1251 *1252 1225 */ 1253 1226 pte_t *pte; … … 1267 1240 /* 1268 1241 * Resort to the backend page fault handler. 1269 *1270 1242 */ 1271 1243 if (area->backend->page_fault(area, page, access) != AS_PF_OK) { … … 1322 1294 * preemption is disabled. We should not be 1323 1295 * holding any other lock. 1324 *1325 1296 */ 1326 1297 (void) interrupts_enable(); … … 1342 1313 * list of inactive address spaces with assigned 1343 1314 * ASID. 1344 *1345 1315 */ 1346 1316 ASSERT(old_as->asid != ASID_INVALID); … … 1353 1323 * Perform architecture-specific tasks when the address space 1354 1324 * is being removed from the CPU. 1355 *1356 1325 */ 1357 1326 as_deinstall_arch(old_as); … … 1360 1329 /* 1361 1330 * Second, prepare the new address space. 
1362 *1363 1331 */ 1364 1332 if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) { … … 1376 1344 * Perform architecture-specific steps. 1377 1345 * (e.g. write ASID to hardware register etc.) 1378 *1379 1346 */ 1380 1347 as_install_arch(new_as); … … 1385 1352 } 1386 1353 1387 1388 1389 1354 /** Compute flags for virtual address translation subsytem. 1390 1355 * … … 1394 1359 * 1395 1360 */ 1396 unsigned int as_area_get_flags(as_area_t *area)1361 NO_TRACE unsigned int as_area_get_flags(as_area_t *area) 1397 1362 { 1398 1363 ASSERT(mutex_locked(&area->lock)); 1399 1364 1400 1365 return area_flags_to_page_flags(area->flags); 1401 1366 } … … 1412 1377 * 1413 1378 */ 1414 pte_t *page_table_create(unsigned int flags)1379 NO_TRACE pte_t *page_table_create(unsigned int flags) 1415 1380 { 1416 1381 ASSERT(as_operations); … … 1427 1392 * 1428 1393 */ 1429 void page_table_destroy(pte_t *page_table)1394 NO_TRACE void page_table_destroy(pte_t *page_table) 1430 1395 { 1431 1396 ASSERT(as_operations); … … 1448 1413 * 1449 1414 */ 1450 void page_table_lock(as_t *as, bool lock)1415 NO_TRACE void page_table_lock(as_t *as, bool lock) 1451 1416 { 1452 1417 ASSERT(as_operations); … … 1462 1427 * 1463 1428 */ 1464 void page_table_unlock(as_t *as, bool unlock)1429 NO_TRACE void page_table_unlock(as_t *as, bool unlock) 1465 1430 { 1466 1431 ASSERT(as_operations); … … 1477 1442 * are locked, otherwise false. 1478 1443 */ 1479 bool page_table_locked(as_t *as)1444 NO_TRACE bool page_table_locked(as_t *as) 1480 1445 { 1481 1446 ASSERT(as_operations); … … 1501 1466 1502 1467 if (src_area) { 1503 size = src_area->pages * PAGE_SIZE;1468 size = src_area->pages << PAGE_WIDTH; 1504 1469 mutex_unlock(&src_area->lock); 1505 1470 } else … … 1518 1483 * @param count Number of page to be marked. 1519 1484 * 1520 * @return Zero on failure and non-zeroon success.1521 * 1522 */ 1523 intused_space_insert(as_area_t *area, uintptr_t page, size_t count)1485 * @return False on failure or true on success. 1486 * 1487 */ 1488 bool used_space_insert(as_area_t *area, uintptr_t page, size_t count) 1524 1489 { 1525 1490 ASSERT(mutex_locked(&area->lock)); … … 1532 1497 /* 1533 1498 * We hit the beginning of some used space. 1534 * 1535 */ 1536 return 0; 1499 */ 1500 return false; 1537 1501 } 1538 1502 1539 1503 if (!leaf->keys) { 1540 1504 btree_insert(&area->used_space, page, (void *) count, leaf); 1541 return 1;1505 goto success; 1542 1506 } 1543 1507 … … 1553 1517 * somewhere between the rightmost interval of 1554 1518 * the left neigbour and the first interval of the leaf. 1555 *1556 1519 */ 1557 1520 1558 1521 if (page >= right_pg) { 1559 1522 /* Do nothing. */ 1560 } else if (overlaps(page, count * PAGE_SIZE, left_pg,1561 left_cnt * PAGE_SIZE)) {1523 } else if (overlaps(page, count << PAGE_WIDTH, left_pg, 1524 left_cnt << PAGE_WIDTH)) { 1562 1525 /* The interval intersects with the left interval. */ 1563 return 0;1564 } else if (overlaps(page, count * PAGE_SIZE, right_pg,1565 right_cnt * PAGE_SIZE)) {1526 return false; 1527 } else if (overlaps(page, count << PAGE_WIDTH, right_pg, 1528 right_cnt << PAGE_WIDTH)) { 1566 1529 /* The interval intersects with the right interval. 
*/ 1567 return 0;1568 } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&1569 (page + count * PAGE_SIZE== right_pg)) {1530 return false; 1531 } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) && 1532 (page + (count << PAGE_WIDTH) == right_pg)) { 1570 1533 /* 1571 1534 * The interval can be added by merging the two already 1572 1535 * present intervals. 1573 *1574 1536 */ 1575 1537 node->value[node->keys - 1] += count + right_cnt; 1576 1538 btree_remove(&area->used_space, right_pg, leaf); 1577 return 1;1578 } else if (page == left_pg + left_cnt * PAGE_SIZE) {1539 goto success; 1540 } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) { 1579 1541 /* 1580 1542 * The interval can be added by simply growing the left 1581 1543 * interval. 1582 *1583 1544 */ 1584 1545 node->value[node->keys - 1] += count; 1585 return 1;1586 } else if (page + count * PAGE_SIZE== right_pg) {1546 goto success; 1547 } else if (page + (count << PAGE_WIDTH) == right_pg) { 1587 1548 /* 1588 1549 * The interval can be addded by simply moving base of 1589 1550 * the right interval down and increasing its size 1590 1551 * accordingly. 1591 *1592 1552 */ 1593 1553 leaf->value[0] += count; 1594 1554 leaf->key[0] = page; 1595 return 1;1555 goto success; 1596 1556 } else { 1597 1557 /* 1598 1558 * The interval is between both neigbouring intervals, 1599 1559 * but cannot be merged with any of them. 1600 *1601 1560 */ 1602 1561 btree_insert(&area->used_space, page, (void *) count, 1603 1562 leaf); 1604 return 1;1563 goto success; 1605 1564 } 1606 1565 } else if (page < leaf->key[0]) { … … 1611 1570 * Investigate the border case in which the left neighbour does 1612 1571 * not exist but the interval fits from the left. 1613 * 1614 */ 1615 1616 if (overlaps(page, count * PAGE_SIZE, right_pg, 1617 right_cnt * PAGE_SIZE)) { 1572 */ 1573 1574 if (overlaps(page, count << PAGE_WIDTH, right_pg, 1575 right_cnt << PAGE_WIDTH)) { 1618 1576 /* The interval intersects with the right interval. */ 1619 return 0;1620 } else if (page + count * PAGE_SIZE== right_pg) {1577 return false; 1578 } else if (page + (count << PAGE_WIDTH) == right_pg) { 1621 1579 /* 1622 1580 * The interval can be added by moving the base of the 1623 1581 * right interval down and increasing its size 1624 1582 * accordingly. 1625 *1626 1583 */ 1627 1584 leaf->key[0] = page; 1628 1585 leaf->value[0] += count; 1629 return 1;1586 goto success; 1630 1587 } else { 1631 1588 /* 1632 1589 * The interval doesn't adjoin with the right interval. 1633 1590 * It must be added individually. 1634 *1635 1591 */ 1636 1592 btree_insert(&area->used_space, page, (void *) count, 1637 1593 leaf); 1638 return 1;1594 goto success; 1639 1595 } 1640 1596 } … … 1651 1607 * somewhere between the leftmost interval of 1652 1608 * the right neigbour and the last interval of the leaf. 1653 *1654 1609 */ 1655 1610 1656 1611 if (page < left_pg) { 1657 1612 /* Do nothing. */ 1658 } else if (overlaps(page, count * PAGE_SIZE, left_pg,1659 left_cnt * PAGE_SIZE)) {1613 } else if (overlaps(page, count << PAGE_WIDTH, left_pg, 1614 left_cnt << PAGE_WIDTH)) { 1660 1615 /* The interval intersects with the left interval. */ 1661 return 0;1662 } else if (overlaps(page, count * PAGE_SIZE, right_pg,1663 right_cnt * PAGE_SIZE)) {1616 return false; 1617 } else if (overlaps(page, count << PAGE_WIDTH, right_pg, 1618 right_cnt << PAGE_WIDTH)) { 1664 1619 /* The interval intersects with the right interval. 
*/ 1665 return 0;1666 } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&1667 (page + count * PAGE_SIZE== right_pg)) {1620 return false; 1621 } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) && 1622 (page + (count << PAGE_WIDTH) == right_pg)) { 1668 1623 /* 1669 1624 * The interval can be added by merging the two already 1670 1625 * present intervals. 1671 *1672 1626 */ 1673 1627 leaf->value[leaf->keys - 1] += count + right_cnt; 1674 1628 btree_remove(&area->used_space, right_pg, node); 1675 return 1;1676 } else if (page == left_pg + left_cnt * PAGE_SIZE) {1629 goto success; 1630 } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) { 1677 1631 /* 1678 1632 * The interval can be added by simply growing the left 1679 1633 * interval. 1680 *1681 1634 */ 1682 leaf->value[leaf->keys - 1] += 1683 return 1;1684 } else if (page + count * PAGE_SIZE== right_pg) {1635 leaf->value[leaf->keys - 1] += count; 1636 goto success; 1637 } else if (page + (count << PAGE_WIDTH) == right_pg) { 1685 1638 /* 1686 1639 * The interval can be addded by simply moving base of 1687 1640 * the right interval down and increasing its size 1688 1641 * accordingly. 1689 *1690 1642 */ 1691 1643 node->value[0] += count; 1692 1644 node->key[0] = page; 1693 return 1;1645 goto success; 1694 1646 } else { 1695 1647 /* 1696 1648 * The interval is between both neigbouring intervals, 1697 1649 * but cannot be merged with any of them. 1698 *1699 1650 */ 1700 1651 btree_insert(&area->used_space, page, (void *) count, 1701 1652 leaf); 1702 return 1;1653 goto success; 1703 1654 } 1704 1655 } else if (page >= leaf->key[leaf->keys - 1]) { … … 1709 1660 * Investigate the border case in which the right neighbour 1710 1661 * does not exist but the interval fits from the right. 1711 * 1712 */ 1713 1714 if (overlaps(page, count * PAGE_SIZE, left_pg, 1715 left_cnt * PAGE_SIZE)) { 1662 */ 1663 1664 if (overlaps(page, count << PAGE_WIDTH, left_pg, 1665 left_cnt << PAGE_WIDTH)) { 1716 1666 /* The interval intersects with the left interval. */ 1717 return 0;1718 } else if (left_pg + left_cnt * PAGE_SIZE== page) {1667 return false; 1668 } else if (left_pg + (left_cnt << PAGE_WIDTH) == page) { 1719 1669 /* 1720 1670 * The interval can be added by growing the left 1721 1671 * interval. 1722 *1723 1672 */ 1724 1673 leaf->value[leaf->keys - 1] += count; 1725 return 1;1674 goto success; 1726 1675 } else { 1727 1676 /* 1728 1677 * The interval doesn't adjoin with the left interval. 1729 1678 * It must be added individually. 1730 *1731 1679 */ 1732 1680 btree_insert(&area->used_space, page, (void *) count, 1733 1681 leaf); 1734 return 1;1682 goto success; 1735 1683 } 1736 1684 } … … 1740 1688 * only between two other intervals of the leaf. The two border cases 1741 1689 * were already resolved. 1742 *1743 1690 */ 1744 1691 btree_key_t i; … … 1752 1699 /* 1753 1700 * The interval fits between left_pg and right_pg. 1754 *1755 1701 */ 1756 1702 1757 if (overlaps(page, count * PAGE_SIZE, left_pg,1758 left_cnt * PAGE_SIZE)) {1703 if (overlaps(page, count << PAGE_WIDTH, left_pg, 1704 left_cnt << PAGE_WIDTH)) { 1759 1705 /* 1760 1706 * The interval intersects with the left 1761 1707 * interval. 1762 *1763 1708 */ 1764 return 0;1765 } else if (overlaps(page, count * PAGE_SIZE, right_pg,1766 right_cnt * PAGE_SIZE)) {1709 return false; 1710 } else if (overlaps(page, count << PAGE_WIDTH, right_pg, 1711 right_cnt << PAGE_WIDTH)) { 1767 1712 /* 1768 1713 * The interval intersects with the right 1769 1714 * interval. 
1770 *1771 1715 */ 1772 return 0;1773 } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&1774 (page + count * PAGE_SIZE== right_pg)) {1716 return false; 1717 } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) && 1718 (page + (count << PAGE_WIDTH) == right_pg)) { 1775 1719 /* 1776 1720 * The interval can be added by merging the two 1777 1721 * already present intervals. 1778 *1779 1722 */ 1780 1723 leaf->value[i - 1] += count + right_cnt; 1781 1724 btree_remove(&area->used_space, right_pg, leaf); 1782 return 1;1783 } else if (page == left_pg + left_cnt * PAGE_SIZE) {1725 goto success; 1726 } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) { 1784 1727 /* 1785 1728 * The interval can be added by simply growing 1786 1729 * the left interval. 1787 *1788 1730 */ 1789 1731 leaf->value[i - 1] += count; 1790 return 1;1791 } else if (page + count * PAGE_SIZE== right_pg) {1732 goto success; 1733 } else if (page + (count << PAGE_WIDTH) == right_pg) { 1792 1734 /* 1793 1735 * The interval can be addded by simply moving 1794 1736 * base of the right interval down and 1795 1737 * increasing its size accordingly. 1796 *1797 1738 */ 1798 1739 leaf->value[i] += count; 1799 1740 leaf->key[i] = page; 1800 return 1;1741 goto success; 1801 1742 } else { 1802 1743 /* … … 1804 1745 * intervals, but cannot be merged with any of 1805 1746 * them. 1806 *1807 1747 */ 1808 1748 btree_insert(&area->used_space, page, 1809 1749 (void *) count, leaf); 1810 return 1;1750 goto success; 1811 1751 } 1812 1752 } 1813 1753 } 1814 1754 1815 panic("Inconsistency detected while adding %" PRIs " pages of used " 1816 "space at %p.", count, page); 1755 panic("Inconsistency detected while adding %zu pages of used " 1756 "space at %p.", count, (void *) page); 1757 1758 success: 1759 area->resident += count; 1760 return true; 1817 1761 } 1818 1762 … … 1825 1769 * @param count Number of page to be marked. 1826 1770 * 1827 * @return Zero on failure and non-zeroon success.1828 * 1829 */ 1830 intused_space_remove(as_area_t *area, uintptr_t page, size_t count)1771 * @return False on failure or true on success. 1772 * 1773 */ 1774 bool used_space_remove(as_area_t *area, uintptr_t page, size_t count) 1831 1775 { 1832 1776 ASSERT(mutex_locked(&area->lock)); … … 1839 1783 /* 1840 1784 * We are lucky, page is the beginning of some interval. 1841 *1842 1785 */ 1843 1786 if (count > pages) { 1844 return 0;1787 return false; 1845 1788 } else if (count == pages) { 1846 1789 btree_remove(&area->used_space, page, leaf); 1847 return 1;1790 goto success; 1848 1791 } else { 1849 1792 /* 1850 1793 * Find the respective interval. 1851 1794 * Decrease its size and relocate its start address. 
1852 *1853 1795 */ 1854 1796 btree_key_t i; 1855 1797 for (i = 0; i < leaf->keys; i++) { 1856 1798 if (leaf->key[i] == page) { 1857 leaf->key[i] += count * PAGE_SIZE;1799 leaf->key[i] += count << PAGE_WIDTH; 1858 1800 leaf->value[i] -= count; 1859 return 1;1801 goto success; 1860 1802 } 1861 1803 } 1804 1862 1805 goto error; 1863 1806 } … … 1869 1812 size_t left_cnt = (size_t) node->value[node->keys - 1]; 1870 1813 1871 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,1872 count * PAGE_SIZE)) {1873 if (page + count * PAGE_SIZE==1874 left_pg + left_cnt * PAGE_SIZE) {1814 if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page, 1815 count << PAGE_WIDTH)) { 1816 if (page + (count << PAGE_WIDTH) == 1817 left_pg + (left_cnt << PAGE_WIDTH)) { 1875 1818 /* 1876 1819 * The interval is contained in the rightmost … … 1878 1821 * removed by updating the size of the bigger 1879 1822 * interval. 1880 *1881 1823 */ 1882 1824 node->value[node->keys - 1] -= count; 1883 return 1;1884 } else if (page + count * PAGE_SIZE<1885 left_pg + left_cnt*PAGE_SIZE) {1825 goto success; 1826 } else if (page + (count << PAGE_WIDTH) < 1827 left_pg + (left_cnt << PAGE_WIDTH)) { 1886 1828 /* 1887 1829 * The interval is contained in the rightmost … … 1890 1832 * the original interval and also inserting a 1891 1833 * new interval. 1892 *1893 1834 */ 1894 size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -1895 (page + count*PAGE_SIZE)) >> PAGE_WIDTH;1835 size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) - 1836 (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH; 1896 1837 node->value[node->keys - 1] -= count + new_cnt; 1897 1838 btree_insert(&area->used_space, page + 1898 count * PAGE_SIZE, (void *) new_cnt, leaf);1899 return 1;1839 (count << PAGE_WIDTH), (void *) new_cnt, leaf); 1840 goto success; 1900 1841 } 1901 1842 } 1902 return 0; 1843 1844 return false; 1903 1845 } else if (page < leaf->key[0]) 1904 return 0;1846 return false; 1905 1847 1906 1848 if (page > leaf->key[leaf->keys - 1]) { … … 1908 1850 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 1909 1851 1910 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,1911 count * PAGE_SIZE)) {1912 if (page + count * PAGE_SIZE==1913 left_pg + left_cnt * PAGE_SIZE) {1852 if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page, 1853 count << PAGE_WIDTH)) { 1854 if (page + (count << PAGE_WIDTH) == 1855 left_pg + (left_cnt << PAGE_WIDTH)) { 1914 1856 /* 1915 1857 * The interval is contained in the rightmost 1916 1858 * interval of the leaf and can be removed by 1917 1859 * updating the size of the bigger interval. 1918 *1919 1860 */ 1920 1861 leaf->value[leaf->keys - 1] -= count; 1921 return 1;1922 } else if (page + count * PAGE_SIZE< left_pg +1923 left_cnt * PAGE_SIZE) {1862 goto success; 1863 } else if (page + (count << PAGE_WIDTH) < left_pg + 1864 (left_cnt << PAGE_WIDTH)) { 1924 1865 /* 1925 1866 * The interval is contained in the rightmost … … 1928 1869 * original interval and also inserting a new 1929 1870 * interval. 
1930 *1931 1871 */ 1932 size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -1933 (page + count * PAGE_SIZE)) >> PAGE_WIDTH;1872 size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) - 1873 (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH; 1934 1874 leaf->value[leaf->keys - 1] -= count + new_cnt; 1935 1875 btree_insert(&area->used_space, page + 1936 count * PAGE_SIZE, (void *) new_cnt, leaf);1937 return 1;1876 (count << PAGE_WIDTH), (void *) new_cnt, leaf); 1877 goto success; 1938 1878 } 1939 1879 } 1940 return 0; 1880 1881 return false; 1941 1882 } 1942 1883 1943 1884 /* 1944 1885 * The border cases have been already resolved. 1945 * Now the interval can be only between intervals of the leaf. 1886 * Now the interval can be only between intervals of the leaf. 1946 1887 */ 1947 1888 btree_key_t i; … … 1955 1896 * to (i - 1) and i. 1956 1897 */ 1957 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,1958 count * PAGE_SIZE)) {1959 if (page + count * PAGE_SIZE==1960 left_pg + left_cnt*PAGE_SIZE) {1898 if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page, 1899 count << PAGE_WIDTH)) { 1900 if (page + (count << PAGE_WIDTH) == 1901 left_pg + (left_cnt << PAGE_WIDTH)) { 1961 1902 /* 1962 1903 * The interval is contained in the … … 1964 1905 * be removed by updating the size of 1965 1906 * the bigger interval. 1966 *1967 1907 */ 1968 1908 leaf->value[i - 1] -= count; 1969 return 1;1970 } else if (page + count * PAGE_SIZE<1971 left_pg + left_cnt * PAGE_SIZE) {1909 goto success; 1910 } else if (page + (count << PAGE_WIDTH) < 1911 left_pg + (left_cnt << PAGE_WIDTH)) { 1972 1912 /* 1973 1913 * The interval is contained in the … … 1978 1918 */ 1979 1919 size_t new_cnt = ((left_pg + 1980 left_cnt * PAGE_SIZE) -1981 (page + count * PAGE_SIZE)) >>1920 (left_cnt << PAGE_WIDTH)) - 1921 (page + (count << PAGE_WIDTH))) >> 1982 1922 PAGE_WIDTH; 1983 1923 leaf->value[i - 1] -= count + new_cnt; 1984 1924 btree_insert(&area->used_space, page + 1985 count * PAGE_SIZE, (void *) new_cnt,1925 (count << PAGE_WIDTH), (void *) new_cnt, 1986 1926 leaf); 1987 return 1;1927 goto success; 1988 1928 } 1989 1929 } 1990 return 0; 1930 1931 return false; 1991 1932 } 1992 1933 } 1993 1934 1994 1935 error: 1995 panic("Inconsistency detected while removing %" PRIs " pages of used " 1996 "space from %p.", count, page); 1936 panic("Inconsistency detected while removing %zu pages of used " 1937 "space from %p.", count, (void *) page); 1938 1939 success: 1940 area->resident -= count; 1941 return true; 1997 1942 } 1998 1943 … … 2002 1947 2003 1948 /** Wrapper for as_area_create(). */ 2004 unative_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags)1949 sysarg_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags) 2005 1950 { 2006 1951 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, 2007 1952 AS_AREA_ATTR_NONE, &anon_backend, NULL)) 2008 return ( unative_t) address;1953 return (sysarg_t) address; 2009 1954 else 2010 return ( unative_t) -1;1955 return (sysarg_t) -1; 2011 1956 } 2012 1957 2013 1958 /** Wrapper for as_area_resize(). */ 2014 unative_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)2015 { 2016 return ( unative_t) as_area_resize(AS, address, size, 0);1959 sysarg_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags) 1960 { 1961 return (sysarg_t) as_area_resize(AS, address, size, 0); 2017 1962 } 2018 1963 2019 1964 /** Wrapper for as_area_change_flags(). 
*/ 2020 unative_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)2021 { 2022 return ( unative_t) as_area_change_flags(AS, flags, address);1965 sysarg_t sys_as_area_change_flags(uintptr_t address, unsigned int flags) 1966 { 1967 return (sysarg_t) as_area_change_flags(AS, flags, address); 2023 1968 } 2024 1969 2025 1970 /** Wrapper for as_area_destroy(). */ 2026 unative_t sys_as_area_destroy(uintptr_t address) 2027 { 2028 return (unative_t) as_area_destroy(AS, address); 1971 sysarg_t sys_as_area_destroy(uintptr_t address) 1972 { 1973 return (sysarg_t) as_area_destroy(AS, address); 1974 } 1975 1976 /** Return pointer to unmapped address space area 1977 * 1978 * @param base Lowest address bound. 1979 * @param size Requested size of the allocation. 1980 * 1981 * @return Pointer to the beginning of unmapped address space area. 1982 * 1983 */ 1984 sysarg_t sys_as_get_unmapped_area(uintptr_t base, size_t size) 1985 { 1986 if (size == 0) 1987 return 0; 1988 1989 /* 1990 * Make sure we allocate from page-aligned 1991 * address. Check for possible overflow in 1992 * each step. 1993 */ 1994 1995 size_t pages = SIZE2FRAMES(size); 1996 uintptr_t ret = 0; 1997 1998 /* 1999 * Find the lowest unmapped address aligned on the sz 2000 * boundary, not smaller than base and of the required size. 2001 */ 2002 2003 mutex_lock(&AS->lock); 2004 2005 /* First check the base address itself */ 2006 uintptr_t addr = ALIGN_UP(base, PAGE_SIZE); 2007 if ((addr >= base) && 2008 (check_area_conflicts(AS, addr, pages, NULL))) 2009 ret = addr; 2010 2011 /* Eventually check the addresses behind each area */ 2012 link_t *cur; 2013 for (cur = AS->as_area_btree.leaf_head.next; 2014 (ret == 0) && (cur != &AS->as_area_btree.leaf_head); 2015 cur = cur->next) { 2016 btree_node_t *node = 2017 list_get_instance(cur, btree_node_t, leaf_link); 2018 2019 btree_key_t i; 2020 for (i = 0; (ret == 0) && (i < node->keys); i++) { 2021 as_area_t *area = (as_area_t *) node->value[i]; 2022 2023 mutex_lock(&area->lock); 2024 2025 uintptr_t addr = 2026 ALIGN_UP(area->base + (area->pages << PAGE_WIDTH), 2027 PAGE_SIZE); 2028 2029 if ((addr >= base) && (addr >= area->base) && 2030 (check_area_conflicts(AS, addr, pages, area))) 2031 ret = addr; 2032 2033 mutex_unlock(&area->lock); 2034 } 2035 } 2036 2037 mutex_unlock(&AS->lock); 2038 2039 return (sysarg_t) ret; 2029 2040 } 2030 2041 … … 2095 2106 mutex_lock(&as->lock); 2096 2107 2097 /* print out info about address space areas */2108 /* Print out info about address space areas */ 2098 2109 link_t *cur; 2099 2110 for (cur = as->as_area_btree.leaf_head.next; … … 2107 2118 2108 2119 mutex_lock(&area->lock); 2109 printf("as_area: %p, base=%p, pages=%" PRIs 2110 " (%p - %p)\n", area, area->base, area->pages, 2111 area->base, area->base + FRAMES2SIZE(area->pages)); 2120 printf("as_area: %p, base=%p, pages=%zu" 2121 " (%p - %p)\n", area, (void *) area->base, 2122 area->pages, (void *) area->base, 2123 (void *) (area->base + FRAMES2SIZE(area->pages))); 2112 2124 mutex_unlock(&area->lock); 2113 2125 } -
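A pattern that repeats across the as.c hunks above: byte sizes derived from page counts are now computed as count << PAGE_WIDTH instead of count * PAGE_SIZE, and check_area_conflicts() takes a page count rather than a byte size. The same hunks also switch used_space_insert()/used_space_remove() to a bool return value and make every success path update the new area->resident counter. The standalone sketch below only demonstrates the shift arithmetic; the constants and the rounding helper are illustrative assumptions, not the kernel's definitions.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative values only; the kernel takes PAGE_WIDTH/PAGE_SIZE from its
 * architecture headers and they need not be 12/4096. */
#define PAGE_WIDTH  12
#define PAGE_SIZE   ((size_t) 1 << PAGE_WIDTH)

int main(void)
{
	size_t pages = 37;

	/* The old code computed byte lengths as pages * PAGE_SIZE; the merged
	 * code shifts by PAGE_WIDTH instead.  The two are equal whenever
	 * PAGE_SIZE == 1 << PAGE_WIDTH. */
	size_t bytes_mul = pages * PAGE_SIZE;
	size_t bytes_shl = pages << PAGE_WIDTH;
	assert(bytes_mul == bytes_shl);

	/* Going from a byte size back to a page count rounds up; this helper
	 * is a stand-in for whatever SIZE2FRAMES-style macro the kernel uses. */
	size_t size = bytes_shl - 123;
	size_t frames = (size + PAGE_SIZE - 1) >> PAGE_WIDTH;
	printf("%zu bytes cover %zu pages\n", size, frames);

	return 0;
}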
kernel/generic/src/mm/backend_elf.c
rcefb126 r89c57b6 91 91 if (!as_area_check_access(area, access)) 92 92 return AS_PF_FAULT; 93 94 ASSERT((addr >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) && 95 (addr < entry->p_vaddr + entry->p_memsz)); 93 94 if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) 95 return AS_PF_FAULT; 96 97 if (addr >= entry->p_vaddr + entry->p_memsz) 98 return AS_PF_FAULT; 99 96 100 i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH; 97 101 base = (uintptr_t) -
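The backend_elf.c change turns a hard ASSERT on the faulting address into two range checks that return AS_PF_FAULT, so a stray address is reported as an ordinary unhandled fault instead of stopping the kernel. Below is a minimal userspace sketch of the same bounds test; segment_t, PAGE_SIZE and ALIGN_DOWN are stand-ins for illustration, not the kernel's definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE        4096u
#define ALIGN_DOWN(a, s) ((a) & ~((uintptr_t) (s) - 1))

enum pf_result { AS_PF_OK, AS_PF_FAULT };

/* Hypothetical stand-in for the ELF program header fields the backend reads. */
typedef struct {
	uintptr_t p_vaddr;  /* segment start in the address space */
	size_t    p_memsz;  /* segment size in memory */
} segment_t;

/* A fault address outside [ALIGN_DOWN(p_vaddr), p_vaddr + p_memsz) is now
 * reported back as a fault rather than tripping a kernel assertion. */
static enum pf_result check_fault_addr(const segment_t *seg, uintptr_t addr)
{
	if (addr < ALIGN_DOWN(seg->p_vaddr, PAGE_SIZE))
		return AS_PF_FAULT;

	if (addr >= seg->p_vaddr + seg->p_memsz)
		return AS_PF_FAULT;

	return AS_PF_OK;
}

int main(void)
{
	segment_t seg = { .p_vaddr = 0x400800, .p_memsz = 0x2000 };

	printf("%d\n", check_fault_addr(&seg, 0x400400));   /* inside the first (partially used) page: AS_PF_OK */
	printf("%d\n", check_fault_addr(&seg, 0x500000));   /* well past the segment: AS_PF_FAULT */
	return 0;
}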
kernel/generic/src/mm/backend_phys.c
rcefb126 r89c57b6 81 81 page_mapping_insert(AS, addr, base + (addr - area->base), 82 82 as_area_get_flags(area)); 83 if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1)) 84 panic("Cannot insert used space."); 83 84 if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1)) 85 panic("Cannot insert used space."); 85 86 86 87 return AS_PF_OK; -
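In backend_phys.c the substantive change is that the handler now panics when the reworked used_space_insert() (bool-returning after this merge) reports failure; the mapping itself is unchanged. For orientation, the context lines show that the backing frame is simply the physical window base plus the fault offset, which the sketch below restates with hypothetical names and values.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the linear translation visible in the context lines: the backing
 * frame sits at the same offset within the physical window as the faulting
 * address sits within the area.  All names here are illustrative. */
static uintptr_t phys_frame_for(uintptr_t area_base, uintptr_t phys_base,
    uintptr_t fault_addr)
{
	return phys_base + (fault_addr - area_base);
}

int main(void)
{
	uintptr_t frame = phys_frame_for(0x80000000u, 0xfe000000u, 0x80004000u);
	printf("backing frame: 0x%lx\n", (unsigned long) frame);   /* 0xfe004000 */
	return 0;
}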
kernel/generic/src/mm/frame.c
rcefb126 r89c57b6 75 75 /********************/ 76 76 77 static inline size_t frame_index(zone_t *zone, frame_t *frame)77 NO_TRACE static inline size_t frame_index(zone_t *zone, frame_t *frame) 78 78 { 79 79 return (size_t) (frame - zone->frames); 80 80 } 81 81 82 static inline size_t frame_index_abs(zone_t *zone, frame_t *frame)82 NO_TRACE static inline size_t frame_index_abs(zone_t *zone, frame_t *frame) 83 83 { 84 84 return (size_t) (frame - zone->frames) + zone->base; 85 85 } 86 86 87 static inline bool frame_index_valid(zone_t *zone, size_t index)87 NO_TRACE static inline bool frame_index_valid(zone_t *zone, size_t index) 88 88 { 89 89 return (index < zone->count); 90 90 } 91 91 92 static inline size_t make_frame_index(zone_t *zone, frame_t *frame)92 NO_TRACE static inline size_t make_frame_index(zone_t *zone, frame_t *frame) 93 93 { 94 94 return (frame - zone->frames); … … 100 100 * 101 101 */ 102 static void frame_initialize(frame_t *frame)102 NO_TRACE static void frame_initialize(frame_t *frame) 103 103 { 104 104 frame->refcount = 1; … … 121 121 * 122 122 */ 123 static size_t zones_insert_zone(pfn_t base, size_t count) 123 NO_TRACE static size_t zones_insert_zone(pfn_t base, size_t count, 124 zone_flags_t flags) 124 125 { 125 126 if (zones.count + 1 == ZONES_MAX) { … … 131 132 for (i = 0; i < zones.count; i++) { 132 133 /* Check for overlap */ 133 if (overlaps(base, count, 134 zones.info[i].base, zones.info[i].count)) { 135 printf("Zones overlap!\n"); 134 if (overlaps(zones.info[i].base, zones.info[i].count, 135 base, count)) { 136 137 /* 138 * If the overlaping zones are of the same type 139 * and the new zone is completely within the previous 140 * one, then quietly ignore the new zone. 141 * 142 */ 143 144 if ((zones.info[i].flags != flags) || 145 (!iswithin(zones.info[i].base, zones.info[i].count, 146 base, count))) { 147 printf("Zone (%p, %p) overlaps " 148 "with previous zone (%p %p)!\n", 149 (void *) PFN2ADDR(base), (void *) PFN2ADDR(count), 150 (void *) PFN2ADDR(zones.info[i].base), 151 (void *) PFN2ADDR(zones.info[i].count)); 152 } 153 136 154 return (size_t) -1; 137 155 } … … 144 162 for (j = zones.count; j > i; j--) { 145 163 zones.info[j] = zones.info[j - 1]; 146 zones.info[j].buddy_system->data = 147 (void *) &zones.info[j - 1]; 164 if (zones.info[j].buddy_system != NULL) 165 zones.info[j].buddy_system->data = 166 (void *) &zones.info[j]; 148 167 } 149 168 … … 162 181 */ 163 182 #ifdef CONFIG_DEBUG 164 static size_t total_frames_free(void)183 NO_TRACE static size_t total_frames_free(void) 165 184 { 166 185 size_t total = 0; … … 185 204 * 186 205 */ 187 size_t find_zone(pfn_t frame, size_t count, size_t hint)206 NO_TRACE size_t find_zone(pfn_t frame, size_t count, size_t hint) 188 207 { 189 208 if (hint >= zones.count) … … 206 225 207 226 /** @return True if zone can allocate specified order */ 208 static bool zone_can_alloc(zone_t *zone, uint8_t order)227 NO_TRACE static bool zone_can_alloc(zone_t *zone, uint8_t order) 209 228 { 210 229 return (zone_flags_available(zone->flags) … … 222 241 * 223 242 */ 224 static size_t find_free_zone(uint8_t order, zone_flags_t flags, size_t hint) 243 NO_TRACE static size_t find_free_zone(uint8_t order, zone_flags_t flags, 244 size_t hint) 225 245 { 226 246 if (hint >= zones.count) … … 262 282 * 263 283 */ 264 static link_t *zone_buddy_find_block(buddy_system_t *buddy, link_t *child,265 uint8_t order)284 NO_TRACE static link_t *zone_buddy_find_block(buddy_system_t *buddy, 285 link_t *child, uint8_t order) 266 286 { 267 287 frame_t 
*frame = list_get_instance(child, frame_t, buddy_link); … … 285 305 * 286 306 */ 287 static link_t *zone_buddy_find_buddy(buddy_system_t *buddy, link_t *block) 307 NO_TRACE static link_t *zone_buddy_find_buddy(buddy_system_t *buddy, 308 link_t *block) 288 309 { 289 310 frame_t *frame = list_get_instance(block, frame_t, buddy_link); … … 321 342 * 322 343 */ 323 static link_t *zone_buddy_bisect(buddy_system_t *buddy, link_t *block)344 NO_TRACE static link_t *zone_buddy_bisect(buddy_system_t *buddy, link_t *block) 324 345 { 325 346 frame_t *frame_l = list_get_instance(block, frame_t, buddy_link); … … 339 360 * 340 361 */ 341 static link_t *zone_buddy_coalesce(buddy_system_t *buddy, link_t *block_1,342 link_t *block_ 2)362 NO_TRACE static link_t *zone_buddy_coalesce(buddy_system_t *buddy, 363 link_t *block_1, link_t *block_2) 343 364 { 344 365 frame_t *frame1 = list_get_instance(block_1, frame_t, buddy_link); … … 355 376 * 356 377 */ 357 static void zone_buddy_set_order(buddy_system_t *buddy, link_t *block,378 NO_TRACE static void zone_buddy_set_order(buddy_system_t *buddy, link_t *block, 358 379 uint8_t order) 359 380 { … … 369 390 * 370 391 */ 371 static uint8_t zone_buddy_get_order(buddy_system_t *buddy, link_t *block) 392 NO_TRACE static uint8_t zone_buddy_get_order(buddy_system_t *buddy, 393 link_t *block) 372 394 { 373 395 return list_get_instance(block, frame_t, buddy_link)->buddy_order; … … 380 402 * 381 403 */ 382 static void zone_buddy_mark_busy(buddy_system_t *buddy, link_t *block)404 NO_TRACE static void zone_buddy_mark_busy(buddy_system_t *buddy, link_t *block) 383 405 { 384 406 list_get_instance(block, frame_t, buddy_link)->refcount = 1; … … 389 411 * @param buddy Buddy system. 390 412 * @param block Buddy system block. 391 */ 392 static void zone_buddy_mark_available(buddy_system_t *buddy, link_t *block) 413 * 414 */ 415 NO_TRACE static void zone_buddy_mark_available(buddy_system_t *buddy, 416 link_t *block) 393 417 { 394 418 list_get_instance(block, frame_t, buddy_link)->refcount = 0; … … 421 445 * 422 446 */ 423 static pfn_t zone_frame_alloc(zone_t *zone, uint8_t order)447 NO_TRACE static pfn_t zone_frame_alloc(zone_t *zone, uint8_t order) 424 448 { 425 449 ASSERT(zone_flags_available(zone->flags)); … … 449 473 * 450 474 */ 451 static void zone_frame_free(zone_t *zone, size_t frame_idx)475 NO_TRACE static void zone_frame_free(zone_t *zone, size_t frame_idx) 452 476 { 453 477 ASSERT(zone_flags_available(zone->flags)); … … 470 494 471 495 /** Return frame from zone. */ 472 static frame_t *zone_get_frame(zone_t *zone, size_t frame_idx)496 NO_TRACE static frame_t *zone_get_frame(zone_t *zone, size_t frame_idx) 473 497 { 474 498 ASSERT(frame_idx < zone->count); … … 477 501 478 502 /** Mark frame in zone unavailable to allocation. 
*/ 479 static void zone_mark_unavailable(zone_t *zone, size_t frame_idx)503 NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t frame_idx) 480 504 { 481 505 ASSERT(zone_flags_available(zone->flags)); … … 506 530 * 507 531 */ 508 static void zone_merge_internal(size_t z1, size_t z2, zone_t *old_z1, buddy_system_t *buddy) 532 NO_TRACE static void zone_merge_internal(size_t z1, size_t z2, zone_t *old_z1, 533 buddy_system_t *buddy) 509 534 { 510 535 ASSERT(zone_flags_available(zones.info[z1].flags)); … … 602 627 * 603 628 */ 604 static void return_config_frames(size_t znum, pfn_t pfn, size_t count)629 NO_TRACE static void return_config_frames(size_t znum, pfn_t pfn, size_t count) 605 630 { 606 631 ASSERT(zone_flags_available(zones.info[znum].flags)); … … 637 662 * 638 663 */ 639 static void zone_reduce_region(size_t znum, pfn_t frame_idx, size_t count) 664 NO_TRACE static void zone_reduce_region(size_t znum, pfn_t frame_idx, 665 size_t count) 640 666 { 641 667 ASSERT(zone_flags_available(zones.info[znum].flags)); … … 738 764 for (i = z2 + 1; i < zones.count; i++) { 739 765 zones.info[i - 1] = zones.info[i]; 740 zones.info[i - 1].buddy_system->data = 741 (void *) &zones.info[i - 1]; 766 if (zones.info[i - 1].buddy_system != NULL) 767 zones.info[i - 1].buddy_system->data = 768 (void *) &zones.info[i - 1]; 742 769 } 743 770 … … 777 804 * 778 805 */ 779 static void zone_construct(zone_t *zone, buddy_system_t *buddy, pfn_t start,780 size_t count, zone_flags_t flags)806 NO_TRACE static void zone_construct(zone_t *zone, buddy_system_t *buddy, 807 pfn_t start, size_t count, zone_flags_t flags) 781 808 { 782 809 zone->base = start; … … 821 848 * 822 849 */ 823 uintptr_t zone_conf_size(size_t count)850 size_t zone_conf_size(size_t count) 824 851 { 825 852 return (count * sizeof(frame_t) + buddy_conf_size(fnzb(count))); … … 852 879 * the assert 853 880 */ 854 ASSERT(confframe != NULL);881 ASSERT(confframe != ADDR2PFN((uintptr_t ) NULL)); 855 882 856 883 /* If confframe is supposed to be inside our zone, then make sure … … 888 915 } 889 916 890 size_t znum = zones_insert_zone(start, count );917 size_t znum = zones_insert_zone(start, count, flags); 891 918 if (znum == (size_t) -1) { 892 919 irq_spinlock_unlock(&zones.lock, true); … … 911 938 912 939 /* Non-available zone */ 913 size_t znum = zones_insert_zone(start, count );940 size_t znum = zones_insert_zone(start, count, flags); 914 941 if (znum == (size_t) -1) { 915 942 irq_spinlock_unlock(&zones.lock, true); … … 1023 1050 1024 1051 #ifdef CONFIG_DEBUG 1025 printf("Thread %" PRIu64 " waiting for % " PRIs "frames, "1026 "% " PRIs "available.\n", THREAD->tid, size, avail);1052 printf("Thread %" PRIu64 " waiting for %zu frames, " 1053 "%zu available.\n", THREAD->tid, size, avail); 1027 1054 #endif 1028 1055 … … 1078 1105 */ 1079 1106 pfn_t pfn = ADDR2PFN(frame); 1080 size_t znum = find_zone(pfn, 1, NULL);1107 size_t znum = find_zone(pfn, 1, 0); 1081 1108 1082 1109 ASSERT(znum != (size_t) -1); … … 1108 1135 * 1109 1136 */ 1110 void frame_reference_add(pfn_t pfn)1137 NO_TRACE void frame_reference_add(pfn_t pfn) 1111 1138 { 1112 1139 irq_spinlock_lock(&zones.lock, true); … … 1115 1142 * First, find host frame zone for addr. 
1116 1143 */ 1117 size_t znum = find_zone(pfn, 1, NULL);1144 size_t znum = find_zone(pfn, 1, 0); 1118 1145 1119 1146 ASSERT(znum != (size_t) -1); … … 1127 1154 * 1128 1155 */ 1129 void frame_mark_unavailable(pfn_t start, size_t count)1156 NO_TRACE void frame_mark_unavailable(pfn_t start, size_t count) 1130 1157 { 1131 1158 irq_spinlock_lock(&zones.lock, true); … … 1271 1298 bool available = zone_flags_available(flags); 1272 1299 1273 printf("%-4 " PRIs, i);1300 printf("%-4zu", i); 1274 1301 1275 1302 #ifdef __32_BITS__ 1276 printf(" % 10p",base);1303 printf(" %p", (void *) base); 1277 1304 #endif 1278 1305 1279 1306 #ifdef __64_BITS__ 1280 printf(" % 18p",base);1307 printf(" %p", (void *) base); 1281 1308 #endif 1282 1309 1283 printf(" %12 " PRIs "%c%c%c ", count,1310 printf(" %12zu %c%c%c ", count, 1284 1311 available ? 'A' : ' ', 1285 1312 (flags & ZONE_RESERVED) ? 'R' : ' ', … … 1287 1314 1288 1315 if (available) 1289 printf("%14 " PRIs " %14" PRIs,1316 printf("%14zu %14zu", 1290 1317 free_count, busy_count); 1291 1318 … … 1328 1355 bool available = zone_flags_available(flags); 1329 1356 1330 printf("Zone number: % " PRIs "\n", znum);1331 printf("Zone base address: %p\n", base);1332 printf("Zone size: % " PRIs " frames (%" PRIs "KiB)\n", count,1357 printf("Zone number: %zu\n", znum); 1358 printf("Zone base address: %p\n", (void *) base); 1359 printf("Zone size: %zu frames (%zu KiB)\n", count, 1333 1360 SIZE2KB(FRAMES2SIZE(count))); 1334 1361 printf("Zone flags: %c%c%c\n", … … 1338 1365 1339 1366 if (available) { 1340 printf("Allocated space: % " PRIs " frames (%" PRIs "KiB)\n",1367 printf("Allocated space: %zu frames (%zu KiB)\n", 1341 1368 busy_count, SIZE2KB(FRAMES2SIZE(busy_count))); 1342 printf("Available space: % " PRIs " frames (%" PRIs "KiB)\n",1369 printf("Available space: %zu frames (%zu KiB)\n", 1343 1370 free_count, SIZE2KB(FRAMES2SIZE(free_count))); 1344 1371 } -
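The most visible frame.c change is in zones_insert_zone(): it now receives the zone flags and, when the new zone overlaps an existing one, it stays quiet only if the flags match and the new zone lies completely inside the old one; any other overlap is reported with both address ranges. The sketch below shows the two interval predicates that decision rests on, written as stand-ins for the kernel's macros.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long pfn_t;

/* Stand-ins for the kernel's interval predicates: overlaps() tests for any
 * intersection of two [start, start + count) ranges, iswithin() tests whether
 * the second range lies completely inside the first. */
static bool overlaps(pfn_t s1, pfn_t cnt1, pfn_t s2, pfn_t cnt2)
{
	return (s1 < s2 + cnt2) && (s2 < s1 + cnt1);
}

static bool iswithin(pfn_t s1, pfn_t cnt1, pfn_t s2, pfn_t cnt2)
{
	return (s2 >= s1) && (s2 + cnt2 <= s1 + cnt1);
}

int main(void)
{
	pfn_t zone_base = 0x100, zone_count = 0x400;

	/* A new zone completely inside an existing zone of the same type is
	 * silently ignored by the reworked zones_insert_zone(); any other
	 * overlap is reported as an error. */
	printf("contained: %d\n", iswithin(zone_base, zone_count, 0x200, 0x80));   /* 1 */
	printf("overlaps:  %d\n", overlaps(zone_base, zone_count, 0x480, 0x100));  /* 1 */
	return 0;
}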
kernel/generic/src/mm/page.c
rcefb126 r89c57b6 115 115 * 116 116 */ 117 void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,117 NO_TRACE void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, 118 118 unsigned int flags) 119 119 { … … 139 139 * 140 140 */ 141 void page_mapping_remove(as_t *as, uintptr_t page)141 NO_TRACE void page_mapping_remove(as_t *as, uintptr_t page) 142 142 { 143 143 ASSERT(page_table_locked(as)); … … 163 163 * 164 164 */ 165 pte_t *page_mapping_find(as_t *as, uintptr_t page)165 NO_TRACE pte_t *page_mapping_find(as_t *as, uintptr_t page) 166 166 { 167 167 ASSERT(page_table_locked(as)); -
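The page.c hunks only add the NO_TRACE qualifier to the page mapping wrappers, matching the other files in this merge. As an assumption for illustration (not copied from the HelenOS headers), such a macro usually expands to GCC's no_instrument_function attribute so that hot-path functions are skipped by -finstrument-functions profiling hooks:

#include <stdio.h>

/* Assumed definition for illustration only; the real NO_TRACE lives in the
 * kernel's headers and may differ. */
#ifdef __GNUC__
#define NO_TRACE  __attribute__((no_instrument_function))
#else
#define NO_TRACE
#endif

/* When compiled with -finstrument-functions, every function not marked
 * no_instrument_function gets calls to these two hooks inserted by the
 * compiler; the hooks themselves must be excluded to avoid recursion. */
NO_TRACE void __cyg_profile_func_enter(void *fn, void *site) { (void) fn; (void) site; }
NO_TRACE void __cyg_profile_func_exit(void *fn, void *site) { (void) fn; (void) site; }

/* Marked functions stay free of the instrumentation overhead. */
NO_TRACE static int hot_path(int x)
{
	return x * 2;
}

int main(void)
{
	printf("%d\n", hot_path(21));   /* 42 */
	return 0;
}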
kernel/generic/src/mm/slab.c
rcefb126 r89c57b6 177 177 * 178 178 */ 179 static slab_t *slab_space_alloc(slab_cache_t *cache, unsigned int flags) 179 NO_TRACE static slab_t *slab_space_alloc(slab_cache_t *cache, 180 unsigned int flags) 180 181 { 181 182 … … 224 225 * 225 226 */ 226 static size_t slab_space_free(slab_cache_t *cache, slab_t *slab)227 NO_TRACE static size_t slab_space_free(slab_cache_t *cache, slab_t *slab) 227 228 { 228 229 frame_free(KA2PA(slab->start)); … … 236 237 237 238 /** Map object to slab structure */ 238 static slab_t *obj2slab(void *obj)239 NO_TRACE static slab_t *obj2slab(void *obj) 239 240 { 240 241 return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0); … … 252 253 * 253 254 */ 254 static size_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab) 255 NO_TRACE static size_t slab_obj_destroy(slab_cache_t *cache, void *obj, 256 slab_t *slab) 255 257 { 256 258 if (!slab) … … 293 295 * 294 296 */ 295 static void *slab_obj_create(slab_cache_t *cache,int flags)297 NO_TRACE static void *slab_obj_create(slab_cache_t *cache, unsigned int flags) 296 298 { 297 299 spinlock_lock(&cache->slablock); … … 349 351 * 350 352 */ 351 static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, bool first) 353 NO_TRACE static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, 354 bool first) 352 355 { 353 356 slab_magazine_t *mag = NULL; … … 373 376 * 374 377 */ 375 static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag) 378 NO_TRACE static void put_mag_to_cache(slab_cache_t *cache, 379 slab_magazine_t *mag) 376 380 { 377 381 spinlock_lock(&cache->maglock); … … 388 392 * 389 393 */ 390 static size_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag) 394 NO_TRACE static size_t magazine_destroy(slab_cache_t *cache, 395 slab_magazine_t *mag) 391 396 { 392 397 size_t i; … … 406 411 * 407 412 */ 408 static slab_magazine_t *get_full_current_mag(slab_cache_t *cache)413 NO_TRACE static slab_magazine_t *get_full_current_mag(slab_cache_t *cache) 409 414 { 410 415 slab_magazine_t *cmag = cache->mag_cache[CPU->id].current; 411 416 slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last; 412 417 413 418 ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock)); 414 419 … … 443 448 * 444 449 */ 445 static void *magazine_obj_get(slab_cache_t *cache)450 NO_TRACE static void *magazine_obj_get(slab_cache_t *cache) 446 451 { 447 452 if (!CPU) … … 473 478 * 474 479 */ 475 static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache)480 NO_TRACE static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache) 476 481 { 477 482 slab_magazine_t *cmag = cache->mag_cache[CPU->id].current; … … 479 484 480 485 ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock)); 481 486 482 487 if (cmag) { 483 488 if (cmag->busy < cmag->size) … … 523 528 * 524 529 */ 525 static int magazine_obj_put(slab_cache_t *cache, void *obj)530 NO_TRACE static int magazine_obj_put(slab_cache_t *cache, void *obj) 526 531 { 527 532 if (!CPU) … … 552 557 * 553 558 */ 554 static size_t comp_objects(slab_cache_t *cache)559 NO_TRACE static size_t comp_objects(slab_cache_t *cache) 555 560 { 556 561 if (cache->flags & SLAB_CACHE_SLINSIDE) … … 564 569 * 565 570 */ 566 static size_t badness(slab_cache_t *cache)571 NO_TRACE static size_t badness(slab_cache_t *cache) 567 572 { 568 573 size_t objects = comp_objects(cache); … … 578 583 * 579 584 */ 580 static bool make_magcache(slab_cache_t *cache)585 NO_TRACE static bool make_magcache(slab_cache_t *cache) 581 586 { 582 587 ASSERT(_slab_initialized >= 2); … … 600 605 
* 601 606 */ 602 static void _slab_cache_create(slab_cache_t *cache, const char *name,607 NO_TRACE static void _slab_cache_create(slab_cache_t *cache, const char *name, 603 608 size_t size, size_t align, int (*constructor)(void *obj, 604 609 unsigned int kmflag), size_t (*destructor)(void *obj), unsigned int flags) … … 607 612 cache->name = name; 608 613 609 if (align < sizeof( unative_t))610 align = sizeof( unative_t);614 if (align < sizeof(sysarg_t)) 615 align = sizeof(sysarg_t); 611 616 612 617 size = ALIGN_UP(size, align); … … 676 681 * 677 682 */ 678 static size_t _slab_reclaim(slab_cache_t *cache, unsigned int flags)683 NO_TRACE static size_t _slab_reclaim(slab_cache_t *cache, unsigned int flags) 679 684 { 680 685 if (cache->flags & SLAB_CACHE_NOMAGAZINE) … … 781 786 * 782 787 */ 783 static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)788 NO_TRACE static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab) 784 789 { 785 790 ipl_t ipl = interrupts_disable(); … … 801 806 } 802 807 803 /** Go through all caches and reclaim what is possible 804 * 805 * Interrupts must be disabled before calling this function, 806 * otherwise memory allocation from interrupts can deadlock. 807 * 808 */ 808 /** Go through all caches and reclaim what is possible */ 809 809 size_t slab_reclaim(unsigned int flags) 810 810 { 811 irq_spinlock_lock(&slab_cache_lock, false);811 irq_spinlock_lock(&slab_cache_lock, true); 812 812 813 813 size_t frames = 0; … … 819 819 } 820 820 821 irq_spinlock_unlock(&slab_cache_lock, false);821 irq_spinlock_unlock(&slab_cache_lock, true); 822 822 823 823 return frames; … … 885 885 irq_spinlock_unlock(&slab_cache_lock, true); 886 886 887 printf("%-18s %8 " PRIs " %8u %8" PRIs "%8ld %8ld %8ld %-5s\n",887 printf("%-18s %8zu %8u %8zu %8ld %8ld %8ld %-5s\n", 888 888 name, size, (1 << order), objects, allocated_slabs, 889 889 cached_objs, allocated_objs,
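Besides the NO_TRACE annotations, slab.c replaces unative_t with sysarg_t as the minimum object alignment and has slab_reclaim() take the cache-list lock with interrupts disabled by the lock itself rather than by the caller. The rounding that _slab_cache_create() applies to the object size can be sketched in isolation like this; the typedef and macro are stand-ins, not the kernel's.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins: the kernel defines sysarg_t per architecture and has its own
 * ALIGN_UP macro; these illustrative versions behave the same way for
 * power-of-two alignments. */
typedef uintptr_t sysarg_t;
#define ALIGN_UP(s, a)  (((s) + ((a) - 1)) & ~((size_t) (a) - 1))

/* Object size as a slab cache would store it: the alignment never drops
 * below sizeof(sysarg_t) (formerly sizeof(unative_t)) and the requested
 * size is rounded up to that alignment. */
static size_t cache_object_size(size_t size, size_t align)
{
	if (align < sizeof(sysarg_t))
		align = sizeof(sysarg_t);

	return ALIGN_UP(size, align);
}

int main(void)
{
	printf("%zu\n", cache_object_size(13, 1));    /* 16 on a 64-bit machine */
	printf("%zu\n", cache_object_size(40, 32));   /* 64 */
	return 0;
}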