Changes in kernel/generic/src/mm/as.c [908bb96:c4c2406] in mainline
Files: 1 edited
Legend:
  ' ' Unmodified
  '+' Added
  '-' Removed
kernel/generic/src/mm/as.c
--- r908bb96
+++ rc4c2406

  #include <syscall/copy.h>
  #include <arch/interrupt.h>
- #include <interrupt.h>

  /**
…
  /** Check area conflicts with other areas.
   *
-  * @param as      Address space.
-  * @param addr    Starting virtual address of the area being tested.
-  * @param count   Number of pages in the area being tested.
-  * @param guarded True if the area being tested is protected by guard pages.
-  * @param avoid   Do not touch this area.
+  * @param as    Address space.
+  * @param addr  Starting virtual address of the area being tested.
+  * @param count Number of pages in the area being tested.
+  * @param avoid Do not touch this area.
   *
   * @return True if there is no conflict, false otherwise.
…
   */
  NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr,
-     size_t count, bool guarded, as_area_t *avoid)
+     size_t count, as_area_t *avoid)
  {
      ASSERT((addr % PAGE_SIZE) == 0);
      ASSERT(mutex_locked(&as->lock));
-
-     /*
-      * If the addition of the supposed area address and size overflows,
-      * report conflict.
-      */
-     if (overflows_into_positive(addr, P2SZ(count)))
-         return false;

      /*
…
      if (overlaps(addr, P2SZ(count), (uintptr_t) NULL, PAGE_SIZE))
          return false;

      /*
       * The leaf node is found in O(log n), where n is proportional to
…
          if (area != avoid) {
              mutex_lock(&area->lock);
-
-             /*
-              * If at least one of the two areas are protected
-              * by the AS_AREA_GUARD flag then we must be sure
-              * that they are separated by at least one unmapped
-              * page.
-              */
-             int const gp = (guarded ||
-                 (area->flags & AS_AREA_GUARD)) ? 1 : 0;
-
-             /*
-              * The area comes from the left neighbour node, which
-              * means that there already are some areas in the leaf
-              * node, which in turn means that adding gp is safe and
-              * will not cause an integer overflow.
-              */
+
              if (overlaps(addr, P2SZ(count), area->base,
-                 P2SZ(area->pages + gp))) {
-                 mutex_unlock(&area->lock);
-                 return false;
-             }
-
-             mutex_unlock(&area->lock);
-         }
-     }
-
-     node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
-     if (node) {
-         area = (as_area_t *) node->value[0];
-
-         if (area != avoid) {
-             int gp;
-
-             mutex_lock(&area->lock);
-
-             gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0;
-             if (gp && overflows(addr, P2SZ(count))) {
-                 /*
-                  * Guard page not needed if the supposed area
-                  * is adjacent to the end of the address space.
-                  * We already know that the following test is
-                  * going to fail...
-                  */
-                 gp--;
-             }
-
-             if (overlaps(addr, P2SZ(count + gp), area->base,
                  P2SZ(area->pages))) {
                  mutex_unlock(&area->lock);
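The r908bb96 side of the hunk above keeps guarded areas apart by widening one side of the overlap test by gp pages, so that no placement can leave two areas immediately adjacent when either carries AS_AREA_GUARD. A minimal standalone sketch of that arithmetic follows; PAGE_SIZE, the AS_AREA_GUARD bit and all helper names here are assumptions made for the sketch, not the kernel's definitions:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SIZE     4096u  /* assumed page size, for the sketch only */
    #define AS_AREA_GUARD 0x20u  /* placeholder flag bit, not the kernel's value */
    #define P2SZ(pages)   ((size_t) (pages) * PAGE_SIZE)

    /* Interval intersection test in the spirit of the kernel's overlaps(). */
    static bool overlaps(uintptr_t a, size_t sa, uintptr_t b, size_t sb)
    {
        return (a < b + sb) && (b < a + sa);
    }

    /*
     * Widening the candidate extent by one guard page (gp) makes any
     * placement that would touch the existing area count as a conflict,
     * so at least one unmapped page stays between guarded areas.
     * Wraparound at the top of the address space is ignored here; the
     * removed kernel code dropped gp again in that case (its overflows()
     * checks).
     */
    static bool conflicts(uintptr_t addr, size_t count, bool guarded,
        uintptr_t area_base, size_t area_pages, unsigned area_flags)
    {
        size_t gp = (guarded || (area_flags & AS_AREA_GUARD)) ? 1 : 0;
        return overlaps(addr, P2SZ(count + gp), area_base, P2SZ(area_pages));
    }

Widening by gp pages is the same as demanding a one-page hole between the extents; dropping gp when the widened extent would wrap past the top of the address space merely falls back to the plain adjacency test, which the removed comment notes is already guaranteed to fail there.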
…
      }

+     node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
+     if (node) {
+         area = (as_area_t *) node->value[0];
+
+         if (area != avoid) {
+             mutex_lock(&area->lock);
+
+             if (overlaps(addr, P2SZ(count), area->base,
+                 P2SZ(area->pages))) {
+                 mutex_unlock(&area->lock);
+                 return false;
+             }
+
+             mutex_unlock(&area->lock);
+         }
+     }
+
      /* Second, check the leaf node. */
      btree_key_t i;
      for (i = 0; i < leaf->keys; i++) {
          area = (as_area_t *) leaf->value[i];
-         int agp;
-         int gp;

          if (area == avoid)
…
          mutex_lock(&area->lock);
-
-         gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0;
-         agp = gp;
-
-         /*
-          * Sanitize the two possible unsigned integer overflows.
-          */
-         if (gp && overflows(addr, P2SZ(count)))
-             gp--;
-         if (agp && overflows(area->base, P2SZ(area->pages)))
-             agp--;
-
-         if (overlaps(addr, P2SZ(count + gp), area->base,
-             P2SZ(area->pages + agp))) {
+
+         if (overlaps(addr, P2SZ(count), area->base,
+             P2SZ(area->pages))) {
              mutex_unlock(&area->lock);
              return false;
…
      /*
       * So far, the area does not conflict with other areas.
-      * Check if it is contained in the user address space.
+      * Check if it doesn't conflict with kernel address space.
       */
      if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
-         return iswithin(USER_ADDRESS_SPACE_START,
-             (USER_ADDRESS_SPACE_END - USER_ADDRESS_SPACE_START) + 1,
-             addr, P2SZ(count));
+         return !overlaps(addr, P2SZ(count), KERNEL_ADDRESS_SPACE_START,
+             KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
      }
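Both sides of the preceding hunks lean on interval helpers, and the removed r908bb96 lines additionally sanitize unsigned wraparound with overflows() and overflows_into_positive() before testing. The real helpers are kernel macros defined elsewhere, so the following is a hedged approximation of what they test, not their actual definitions:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* True when addr + size wraps past the top of the address space. */
    static bool overflows(uintptr_t addr, size_t size)
    {
        return addr + size < addr;
    }

    /*
     * True when the wrapped sum additionally lands on a non-zero address,
     * i.e. the extent would reappear at the bottom of the address space.
     * A sum that wraps exactly to zero, meaning an area ending flush with
     * the top of the address space, is tolerated.
     */
    static bool overflows_into_positive(uintptr_t addr, size_t size)
    {
        return overflows(addr, size) && (addr + size > 0);
    }

The *_into_positive variant is why the removed checks can reject a candidate whose end wraps into low addresses while still admitting one that ends exactly at the address-space boundary.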
…
       * this function.
       *
-      * @param as      Address space.
-      * @param bound   Lowest address bound.
-      * @param size    Requested size of the allocation.
-      * @param guarded True if the allocation must be protected by guard pages.
+      * @param as    Address space.
+      * @param bound Lowest address bound.
+      * @param size  Requested size of the allocation.
       *
       * @return Address of the beginning of unmapped address space area.
…
       */
  NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound,
-     size_t size, bool guarded)
+     size_t size)
  {
      ASSERT(mutex_locked(&as->lock));
…
      /* First check the bound address itself */
      uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
-     if (addr >= bound) {
-         if (guarded) {
-             /*
-              * Leave an unmapped page between the lower
-              * bound and the area's start address.
-              */
-             addr += P2SZ(1);
-         }
-
-         if (check_area_conflicts(as, addr, pages, guarded, NULL))
-             return addr;
-     }
+     if ((addr >= bound) &&
+         (check_area_conflicts(as, addr, pages, NULL)))
+         return addr;

      /* Eventually check the addresses behind each area */
…
              addr =
                  ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE);
-
-             if (guarded || area->flags & AS_AREA_GUARD) {
-                 /*
-                  * We must leave an unmapped page
-                  * between the two areas.
-                  */
-                 addr += P2SZ(1);
-             }
-
              bool avail =
                  ((addr >= bound) && (addr >= area->base) &&
-                 (check_area_conflicts(as, addr, pages, guarded, area)));
+                 (check_area_conflicts(as, addr, pages, area)));

              mutex_unlock(&area->lock);
…
      if (size == 0)
          return NULL;

      size_t pages = SIZE2FRAMES(size);
…
      if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
          return NULL;
-
-     bool const guarded = flags & AS_AREA_GUARD;

      mutex_lock(&as->lock);

      if (*base == (uintptr_t) -1) {
-         *base = as_get_unmapped_area(as, bound, size, guarded);
+         *base = as_get_unmapped_area(as, bound, size);
          if (*base == (uintptr_t) -1) {
              mutex_unlock(&as->lock);
…
          }
      }
-
-     if (overflows_into_positive(*base, size))
-         return NULL;
-
-     if (!check_area_conflicts(as, *base, pages, guarded, NULL)) {
+
+     if (!check_area_conflicts(as, *base, pages, NULL)) {
          mutex_unlock(&as->lock);
          return NULL;
…
          return ENOENT;
      }
-
-     if (!area->backend->is_resizable(area)) {
+
+     if (area->backend == &phys_backend) {
          /*
-          * The backend does not support resizing for this area.
+          * Remapping of address space areas associated
+          * with memory mapped devices is not supported.
           */
          mutex_unlock(&area->lock);
…
      /*
       * Growing the area.
-      */
-
-     if (overflows_into_positive(address, P2SZ(pages)))
-         return EINVAL;
-
-     /*
       * Check for overlaps with other address space areas.
       */
-     bool const guarded = area->flags & AS_AREA_GUARD;
-     if (!check_area_conflicts(as, address, pages, guarded, area)) {
+     if (!check_area_conflicts(as, address, pages, area)) {
          mutex_unlock(&area->lock);
          mutex_unlock(&as->lock);
…
      }

-     if (!src_area->backend->is_shareable(src_area)) {
+     if ((!src_area->backend) || (!src_area->backend->share)) {
          /*
-          * The backend does not permit sharing of this area.
+          * There is no backend or the backend does not
+          * know how to share the area.
           */
          mutex_unlock(&src_area->lock);
…
  int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
  {
-     int rc = AS_PF_FAULT;
-
      if (!THREAD)
-         goto page_fault;
+         return AS_PF_FAULT;

      if (!AS)
-         goto page_fault;
+         return AS_PF_FAULT;

      mutex_lock(&AS->lock);
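The resizing and sharing hunks above also change how backend capabilities are probed: the rc4c2406 side encodes the policy at the call site (a comparison against phys_backend, and a test that a share hook exists at all), while the r908bb96 side asks the backend itself through is_resizable()/is_shareable() predicates. The sketch below shows only the hooks that actually appear in this changeset; the kernel's real mem_backend_t carries more members and the signatures here are assumptions:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct as_area as_area_t;   /* opaque for the sketch */
    typedef int pf_access_t;            /* stand-in for the kernel's type */

    /* Subset of the backend operations referenced in this changeset. */
    typedef struct mem_backend {
        int (*share)(as_area_t *area);
        int (*page_fault)(as_area_t *area, uintptr_t addr, pf_access_t access);
        bool (*is_resizable)(as_area_t *area);    /* r908bb96 side only */
        bool (*is_shareable)(as_area_t *area);    /* r908bb96 side only */
    } mem_backend_t;

Predicate hooks keep the policy with the backend that owns it, so introducing another non-resizable or non-shareable backend needs no changes in as_area_resize() or as_area_share().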
…
       * Resort to the backend page fault handler.
       */
-     rc = area->backend->page_fault(area, page, access);
-     if (rc != AS_PF_OK) {
+     if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
          page_table_unlock(AS, false);
          mutex_unlock(&area->lock);
…
          istate_set_retaddr(istate,
              (uintptr_t) &memcpy_to_uspace_failover_address);
-     } else if (rc == AS_PF_SILENT) {
-         printf("Killing task %" PRIu64 " due to a "
-             "failed late reservation request.\n", TASK->taskid);
-         task_kill_self(true);
      } else {
-         fault_if_from_uspace(istate, "Page fault: %p.", (void *) page);
-         panic_memtrap(istate, access, page, NULL);
+         return AS_PF_FAULT;
      }

…
  {
      uintptr_t virt = base;
-     as_area_t *area = as_area_create(AS, flags | AS_AREA_CACHEABLE, size,
+     as_area_t *area = as_area_create(AS, flags, size,
          AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, bound);
      if (area == NULL)
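The as_page_fault() hunks show two error-path styles: rc4c2406 returns AS_PF_FAULT and leaves the reaction to the architecture-level caller, while r908bb96 funnels failures to a common label that additionally handles AS_PF_SILENT, a fault signalling a failed late memory reservation, by killing the offending task. A hedged sketch of that r908bb96-side policy; the enum values and the helper's shape are assumptions, with the actual kernel calls kept in comments:

    #include <stdbool.h>
    #include <stdio.h>

    enum { AS_PF_OK, AS_PF_FAULT, AS_PF_SILENT };  /* values assumed */

    /*
     * r908bb96-side policy at the page_fault label: an interrupted
     * userspace copy restarts at its failover address, a silent fault
     * kills the task, and anything else is reported as an ordinary
     * page fault.
     */
    static void handle_failed_fault(int rc, bool in_uspace_copy)
    {
        if (in_uspace_copy) {
            /* istate_set_retaddr(istate,
               (uintptr_t) &memcpy_to_uspace_failover_address); */
        } else if (rc == AS_PF_SILENT) {
            puts("Killing task due to a failed late reservation request.");
            /* task_kill_self(true); */
        } else {
            puts("Unhandled page fault.");
            /* fault_if_from_uspace(...); panic_memtrap(...); */
        }
    }

The final hunk is related only in spirit: on the r908bb96 side, anonymous areas are additionally forced to be cacheable by OR-ing AS_AREA_CACHEABLE into the creation flags.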