Changeset 8f88beb7 in mainline for kernel/generic/src
- Timestamp: 2012-11-25T21:34:07Z (13 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: e1a27be
- Parents: 150a2718 (diff), 7462674 (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- Location: kernel/generic/src
- Files: 11 edited
- console/cmd.c (modified) (4 diffs)
- console/kconsole.c (modified) (2 diffs)
- interrupt/interrupt.c (modified) (1 diff)
- main/kinit.c (modified) (1 diff)
- mm/as.c (modified) (21 diffs)
- mm/backend_anon.c (modified) (8 diffs)
- mm/backend_elf.c (modified) (3 diffs)
- mm/backend_phys.c (modified) (3 diffs)
- mm/km.c (modified) (3 diffs)
- proc/program.c (modified) (2 diffs)
- proc/task.c (modified) (1 diff)
Legend:
- Unmodified lines are shown with a leading space
- Added lines are marked with +
- Removed lines are marked with -
- Hunks within a file are separated by ... lines
kernel/generic/src/console/cmd.c (r150a2718 → r8f88beb7)

 #include <cpu.h>
 #include <mm/tlb.h>
+#include <mm/km.h>
 #include <arch/mm/tlb.h>
 #include <mm/frame.h>
...
     .func = cmd_help,
     .argc = 0
 };
+
+/* Data and methods for pio_read_8 command */
+static int cmd_pio_read_8(cmd_arg_t *argv);
+static cmd_arg_t pio_read_8_argv[] = { { .type = ARG_TYPE_INT } };
+static cmd_info_t pio_read_8_info = {
+    .name = "pio_read_8",
+    .description = "pio_read_8 <address> Read 1 byte from memory (or port).",
+    .func = cmd_pio_read_8,
+    .argc = 1,
+    .argv = pio_read_8_argv
+};
+
+/* Data and methods for pio_read_16 command */
+static int cmd_pio_read_16(cmd_arg_t *argv);
+static cmd_arg_t pio_read_16_argv[] = { { .type = ARG_TYPE_INT } };
+static cmd_info_t pio_read_16_info = {
+    .name = "pio_read_16",
+    .description = "pio_read_16 <address> Read 2 bytes from memory (or port).",
+    .func = cmd_pio_read_16,
+    .argc = 1,
+    .argv = pio_read_16_argv
+};
+
+/* Data and methods for pio_read_32 command */
+static int cmd_pio_read_32(cmd_arg_t *argv);
+static cmd_arg_t pio_read_32_argv[] = { { .type = ARG_TYPE_INT } };
+static cmd_info_t pio_read_32_info = {
+    .name = "pio_read_32",
+    .description = "pio_read_32 <address> Read 4 bytes from memory (or port).",
+    .func = cmd_pio_read_32,
+    .argc = 1,
+    .argv = pio_read_32_argv
+};
+
+/* Data and methods for pio_write_8 command */
+static int cmd_pio_write_8(cmd_arg_t *argv);
+static cmd_arg_t pio_write_8_argv[] = {
+    { .type = ARG_TYPE_INT },
+    { .type = ARG_TYPE_INT }
+};
+static cmd_info_t pio_write_8_info = {
+    .name = "pio_write_8",
+    .description = "pio_write_8 <address> <value> Write 1 byte to memory (or port).",
+    .func = cmd_pio_write_8,
+    .argc = 2,
+    .argv = pio_write_8_argv
+};
+
+/* Data and methods for pio_write_16 command */
+static int cmd_pio_write_16(cmd_arg_t *argv);
+static cmd_arg_t pio_write_16_argv[] = {
+    { .type = ARG_TYPE_INT },
+    { .type = ARG_TYPE_INT }
+};
+static cmd_info_t pio_write_16_info = {
+    .name = "pio_write_16",
+    .description = "pio_write_16 <address> <value> Write 2 bytes to memory (or port).",
+    .func = cmd_pio_write_16,
+    .argc = 2,
+    .argv = pio_write_16_argv
+};
+
+/* Data and methods for pio_write_32 command */
+static int cmd_pio_write_32(cmd_arg_t *argv);
+static cmd_arg_t pio_write_32_argv[] = {
+    { .type = ARG_TYPE_INT },
+    { .type = ARG_TYPE_INT }
+};
+static cmd_info_t pio_write_32_info = {
+    .name = "pio_write_32",
+    .description = "pio_write_32 <address> <value> Write 4 bytes to memory (or port).",
+    .func = cmd_pio_write_32,
+    .argc = 2,
+    .argv = pio_write_32_argv
+};
...
     &btrace_info,
 #endif
+    &pio_read_8_info,
+    &pio_read_16_info,
+    &pio_read_32_info,
+    &pio_write_8_info,
+    &pio_write_16_info,
+    &pio_write_32_info,
     NULL
 };
...
     spinlock_unlock(&cmd_lock);
 
+    return 1;
+}
+
+/** Read 1 byte from phys memory or io port.
+ *
+ * @param argv Argument vector.
+ *
+ * @return 0 on failure, 1 on success.
+ */
+static int cmd_pio_read_8(cmd_arg_t *argv)
+{
+    uint8_t *ptr = NULL;
+
+#ifdef IO_SPACE_BOUNDARY
+    if ((void *) argv->intval < IO_SPACE_BOUNDARY)
+        ptr = (void *) argv[0].intval;
+    else
+#endif
+        ptr = (uint8_t *) km_map(argv[0].intval, sizeof(uint8_t),
+            PAGE_NOT_CACHEABLE);
+
+    const uint8_t val = pio_read_8(ptr);
+    printf("read %" PRIxn ": %" PRIx8 "\n", argv[0].intval, val);
+
+#ifdef IO_SPACE_BOUNDARY
+    if ((void *) argv->intval < IO_SPACE_BOUNDARY)
+        return 1;
+#endif
+
+    km_unmap((uintptr_t) ptr, sizeof(uint8_t));
+    return 1;
+}
+
+/** Read 2 bytes from phys memory or io port.
+ *
+ * @param argv Argument vector.
+ *
+ * @return 0 on failure, 1 on success.
+ */
+static int cmd_pio_read_16(cmd_arg_t *argv)
+{
+    uint16_t *ptr = NULL;
+
+#ifdef IO_SPACE_BOUNDARY
+    if ((void *) argv->intval < IO_SPACE_BOUNDARY)
+        ptr = (void *) argv[0].intval;
+    else
+#endif
+        ptr = (uint16_t *) km_map(argv[0].intval, sizeof(uint16_t),
+            PAGE_NOT_CACHEABLE);
+
+    const uint16_t val = pio_read_16(ptr);
+    printf("read %" PRIxn ": %" PRIx16 "\n", argv[0].intval, val);
+
+#ifdef IO_SPACE_BOUNDARY
+    if ((void *) argv->intval < IO_SPACE_BOUNDARY)
+        return 1;
+#endif
+
+    km_unmap((uintptr_t) ptr, sizeof(uint16_t));
+    return 1;
+}
+
+/** Read 4 bytes from phys memory or io port.
+ *
+ * @param argv Argument vector.
+ *
+ * @return 0 on failure, 1 on success.
+ */
+static int cmd_pio_read_32(cmd_arg_t *argv)
+{
+    uint32_t *ptr = NULL;
+
+#ifdef IO_SPACE_BOUNDARY
+    if ((void *) argv->intval < IO_SPACE_BOUNDARY)
+        ptr = (void *) argv[0].intval;
+    else
+#endif
+        ptr = (uint32_t *) km_map(argv[0].intval, sizeof(uint32_t),
+            PAGE_NOT_CACHEABLE);
+
+    const uint32_t val = pio_read_32(ptr);
+    printf("read %" PRIxn ": %" PRIx32 "\n", argv[0].intval, val);
+
+#ifdef IO_SPACE_BOUNDARY
+    if ((void *) argv->intval < IO_SPACE_BOUNDARY)
+        return 1;
+#endif
+
+    km_unmap((uintptr_t) ptr, sizeof(uint32_t));
+    return 1;
+}
+
+/** Write 1 byte to phys memory or io port.
+ *
+ * @param argv Argument vector.
+ *
+ * @return 0 on failure, 1 on success.
+ */
+static int cmd_pio_write_8(cmd_arg_t *argv)
+{
+    uint8_t *ptr = NULL;
+
+#ifdef IO_SPACE_BOUNDARY
+    if ((void *) argv->intval < IO_SPACE_BOUNDARY)
+        ptr = (void *) argv[0].intval;
+    else
+#endif
+        ptr = (uint8_t *) km_map(argv[0].intval, sizeof(uint8_t),
+            PAGE_NOT_CACHEABLE);
+
+    printf("write %" PRIxn ": %" PRIx8 "\n", argv[0].intval,
+        (uint8_t) argv[1].intval);
+    pio_write_8(ptr, (uint8_t) argv[1].intval);
+
+#ifdef IO_SPACE_BOUNDARY
+    if ((void *) argv->intval < IO_SPACE_BOUNDARY)
+        return 1;
+#endif
+
+    km_unmap((uintptr_t) ptr, sizeof(uint8_t));
+    return 1;
+}
+
+/** Write 2 bytes to phys memory or io port.
+ *
+ * @param argv Argument vector.
+ *
+ * @return 0 on failure, 1 on success.
+ */
+static int cmd_pio_write_16(cmd_arg_t *argv)
+{
+    uint16_t *ptr = NULL;
+
+#ifdef IO_SPACE_BOUNDARY
+    if ((void *) argv->intval < IO_SPACE_BOUNDARY)
+        ptr = (void *) argv[0].intval;
+    else
+#endif
+        ptr = (uint16_t *) km_map(argv[0].intval, sizeof(uint16_t),
+            PAGE_NOT_CACHEABLE);
+
+    printf("write %" PRIxn ": %" PRIx16 "\n", argv[0].intval,
+        (uint16_t) argv[1].intval);
+    pio_write_16(ptr, (uint16_t) argv[1].intval);
+
+#ifdef IO_SPACE_BOUNDARY
+    if ((void *) argv->intval < IO_SPACE_BOUNDARY)
+        return 1;
+#endif
+
+    km_unmap((uintptr_t) ptr, sizeof(uint16_t));
+    return 1;
+}
+
+/** Write 4 bytes to phys memory or io port.
+ *
+ * @param argv Argument vector.
+ *
+ * @return 0 on failure, 1 on success.
+ */
+static int cmd_pio_write_32(cmd_arg_t *argv)
+{
+    uint32_t *ptr = NULL;
+
+#ifdef IO_SPACE_BOUNDARY
+    if ((void *) argv->intval < IO_SPACE_BOUNDARY)
+        ptr = (void *) argv[0].intval;
+    else
+#endif
+        ptr = (uint32_t *) km_map(argv[0].intval, sizeof(uint32_t),
+            PAGE_NOT_CACHEABLE);
+
+    printf("write %" PRIxn ": %" PRIx32 "\n", argv[0].intval,
+        (uint32_t) argv[1].intval);
+    pio_write_32(ptr, (uint32_t) argv[1].intval);
+
+#ifdef IO_SPACE_BOUNDARY
+    if ((void *) argv->intval < IO_SPACE_BOUNDARY)
+        return 1;
+#endif
+
+    km_unmap((uintptr_t) ptr, sizeof(uint32_t));
     return 1;
 }
kernel/generic/src/console/kconsole.c (r150a2718 → r8f88beb7)

         /* It's a number - convert it */
         uint64_t value;
-        int rc = str_uint64_t(text, NULL, 0, true, &value);
+        char *end;
+        int rc = str_uint64_t(text, &end, 0, false, &value);
+        if (end != text + len)
+            rc = EINVAL;
         switch (rc) {
         case EINVAL:
-            printf("Invalid number.\n");
+            printf("Invalid number '%s'.\n", text);
             return false;
         case EOVERFLOW:
-            printf("Integer overflow.\n");
+            printf("Integer overflow in '%s'.\n", text);
             return false;
         case EOK:
...
             break;
         default:
-            printf("Unknown error.\n");
+            printf("Unknown error parsing '%s'.\n", text);
             return false;
         }
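Review note: the new end-pointer check makes the parser reject tokens with trailing garbage (such as "123xyz") that a partial parse would otherwise accept, and the error messages now echo the offending token. The same whole-token validation pattern, sketched in portable C with the standard strtoull() in place of HelenOS' str_uint64_t() and a terminator test in place of the len comparison (illustrative only):

/* Whole-token number parsing: reject empty input, trailing garbage,
 * and out-of-range values, mirroring the checks added above. */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static bool parse_number(const char *text, uint64_t *value)
{
    char *end;
    errno = 0;
    *value = strtoull(text, &end, 0);

    if (errno == ERANGE) {
        printf("Integer overflow in '%s'.\n", text);
        return false;
    }
    /* Reject empty input and trailing garbage such as "123xyz". */
    if (end == text || *end != '\0') {
        printf("Invalid number '%s'.\n", text);
        return false;
    }
    return true;
}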
kernel/generic/src/interrupt/interrupt.c (r150a2718 → r8f88beb7)

 }
 
-static NO_TRACE void fault_from_uspace_core(istate_t *istate, const char *fmt, va_list args)
+static NO_TRACE
+void fault_from_uspace_core(istate_t *istate, const char *fmt, va_list args)
 {
     printf("Task %s (%" PRIu64 ") killed due to an exception at "
kernel/generic/src/main/kinit.c (r150a2718 → r8f88beb7)

 #endif /* CONFIG_KCONSOLE */
 
+    /*
+     * Store the default stack size in sysinfo so that uspace can create
+     * stack with this default size.
+     */
+    sysinfo_set_item_val("default.stack_size", NULL, STACK_SIZE_USER);
+
     interrupts_enable();
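Review note: with the item published, userspace can size new thread stacks from sysinfo instead of a compile-time constant. A minimal uspace-side sketch, assuming HelenOS libc exposes a sysinfo_get_value(path, &value) query (the exact libc interface is not part of this diff):

/* Query the default stack size published by the kernel above.
 * Returns 0 if the kernel did not publish the item. */
#include <errno.h>
#include <stddef.h>
#include <sysinfo.h>

static size_t default_stack_size(void)
{
    sysarg_t size = 0;
    if (sysinfo_get_value("default.stack_size", &size) != EOK)
        return 0;
    return (size_t) size;
}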
kernel/generic/src/mm/as.c (r150a2718 → r8f88beb7)

 #include <syscall/copy.h>
 #include <arch/interrupt.h>
+#include <interrupt.h>
 
 /**
...
 /** Check area conflicts with other areas.
  *
- * @param as    Address space.
- * @param addr  Starting virtual address of the area being tested.
- * @param count Number of pages in the area being tested.
- * @param avoid Do not touch this area.
+ * @param as      Address space.
+ * @param addr    Starting virtual address of the area being tested.
+ * @param count   Number of pages in the area being tested.
+ * @param guarded True if the area being tested is protected by guard pages.
+ * @param avoid   Do not touch this area.
  *
  * @return True if there is no conflict, false otherwise.
  *
  */
 NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr,
-    size_t count, as_area_t *avoid)
+    size_t count, bool guarded, as_area_t *avoid)
 {
     ASSERT((addr % PAGE_SIZE) == 0);
     ASSERT(mutex_locked(&as->lock));
+
+    /*
+     * If the addition of the supposed area address and size overflows,
+     * report conflict.
+     */
+    if (overflows_into_positive(addr, P2SZ(count)))
+        return false;
 
     /*
...
     if (overlaps(addr, P2SZ(count), (uintptr_t) NULL, PAGE_SIZE))
         return false;
 
     /*
      * The leaf node is found in O(log n), where n is proportional to
...
         if (area != avoid) {
             mutex_lock(&area->lock);
 
+            /*
+             * If at least one of the two areas are protected
+             * by the AS_AREA_GUARD flag then we must be sure
+             * that they are separated by at least one unmapped
+             * page.
+             */
+            int const gp = (guarded ||
+                (area->flags & AS_AREA_GUARD)) ? 1 : 0;
+
+            /*
+             * The area comes from the left neighbour node, which
+             * means that there already are some areas in the leaf
+             * node, which in turn means that adding gp is safe and
+             * will not cause an integer overflow.
+             */
             if (overlaps(addr, P2SZ(count), area->base,
+                P2SZ(area->pages + gp))) {
+                mutex_unlock(&area->lock);
+                return false;
+            }
+
+            mutex_unlock(&area->lock);
+        }
+    }
+
+    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
+    if (node) {
+        area = (as_area_t *) node->value[0];
+
+        if (area != avoid) {
+            int gp;
+
+            mutex_lock(&area->lock);
+
+            gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0;
+            if (gp && overflows(addr, P2SZ(count))) {
+                /*
+                 * Guard page not needed if the supposed area
+                 * is adjacent to the end of the address space.
+                 * We already know that the following test is
+                 * going to fail...
+                 */
+                gp--;
+            }
+
+            if (overlaps(addr, P2SZ(count + gp), area->base,
                 P2SZ(area->pages))) {
                 mutex_unlock(&area->lock);
                 return false;
...
     }
 
-    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
-    if (node) {
-        area = (as_area_t *) node->value[0];
-
-        if (area != avoid) {
-            mutex_lock(&area->lock);
-
-            if (overlaps(addr, P2SZ(count), area->base,
-                P2SZ(area->pages))) {
-                mutex_unlock(&area->lock);
-                return false;
-            }
-
-            mutex_unlock(&area->lock);
-        }
-    }
-
     /* Second, check the leaf node. */
     btree_key_t i;
     for (i = 0; i < leaf->keys; i++) {
         area = (as_area_t *) leaf->value[i];
+        int agp;
+        int gp;
 
         if (area == avoid)
...
         mutex_lock(&area->lock);
 
-        if (overlaps(addr, P2SZ(count), area->base,
-            P2SZ(area->pages))) {
+        gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0;
+        agp = gp;
+
+        /*
+         * Sanitize the two possible unsigned integer overflows.
+         */
+        if (gp && overflows(addr, P2SZ(count)))
+            gp--;
+        if (agp && overflows(area->base, P2SZ(area->pages)))
+            agp--;
+
+        if (overlaps(addr, P2SZ(count + gp), area->base,
+            P2SZ(area->pages + agp))) {
             mutex_unlock(&area->lock);
             return false;
...
     /*
      * So far, the area does not conflict with other areas.
-     * Check if it doesn't conflict with kernel address space.
+     * Check if it is contained in the user address space.
      */
     if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
-        return !overlaps(addr, P2SZ(count), KERNEL_ADDRESS_SPACE_START,
-            KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
+        return iswithin(USER_ADDRESS_SPACE_START,
+            (USER_ADDRESS_SPACE_END - USER_ADDRESS_SPACE_START) + 1,
+            addr, P2SZ(count));
     }
...
  * this function.
  *
- * @param as    Address space.
- * @param bound Lowest address bound.
- * @param size  Requested size of the allocation.
+ * @param as      Address space.
+ * @param bound   Lowest address bound.
+ * @param size    Requested size of the allocation.
+ * @param guarded True if the allocation must be protected by guard pages.
  *
  * @return Address of the beginning of unmapped address space area.
...
  */
 NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound,
-    size_t size)
+    size_t size, bool guarded)
 {
     ASSERT(mutex_locked(&as->lock));
...
     /* First check the bound address itself */
     uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
-    if ((addr >= bound) &&
-        (check_area_conflicts(as, addr, pages, NULL)))
-        return addr;
+    if (addr >= bound) {
+        if (guarded) {
+            /*
+             * Leave an unmapped page between the lower
+             * bound and the area's start address.
+             */
+            addr += P2SZ(1);
+        }
+
+        if (check_area_conflicts(as, addr, pages, guarded, NULL))
+            return addr;
+    }
 
     /* Eventually check the addresses behind each area */
...
             addr =
                 ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE);
+
+            if (guarded || area->flags & AS_AREA_GUARD) {
+                /*
+                 * We must leave an unmapped page
+                 * between the two areas.
+                 */
+                addr += P2SZ(1);
+            }
+
             bool avail =
                 ((addr >= bound) && (addr >= area->base) &&
-                (check_area_conflicts(as, addr, pages, area)));
+                (check_area_conflicts(as, addr, pages, guarded, area)));
 
             mutex_unlock(&area->lock);
...
     if (size == 0)
         return NULL;
 
     size_t pages = SIZE2FRAMES(size);
 
     if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
         return NULL;
+
+    bool const guarded = flags & AS_AREA_GUARD;
 
     mutex_lock(&as->lock);
 
     if (*base == (uintptr_t) -1) {
-        *base = as_get_unmapped_area(as, bound, size);
+        *base = as_get_unmapped_area(as, bound, size, guarded);
         if (*base == (uintptr_t) -1) {
             mutex_unlock(&as->lock);
...
         }
     }
 
-    if (!check_area_conflicts(as, *base, pages, NULL)) {
+    if (overflows_into_positive(*base, size))
+        return NULL;
+
+    if (!check_area_conflicts(as, *base, pages, guarded, NULL)) {
         mutex_unlock(&as->lock);
         return NULL;
     }
...
         return ENOENT;
     }
 
-    if (area->backend == &phys_backend) {
-        /*
-         * Remapping of address space areas associated
-         * with memory mapped devices is not supported.
+    if (!area->backend->is_resizable(area)) {
+        /*
+         * The backend does not support resizing for this area.
          */
         mutex_unlock(&area->lock);
...
     /*
      * Growing the area.
+     */
+
+    if (overflows_into_positive(address, P2SZ(pages)))
+        return EINVAL;
+
+    /*
      * Check for overlaps with other address space areas.
      */
-    if (!check_area_conflicts(as, address, pages, area)) {
+    bool const guarded = area->flags & AS_AREA_GUARD;
+    if (!check_area_conflicts(as, address, pages, guarded, area)) {
         mutex_unlock(&area->lock);
         mutex_unlock(&as->lock);
...
     }
 
-    if ((!src_area->backend) || (!src_area->backend->share)) {
-        /*
-         * There is no backend or the backend does not
-         * know how to share the area.
+    if (!src_area->backend->is_shareable(src_area)) {
+        /*
+         * The backend does not permit sharing of this area.
          */
         mutex_unlock(&src_area->lock);
...
 int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
 {
+    int rc = AS_PF_FAULT;
+
     if (!THREAD)
-        return AS_PF_FAULT;
+        goto page_fault;
 
     if (!AS)
-        return AS_PF_FAULT;
+        goto page_fault;
 
     mutex_lock(&AS->lock);
...
      * Resort to the backend page fault handler.
      */
-    if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
+    rc = area->backend->page_fault(area, page, access);
+    if (rc != AS_PF_OK) {
         page_table_unlock(AS, false);
         mutex_unlock(&area->lock);
...
         istate_set_retaddr(istate,
             (uintptr_t) &memcpy_to_uspace_failover_address);
+    } else if (rc == AS_PF_SILENT) {
+        printf("Killing task %" PRIu64 " due to a "
+            "failed late reservation request.\n", TASK->taskid);
+        task_kill_self(true);
     } else {
-        return AS_PF_FAULT;
+        fault_if_from_uspace(istate, "Page fault: %p.", (void *) page);
+        panic_memtrap(istate, access, page, NULL);
     }
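Review note: the heart of the guard-page logic is that when either of two neighbouring areas is guarded, one unmapped page must separate them, so one of the two intervals is widened by a page (gp or agp) before the overlap test, and the widening is dropped whenever it would wrap past the end of the address space. The following self-contained sketch restates the leaf-node test with assumed, illustrative definitions of the overlaps() and overflows() helpers (these are not the kernel's own definitions):

/* Assumed semantics of the helpers used above; illustrative only. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096
#define P2SZ(pages) ((uintptr_t) (pages) * PAGE_SIZE)

/* True if base + size wraps around the top of the address space. */
static bool overflows(uintptr_t base, uintptr_t size)
{
    return (base + size < base);
}

/* True if the half-open intervals [s1, s1+sz1) and [s2, s2+sz2) intersect. */
static bool overlaps(uintptr_t s1, uintptr_t sz1, uintptr_t s2, uintptr_t sz2)
{
    return ((s1 < s2 + sz2) && (s2 < s1 + sz1));
}

/* Guarded conflict test against one existing area, as in the leaf-node
 * loop: widen each interval by one guard page, dropping the widening
 * where it would overflow. */
static bool conflicts(uintptr_t addr, size_t count, bool guarded,
    uintptr_t area_base, size_t area_pages, bool area_guarded)
{
    int gp = (guarded || area_guarded) ? 1 : 0;
    int agp = gp;

    if (gp && overflows(addr, P2SZ(count)))
        gp--;
    if (agp && overflows(area_base, P2SZ(area_pages)))
        agp--;

    return overlaps(addr, P2SZ(count + gp), area_base,
        P2SZ(area_pages + agp));
}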
kernel/generic/src/mm/backend_anon.c (r150a2718 → r8f88beb7)

 static void anon_destroy(as_area_t *);
 
+static bool anon_is_resizable(as_area_t *);
+static bool anon_is_shareable(as_area_t *);
+
 static int anon_page_fault(as_area_t *, uintptr_t, pf_access_t);
 static void anon_frame_free(as_area_t *, uintptr_t, uintptr_t);
...
     .destroy = anon_destroy,
 
+    .is_resizable = anon_is_resizable,
+    .is_shareable = anon_is_shareable,
+
     .page_fault = anon_page_fault,
     .frame_free = anon_frame_free,
...
 bool anon_create(as_area_t *area)
 {
+    if (area->flags & AS_AREA_LATE_RESERVE)
+        return true;
+
     return reserve_try_alloc(area->pages);
 }
...
 bool anon_resize(as_area_t *area, size_t new_pages)
 {
+    if (area->flags & AS_AREA_LATE_RESERVE)
+        return true;
+
     if (new_pages > area->pages)
         return reserve_try_alloc(new_pages - area->pages);
...
     ASSERT(mutex_locked(&area->as->lock));
     ASSERT(mutex_locked(&area->lock));
+    ASSERT(!(area->flags & AS_AREA_LATE_RESERVE));
 
     /*
...
 void anon_destroy(as_area_t *area)
 {
+    if (area->flags & AS_AREA_LATE_RESERVE)
+        return;
+
     reserve_free(area->pages);
 }
 
+bool anon_is_resizable(as_area_t *area)
+{
+    return true;
+}
+
+bool anon_is_shareable(as_area_t *area)
+{
+    return !(area->flags & AS_AREA_LATE_RESERVE);
+}
 
 /** Service a page fault in the anonymous memory address space area.
...
      * the different causes
      */
+
+    if (area->flags & AS_AREA_LATE_RESERVE) {
+        /*
+         * Reserve the memory for this page now.
+         */
+        if (!reserve_try_alloc(1))
+            return AS_PF_SILENT;
+    }
+
     kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);
     memsetb((void *) kpage, PAGE_SIZE, 0);
...
     ASSERT(mutex_locked(&area->lock));
 
-    frame_free_noreserve(frame);
+    if (area->flags & AS_AREA_LATE_RESERVE) {
+        /*
+         * In case of the late reserve areas, physical memory will not
+         * be unreserved when the area is destroyed so we need to use
+         * the normal unreserving frame_free().
+         */
+        frame_free(frame);
+    } else {
+        /*
+         * The reserve will be given back when the area is destroyed or
+         * resized, so use the frame_free_noreserve() which does not
+         * manipulate the reserve or it would be given back twice.
+         */
+        frame_free_noreserve(frame);
+    }
 }
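Review note: together with the as.c changes, the backend interface presumably grows two predicates so that generic code no longer hard-codes checks such as "backend == &phys_backend" or "backend->share != NULL". The actual declaration lives in a header outside this diff; judging from the callbacks registered here and in the other backends, its shape is roughly:

/* Presumed shape of the memory backend interface after this change
 * (illustrative; the real declaration is not shown in this diff). */
typedef struct mem_backend {
    bool (*create)(as_area_t *);
    bool (*resize)(as_area_t *, size_t);
    void (*share)(as_area_t *);
    void (*destroy)(as_area_t *);

    /* New predicates replacing hard-coded backend checks in as.c: */
    bool (*is_resizable)(as_area_t *);
    bool (*is_shareable)(as_area_t *);

    int (*page_fault)(as_area_t *, uintptr_t, pf_access_t);
    void (*frame_free)(as_area_t *, uintptr_t, uintptr_t);
} mem_backend_t;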
kernel/generic/src/mm/backend_elf.c (r150a2718 → r8f88beb7)

 static void elf_destroy(as_area_t *);
 
+static bool elf_is_resizable(as_area_t *);
+static bool elf_is_shareable(as_area_t *);
+
 static int elf_page_fault(as_area_t *, uintptr_t, pf_access_t);
 static void elf_frame_free(as_area_t *, uintptr_t, uintptr_t);
...
     .share = elf_share,
     .destroy = elf_destroy,
+
+    .is_resizable = elf_is_resizable,
+    .is_shareable = elf_is_shareable,
 
     .page_fault = elf_page_fault,
...
 }
 
+bool elf_is_resizable(as_area_t *area)
+{
+    return true;
+}
+
+bool elf_is_shareable(as_area_t *area)
+{
+    return true;
+}
+
+
 /** Service a page fault in the ELF backend address space area.
  *
kernel/generic/src/mm/backend_phys.c (r150a2718 → r8f88beb7)

 static void phys_destroy(as_area_t *);
 
+static bool phys_is_resizable(as_area_t *);
+static bool phys_is_shareable(as_area_t *);
+
+
 static int phys_page_fault(as_area_t *, uintptr_t, pf_access_t);
 
...
     .share = phys_share,
     .destroy = phys_destroy,
+
+    .is_resizable = phys_is_resizable,
+    .is_shareable = phys_is_shareable,
 
     .page_fault = phys_page_fault,
...
     /* Nothing to do. */
 }
+
+bool phys_is_resizable(as_area_t *area)
+{
+    return false;
+}
+
+bool phys_is_shareable(as_area_t *area)
+{
+    return true;
+}
+
 
 /** Service a page fault in the address space area backed by physical memory.
kernel/generic/src/mm/km.c (r150a2718 → r8f88beb7)

  * @param[inout] framep Pointer to a variable which will receive the physical
  *                      address of the allocated frame.
- * @param[in] flags Frame allocation flags. FRAME_NONE or FRAME_NO_RESERVE.
+ * @param[in] flags Frame allocation flags. FRAME_NONE, FRAME_NO_RESERVE
+ *                  and FRAME_ATOMIC bits are allowed.
  * @return Virtual address of the allocated frame.
  */
...
     ASSERT(THREAD);
     ASSERT(framep);
-    ASSERT(!(flags & ~FRAME_NO_RESERVE));
+    ASSERT(!(flags & ~(FRAME_NO_RESERVE | FRAME_ATOMIC)));
 
     /*
...
         ASSERT(page);  // FIXME
     } else {
-        frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME,
-            FRAME_LOWMEM);
+        frame = (uintptr_t) frame_alloc(ONE_FRAME,
+            FRAME_LOWMEM | flags);
+        if (!frame)
+            return (uintptr_t) NULL;
         page = PA2KA(frame);
     }
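Review note: allowing FRAME_ATOMIC means km_temporary_page_get() can now fail and return NULL instead of sleeping until memory becomes available, so any caller passing that flag must check the result. A hypothetical caller, sketched in kernel context:

/* Hypothetical caller: with FRAME_ATOMIC, allocation may fail instead
 * of blocking, so a NULL result must be handled explicitly. */
static int try_get_scratch_page(uintptr_t *kpage, uintptr_t *frame)
{
    *kpage = km_temporary_page_get(frame, FRAME_ATOMIC);
    if (!*kpage)
        return ENOMEM;  /* no low memory available right now */
    return EOK;
}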
kernel/generic/src/proc/program.c (r150a2718 → r8f88beb7)

      * Create the stack address space area.
      */
-    uintptr_t virt = USTACK_ADDRESS;
+    uintptr_t virt = (uintptr_t) -1;
+    uintptr_t bound = USER_ADDRESS_SPACE_END - (STACK_SIZE_USER - 1);
+
+    /* Adjust bound to create space for the desired guard page. */
+    bound -= PAGE_SIZE;
+
     as_area_t *area = as_area_create(as,
-        AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
-        STACK_SIZE, AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, 0);
+        AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE | AS_AREA_GUARD |
+        AS_AREA_LATE_RESERVE, STACK_SIZE_USER, AS_AREA_ATTR_NONE,
+        &anon_backend, NULL, &virt, bound);
     if (!area) {
         task_destroy(prg->task);
...
     kernel_uarg->uspace_entry = (void *) entry_addr;
     kernel_uarg->uspace_stack = (void *) virt;
-    kernel_uarg->uspace_stack_size = STACK_SIZE;
+    kernel_uarg->uspace_stack_size = STACK_SIZE_USER;
     kernel_uarg->uspace_thread_function = NULL;
     kernel_uarg->uspace_thread_arg = NULL;
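Review note: the arithmetic behind bound is that the stack is pushed to the very top of the user address space, and the lower bound is dropped by one extra page so the guard page demanded by AS_AREA_GUARD fits beneath the area. A worked example with illustrative constants (a 32-bit layout, 1 MiB user stack, 4 KiB pages; none of these values are taken from this changeset):

/* Worked example of the bound computation; constants are assumed. */
#include <stdint.h>
#include <stdio.h>

#define USER_ADDRESS_SPACE_END 0x7fffffffUL  /* assumed 32-bit layout */
#define STACK_SIZE_USER        (1024 * 1024) /* assumed 1 MiB */
#define PAGE_SIZE              4096

int main(void)
{
    uintptr_t bound = USER_ADDRESS_SPACE_END - (STACK_SIZE_USER - 1);
    bound -= PAGE_SIZE;  /* room for the guard page below the stack */
    printf("lowest stack base: %#lx\n", (unsigned long) bound);
    /* -> 0x7feff000; as_get_unmapped_area() then bumps a guarded
     * request by one page, so the stack itself lands at
     * 0x7ff00000-0x7fffffff with the page below it unmapped. */
    return 0;
}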
kernel/generic/src/proc/task.c (r150a2718 → r8f88beb7)

     task->ucycles = 0;
     task->kcycles = 0;
 
     task->ipc_info.call_sent = 0;
     task->ipc_info.call_received = 0;

(The only change in this hunk is whitespace on the blank line between the two groups.)