Changeset c739102 in mainline for kernel/generic/src
- Timestamp:
- 2012-11-21T23:26:22Z
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 0f2c80a
- Parents:
- bebf97d (diff), 1f7753a (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location:
- kernel/generic/src
- Files:
- 9 edited
Legend:
- Unmodified (context lines without a prefix)
- Added (lines prefixed with +)
- Removed (lines prefixed with -)
kernel/generic/src/interrupt/interrupt.c
--- rbebf97d
+++ rc739102
 static NO_TRACE void fault_from_uspace_core(istate_t *istate, const char *fmt, va_list args)
 {
-    printf("Task %s (%" PRIu64 ") killed due to an exception at "
-        "program counter %p.\n", TASK->name, TASK->taskid,
-        (void *) istate_get_pc(istate));
-
-    istate_decode(istate);
-    stack_trace_istate(istate);
-
-    printf("Kill message: ");
-    vprintf(fmt, args);
-    printf("\n");
+    if (!TASK->silent_kill) {
+        printf("Task %s (%" PRIu64 ") killed due to an exception at "
+            "program counter %p.\n", TASK->name, TASK->taskid,
+            (void *) istate_get_pc(istate));
+
+        istate_decode(istate);
+        stack_trace_istate(istate);
+
+        printf("Kill message: ");
+        vprintf(fmt, args);
+        printf("\n");
+    }
 
     task_kill_self(true);
kernel/generic/src/main/kinit.c
--- rbebf97d
+++ rc739102
 #endif /* CONFIG_KCONSOLE */
 
+    /*
+     * Store the default stack size in sysinfo so that uspace can create
+     * stack with this default size.
+     */
+    sysinfo_set_item_val("default.stack_size", NULL, STACK_SIZE_USER);
+
     interrupts_enable();
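
With the value exported, a userspace runtime can size newly created stacks from sysinfo instead of a hard-wired constant. The following is only an illustrative sketch of such a consumer: the sysinfo_get_value() call, its EOK return convention and the fallback constant are assumptions about the userspace side, not part of this changeset.

    #include <sysinfo.h>   /* userspace libc sysinfo interface (assumed) */
    #include <errno.h>
    #include <stddef.h>

    /* Hypothetical fallback for kernels that do not export the item. */
    #define FALLBACK_STACK_SIZE (1 << 20)

    /* Ask the kernel for the exported default stack size; fall back to a
     * compile-time value when the item is missing. */
    static size_t default_stack_size(void)
    {
        sysarg_t size;

        if (sysinfo_get_value("default.stack_size", &size) == EOK)
            return (size_t) size;

        return FALLBACK_STACK_SIZE;
    }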
kernel/generic/src/mm/as.c
--- rbebf97d
+++ rc739102
 /** Check area conflicts with other areas.
  *
- * @param as    Address space.
- * @param addr  Starting virtual address of the area being tested.
- * @param count Number of pages in the area being tested.
- * @param avoid Do not touch this area.
+ * @param as      Address space.
+ * @param addr    Starting virtual address of the area being tested.
+ * @param count   Number of pages in the area being tested.
+ * @param guarded True if the area being tested is protected by guard pages.
+ * @param avoid   Do not touch this area.
  *
  * @return True if there is no conflict, false otherwise.
…
  */
 NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr,
-    size_t count, as_area_t *avoid)
+    size_t count, bool guarded, as_area_t *avoid)
 {
     ASSERT((addr % PAGE_SIZE) == 0);
     ASSERT(mutex_locked(&as->lock));
+
+    /*
+     * If the addition of the supposed area address and size overflows,
+     * report conflict.
+     */
+    if (overflows_into_positive(addr, P2SZ(count)))
+        return false;
 
     /*
…
     if (overlaps(addr, P2SZ(count), (uintptr_t) NULL, PAGE_SIZE))
         return false;
 
     /*
      * The leaf node is found in O(log n), where n is proportional to
…
         if (area != avoid) {
             mutex_lock(&area->lock);
 
+            /*
+             * If at least one of the two areas are protected
+             * by the AS_AREA_GUARD flag then we must be sure
+             * that they are separated by at least one unmapped
+             * page.
+             */
+            int const gp = (guarded ||
+                (area->flags & AS_AREA_GUARD)) ? 1 : 0;
+
+            /*
+             * The area comes from the left neighbour node, which
+             * means that there already are some areas in the leaf
+             * node, which in turn means that adding gp is safe and
+             * will not cause an integer overflow.
+             */
             if (overlaps(addr, P2SZ(count), area->base,
+                P2SZ(area->pages + gp))) {
+                mutex_unlock(&area->lock);
+                return false;
+            }
+
+            mutex_unlock(&area->lock);
+        }
+    }
+
+    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
+    if (node) {
+        area = (as_area_t *) node->value[0];
+
+        if (area != avoid) {
+            int gp;
+
+            mutex_lock(&area->lock);
+
+            gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0;
+            if (gp && overflows(addr, P2SZ(count))) {
+                /*
+                 * Guard page not needed if the supposed area
+                 * is adjacent to the end of the address space.
+                 * We already know that the following test is
+                 * going to fail...
+                 */
+                gp--;
+            }
+
+            if (overlaps(addr, P2SZ(count + gp), area->base,
                 P2SZ(area->pages))) {
                 mutex_unlock(&area->lock);
…
     }
 
-    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
-    if (node) {
-        area = (as_area_t *) node->value[0];
-
-        if (area != avoid) {
-            mutex_lock(&area->lock);
-
-            if (overlaps(addr, P2SZ(count), area->base,
-                P2SZ(area->pages))) {
-                mutex_unlock(&area->lock);
-                return false;
-            }
-
-            mutex_unlock(&area->lock);
-        }
-    }
-
     /* Second, check the leaf node. */
     btree_key_t i;
     for (i = 0; i < leaf->keys; i++) {
         area = (as_area_t *) leaf->value[i];
+        int agp;
+        int gp;
 
         if (area == avoid)
…
 
         mutex_lock(&area->lock);
-
-        if (overlaps(addr, P2SZ(count), area->base,
-            P2SZ(area->pages))) {
+
+        gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0;
+        agp = gp;
+
+        /*
+         * Sanitize the two possible unsigned integer overflows.
+         */
+        if (gp && overflows(addr, P2SZ(count)))
+            gp--;
+        if (agp && overflows(area->base, P2SZ(area->pages)))
+            agp--;
+
+        if (overlaps(addr, P2SZ(count + gp), area->base,
+            P2SZ(area->pages + agp))) {
             mutex_unlock(&area->lock);
             return false;
…
     /*
      * So far, the area does not conflict with other areas.
-     * Check if it doesn't conflict with kernel address space.
+     * Check if it is contained in the user address space.
      */
     if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
-        return !overlaps(addr, P2SZ(count), KERNEL_ADDRESS_SPACE_START,
-            KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
+        return iswithin(USER_ADDRESS_SPACE_START,
+            (USER_ADDRESS_SPACE_END - USER_ADDRESS_SPACE_START) + 1,
+            addr, P2SZ(count));
     }
 
…
  * this function.
  *
- * @param as    Address space.
- * @param bound Lowest address bound.
- * @param size  Requested size of the allocation.
+ * @param as      Address space.
+ * @param bound   Lowest address bound.
+ * @param size    Requested size of the allocation.
+ * @param guarded True if the allocation must be protected by guard pages.
  *
  * @return Address of the beginning of unmapped address space area.
…
  */
 NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound,
-    size_t size)
+    size_t size, bool guarded)
 {
     ASSERT(mutex_locked(&as->lock));
…
     /* First check the bound address itself */
     uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
-    if ((addr >= bound) &&
-        (check_area_conflicts(as, addr, pages, NULL)))
-        return addr;
+    if (addr >= bound) {
+        if (guarded) {
+            /* Leave an unmapped page between the lower
+             * bound and the area's start address.
+             */
+            addr += P2SZ(1);
+        }
+
+        if (check_area_conflicts(as, addr, pages, guarded, NULL))
+            return addr;
+    }
 
     /* Eventually check the addresses behind each area */
…
             addr =
                 ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE);
+
+            if (guarded || area->flags & AS_AREA_GUARD) {
+                /* We must leave an unmapped page
+                 * between the two areas.
+                 */
+                addr += P2SZ(1);
+            }
+
             bool avail =
                 ((addr >= bound) && (addr >= area->base) &&
-                (check_area_conflicts(as, addr, pages, area)));
+                (check_area_conflicts(as, addr, pages, guarded, area)));
 
             mutex_unlock(&area->lock);
…
     if (size == 0)
         return NULL;
 
     size_t pages = SIZE2FRAMES(size);
 
…
     if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
         return NULL;
+
+    bool const guarded = flags & AS_AREA_GUARD;
 
     mutex_lock(&as->lock);
 
     if (*base == (uintptr_t) -1) {
-        *base = as_get_unmapped_area(as, bound, size);
+        *base = as_get_unmapped_area(as, bound, size, guarded);
         if (*base == (uintptr_t) -1) {
             mutex_unlock(&as->lock);
…
         }
     }
-
-    if (!check_area_conflicts(as, *base, pages, NULL)) {
+
+    if (overflows_into_positive(*base, size))
+        return NULL;
+
+    if (!check_area_conflicts(as, *base, pages, guarded, NULL)) {
         mutex_unlock(&as->lock);
         return NULL;
…
         return ENOENT;
     }
-
-    if (area->backend == &phys_backend) {
-        /*
-         * Remapping of address space areas associated
-         * with memory mapped devices is not supported.
+
+    if (!area->backend->is_resizable(area)) {
+        /*
+         * The backend does not support resizing for this area.
          */
         mutex_unlock(&area->lock);
…
         /*
          * Growing the area.
+         */
+
+        if (overflows_into_positive(address, P2SZ(pages)))
+            return EINVAL;
+
+        /*
          * Check for overlaps with other address space areas.
          */
-        if (!check_area_conflicts(as, address, pages, area)) {
+        bool const guarded = area->flags & AS_AREA_GUARD;
+        if (!check_area_conflicts(as, address, pages, guarded, area)) {
             mutex_unlock(&area->lock);
             mutex_unlock(&as->lock);
…
     }
 
-    if ((!src_area->backend) || (!src_area->backend->share)) {
-        /*
-         * There is no backend or the backend does not
-         * know how to share the area.
+    if (!src_area->backend->is_shareable(src_area)) {
+        /*
+         * The backend does not permit sharing of this area.
          */
         mutex_unlock(&src_area->lock);
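
The essence of the guard-page handling added above is independent of the B+tree bookkeeping: whenever either the candidate area or an existing area is guarded, the overlap test behaves as if the intervals were one page wider, and that widening is dropped again when it would overflow the address arithmetic. Below is a self-contained sketch of just that rule; the overlaps(), wraps() and conflicts() helpers and the area_t type are local to the example, not the kernel's API. In the kernel the same idea is spread over the left-neighbour, right-neighbour and leaf-node checks because each needs slightly different overflow reasoning.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096

    typedef struct {
        uintptr_t base;   /* starting virtual address */
        size_t pages;     /* size in pages */
        bool guarded;     /* protected by guard pages */
    } area_t;

    /* True if [a, a + asz) and [b, b + bsz) intersect. */
    static bool overlaps(uintptr_t a, size_t asz, uintptr_t b, size_t bsz)
    {
        return (a < b + bsz) && (b < a + asz);
    }

    /* True if base + size wraps around the top of the address space. */
    static bool wraps(uintptr_t base, size_t size)
    {
        return base + size < base;
    }

    /*
     * Conflict test between a candidate area and an existing one: if either
     * side is guarded, one extra unmapped page must separate them, so the
     * test is done with each interval widened by one page -- unless that
     * widening itself would wrap, i.e. the area already touches the end of
     * the address space and nothing can be mapped behind it anyway.
     */
    static bool conflicts(uintptr_t addr, size_t pages, bool guarded,
        const area_t *area)
    {
        size_t gp = (guarded || area->guarded) ? 1 : 0;
        size_t agp = gp;

        if (gp && wraps(addr, (pages + gp) * PAGE_SIZE))
            gp = 0;
        if (agp && wraps(area->base, (area->pages + agp) * PAGE_SIZE))
            agp = 0;

        return overlaps(addr, (pages + gp) * PAGE_SIZE,
            area->base, (area->pages + agp) * PAGE_SIZE);
    }

    int main(void)
    {
        area_t existing = { .base = 0x40000000, .pages = 4, .guarded = false };

        /* Placing an unguarded area right behind it is fine ... */
        printf("%d\n", conflicts(0x40004000, 2, false, &existing));  /* 0 */
        /* ... a guarded one needs one unmapped page in between ... */
        printf("%d\n", conflicts(0x40004000, 2, true, &existing));   /* 1 */
        /* ... and with that gap present there is no conflict. */
        printf("%d\n", conflicts(0x40005000, 2, true, &existing));   /* 0 */
        return 0;
    }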
kernel/generic/src/mm/backend_anon.c
--- rbebf97d
+++ rc739102
 static void anon_destroy(as_area_t *);
 
+static bool anon_is_resizable(as_area_t *);
+static bool anon_is_shareable(as_area_t *);
+
 static int anon_page_fault(as_area_t *, uintptr_t, pf_access_t);
 static void anon_frame_free(as_area_t *, uintptr_t, uintptr_t);
…
     .destroy = anon_destroy,
 
+    .is_resizable = anon_is_resizable,
+    .is_shareable = anon_is_shareable,
+
     .page_fault = anon_page_fault,
     .frame_free = anon_frame_free,
…
 bool anon_create(as_area_t *area)
 {
+    if (area->flags & AS_AREA_LATE_RESERVE)
+        return true;
+
     return reserve_try_alloc(area->pages);
 }
…
 bool anon_resize(as_area_t *area, size_t new_pages)
 {
+    if (area->flags & AS_AREA_LATE_RESERVE)
+        return true;
+
     if (new_pages > area->pages)
         return reserve_try_alloc(new_pages - area->pages);
…
     ASSERT(mutex_locked(&area->as->lock));
     ASSERT(mutex_locked(&area->lock));
+    ASSERT(!(area->flags & AS_AREA_LATE_RESERVE));
 
     /*
…
 void anon_destroy(as_area_t *area)
 {
+    if (area->flags & AS_AREA_LATE_RESERVE)
+        return;
+
     reserve_free(area->pages);
 }
 
+bool anon_is_resizable(as_area_t *area)
+{
+    return true;
+}
+
+bool anon_is_shareable(as_area_t *area)
+{
+    return !(area->flags & AS_AREA_LATE_RESERVE);
+}
 
 /** Service a page fault in the anonymous memory address space area.
…
      * the different causes
      */
+
+    if (area->flags & AS_AREA_LATE_RESERVE) {
+        /*
+         * Reserve the memory for this page now.
+         */
+        if (!reserve_try_alloc(1)) {
+            printf("Killing task %" PRIu64 " due to a "
+                "failed late reservation request.\n",
+                TASK->taskid);
+            TASK->silent_kill = true;
+            return AS_PF_FAULT;
+        }
+    }
+
     kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);
     memsetb((void *) kpage, PAGE_SIZE, 0);
…
     ASSERT(mutex_locked(&area->lock));
 
-    frame_free_noreserve(frame);
+    if (area->flags & AS_AREA_LATE_RESERVE) {
+        /*
+         * In case of the late reserve areas, physical memory will not
+         * be unreserved when the area is destroyed so we need to use
+         * the normal unreserving frame_free().
+         */
+        frame_free(frame);
+    } else {
+        /*
+         * The reserve will be given back when the area is destroyed or
+         * resized, so use the frame_free_noreserve() which does not
+         * manipulate the reserve or it would be given back twice.
+         */
+        frame_free_noreserve(frame);
+    }
 
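
The AS_AREA_LATE_RESERVE changes move the memory reservation for anonymous areas from creation time to the first page fault on each page, and couple a failed reservation to the new silent_kill flag (set here, consulted in interrupt.c above). A compressed, self-contained sketch of that policy follows; reserve_pages() is only a stand-in for the kernel's reserve_try_alloc(), and the actual frame mapping is omitted.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    enum pf_result { PF_OK, PF_FAULT };

    typedef struct {
        bool late_reserve;        /* AS_AREA_LATE_RESERVE set on this area */
        size_t pages;
    } area_t;

    typedef struct {
        unsigned long long id;
        bool silent_kill;         /* suppress the usual kill diagnostics */
    } task_t;

    /* Stand-in for the kernel's reservation call (reserve_try_alloc()). */
    static bool reserve_pages(size_t pages)
    {
        (void) pages;
        return true;              /* pretend the reservation succeeds */
    }

    /* Eager areas reserve all of their memory up front ... */
    static bool area_create(area_t *area)
    {
        if (area->late_reserve)
            return true;                      /* defer to fault time */
        return reserve_pages(area->pages);
    }

    /* ... late-reserve areas pay for each page when it is first touched. */
    static enum pf_result area_fault(area_t *area, task_t *task)
    {
        if (area->late_reserve && !reserve_pages(1)) {
            printf("Killing task %llu due to a failed late "
                "reservation request.\n", task->id);
            task->silent_kill = true;         /* kill without the usual dump */
            return PF_FAULT;
        }

        /* ... allocate and map a zero-filled frame here ... */
        return PF_OK;
    }

    int main(void)
    {
        area_t stack = { .late_reserve = true, .pages = 2048 };
        task_t task = { .id = 1, .silent_kill = false };

        area_create(&stack);                  /* reserves nothing yet */
        return area_fault(&stack, &task) == PF_OK ? 0 : 1;
    }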
kernel/generic/src/mm/backend_elf.c
--- rbebf97d
+++ rc739102
 static void elf_destroy(as_area_t *);
 
+static bool elf_is_resizable(as_area_t *);
+static bool elf_is_shareable(as_area_t *);
+
 static int elf_page_fault(as_area_t *, uintptr_t, pf_access_t);
 static void elf_frame_free(as_area_t *, uintptr_t, uintptr_t);
…
     .share = elf_share,
     .destroy = elf_destroy,
+
+    .is_resizable = elf_is_resizable,
+    .is_shareable = elf_is_shareable,
 
     .page_fault = elf_page_fault,
…
 }
 
+bool elf_is_resizable(as_area_t *area)
+{
+    return true;
+}
+
+bool elf_is_shareable(as_area_t *area)
+{
+    return true;
+}
+
+
 /** Service a page fault in the ELF backend address space area.
  *
kernel/generic/src/mm/backend_phys.c
--- rbebf97d
+++ rc739102
 static void phys_destroy(as_area_t *);
 
+static bool phys_is_resizable(as_area_t *);
+static bool phys_is_shareable(as_area_t *);
+
+
 static int phys_page_fault(as_area_t *, uintptr_t, pf_access_t);
 
…
     .share = phys_share,
     .destroy = phys_destroy,
+
+    .is_resizable = phys_is_resizable,
+    .is_shareable = phys_is_shareable,
 
     .page_fault = phys_page_fault,
…
     /* Nothing to do. */
 }
+
+bool phys_is_resizable(as_area_t *area)
+{
+    return false;
+}
+
+bool phys_is_shareable(as_area_t *area)
+{
+    return true;
+}
+
 
 /** Service a page fault in the address space area backed by physical memory.
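
Together with the as.c changes, the backends now answer capability questions through is_resizable()/is_shareable() hooks instead of callers comparing against a particular backend instance (the old area->backend == &phys_backend test). A minimal sketch of that dispatch pattern follows; the structure layouts and the flag value are simplified illustrations, not the kernel's real definitions.

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct as_area as_area_t;

    /* Every backend publishes its own capability predicates. */
    typedef struct {
        bool (*is_resizable)(as_area_t *);
        bool (*is_shareable)(as_area_t *);
    } mem_backend_t;

    struct as_area {
        const mem_backend_t *backend;
        unsigned int flags;
    };

    #define AREA_LATE_RESERVE 0x1   /* illustrative flag value */

    /* Physically backed areas map device memory: never resizable. */
    static bool phys_is_resizable(as_area_t *a) { (void) a; return false; }
    static bool phys_is_shareable(as_area_t *a) { (void) a; return true; }

    /* Anonymous areas resize freely, but late-reserve ones are not shared. */
    static bool anon_is_resizable(as_area_t *a) { (void) a; return true; }
    static bool anon_is_shareable(as_area_t *a)
    {
        return !(a->flags & AREA_LATE_RESERVE);
    }

    static const mem_backend_t phys_backend = {
        .is_resizable = phys_is_resizable,
        .is_shareable = phys_is_shareable,
    };

    static const mem_backend_t anon_backend = {
        .is_resizable = anon_is_resizable,
        .is_shareable = anon_is_shareable,
    };

    /* Caller side (e.g. an area-resize path): one generic question. */
    static bool can_resize(as_area_t *area)
    {
        return area->backend->is_resizable(area);
    }

    int main(void)
    {
        as_area_t device_area = { .backend = &phys_backend, .flags = 0 };
        as_area_t stack_area = { .backend = &anon_backend,
            .flags = AREA_LATE_RESERVE };

        printf("%d %d\n", can_resize(&device_area),              /* 0 */
            stack_area.backend->is_shareable(&stack_area));      /* 0 */
        return 0;
    }

One benefit of this design is that a new backend, or a per-area exception such as AS_AREA_LATE_RESERVE, no longer requires touching the generic address-space code paths.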
kernel/generic/src/mm/km.c
--- rbebf97d
+++ rc739102
  * @param[inout] framep Pointer to a variable which will receive the physical
  *                      address of the allocated frame.
- * @param[in] flags     Frame allocation flags. FRAME_NONE or FRAME_NO_RESERVE.
+ * @param[in] flags     Frame allocation flags. FRAME_NONE, FRAME_NO_RESERVE
+ *                      and FRAME_ATOMIC bits are allowed.
  * @return Virtual address of the allocated frame.
  */
…
     ASSERT(THREAD);
     ASSERT(framep);
-    ASSERT(!(flags & ~FRAME_NO_RESERVE));
+    ASSERT(!(flags & ~(FRAME_NO_RESERVE | FRAME_ATOMIC)));
 
     /*
…
         ASSERT(page);  // FIXME
     } else {
-        frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME,
-            FRAME_LOWMEM);
+        frame = (uintptr_t) frame_alloc(ONE_FRAME,
+            FRAME_LOWMEM | flags);
+        if (!frame)
+            return (uintptr_t) NULL;
         page = PA2KA(frame);
     }
kernel/generic/src/proc/program.c
--- rbebf97d
+++ rc739102
      * Create the stack address space area.
      */
-    uintptr_t virt = USTACK_ADDRESS;
+    uintptr_t virt = (uintptr_t) -1;
+    uintptr_t bound = USER_ADDRESS_SPACE_END - (STACK_SIZE_USER - 1);
+
+    /* Adjust bound to create space for the desired guard page. */
+    bound -= PAGE_SIZE;
+
     as_area_t *area = as_area_create(as,
-        AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
-        STACK_SIZE, AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, 0);
+        AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE | AS_AREA_GUARD |
+        AS_AREA_LATE_RESERVE, STACK_SIZE_USER, AS_AREA_ATTR_NONE,
+        &anon_backend, NULL, &virt, bound);
     if (!area) {
         task_destroy(prg->task);
…
     kernel_uarg->uspace_entry = (void *) entry_addr;
     kernel_uarg->uspace_stack = (void *) virt;
-    kernel_uarg->uspace_stack_size = STACK_SIZE;
+    kernel_uarg->uspace_stack_size = STACK_SIZE_USER;
     kernel_uarg->uspace_thread_function = NULL;
     kernel_uarg->uspace_thread_arg = NULL;
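
The bound arithmetic is worth a worked example: the stack should end exactly at the top of the user address space, and the bound is lowered by one extra page so that as_get_unmapped_area() can keep an unmapped guard page below the stack's base. The constants below are illustrative only (and assume a 64-bit build), not the real per-architecture values.

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        /* Illustrative constants only. */
        const uintptr_t user_as_end = 0x7fffffffffffUL;  /* USER_ADDRESS_SPACE_END */
        const uintptr_t stack_size = 8 * 1024 * 1024;    /* STACK_SIZE_USER */
        const uintptr_t page_size = 4096;                /* PAGE_SIZE */

        /* Base at which a stack of this size ends exactly at the top of
         * the user address space. */
        uintptr_t bound = user_as_end - (stack_size - 1);

        /* Lower the bound by one page: the allocator skips one unmapped
         * page above the bound (the guard page), and the stack placed
         * above it still fits under the top of the address space. */
        bound -= page_size;

        printf("guard page at 0x%" PRIxPTR ", stack base at 0x%" PRIxPTR "\n",
            bound, bound + page_size);
        return 0;
    }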
kernel/generic/src/proc/task.c
--- rbebf97d
+++ rc739102
     task->ucycles = 0;
     task->kcycles = 0;
+
+    task->silent_kill = false;
 
     task->ipc_info.call_sent = 0;