Changes in kernel/generic/src/mm/as.c [7e752b2:336db295] in mainline
1 file edited:
kernel/generic/src/mm/as.c
Summary of the changes in r336db295 relative to r7e752b2:

- File header: the copyright line changes from "Copyright (c) 2010 Jakub Jermar" to "Copyright (c) 2001-2006 Jakub Jermar", and the empty @brief line is filled in as "Address space related functions."
- Includes: #include <typedefs.h> is replaced by #include <arch/types.h>.
- Doc comments: the trailing empty " *" lines are removed from the block comments describing as_operations, the as_slab cache, the asidlock spinlock and the inactive_as_with_asid_head list, and from most later functions as well.
- Forward declarations are added for area_flags_to_page_flags(), find_area_and_lock(), check_area_conflicts() and sh_info_remove_reference(); their definitions move from before first use to the end of the file (see the items further down).
- as_constructor() / as_destructor(): the NO_TRACE qualifier is dropped, the flags parameter becomes int instead of unsigned int, as_destructor() returns int instead of size_t, and the return value of as_constructor_arch() is stored in a separately declared rc.
- as_init(): the kernel address space's permanent reference is taken with atomic_set(&AS_KERNEL->refcount, 1) instead of as_hold(AS_KERNEL).
- as_create(): the flags parameter becomes int and the as pointer is declared at the top of the function rather than initialized in its declaration.
- as_destroy(): ipl and cond are declared at the top of the function and the ASSERT(as != AS) is dropped; interrupts are read before the asidlock retry loop and restored only after the page tables have been destroyed (r7e752b2 restores them right after releasing asidlock); the test for removing the address space from the inactive list becomes (as != AS && as->cpu_refcount == 0); the comment above the areas walk is reworded ("no reference to this area" instead of "no reference to this address space"); and the area-destruction loop is written as for (cond = true; cond;) with the btree_node_t declared inside the loop, instead of bool cond = true; while (cond).
- as_hold() and as_release() are removed. In r7e752b2 they wrap atomic_inc(&as->refcount) and atomic_predec(&as->refcount), with the caller that drops the count to zero calling as_destroy().
- check_area_conflicts(): the NO_TRACE static definition placed ahead of as_area_create() (including its ASSERT(mutex_locked(&as->lock))) is removed; r336db295 only forward-declares the function and defines it near the end of the file.
- as_area_create(): the flags and attrs parameters become int; the body is bracketed by interrupts_disable() / interrupts_restore(); the local area is renamed to a; otherwise the sequence (conflict check, malloc of the as_area_t, mutex and used_space B+tree initialization, insertion into as_area_btree) is unchanged.
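The removed as_hold()/as_release() pair is a plain hold/put reference count over as->refcount: holders call as_hold(), and whichever caller drops the count to zero with as_release() runs as_destroy(). A minimal stand-alone sketch of that pattern using C11 atomics follows; the toy_* names and the user-space destroy hook are illustrative only, not HelenOS API.

/* Stand-alone sketch of the hold/release pattern implemented by
 * r7e752b2's as_hold()/as_release() over as->refcount.  Plain C11
 * atomics are used instead of the kernel's atomic_t. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
	atomic_uint refcount;
	/* ... address space state would live here ... */
} toy_as_t;

static void toy_as_destroy(toy_as_t *as)
{
	/* Reached only by the last release, so no references remain. */
	printf("destroying address space %p\n", (void *) as);
	free(as);
}

/* Equivalent of as_hold(): holding a reference prevents destruction. */
static void toy_as_hold(toy_as_t *as)
{
	atomic_fetch_add(&as->refcount, 1);
}

/* Equivalent of as_release(): the caller that drops the count to zero
 * destroys the address space (atomic_predec() in the kernel source). */
static void toy_as_release(toy_as_t *as)
{
	if (atomic_fetch_sub(&as->refcount, 1) == 1)
		toy_as_destroy(as);
}

int main(void)
{
	toy_as_t *as = calloc(1, sizeof(*as));
	atomic_init(&as->refcount, 0);

	toy_as_hold(as);      /* e.g. the creator's reference */
	toy_as_hold(as);      /* e.g. a task attaching the address space */
	toy_as_release(as);   /* detach: count drops 2 -> 1, no destroy */
	toy_as_release(as);   /* last reference gone: destroy runs here */
	return 0;
}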
- find_area_and_lock(): likewise loses its early NO_TRACE static definition (with ASSERT(mutex_locked(&as->lock))) in favour of a forward declaration and a definition near the end of the file.
- as_area_resize(): the flags parameter becomes int; area, ipl and pages are declared at the top and interrupts are disabled for the whole call, with interrupts_restore(ipl) added to every return path. On the shrink path, the single page_table_lock(as, false) / page_table_unlock(as, false) pair around the whole walk is replaced by a lock/unlock pair around each individual page_mapping_find() / page_mapping_remove(); tlb_shootdown_start() no longer returns an ipl_t and tlb_shootdown_finalize() no longer takes one; the loop locals are renamed (ptr to b, size to c, size_t i to unsigned int i); the three separate PTE assertions collapse into ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte)); and the panic message is split across two string literals. The grow path (check_area_conflicts() against the enlarged range) is unchanged apart from the added interrupts_restore() on the EADDRNOTAVAIL return.
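Both revisions follow the same shoot-down protocol on the shrink path: start the shootdown, tear the mappings down, invalidate the hardware TLB and any software translation caches, then finalize; r7e752b2 merely threads the ipl returned by tlb_shootdown_start() into tlb_shootdown_finalize() and holds the page table lock across the whole walk. The ordering is sketched below with printf stubs standing in for the kernel primitives named in the diff; the *_stub names are placeholders, not real functions.

/* Ordering sketch of the shrink path's TLB shootdown protocol.
 * Every function here is a stub standing in for the kernel primitive
 * of the same base name; only the sequence matters. */
#include <stdio.h>
#include <stddef.h>

typedef unsigned ipl_t;

static ipl_t tlb_shootdown_start_stub(void)    { puts("1. tlb_shootdown_start()"); return 0; }
static void  remove_one_mapping_stub(size_t i) { printf("2. unmap page %zu (under page_table_lock in r336db295)\n", i); }
static void  tlb_invalidate_pages_stub(void)   { puts("3. tlb_invalidate_pages()"); }
static void  invalidate_sw_caches_stub(void)   { puts("4. as_invalidate_translation_cache()"); }
static void  tlb_shootdown_finalize_stub(ipl_t ipl) { (void) ipl; puts("5. tlb_shootdown_finalize()"); }

int main(void)
{
	/* r7e752b2 takes page_table_lock(as, false) here, once for the walk. */
	ipl_t ipl = tlb_shootdown_start_stub();

	for (size_t i = 0; i < 3; i++)
		remove_one_mapping_stub(i);   /* frame_free() + page_mapping_remove() */

	tlb_invalidate_pages_stub();
	invalidate_sw_caches_stub();          /* e.g. TSB on sparc64 */
	tlb_shootdown_finalize_stub(ipl);     /* r336db295's variant takes no ipl */
	/* ...and r7e752b2 drops the page table lock here. */
	return 0;
}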
- sh_info_remove_reference(): its NO_TRACE static definition before as_area_destroy() is removed; the function is defined at the end of the file instead.
- as_area_destroy(): area, base, cur and ipl are declared at the top and interrupts are disabled for the whole call; as in the resize path, each page of the used_space walk is handled under its own page_table_lock() / page_table_unlock() pair instead of one pair around the whole operation, the PTE assertions are combined into one, tlb_shootdown_start() / tlb_shootdown_finalize() lose the ipl, and the loop variables are renamed (ptr to b, size to j, btree_key_t i to unsigned int i). The rest — destroying the used_space B+tree, setting AS_AREA_ATTR_PARTIAL, dropping the sh_info reference and removing the area from as_area_btree — is unchanged.
- as_area_share(): dst_flags_mask becomes int; all locals (ipl, src_flags, src_size, src_area, dst_area, sh_info, src_backend, src_backend_data) are declared at the top; interrupts are disabled on entry and restored on every exit path; and the @param/@return documentation is reflowed into a single paragraph covering the ENOENT, EPERM, ENOMEM and ENOTSUP cases. The logic itself — locking the source area, requiring a backend with a share hook, comparing acc_size and the flag mask, creating or reusing the share_info_t, calling backend->share(), creating the destination area with AS_AREA_ATTR_PARTIAL and clearing that attribute once sh_info is attached — is the same.
- as_area_check_access(): NO_TRACE is dropped and the ASSERT(mutex_locked(&area->lock)) is replaced by the doc-comment note "The address space area must be locked prior to this call."
- area_flags_to_page_flags(): the early NO_TRACE static definition is removed; r336db295 declares it up front and defines it as a plain int area_flags_to_page_flags(int) after as_switch().
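The flag translation done by area_flags_to_page_flags() is identical in both revisions; only its placement, linkage and integer types differ. A self-contained copy of that logic follows, with the AS_AREA_* and PAGE_* bit values picked arbitrarily so the snippet compiles outside the kernel (in the kernel they come from the mm headers).

/* Self-contained copy of the area_flags_to_page_flags() logic from the
 * diff.  The bit values below are arbitrary placeholders; they are not
 * the kernel's real definitions. */
#include <stdio.h>

enum { AS_AREA_READ = 1, AS_AREA_WRITE = 2, AS_AREA_EXEC = 4, AS_AREA_CACHEABLE = 8 };
enum {
	PAGE_PRESENT = 1, PAGE_USER = 2, PAGE_READ = 4,
	PAGE_WRITE = 8, PAGE_EXEC = 16, PAGE_CACHEABLE = 32
};

static int area_flags_to_page_flags(int aflags)
{
	int flags = PAGE_USER | PAGE_PRESENT;

	if (aflags & AS_AREA_READ)
		flags |= PAGE_READ;
	if (aflags & AS_AREA_WRITE)
		flags |= PAGE_WRITE;
	if (aflags & AS_AREA_EXEC)
		flags |= PAGE_EXEC;
	if (aflags & AS_AREA_CACHEABLE)
		flags |= PAGE_CACHEABLE;

	return flags;
}

int main(void)
{
	/* A readable, writable, cacheable anonymous area. */
	int pf = area_flags_to_page_flags(AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE);
	printf("page flags = 0x%x\n", pf);
	return 0;
}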
- as_area_change_flags(): the flags parameter becomes int; area, base, cur, ipl, page_flags, old_frame, frame_idx and used_pages are declared at the top, base = area->base is assigned, and interrupts are disabled for the whole operation. The structure is unchanged: count the used pages, allocate the old_frame array, start the TLB shootdown, remove every existing mapping while recording its frame, invalidate the TLB and the software translation caches, set the new flags, and map the saved frames back in with the new page flags (kept as a separate pass so the frames survive the unmap). As elsewhere, the unmap loop takes page_table_lock() / page_table_unlock() per page, the combined PTE assertion is used, tlb_shootdown_start() / tlb_shootdown_finalize() lose the ipl, and the loop variables are renamed (ptr to b, size to j, btree_key_t to unsigned int).
- as_page_fault(): the guard if (!AS) return AS_PF_FAULT; becomes ASSERT(AS); and pte and area are declared at the top. The flow is unchanged: look up the faulting area, reject partial areas and areas whose backend has no page_fault handler, take the page table lock, re-check page_mapping_find() to catch a racing fault that already installed the mapping, and otherwise let the backend resolve the fault.
- as_switch(): only whitespace and comment churn; the ASID handover logic is identical.
- area_flags_to_page_flags(): its definition now follows as_switch(), building PAGE_USER | PAGE_PRESENT and or-ing in PAGE_READ / PAGE_WRITE / PAGE_EXEC / PAGE_CACHEABLE according to the AS_AREA_* bits.
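The comment both revisions keep in as_page_fault() explains the re-check: two threads can fault on the same page, and the loser must notice, after taking the page table lock, that the winner already installed the mapping. The same check-under-lock idea is shown below transplanted to user space with a pthread mutex and a toy mapping table; it is an analogy only, not the kernel code.

/* Check-under-lock pattern used by as_page_fault(): after taking the
 * page table lock, look the page up again in case a concurrent fault
 * on the same address already installed the mapping.  Toy user-space
 * analogy; the table and locking are stand-ins. */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SLOTS 16

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static bool present[SLOTS];              /* "PTE present" bits */

static void handle_fault(uintptr_t page)
{
	size_t slot = page % SLOTS;

	pthread_mutex_lock(&table_lock);     /* page_table_lock(AS, false) */

	if (present[slot]) {
		/* A concurrent fault on the same page won the race and
		 * already inserted the mapping (AS_PF_OK in the kernel). */
		pthread_mutex_unlock(&table_lock);
		return;
	}

	/* The backend would allocate a frame here; just mark it present. */
	present[slot] = true;
	printf("installed mapping for page %#lx\n", (unsigned long) page);

	pthread_mutex_unlock(&table_lock);   /* page_table_unlock(AS, false) */
}

int main(void)
{
	handle_fault(0x4000);
	handle_fault(0x4000);                /* second fault finds it present */
	return 0;
}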
- as_area_get_flags(): NO_TRACE and the ASSERT(mutex_locked(&area->lock)) are dropped; the doc comment instead states that the area must be locked and interrupts disabled; the parameter is renamed from area to a and the return type becomes int.
- page_table_create() / page_table_destroy() / page_table_lock() / page_table_unlock(): NO_TRACE is dropped and the flags parameter of page_table_create() becomes int; the bodies still just dispatch through as_operations.
- page_table_locked() is removed entirely (in r7e752b2 it forwards to as_operations->page_table_locked()).
- find_area_and_lock(): defined here in r336db295 — btree_search() the as_area_btree for va, then check the leaf's own records and the rightmost record of its left neighbour, returning the containing area locked, or NULL on a miss.
- check_area_conflicts(): also defined here — reject any overlap with the NULL page, test the rightmost record of the left neighbour, the leftmost record of the right neighbour and every record of the leaf itself with overlaps(), and finally reject overlap with the kernel address space unless KERNEL_ADDRESS_SPACE_SHADOWED.
- as_area_get_size(): the lookup is bracketed by interrupts_disable() / interrupts_restore() instead of page_table_lock(AS, true) / page_table_unlock(AS, true).
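check_area_conflicts() leans on the overlaps() helper for every test: against the NULL page, against the neighbouring B+tree records and against the kernel address space. The helper itself lives elsewhere in the kernel tree; the half-open-interval implementation below is only assumed to match the way it is used here.

/* Assumed implementation of the overlaps() helper used throughout
 * check_area_conflicts(): true when the half-open intervals
 * [s1, s1 + sz1) and [s2, s2 + sz2) intersect.  A sketch consistent
 * with its uses in the diff, not the kernel's own definition. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 4096u

static bool overlaps(uintptr_t s1, size_t sz1, uintptr_t s2, size_t sz2)
{
	return (s1 < s2 + sz2) && (s2 < s1 + sz1);
}

int main(void)
{
	uintptr_t va = 0x0;                  /* candidate area base */
	size_t size = 2 * TOY_PAGE_SIZE;     /* candidate area size */

	/* First test in check_area_conflicts(): no area may cover the NULL page. */
	if (overlaps(va, size, (uintptr_t) 0, TOY_PAGE_SIZE))
		puts("conflict: candidate area would cover the NULL page");

	/* Against an existing neighbour area, as done per B+tree record. */
	uintptr_t nb_base = 0x10000;
	size_t nb_pages = 4;
	printf("conflict with neighbour: %s\n",
	    overlaps(va, size, nb_base, nb_pages * TOY_PAGE_SIZE) ? "yes" : "no");
	return 0;
}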
*/ … … 1563 1442 right_cnt * PAGE_SIZE)) { 1564 1443 /* The interval intersects with the right interval. */ 1565 return 0; 1444 return 0; 1566 1445 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1567 1446 (page + count * PAGE_SIZE == right_pg)) { … … 1569 1448 * The interval can be added by merging the two already 1570 1449 * present intervals. 1571 *1572 1450 */ 1573 1451 node->value[node->keys - 1] += count + right_cnt; 1574 btree_remove(&a rea->used_space, right_pg, leaf);1575 return 1; 1452 btree_remove(&a->used_space, right_pg, leaf); 1453 return 1; 1576 1454 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1577 /* 1455 /* 1578 1456 * The interval can be added by simply growing the left 1579 1457 * interval. 1580 *1581 1458 */ 1582 1459 node->value[node->keys - 1] += count; … … 1587 1464 * the right interval down and increasing its size 1588 1465 * accordingly. 1589 *1590 1466 */ 1591 1467 leaf->value[0] += count; … … 1596 1472 * The interval is between both neigbouring intervals, 1597 1473 * but cannot be merged with any of them. 1598 *1599 1474 */ 1600 btree_insert(&a rea->used_space, page, (void *) count,1475 btree_insert(&a->used_space, page, (void *) count, 1601 1476 leaf); 1602 1477 return 1; … … 1605 1480 uintptr_t right_pg = leaf->key[0]; 1606 1481 size_t right_cnt = (size_t) leaf->value[0]; 1607 1482 1608 1483 /* 1609 1484 * Investigate the border case in which the left neighbour does 1610 1485 * not exist but the interval fits from the left. 1611 * 1612 */ 1613 1486 */ 1487 1614 1488 if (overlaps(page, count * PAGE_SIZE, right_pg, 1615 1489 right_cnt * PAGE_SIZE)) { … … 1621 1495 * right interval down and increasing its size 1622 1496 * accordingly. 1623 *1624 1497 */ 1625 1498 leaf->key[0] = page; … … 1630 1503 * The interval doesn't adjoin with the right interval. 1631 1504 * It must be added individually. 1632 *1633 1505 */ 1634 btree_insert(&a rea->used_space, page, (void *) count,1506 btree_insert(&a->used_space, page, (void *) count, 1635 1507 leaf); 1636 1508 return 1; 1637 1509 } 1638 1510 } 1639 1640 node = btree_leaf_node_right_neighbour(&a rea->used_space, leaf);1511 1512 node = btree_leaf_node_right_neighbour(&a->used_space, leaf); 1641 1513 if (node) { 1642 1514 uintptr_t left_pg = leaf->key[leaf->keys - 1]; … … 1649 1521 * somewhere between the leftmost interval of 1650 1522 * the right neigbour and the last interval of the leaf. 1651 * 1652 */ 1653 1523 */ 1524 1654 1525 if (page < left_pg) { 1655 1526 /* Do nothing. */ … … 1661 1532 right_cnt * PAGE_SIZE)) { 1662 1533 /* The interval intersects with the right interval. */ 1663 return 0; 1534 return 0; 1664 1535 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1665 1536 (page + count * PAGE_SIZE == right_pg)) { … … 1667 1538 * The interval can be added by merging the two already 1668 1539 * present intervals. 1669 * 1670 */ 1540 * */ 1671 1541 leaf->value[leaf->keys - 1] += count + right_cnt; 1672 btree_remove(&a rea->used_space, right_pg, node);1673 return 1; 1542 btree_remove(&a->used_space, right_pg, node); 1543 return 1; 1674 1544 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1675 1545 /* 1676 1546 * The interval can be added by simply growing the left 1677 1547 * interval. 1678 * 1679 */ 1548 * */ 1680 1549 leaf->value[leaf->keys - 1] += count; 1681 1550 return 1; … … 1685 1554 * the right interval down and increasing its size 1686 1555 * accordingly. 
1687 *1688 1556 */ 1689 1557 node->value[0] += count; … … 1694 1562 * The interval is between both neigbouring intervals, 1695 1563 * but cannot be merged with any of them. 1696 *1697 1564 */ 1698 btree_insert(&a rea->used_space, page, (void *) count,1565 btree_insert(&a->used_space, page, (void *) count, 1699 1566 leaf); 1700 1567 return 1; … … 1703 1570 uintptr_t left_pg = leaf->key[leaf->keys - 1]; 1704 1571 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 1705 1572 1706 1573 /* 1707 1574 * Investigate the border case in which the right neighbour 1708 1575 * does not exist but the interval fits from the right. 1709 * 1710 */ 1711 1576 */ 1577 1712 1578 if (overlaps(page, count * PAGE_SIZE, left_pg, 1713 1579 left_cnt * PAGE_SIZE)) { … … 1718 1584 * The interval can be added by growing the left 1719 1585 * interval. 1720 *1721 1586 */ 1722 1587 leaf->value[leaf->keys - 1] += count; … … 1726 1591 * The interval doesn't adjoin with the left interval. 1727 1592 * It must be added individually. 1728 *1729 1593 */ 1730 btree_insert(&a rea->used_space, page, (void *) count,1594 btree_insert(&a->used_space, page, (void *) count, 1731 1595 leaf); 1732 1596 return 1; … … 1738 1602 * only between two other intervals of the leaf. The two border cases 1739 1603 * were already resolved. 1740 * 1741 */ 1742 btree_key_t i; 1604 */ 1743 1605 for (i = 1; i < leaf->keys; i++) { 1744 1606 if (page < leaf->key[i]) { … … 1747 1609 size_t left_cnt = (size_t) leaf->value[i - 1]; 1748 1610 size_t right_cnt = (size_t) leaf->value[i]; 1749 1611 1750 1612 /* 1751 1613 * The interval fits between left_pg and right_pg. 1752 *1753 1614 */ 1754 1615 1755 1616 if (overlaps(page, count * PAGE_SIZE, left_pg, 1756 1617 left_cnt * PAGE_SIZE)) { … … 1758 1619 * The interval intersects with the left 1759 1620 * interval. 1760 *1761 1621 */ 1762 1622 return 0; … … 1766 1626 * The interval intersects with the right 1767 1627 * interval. 1768 *1769 1628 */ 1770 return 0; 1629 return 0; 1771 1630 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1772 1631 (page + count * PAGE_SIZE == right_pg)) { … … 1774 1633 * The interval can be added by merging the two 1775 1634 * already present intervals. 1776 *1777 1635 */ 1778 1636 leaf->value[i - 1] += count + right_cnt; 1779 btree_remove(&a rea->used_space, right_pg, leaf);1780 return 1; 1637 btree_remove(&a->used_space, right_pg, leaf); 1638 return 1; 1781 1639 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1782 1640 /* 1783 1641 * The interval can be added by simply growing 1784 1642 * the left interval. 1785 *1786 1643 */ 1787 1644 leaf->value[i - 1] += count; … … 1789 1646 } else if (page + count * PAGE_SIZE == right_pg) { 1790 1647 /* 1791 1648 * The interval can be addded by simply moving 1792 1649 * base of the right interval down and 1793 1650 * increasing its size accordingly. 1794 * 1795 */ 1651 */ 1796 1652 leaf->value[i] += count; 1797 1653 leaf->key[i] = page; … … 1802 1658 * intervals, but cannot be merged with any of 1803 1659 * them. 1804 *1805 1660 */ 1806 btree_insert(&a rea->used_space, page,1661 btree_insert(&a->used_space, page, 1807 1662 (void *) count, leaf); 1808 1663 return 1; … … 1810 1665 } 1811 1666 } 1812 1813 panic("Inconsistency detected while adding % zupages of used "1814 "space at %p.", count, (void *)page);1667 1668 panic("Inconsistency detected while adding %" PRIs " pages of used " 1669 "space at %p.", count, page); 1815 1670 } 1816 1671 … … 1819 1674 * The address space area must be already locked. 
1820 1675  *
1821  * @param area Address space area.
1822  * @param page First page to be marked.
1823  * @param count Number of page to be marked.
1824  *
1825  * @return Zero on failure and non-zero on success.
1826  *
1827  */
1828  int used_space_remove(as_area_t *area, uintptr_t page, size_t count)
1829  {
1830  ASSERT(mutex_locked(&area->lock));
1676  * @param a Address space area.
1677  * @param page First page to be marked.
1678  * @param count Number of page to be marked.
1679  *
1680  * @return Zero on failure and non-zero on success.
1681  */
1682  int used_space_remove(as_area_t *a, uintptr_t page, size_t count)
1683  {
1684  btree_node_t *leaf, *node;
1685  size_t pages;
1686  unsigned int i;
1687
1831 1688  ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1832 1689  ASSERT(count);
1833
1834  btree_node_t *leaf;
1835  size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
1690
1691  pages = (size_t) btree_search(&a->used_space, page, &leaf);
1836 1692  if (pages) {
1837 1693  /*
1838 1694  * We are lucky, page is the beginning of some interval.
1839  *
1840 1695  */
1841 1696  if (count > pages) {
1842 1697  return 0;
1843 1698  } else if (count == pages) {
1844  btree_remove(&area->used_space, page, leaf);
1699  btree_remove(&a->used_space, page, leaf);
1845 1700  return 1;
1846 1701  } else {
… …
1848 1703  * Find the respective interval.
1849 1704  * Decrease its size and relocate its start address.
1850  *
1851 1705  */
1852  btree_key_t i;
1853 1706  for (i = 0; i < leaf->keys; i++) {
1854 1707  if (leaf->key[i] == page) {
… …
1861 1714  }
1862 1715  }
1863
1864  btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf);
1865  if ((node) && (page < leaf->key[0])) {
1716
1717  node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1718  if (node && page < leaf->key[0]) {
1866 1719  uintptr_t left_pg = node->key[node->keys - 1];
1867 1720  size_t left_cnt = (size_t) node->value[node->keys - 1];
1868 1721
1869 1722  if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1870 1723  count * PAGE_SIZE)) {
… …
1876 1729  * removed by updating the size of the bigger
1877 1730  * interval.
1878  *
1879 1731  */
1880 1732  node->value[node->keys - 1] -= count;
… …
1882 1734  } else if (page + count * PAGE_SIZE <
1883 1735  left_pg + left_cnt*PAGE_SIZE) {
1736  size_t new_cnt;
1737
1884 1738  /*
1885 1739  * The interval is contained in the rightmost
… …
1888 1742  * the original interval and also inserting a
1889 1743  * new interval.
1890  *
1891 1744  */
1892  size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
1745  new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
1893 1746  (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1894 1747  node->value[node->keys - 1] -= count + new_cnt;
1895  btree_insert(&area->used_space, page +
1748  btree_insert(&a->used_space, page +
1896 1749  count * PAGE_SIZE, (void *) new_cnt, leaf);
1897 1750  return 1;
… …
1899 1752  }
1900 1753  return 0;
1901  } else if (page < leaf->key[0])
1754  } else if (page < leaf->key[0]) {
1902 1755  return 0;
1756  }
1903 1757
1904 1758  if (page > leaf->key[leaf->keys - 1]) {
1905 1759  uintptr_t left_pg = leaf->key[leaf->keys - 1];
1906 1760  size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
1907 1761
1908 1762  if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1909 1763  count * PAGE_SIZE)) {
1910  if (page + count * PAGE_SIZE ==
1764  if (page + count * PAGE_SIZE ==
1911 1765  left_pg + left_cnt * PAGE_SIZE) {
1912  /*
… …
1914 1768  * interval of the leaf and can be removed by
1915 1769  * updating the size of the bigger interval.
1916  *
1917 1770  */
1918 1771  leaf->value[leaf->keys - 1] -= count;
… …
1920 1773  } else if (page + count * PAGE_SIZE < left_pg +
1921 1774  left_cnt * PAGE_SIZE) {
1775  size_t new_cnt;
1776
1922 1777  /*
1923 1778  * The interval is contained in the rightmost
… …
1926 1781  * original interval and also inserting a new
1927 1782  * interval.
1928  *
1929 1783  */
1930  size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
1784  new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
1931 1785  (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
1932 1786  leaf->value[leaf->keys - 1] -= count + new_cnt;
1933  btree_insert(&area->used_space, page +
1787  btree_insert(&a->used_space, page +
1934 1788  count * PAGE_SIZE, (void *) new_cnt, leaf);
1935 1789  return 1;
… …
1937 1791  }
1938 1792  return 0;
1939  }
1793  }
1940 1794
1941 1795  /*
… …
1943 1797  * Now the interval can be only between intervals of the leaf.
1944 1798  */
1945  btree_key_t i;
1946 1799  for (i = 1; i < leaf->keys - 1; i++) {
1947 1800  if (page < leaf->key[i]) {
1948 1801  uintptr_t left_pg = leaf->key[i - 1];
1949 1802  size_t left_cnt = (size_t) leaf->value[i - 1];
1950 1803
1951 1804  /*
1952 1805  * Now the interval is between intervals corresponding
… …
1962 1815  * be removed by updating the size of
1963 1816  * the bigger interval.
1964  *
1965 1817  */
1966 1818  leaf->value[i - 1] -= count;
… …
1968 1820  } else if (page + count * PAGE_SIZE <
1969 1821  left_pg + left_cnt * PAGE_SIZE) {
1822  size_t new_cnt;
1823
1970 1824  /*
1971 1825  * The interval is contained in the
… …
1975 1829  * also inserting a new interval.
1976 1830  */
1977  size_t new_cnt = ((left_pg +
1831  new_cnt = ((left_pg +
1978 1832  left_cnt * PAGE_SIZE) -
1979 1833  (page + count * PAGE_SIZE)) >>
1980 1834  PAGE_WIDTH;
1981 1835  leaf->value[i - 1] -= count + new_cnt;
1982  btree_insert(&area->used_space, page +
1836  btree_insert(&a->used_space, page +
1983 1837  count * PAGE_SIZE, (void *) new_cnt,
1984 1838  leaf);
… …
1989 1843  }
1990 1844  }
1991 1845
1992 1846  error:
1993  panic("Inconsistency detected while removing %zu pages of used "
1994  "space from %p.", count, (void *) page);
1847  panic("Inconsistency detected while removing %" PRIs " pages of used "
1848  "space from %p.", count, page);
1849  }
1850
1851  /** Remove reference to address space area share info.
1852  *
1853  * If the reference count drops to 0, the sh_info is deallocated.
1854  *
1855  * @param sh_info Pointer to address space area share info.
1856  */
1857  void sh_info_remove_reference(share_info_t *sh_info)
1858  {
1859  bool dealloc = false;
1860
1861  mutex_lock(&sh_info->lock);
1862  ASSERT(sh_info->refcount);
1863  if (--sh_info->refcount == 0) {
1864  dealloc = true;
1865  link_t *cur;
1866
1867  /*
1868  * Now walk carefully the pagemap B+tree and free/remove
1869  * reference from all frames found there.
1870  */
1871  for (cur = sh_info->pagemap.leaf_head.next;
1872  cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
1873  btree_node_t *node;
1874  unsigned int i;
1875
1876  node = list_get_instance(cur, btree_node_t, leaf_link);
1877  for (i = 0; i < node->keys; i++)
1878  frame_free((uintptr_t) node->value[i]);
1879  }
1880
1881  }
1882  mutex_unlock(&sh_info->lock);
1883
1884  if (dealloc) {
1885  btree_destroy(&sh_info->pagemap);
1886  free(sh_info);
1887  }
1995 1888  }
1996 1889
… …
2000 1893
2001 1894  /** Wrapper for as_area_create(). */
2002  unative_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags)
1895  unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
2003 1896  {
2004 1897  if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
… …
2010 1903
2011 1904  /** Wrapper for as_area_resize(). */
2012  unative_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
1905  unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
2013 1906  {
2014 1907  return (unative_t) as_area_resize(AS, address, size, 0);
… …
2016 1909
2017 1910  /** Wrapper for as_area_change_flags(). */
2018  unative_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
1911  unative_t sys_as_area_change_flags(uintptr_t address, int flags)
2019 1912  {
2020 1913  return (unative_t) as_area_change_flags(AS, flags, address);
… …
2029 1922  /** Get list of adress space areas.
2030 1923  *
2031  * @param as Address space.
2032  * @param obuf Place to save pointer to returned buffer.
2033  * @param osize Place to save size of returned buffer.
2034  *
1924  * @param as Address space.
1925  * @param obuf Place to save pointer to returned buffer.
1926  * @param osize Place to save size of returned buffer.
2035 1927  */
2036 1928  void as_get_area_info(as_t *as, as_area_info_t **obuf, size_t *osize)
2037 1929  {
1930  ipl_t ipl;
1931  size_t area_cnt, area_idx, i;
1932  link_t *cur;
1933
1934  as_area_info_t *info;
1935  size_t isize;
1936
1937  ipl = interrupts_disable();
2038 1938  mutex_lock(&as->lock);
2039 1939
2040 1940  /* First pass, count number of areas. */
2041
2042  size_t area_cnt = 0;
2043  link_t *cur;
2044
1941
1942  area_cnt = 0;
1943
2045 1944  for (cur = as->as_area_btree.leaf_head.next;
2046 1945  cur != &as->as_area_btree.leaf_head; cur = cur->next) {
2047  btree_node_t *node =
2048  list_get_instance(cur, btree_node_t, leaf_link);
1946  btree_node_t *node;
1947
1948  node = list_get_instance(cur, btree_node_t, leaf_link);
2049 1949  area_cnt += node->keys;
2050 1950  }
2051
2052  size_t isize = area_cnt * sizeof(as_area_info_t);
2053  as_area_info_t *info = malloc(isize, 0);
2054
1951
1952  isize = area_cnt * sizeof(as_area_info_t);
1953  info = malloc(isize, 0);
1954
2055 1955  /* Second pass, record data. */
2056
2057  size_t area_idx = 0;
2058
1956
1957  area_idx = 0;
1958
2059 1959  for (cur = as->as_area_btree.leaf_head.next;
2060 1960  cur != &as->as_area_btree.leaf_head; cur = cur->next) {
2061  btree_node_t *node =
2062  list_get_instance(cur, btree_node_t, leaf_link);
2063  btree_key_t i;
2064
1961  btree_node_t *node;
1962
1963  node = list_get_instance(cur, btree_node_t, leaf_link);
1964
2065 1965  for (i = 0; i < node->keys; i++) {
2066 1966  as_area_t *area = node->value[i];
2067 1967
2068 1968  ASSERT(area_idx < area_cnt);
2069 1969  mutex_lock(&area->lock);
2070 1970
2071 1971  info[area_idx].start_addr = area->base;
2072 1972  info[area_idx].size = FRAMES2SIZE(area->pages);
2073 1973  info[area_idx].flags = area->flags;
2074 1974  ++area_idx;
2075 1975
2076 1976  mutex_unlock(&area->lock);
2077 1977  }
2078 1978  }
2079 1979
2080 1980  mutex_unlock(&as->lock);
2081
1981  interrupts_restore(ipl);
1982
2082 1983  *obuf = info;
2083 1984  *osize = isize;
2084 1985  }
2085 1986
1987
2086 1988  /** Print out information about address space.
2087 1989  *
2088 1990  * @param as Address space.
2089  *
2090 1991  */
2091 1992  void as_print(as_t *as)
2092 1993  {
1994  ipl_t ipl;
1995
1996  ipl = interrupts_disable();
2093 1997  mutex_lock(&as->lock);
… …
2097 2001  for (cur = as->as_area_btree.leaf_head.next;
2098 2002  cur != &as->as_area_btree.leaf_head; cur = cur->next) {
2099  btree_node_t *node
2100  = list_get_instance(cur, btree_node_t, leaf_link);
2101  btree_key_t i;
2003  btree_node_t *node;
2102 2004
2005  node = list_get_instance(cur, btree_node_t, leaf_link);
2006
2007  unsigned int i;
2103 2008  for (i = 0; i < node->keys; i++) {
2104 2009  as_area_t *area = node->value[i];
2105 2010
2106 2011  mutex_lock(&area->lock);
2107  printf("as_area: %p, base=%p, pages=%zu"
2108  " (%p - %p)\n", area, (void *) area->base,
2109  area->pages, (void *) area->base,
2110  (void *) (area->base + FRAMES2SIZE(area->pages)));
2012  printf("as_area: %p, base=%p, pages=%" PRIs
2013  " (%p - %p)\n", area, area->base, area->pages,
2014  area->base, area->base + FRAMES2SIZE(area->pages));
2111 2015  mutex_unlock(&area->lock);
2112 2016  }
… …
2114 2018
2115 2019  mutex_unlock(&as->lock);
2020  interrupts_restore(ipl);
2116 2021  }
2117 2022
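The used_space_insert() hunks above all come down to the same interval bookkeeping: a freshly mapped run of pages is either merged into the interval on its left, pulled into the interval on its right, used to bridge the two, or recorded as a new key, and any overlap is reported as failure. The following stand-alone sketch is not HelenOS code: it models the used-space map as a small sorted array instead of the kernel's B+tree, and all names (interval_t, model_insert) are invented for illustration.

#include <stdio.h>

/* Illustrative model only: intervals of used pages kept sorted by start. */
typedef struct {
	size_t start;  /* first page of the interval */
	size_t count;  /* number of pages in the interval */
} interval_t;

#define MAX_INTERVALS 32
static interval_t set[MAX_INTERVALS];
static size_t nset;

/* Mark [page, page + count) as used; returns 1 on success, 0 on overlap,
 * mirroring the cases handled by used_space_insert(). */
static int model_insert(size_t page, size_t count)
{
	size_t i = 0;

	if (nset == MAX_INTERVALS)
		return 0;
	while (i < nset && set[i].start < page)
		i++;

	/* Any overlap with the left or right neighbour is an error. */
	if (i > 0 && set[i - 1].start + set[i - 1].count > page)
		return 0;
	if (i < nset && page + count > set[i].start)
		return 0;

	int joins_left = (i > 0) && (set[i - 1].start + set[i - 1].count == page);
	int joins_right = (i < nset) && (page + count == set[i].start);

	if (joins_left && joins_right) {
		/* Bridges both neighbours: merge them into one interval. */
		set[i - 1].count += count + set[i].count;
		for (size_t j = i; j + 1 < nset; j++)
			set[j] = set[j + 1];
		nset--;
	} else if (joins_left) {
		/* Grow the left interval. */
		set[i - 1].count += count;
	} else if (joins_right) {
		/* Move the base of the right interval down. */
		set[i].start = page;
		set[i].count += count;
	} else {
		/* Stand-alone interval: insert a new entry. */
		for (size_t j = nset; j > i; j--)
			set[j] = set[j - 1];
		set[i] = (interval_t) { page, count };
		nset++;
	}
	return 1;
}

int main(void)
{
	model_insert(10, 4);                            /* [10, 14) */
	model_insert(20, 2);                            /* [20, 22) */
	printf("bridge: %d\n", model_insert(14, 6));    /* merges both: [10, 22) */
	printf("overlap: %d\n", model_insert(12, 1));   /* rejected */
	for (size_t i = 0; i < nset; i++)
		printf("[%zu, %zu)\n", set[i].start, set[i].start + set[i].count);
	return 0;
}

Built as an ordinary user-space program it prints bridge: 1, overlap: 0 and a single remaining interval [10, 22), which is exactly the merge behaviour the kernel routine implements on B+tree leaves.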
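used_space_remove() is the mirror image: a removed run either trims an existing interval at one end or punches a hole in the middle, in which case the right-hand remainder is re-inserted as a new interval (the new_cnt computation in the hunks above). A self-contained toy version of just that decision, again with hypothetical names (span_t, span_remove) rather than the kernel API:

#include <stdio.h>

/* One interval of used pages. */
typedef struct { size_t start, count; } span_t;

/* Remove [page, page + count) from *iv; *split receives a new right-hand
 * span when the cut leaves a hole in the middle (split->count == 0 otherwise).
 * Returns 1 on success, 0 if the range is not fully covered by *iv. */
static int span_remove(span_t *iv, size_t page, size_t count, span_t *split)
{
	size_t end = page + count;
	size_t iv_end = iv->start + iv->count;

	split->count = 0;
	if (page < iv->start || end > iv_end)
		return 0;                          /* not fully covered: fail */

	if (page == iv->start) {
		iv->start = end;                   /* trim from the left (or all) */
		iv->count = iv_end - end;
	} else if (end == iv_end) {
		iv->count = page - iv->start;      /* trim from the right */
	} else {
		iv->count = page - iv->start;      /* keep the left part ... */
		split->start = end;                /* ... and emit the remainder, */
		split->count = iv_end - end;       /* the kernel's new_cnt case */
	}
	return 1;
}

int main(void)
{
	span_t iv = { 10, 12 }, rest;              /* pages [10, 22) in use */
	span_remove(&iv, 14, 3, &rest);            /* punch a hole at [14, 17) */
	printf("[%zu,%zu) and [%zu,%zu)\n", iv.start, iv.start + iv.count,
	    rest.start, rest.start + rest.count);
	return 0;
}

The middle case is the interesting one: the original interval keeps only its left part, and the remainder to the right of the hole becomes a brand-new entry, just as the kernel re-inserts new_cnt pages at page + count * PAGE_SIZE.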
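sh_info_remove_reference() in the right-hand revision also shows a pattern worth noting: the reference count is dropped while holding sh_info->lock, but btree_destroy()/free() happen only after the mutex has been released, because the lock being held lives inside the object about to be freed. A minimal user-space sketch of the same idea, using POSIX threads and an invented refcounted_t type (not kernel code):

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

typedef struct {
	pthread_mutex_t lock;
	unsigned refcount;
	/* ... shared payload would live here ... */
} refcounted_t;

/* Drop one reference; free the object only after its own lock is released. */
static void put(refcounted_t *obj)
{
	bool dealloc = false;

	pthread_mutex_lock(&obj->lock);
	if (--obj->refcount == 0)
		dealloc = true;          /* remember the decision, do not free yet */
	pthread_mutex_unlock(&obj->lock);

	if (dealloc) {
		/* No other reference can exist any more, so this is race-free. */
		pthread_mutex_destroy(&obj->lock);
		free(obj);
	}
}

int main(void)
{
	refcounted_t *obj = malloc(sizeof(*obj));
	pthread_mutex_init(&obj->lock, NULL);
	obj->refcount = 1;
	put(obj);                        /* last reference: object is freed */
	return 0;
}

HelenOS's mutex_lock()/mutex_unlock() play the role of the pthread calls here; the point is only the ordering: decide under the lock, deallocate after it.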