Changeset 04803bf in mainline for kernel/generic/src/mm/as.c
- Timestamp: 2011-03-21T22:00:17Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 143932e3
- Parents: b50b5af2 (diff), 7308e84 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Files: 1 edited
- kernel/generic/src/mm/as.c (modified) (84 diffs)
Legend:
- Unmodified lines appear as plain context
- Added lines are prefixed with +
- Removed lines are prefixed with -
kernel/generic/src/mm/as.c
--- kernel/generic/src/mm/as.c (parent rb50b5af2)
+++ kernel/generic/src/mm/as.c (r04803bf)

@@ -1 +1 @@
 /*
- * Copyright (c) 2001-2006 Jakub Jermar
+ * Copyright (c) 2010 Jakub Jermar
  * All rights reserved.
  *
@@ -33 +33 @@
 /**
  * @file
- * @brief Address space related functions.
+ * @brief Address space related functions.
  *
  * This file contains address space manipulation functions.
@@ -71 +71 @@
 #include <memstr.h>
 #include <macros.h>
+#include <bitops.h>
 #include <arch.h>
 #include <errno.h>
 #include <config.h>
 #include <align.h>
-#include <arch/types.h>
+#include <typedefs.h>
 #include <syscall/copy.h>
 #include <arch/interrupt.h>
@@ -89 +90 @@
 as_operations_t *as_operations = NULL;
 
-/**
- * Slab for as_t objects.
+/** Slab for as_t objects.
+ *
  */
 static slab_cache_t *as_slab;
 
-/**
- * This lock serializes access to the ASID subsystem.
- * It protects:
+/** ASID subsystem lock.
+ *
+ * This lock protects:
  * - inactive_as_with_asid_head list
  * - as->asid for each as of the as_t type
  * - asids_allocated counter
+ *
  */
 SPINLOCK_INITIALIZE(asidlock);
 
 /**
- * This list contains address spaces that are not active on any
- * processor and that have valid ASID.
+ * Inactive address spaces (on all processors)
+ * that have valid ASID.
  */
 LIST_INITIALIZE(inactive_as_with_asid_head);
@@ -112 +114 @@
 as_t *AS_KERNEL = NULL;
 
-static int area_flags_to_page_flags(int);
-static as_area_t *find_area_and_lock(as_t *, uintptr_t);
-static bool check_area_conflicts(as_t *, uintptr_t, size_t, as_area_t *);
-static void sh_info_remove_reference(share_info_t *);
-
-static int as_constructor(void *obj, int flags)
+NO_TRACE static int as_constructor(void *obj, unsigned int flags)
 {
     as_t *as = (as_t *) obj;
-    int rc;
     
     link_initialize(&as->inactive_as_with_asid_link);
     mutex_initialize(&as->lock, MUTEX_PASSIVE);
     
-    rc = as_constructor_arch(as, flags);
-    
-    return rc;
-}
-
-static int as_destructor(void *obj)
-{
-    as_t *as = (as_t *) obj;
-    
-    return as_destructor_arch(as);
+    return as_constructor_arch(as, flags);
+}
+
+NO_TRACE static size_t as_destructor(void *obj)
+{
+    return as_destructor_arch((as_t *) obj);
 }
@@ -141 +133 @@
 {
     as_arch_init();
     
     as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
         as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
@@ -149 +141 @@
         panic("Cannot create kernel address space.");
     
-    /* Make sure the kernel address space
+    /*
+     * Make sure the kernel address space
      * reference count never drops to zero.
      */
-    atomic_set(&AS_KERNEL->refcount, 1);
+    as_hold(AS_KERNEL);
 }
 
 /** Create address space.
  *
- * @param flags Flags that influence the way in wich the address space
- *              is created.
- */
-as_t *as_create(int flags)
-{
-    as_t *as;
-    
-    as = (as_t *) slab_alloc(as_slab, 0);
+ * @param flags Flags that influence the way in wich the address
+ *              space is created.
+ *
+ */
+as_t *as_create(unsigned int flags)
+{
+    as_t *as = (as_t *) slab_alloc(as_slab, 0);
     (void) as_create_arch(as, 0);
@@ -176 +168 @@
     atomic_set(&as->refcount, 0);
     as->cpu_refcount = 0;
+    
 #ifdef AS_PAGE_TABLE
     as->genarch.page_table = page_table_create(flags);
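The hunks above register as_constructor() and as_destructor() with the slab cache, so every as_t handed out by slab_alloc() is already initialized. A minimal userspace sketch of that callback pattern, with a hypothetical mini_cache_t standing in for the kernel's slab cache (illustrative, not the HelenOS API):

#include <stdlib.h>

typedef struct {
    size_t obj_size;
    int (*ctor)(void *obj, unsigned int flags);   /* like as_constructor() */
    size_t (*dtor)(void *obj);                    /* like as_destructor() */
} mini_cache_t;

static void *mini_alloc(mini_cache_t *cache, unsigned int flags)
{
    void *obj = malloc(cache->obj_size);

    /* Run the registered constructor once per fresh object. */
    if ((obj) && (cache->ctor) && (cache->ctor(obj, flags) != 0)) {
        free(obj);
        return NULL;
    }

    return obj;
}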
@@ -192 +185 @@
  * We know that we don't hold any spinlock.
  *
- * @param as Address space to be destroyed.
+ * @param as Address space to be destroyed.
+ *
  */
 void as_destroy(as_t *as)
 {
-    ipl_t ipl;
-    bool cond;
     DEADLOCK_PROBE_INIT(p_asidlock);
     
+    ASSERT(as != AS);
     ASSERT(atomic_get(&as->refcount) == 0);
     
     /*
-     * Since there is no reference to this area,
-     * it is safe not to lock its mutex.
-     */
+     * Since there is no reference to this address space, it is safe not to
+     * lock its mutex.
+     */
+    
     /*
      * We need to avoid deadlock between TLB shootdown and asidlock.
@@ -215 +208 @@
      */
     preemption_disable();
-    ipl = interrupts_read();
+    ipl_t ipl = interrupts_read();
+    
 retry:
     interrupts_disable();
@@ -223 +217 @@
         goto retry;
     }
-    preemption_enable();    /* Interrupts disabled, enable preemption */
-    if (as->asid != ASID_INVALID && as != AS_KERNEL) {
-        if (as != AS && as->cpu_refcount == 0)
+    
+    /* Interrupts disabled, enable preemption */
+    preemption_enable();
+    
+    if ((as->asid != ASID_INVALID) && (as != AS_KERNEL)) {
+        if (as->cpu_refcount == 0)
             list_remove(&as->inactive_as_with_asid_link);
+        
         asid_put(as->asid);
     }
+    
     spinlock_unlock(&asidlock);
+    interrupts_restore(ipl);
     
     /*
      * Destroy address space areas of the address space.
      * The B+tree must be walked carefully because it is
      * also being destroyed.
-     */
-    for (cond = true; cond; ) {
-        btree_node_t *node;
-        
+     */
+    bool cond = true;
+    while (cond) {
         ASSERT(!list_empty(&as->as_area_btree.leaf_head));
-        node = list_get_instance(as->as_area_btree.leaf_head.next,
+        
+        btree_node_t *node =
+            list_get_instance(as->as_area_btree.leaf_head.next,
             btree_node_t, leaf_link);
-        
-        if ((cond = node->keys)) {
+        
+        if ((cond = node->keys))
             as_area_destroy(as, node->key[0]);
-        }
     }
     
     btree_destroy(&as->as_area_btree);
+    
 #ifdef AS_PAGE_TABLE
     page_table_destroy(as->genarch.page_table);
@@ -254 +256 @@
     page_table_destroy(NULL);
 #endif
-    
-    interrupts_restore(ipl);
     
     slab_free(as_slab, as);
 }
 
+/** Hold a reference to an address space.
+ *
+ * Holding a reference to an address space prevents destruction
+ * of that address space.
+ *
+ * @param as Address space to be held.
+ *
+ */
+NO_TRACE void as_hold(as_t *as)
+{
+    atomic_inc(&as->refcount);
+}
+
+/** Release a reference to an address space.
+ *
+ * The last one to release a reference to an address space
+ * destroys the address space.
+ *
+ * @param as Address space to be released.
+ *
+ */
+NO_TRACE void as_release(as_t *as)
+{
+    if (atomic_predec(&as->refcount) == 0)
+        as_destroy(as);
+}
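as_hold() and as_release(), added above, make address space lifetime explicit: the kernel's atomic_predec() returns the new counter value, and the releaser that sees zero destroys the object. A sketch of the same protocol with C11 atomics (atomic_fetch_sub() returns the old value, hence the comparison with 1); names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
    atomic_size_t refcount;
} object_t;

static void object_hold(object_t *obj)
{
    atomic_fetch_add(&obj->refcount, 1);
}

static bool object_release(object_t *obj)
{
    /* Old value 1 means we just dropped the count to 0:
     * we are the last holder and must destroy the object. */
    return atomic_fetch_sub(&obj->refcount, 1) == 1;
}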
+/** Check area conflicts with other areas.
+ *
+ * @param as    Address space.
+ * @param addr  Starting virtual address of the area being tested.
+ * @param count Number of pages in the area being tested.
+ * @param avoid Do not touch this area.
+ *
+ * @return True if there is no conflict, false otherwise.
+ *
+ */
+NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr,
+    size_t count, as_area_t *avoid)
+{
+    ASSERT((addr % PAGE_SIZE) == 0);
+    ASSERT(mutex_locked(&as->lock));
+    
+    /*
+     * We don't want any area to have conflicts with NULL page.
+     */
+    if (overlaps(addr, count << PAGE_WIDTH, (uintptr_t) NULL, PAGE_SIZE))
+        return false;
+    
+    /*
+     * The leaf node is found in O(log n), where n is proportional to
+     * the number of address space areas belonging to as.
+     * The check for conflicts is then attempted on the rightmost
+     * record in the left neighbour, the leftmost record in the right
+     * neighbour and all records in the leaf node itself.
+     */
+    btree_node_t *leaf;
+    as_area_t *area =
+        (as_area_t *) btree_search(&as->as_area_btree, addr, &leaf);
+    if (area) {
+        if (area != avoid)
+            return false;
+    }
+    
+    /* First, check the two border cases. */
+    btree_node_t *node =
+        btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
+    if (node) {
+        area = (as_area_t *) node->value[node->keys - 1];
+        
+        if (area != avoid) {
+            mutex_lock(&area->lock);
+            
+            if (overlaps(addr, count << PAGE_WIDTH,
+                area->base, area->pages << PAGE_WIDTH)) {
+                mutex_unlock(&area->lock);
+                return false;
+            }
+            
+            mutex_unlock(&area->lock);
+        }
+    }
+    
+    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
+    if (node) {
+        area = (as_area_t *) node->value[0];
+        
+        if (area != avoid) {
+            mutex_lock(&area->lock);
+            
+            if (overlaps(addr, count << PAGE_WIDTH,
+                area->base, area->pages << PAGE_WIDTH)) {
+                mutex_unlock(&area->lock);
+                return false;
+            }
+            
+            mutex_unlock(&area->lock);
+        }
+    }
+    
+    /* Second, check the leaf node. */
+    btree_key_t i;
+    for (i = 0; i < leaf->keys; i++) {
+        area = (as_area_t *) leaf->value[i];
+        
+        if (area == avoid)
+            continue;
+        
+        mutex_lock(&area->lock);
+        
+        if (overlaps(addr, count << PAGE_WIDTH,
+            area->base, area->pages << PAGE_WIDTH)) {
+            mutex_unlock(&area->lock);
+            return false;
+        }
+        
+        mutex_unlock(&area->lock);
+    }
+    
+    /*
+     * So far, the area does not conflict with other areas.
+     * Check if it doesn't conflict with kernel address space.
+     */
+    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
+        return !overlaps(addr, count << PAGE_WIDTH,
+            KERNEL_ADDRESS_SPACE_START,
+            KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
+    }
+    
+    return true;
+}
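The new check_area_conflicts() takes a page count instead of a byte size and leans on overlaps() throughout. A self-contained sketch of the overlap predicate with the semantics the code above assumes (half-open intervals), including the NULL-page guard; PAGE_SIZE and the predicate body are assumptions, not quoted from the kernel headers:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096  /* assumed 4 KiB pages */

/* True iff [s1, s1 + sz1) and [s2, s2 + sz2) intersect. */
static bool overlaps(uintptr_t s1, size_t sz1, uintptr_t s2, size_t sz2)
{
    return (s1 < s2 + sz2) && (s2 < s1 + sz1);
}

/* The first test in check_area_conflicts(): no area may cover page zero. */
static bool conflicts_with_null_page(uintptr_t addr, size_t bytes)
{
    return overlaps(addr, bytes, (uintptr_t) 0, PAGE_SIZE);
}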
@@ -262 +394 @@
 /** Create address space area of common attributes.
  *
  * The created address space area is added to the target address space.
  *
- * @param as           Target address space.
- * @param flags        Flags of the area memory.
- * @param size         Size of area.
- * @param base         Base address of area.
- * @param attrs        Attributes of the area.
- * @param backend      Address space area backend. NULL if no backend is used.
- * @param backend_data NULL or a pointer to an array holding two void *.
- *
- * @return Address space area on success or NULL on failure.
- */
-as_area_t *
-as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
-    mem_backend_t *backend, mem_backend_data_t *backend_data)
-{
-    ipl_t ipl;
-    as_area_t *a;
-    
-    if (base % PAGE_SIZE)
+ * @param as           Target address space.
+ * @param flags        Flags of the area memory.
+ * @param size         Size of area.
+ * @param base         Base address of area.
+ * @param attrs        Attributes of the area.
+ * @param backend      Address space area backend. NULL if no backend is used.
+ * @param backend_data NULL or a pointer to an array holding two void *.
+ *
+ * @return Address space area on success or NULL on failure.
+ *
+ */
+as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size,
+    uintptr_t base, unsigned int attrs, mem_backend_t *backend,
+    mem_backend_data_t *backend_data)
+{
+    if ((base % PAGE_SIZE) != 0)
         return NULL;
-    
-    if (!size)
+    
+    if (size == 0)
         return NULL;
     
+    size_t pages = SIZE2FRAMES(size);
+    
     /* Writeable executable areas are not supported. */
     if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
         return NULL;
     
-    ipl = interrupts_disable();
     mutex_lock(&as->lock);
     
-    if (!check_area_conflicts(as, base, size, NULL)) {
+    if (!check_area_conflicts(as, base, pages, NULL)) {
         mutex_unlock(&as->lock);
-        interrupts_restore(ipl);
         return NULL;
     }
     
-    a = (as_area_t *) malloc(sizeof(as_area_t), 0);
-    
-    mutex_initialize(&a->lock, MUTEX_PASSIVE);
-    
-    a->as = as;
-    a->flags = flags;
-    a->attributes = attrs;
-    a->pages = SIZE2FRAMES(size);
-    a->base = base;
-    a->sh_info = NULL;
-    a->backend = backend;
+    as_area_t *area = (as_area_t *) malloc(sizeof(as_area_t), 0);
+    
+    mutex_initialize(&area->lock, MUTEX_PASSIVE);
+    
+    area->as = as;
+    area->flags = flags;
+    area->attributes = attrs;
+    area->pages = pages;
+    area->resident = 0;
+    area->base = base;
+    area->sh_info = NULL;
+    area->backend = backend;
+    
     if (backend_data)
-        a->backend_data = *backend_data;
+        area->backend_data = *backend_data;
     else
-        memsetb(&a->backend_data, sizeof(a->backend_data), 0);
-    
-    btree_create(&a->used_space);
-    
-    btree_insert(&as->as_area_btree, base, (void *) a, NULL);
-    
+        memsetb(&area->backend_data, sizeof(area->backend_data), 0);
+    
+    btree_create(&area->used_space);
+    btree_insert(&as->as_area_btree, base, (void *) area, NULL);
+    
     mutex_unlock(&as->lock);
-    interrupts_restore(ipl);
-    
-    return a;
+    
+    return area;
 }
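as_area_create() now computes the page count up front with SIZE2FRAMES() and passes pages, not bytes, to check_area_conflicts(). SIZE2FRAMES() is a round-up division by the page size; a sketch of its assumed semantics:

#include <assert.h>
#include <stddef.h>

#define PAGE_WIDTH 12               /* assumed: 4 KiB pages */
#define PAGE_SIZE  (1 << PAGE_WIDTH)

/* Assumed semantics: smallest number of whole pages covering size bytes. */
static size_t size2frames(size_t size)
{
    if (size == 0)
        return 0;
    return ((size - 1) >> PAGE_WIDTH) + 1;
}

int main(void)
{
    assert(size2frames(1) == 1);
    assert(size2frames(PAGE_SIZE) == 1);
    assert(size2frames(PAGE_SIZE + 1) == 2);
    return 0;
}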
+/** Find address space area and lock it.
+ *
+ * @param as Address space.
+ * @param va Virtual address.
+ *
+ * @return Locked address space area containing va on success or
+ *         NULL on failure.
+ *
+ */
+NO_TRACE static as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
+{
+    ASSERT(mutex_locked(&as->lock));
+    
+    btree_node_t *leaf;
+    as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
+    if (area) {
+        /* va is the base address of an address space area */
+        mutex_lock(&area->lock);
+        return area;
+    }
+    
+    /*
+     * Search the leaf node and the righmost record of its left neighbour
+     * to find out whether this is a miss or va belongs to an address
+     * space area found there.
+     */
+    
+    /* First, search the leaf node itself. */
+    btree_key_t i;
+    
+    for (i = 0; i < leaf->keys; i++) {
+        area = (as_area_t *) leaf->value[i];
+        
+        mutex_lock(&area->lock);
+        
+        if ((area->base <= va) &&
+            (va < area->base + (area->pages << PAGE_WIDTH)))
+            return area;
+        
+        mutex_unlock(&area->lock);
+    }
+    
+    /*
+     * Second, locate the left neighbour and test its last record.
+     * Because of its position in the B+tree, it must have base < va.
+     */
+    btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
+    if (lnode) {
+        area = (as_area_t *) lnode->value[lnode->keys - 1];
+        
+        mutex_lock(&area->lock);
+        
+        if (va < area->base + (area->pages << PAGE_WIDTH))
+            return area;
+        
+        mutex_unlock(&area->lock);
+    }
+    
+    return NULL;
+}
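The containment test in find_area_and_lock() is base <= va < base + (pages << PAGE_WIDTH). The same test over a plain sorted array instead of a B+tree leaf, as a sketch (linear scan; the kernel gets the O(log n) part from btree_search()):

#include <stddef.h>
#include <stdint.h>

#define PAGE_WIDTH 12  /* assumed */

typedef struct {
    uintptr_t base;
    size_t pages;
} mini_area_t;

static mini_area_t *find_area(mini_area_t *areas, size_t count, uintptr_t va)
{
    for (size_t i = 0; i < count; i++) {
        if ((areas[i].base <= va) &&
            (va < areas[i].base + (areas[i].pages << PAGE_WIDTH)))
            return &areas[i];
    }

    return NULL;
}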
@@ -328 +519 @@
 /** Find address space area and change it.
  *
- * @param as      Address space.
- * @param address Virtual address belonging to the area to be changed.
- *                Must be page-aligned.
- * @param size    New size of the virtual memory block starting at
- *                address.
- * @param flags   Flags influencing the remap operation. Currently unused.
- *
- * @return Zero on success or a value from @ref errno.h otherwise.
- */
-int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
-{
-    as_area_t *area;
-    ipl_t ipl;
-    size_t pages;
-    
-    ipl = interrupts_disable();
+ * @param as      Address space.
+ * @param address Virtual address belonging to the area to be changed.
+ *                Must be page-aligned.
+ * @param size    New size of the virtual memory block starting at
+ *                address.
+ * @param flags   Flags influencing the remap operation. Currently unused.
+ *
+ * @return Zero on success or a value from @ref errno.h otherwise.
+ *
+ */
+int as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags)
+{
     mutex_lock(&as->lock);
     
     /*
      * Locate the area.
      */
-    area = find_area_and_lock(as, address);
+    as_area_t *area = find_area_and_lock(as, address);
     if (!area) {
         mutex_unlock(&as->lock);
-        interrupts_restore(ipl);
         return ENOENT;
     }
     
     if (area->backend == &phys_backend) {
         /*
@@ -363 +549 @@
         mutex_unlock(&area->lock);
         mutex_unlock(&as->lock);
-        interrupts_restore(ipl);
         return ENOTSUP;
     }
+    
     if (area->sh_info) {
         /*
-         * Remapping of shared address space areas 
+         * Remapping of shared address space areas
          * is not supported.
          */
         mutex_unlock(&area->lock);
         mutex_unlock(&as->lock);
-        interrupts_restore(ipl);
         return ENOTSUP;
     }
-    
-    pages = SIZE2FRAMES((address - area->base) + size);
+    
+    size_t pages = SIZE2FRAMES((address - area->base) + size);
     if (!pages) {
         /*
@@ -384 +569 @@
         mutex_unlock(&area->lock);
         mutex_unlock(&as->lock);
-        interrupts_restore(ipl);
         return EPERM;
     }
     
     if (pages < area->pages) {
-        bool cond;
-        uintptr_t start_free = area->base + pages * PAGE_SIZE;
-        
+        uintptr_t start_free = area->base + (pages << PAGE_WIDTH);
+        
         /*
          * Shrinking the area.
          * No need to check for overlaps.
          */
         
+        page_table_lock(as, false);
+        
         /*
          * Start TLB shootdown sequence.
          */
-        tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base +
-            pages * PAGE_SIZE, area->pages - pages);
-        
+        ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid,
+            area->base + (pages << PAGE_WIDTH), area->pages - pages);
+        
         /*
          * Remove frames belonging to used space starting from
@@ -409 +594 @@
          * is also the right way to remove part of the used_space
          * B+tree leaf list.
-         */
-        for (cond = true; cond;) {
-            btree_node_t *node;
-            
+         */
+        bool cond = true;
+        while (cond) {
             ASSERT(!list_empty(&area->used_space.leaf_head));
-            node =
+            
+            btree_node_t *node =
                 list_get_instance(area->used_space.leaf_head.prev,
                 btree_node_t, leaf_link);
+            
             if ((cond = (bool) node->keys)) {
-                uintptr_t b = node->key[node->keys - 1];
-                size_t c =
+                uintptr_t ptr = node->key[node->keys - 1];
+                size_t size =
                     (size_t) node->value[node->keys - 1];
-                unsigned int i = 0;
-                
-                if (overlaps(b, c * PAGE_SIZE, area->base,
-                    pages * PAGE_SIZE)) {
+                size_t i = 0;
+                
+                if (overlaps(ptr, size << PAGE_WIDTH, area->base,
+                    pages << PAGE_WIDTH)) {
                     
-                    if (b + c * PAGE_SIZE <= start_free) {
+                    if (ptr + (size << PAGE_WIDTH) <= start_free) {
                         /*
                          * The whole interval fits
@@ -434 +620 @@
                         break;
                     }
                     
                     /*
                      * Part of the interval corresponding
@@ -440 +626 @@
                      * address space area.
                      */
-                    
-                    cond = false;    /* we are almost done */
-                    i = (start_free - b) >> PAGE_WIDTH;
+                    
+                    /* We are almost done */
+                    cond = false;
+                    i = (start_free - ptr) >> PAGE_WIDTH;
                     if (!used_space_remove(area, start_free,
-                        c - i))
-                        panic("Cannot remove used "
-                            "space.");
+                        size - i))
+                        panic("Cannot remove used space.");
                 } else {
                     /*
@@ -452 +638 @@
                      * completely removed.
                      */
-                    if (!used_space_remove(area, b, c))
-                        panic("Cannot remove used "
-                            "space.");
+                    if (!used_space_remove(area, ptr, size))
+                        panic("Cannot remove used space.");
                 }
-                
-                for (; i < c; i++) {
-                    pte_t *pte;
-                    
-                    page_table_lock(as, false);
-                    pte = page_mapping_find(as, b +
-                        i * PAGE_SIZE);
-                    ASSERT(pte && PTE_VALID(pte) &&
-                        PTE_PRESENT(pte));
-                    if (area->backend &&
-                        area->backend->frame_free) {
+                
+                for (; i < size; i++) {
+                    pte_t *pte = page_mapping_find(as, ptr +
+                        (i << PAGE_WIDTH));
+                    
+                    ASSERT(pte);
+                    ASSERT(PTE_VALID(pte));
+                    ASSERT(PTE_PRESENT(pte));
+                    
+                    if ((area->backend) &&
+                        (area->backend->frame_free)) {
                         area->backend->frame_free(area,
-                            b + i * PAGE_SIZE,
+                            ptr + (i << PAGE_WIDTH),
                             PTE_GET_FRAME(pte));
                     }
-                    page_mapping_remove(as, b +
-                        i * PAGE_SIZE);
-                    page_table_unlock(as, false);
+                    
+                    page_mapping_remove(as, ptr +
+                        (i << PAGE_WIDTH));
                 }
             }
         }
         
         /*
          * Finish TLB shootdown sequence.
          */
-        
-        tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
+        
+        tlb_invalidate_pages(as->asid, area->base + (pages << PAGE_WIDTH),
             area->pages - pages);
+        
         /*
          * Invalidate software translation caches (e.g. TSB on sparc64).
          */
         as_invalidate_translation_cache(as, area->base +
-            pages * PAGE_SIZE, area->pages - pages);
-        tlb_shootdown_finalize();
-        
+            (pages << PAGE_WIDTH), area->pages - pages);
+        tlb_shootdown_finalize(ipl);
+        
+        page_table_unlock(as, false);
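A recurring pattern in this changeset: tlb_shootdown_start() now returns the saved interrupt level and tlb_shootdown_finalize() consumes it, so the IPL travels through the call pair instead of a caller-side local. The shape of that bracket, with stub bodies standing in for the real kernel calls (illustrative only):

typedef unsigned int ipl_t;

static ipl_t tlb_shootdown_start(void)
{
    /* Stub: the kernel disables interrupts here, notifies the other
     * CPUs and returns the previous interrupt level. */
    return 0;
}

static void tlb_shootdown_finalize(ipl_t ipl)
{
    /* Stub: the kernel waits for the other CPUs and restores ipl. */
    (void) ipl;
}

static void unmap_some_range(void)
{
    ipl_t ipl = tlb_shootdown_start();
    /* ... remove page mappings, invalidate TLB entries ... */
    tlb_shootdown_finalize(ipl);
}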
     } else {
         /*
          * Growing the area.
          * Check for overlaps with other address space areas.
          */
-        if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
-            area)) {
+        if (!check_area_conflicts(as, address, pages, area)) {
             mutex_unlock(&area->lock);
             mutex_unlock(&as->lock);
-            interrupts_restore(ipl);
             return EADDRNOTAVAIL;
         }
     }
     
     area->pages = pages;
     
     mutex_unlock(&area->lock);
     mutex_unlock(&as->lock);
-    interrupts_restore(ipl);
-    
+    
     return 0;
 }
 
+/** Remove reference to address space area share info.
+ *
+ * If the reference count drops to 0, the sh_info is deallocated.
+ *
+ * @param sh_info Pointer to address space area share info.
+ *
+ */
+NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info)
+{
+    bool dealloc = false;
+    
+    mutex_lock(&sh_info->lock);
+    ASSERT(sh_info->refcount);
+    
+    if (--sh_info->refcount == 0) {
+        dealloc = true;
+        link_t *cur;
+        
+        /*
+         * Now walk carefully the pagemap B+tree and free/remove
+         * reference from all frames found there.
+         */
+        for (cur = sh_info->pagemap.leaf_head.next;
+            cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
+            btree_node_t *node
+                = list_get_instance(cur, btree_node_t, leaf_link);
+            btree_key_t i;
+            
+            for (i = 0; i < node->keys; i++)
+                frame_free((uintptr_t) node->value[i]);
+        }
+    }
+    mutex_unlock(&sh_info->lock);
+    
+    if (dealloc) {
+        btree_destroy(&sh_info->pagemap);
+        free(sh_info);
+    }
+}
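sh_info_remove_reference() decides about deallocation while holding the lock but performs the free() only after releasing it, so the allocator never runs inside the critical section. The same shape with POSIX threads (sketch, hypothetical names):

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

typedef struct {
    pthread_mutex_t lock;
    unsigned int refcount;
} shared_t;

static void put_reference(shared_t *sh)
{
    bool dealloc = false;

    pthread_mutex_lock(&sh->lock);
    if (--sh->refcount == 0)
        dealloc = true;              /* record the decision... */
    pthread_mutex_unlock(&sh->lock);

    if (dealloc) {
        /* ...and act on it outside the critical section. */
        pthread_mutex_destroy(&sh->lock);
        free(sh);
    }
}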
@@ -516 +741 @@
 /** Destroy address space area.
  *
- * @param as      Address space.
- * @param address Address within the area to be deleted.
- *
- * @return Zero on success or a value from @ref errno.h on failure.
+ * @param as      Address space.
+ * @param address Address within the area to be deleted.
+ *
+ * @return Zero on success or a value from @ref errno.h on failure.
+ *
  */
 int as_area_destroy(as_t *as, uintptr_t address)
 {
-    as_area_t *area;
-    uintptr_t base;
-    link_t *cur;
-    ipl_t ipl;
-    
-    ipl = interrupts_disable();
     mutex_lock(&as->lock);
-    
-    area = find_area_and_lock(as, address);
+    
+    as_area_t *area = find_area_and_lock(as, address);
     if (!area) {
         mutex_unlock(&as->lock);
-        interrupts_restore(ipl);
         return ENOENT;
     }
-    
-    base = area->base;
-    
+    
+    uintptr_t base = area->base;
+    
+    page_table_lock(as, false);
+    
     /*
      * Start TLB shootdown sequence.
      */
-    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
-    
+    ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
+        area->pages);
+    
     /*
      * Visit only the pages mapped by used_space B+tree.
      */
+    link_t *cur;
     for (cur = area->used_space.leaf_head.next;
         cur != &area->used_space.leaf_head; cur = cur->next) {
         btree_node_t *node;
-        unsigned int i;
+        btree_key_t i;
         
         node = list_get_instance(cur, btree_node_t, leaf_link);
         for (i = 0; i < node->keys; i++) {
-            uintptr_t b = node->key[i];
-            size_t j;
-            pte_t *pte;
-            
-            for (j = 0; j < (size_t) node->value[i]; j++) {
-                page_table_lock(as, false);
-                pte = page_mapping_find(as, b + j * PAGE_SIZE);
-                ASSERT(pte && PTE_VALID(pte) &&
-                    PTE_PRESENT(pte));
-                if (area->backend &&
-                    area->backend->frame_free) {
-                    area->backend->frame_free(area, b +
-                        j * PAGE_SIZE, PTE_GET_FRAME(pte));
+            uintptr_t ptr = node->key[i];
+            size_t size;
+            
+            for (size = 0; size < (size_t) node->value[i]; size++) {
+                pte_t *pte =
+                    page_mapping_find(as, ptr + (size << PAGE_WIDTH));
+                
+                ASSERT(pte);
+                ASSERT(PTE_VALID(pte));
+                ASSERT(PTE_PRESENT(pte));
+                
+                if ((area->backend) &&
+                    (area->backend->frame_free)) {
+                    area->backend->frame_free(area,
+                        ptr + (size << PAGE_WIDTH), PTE_GET_FRAME(pte));
                 }
-                page_mapping_remove(as, b + j * PAGE_SIZE);
-                page_table_unlock(as, false);
+                
+                page_mapping_remove(as, ptr + (size << PAGE_WIDTH));
             }
         }
     }
     
     /*
      * Finish TLB shootdown sequence.
      */
-    
+    
     tlb_invalidate_pages(as->asid, area->base, area->pages);
+    
     /*
      * Invalidate potential software translation caches (e.g. TSB on
@@ -585 +811 @@
      */
     as_invalidate_translation_cache(as, area->base, area->pages);
-    tlb_shootdown_finalize();
+    tlb_shootdown_finalize(ipl);
+    
+    page_table_unlock(as, false);
     
     btree_destroy(&area->used_space);
     
     area->attributes |= AS_AREA_ATTR_PARTIAL;
     
     if (area->sh_info)
         sh_info_remove_reference(area->sh_info);
-    
+    
     mutex_unlock(&area->lock);
     
     /*
      * Remove the empty area from address space.
@@ -604 +832 @@
     
     mutex_unlock(&as->lock);
-    interrupts_restore(ipl);
     return 0;
 }
@@ -615 +842 @@
  * sh_info of the source area. The process of duplicating the
  * mapping is done through the backend share function.
- * 
- * @param src_as         Pointer to source address space.
- * @param src_base       Base address of the source address space area.
- * @param acc_size       Expected size of the source area.
- * @param dst_as         Pointer to destination address space.
- * @param dst_base       Target base address.
+ *
+ * @param src_as         Pointer to source address space.
+ * @param src_base       Base address of the source address space area.
+ * @param acc_size       Expected size of the source area.
+ * @param dst_as         Pointer to destination address space.
+ * @param dst_base       Target base address.
  * @param dst_flags_mask Destination address space area flags mask.
  *
- * @return Zero on success or ENOENT if there is no such task or if
- *         there is no such address space area, EPERM if there was
- *         a problem in accepting the area or ENOMEM if there was a
- *         problem in allocating destination address space area.
- *         ENOTSUP is returned if the address space area backend
- *         does not support sharing.
+ * @return Zero on success.
+ * @return ENOENT if there is no such task or such address space.
+ * @return EPERM if there was a problem in accepting the area.
+ * @return ENOMEM if there was a problem in allocating destination
+ *         address space area.
+ * @return ENOTSUP if the address space area backend does not support
+ *         sharing.
+ *
  */
 int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
-    as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
-{
-    ipl_t ipl;
-    int src_flags;
-    size_t src_size;
-    as_area_t *src_area, *dst_area;
-    share_info_t *sh_info;
-    mem_backend_t *src_backend;
-    mem_backend_data_t src_backend_data;
-    
-    ipl = interrupts_disable();
+    as_t *dst_as, uintptr_t dst_base, unsigned int dst_flags_mask)
+{
     mutex_lock(&src_as->lock);
-    src_area = find_area_and_lock(src_as, src_base);
+    as_area_t *src_area = find_area_and_lock(src_as, src_base);
     if (!src_area) {
         /*
@@ -649 +869 @@
          */
         mutex_unlock(&src_as->lock);
-        interrupts_restore(ipl);
         return ENOENT;
     }
-    
-    if (!src_area->backend || !src_area->backend->share) {
+    
+    if ((!src_area->backend) || (!src_area->backend->share)) {
         /*
          * There is no backend or the backend does not
@@ -660 +879 @@
         mutex_unlock(&src_area->lock);
         mutex_unlock(&src_as->lock);
-        interrupts_restore(ipl);
         return ENOTSUP;
     }
     
-    src_size = src_area->pages * PAGE_SIZE;
-    src_flags = src_area->flags;
-    src_backend = src_area->backend;
-    src_backend_data = src_area->backend_data;
+    size_t src_size = src_area->pages << PAGE_WIDTH;
+    unsigned int src_flags = src_area->flags;
+    mem_backend_t *src_backend = src_area->backend;
+    mem_backend_data_t src_backend_data = src_area->backend_data;
+    
     /* Share the cacheable flag from the original mapping */
     if (src_flags & AS_AREA_CACHEABLE)
         dst_flags_mask |= AS_AREA_CACHEABLE;
-    
-    if (src_size != acc_size ||
-        (src_flags & dst_flags_mask) != dst_flags_mask) {
+    
+    if ((src_size != acc_size) ||
+        ((src_flags & dst_flags_mask) != dst_flags_mask)) {
         mutex_unlock(&src_area->lock);
         mutex_unlock(&src_as->lock);
-        interrupts_restore(ipl);
         return EPERM;
     }
     
     /*
      * Now we are committed to sharing the area.
@@ -686 +903 @@
      * Then it will be safe to unlock it.
      */
-    sh_info = src_area->sh_info;
+    share_info_t *sh_info = src_area->sh_info;
     if (!sh_info) {
         sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
@@ -693 +910 @@
         btree_create(&sh_info->pagemap);
         src_area->sh_info = sh_info;
+        
         /*
          * Call the backend to setup sharing.
@@ -702 +920 @@
         mutex_unlock(&sh_info->lock);
     }
     
     mutex_unlock(&src_area->lock);
     mutex_unlock(&src_as->lock);
     
     /*
      * Create copy of the source address space area.
@@ -714 +932 @@
      * to support sharing in less privileged mode.
      */
-    dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
-        AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
+    as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size,
+        dst_base, AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
     if (!dst_area) {
         /*
@@ -722 +940 @@
         sh_info_remove_reference(sh_info);
         
-        interrupts_restore(ipl);
         return ENOMEM;
     }
     
     /*
      * Now the destination address space area has been
      * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
      * attribute and set the sh_info.
-     */
-    mutex_lock(&dst_as->lock);    
+     */
+    mutex_lock(&dst_as->lock);
     mutex_lock(&dst_area->lock);
     dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
     dst_area->sh_info = sh_info;
     mutex_unlock(&dst_area->lock);
-    mutex_unlock(&dst_as->lock);
-    
-    interrupts_restore(ipl);
+    mutex_unlock(&dst_as->lock);
     
     return 0;
 }
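The acceptance test in as_area_share() is a subset check: after the cacheable bit is propagated, every flag requested in dst_flags_mask must also be present in src_flags. As a standalone predicate:

#include <stdbool.h>

/* True iff every bit set in dst_mask is also set in src_flags,
 * i.e. the destination asks for no right the source lacks. */
static bool mask_is_subset(unsigned int src_flags, unsigned int dst_mask)
{
    return (src_flags & dst_mask) == dst_mask;
}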
 /** Check access mode for address space area.
  *
- * The address space area must be locked prior to this call.
- *
- * @param area   Address space area.
- * @param access Access mode.
- *
- * @return False if access violates area's permissions, true
- *         otherwise.
- */
-bool as_area_check_access(as_area_t *area, pf_access_t access)
-{
+ * @param area   Address space area.
+ * @param access Access mode.
+ *
+ * @return False if access violates area's permissions, true
+ *         otherwise.
+ *
+ */
+NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access)
+{
+    ASSERT(mutex_locked(&area->lock));
+    
     int flagmap[] = {
         [PF_ACCESS_READ] = AS_AREA_READ,
@@ -760 +976 @@
         [PF_ACCESS_EXEC] = AS_AREA_EXEC
     };
-    
+    
     if (!(area->flags & flagmap[access]))
         return false;
-    
+    
     return true;
 }
+
+/** Convert address space area flags to page flags.
+ *
+ * @param aflags Flags of some address space area.
+ *
+ * @return Flags to be passed to page_mapping_insert().
+ *
+ */
+NO_TRACE static unsigned int area_flags_to_page_flags(unsigned int aflags)
+{
+    unsigned int flags = PAGE_USER | PAGE_PRESENT;
+    
+    if (aflags & AS_AREA_READ)
+        flags |= PAGE_READ;
+    
+    if (aflags & AS_AREA_WRITE)
+        flags |= PAGE_WRITE;
+    
+    if (aflags & AS_AREA_EXEC)
+        flags |= PAGE_EXEC;
+    
+    if (aflags & AS_AREA_CACHEABLE)
+        flags |= PAGE_CACHEABLE;
+    
+    return flags;
+}
@@ -781 +1023 @@
  *
  */
-int as_area_change_flags(as_t *as, int flags, uintptr_t address)
-{
-    as_area_t *area;
-    uintptr_t base;
-    link_t *cur;
-    ipl_t ipl;
-    int page_flags;
-    uintptr_t *old_frame;
-    size_t frame_idx;
-    size_t used_pages;
-    
+int as_area_change_flags(as_t *as, unsigned int flags, uintptr_t address)
+{
     /* Flags for the new memory mapping */
-    page_flags = area_flags_to_page_flags(flags);
-    
-    ipl = interrupts_disable();
+    unsigned int page_flags = area_flags_to_page_flags(flags);
+    
     mutex_lock(&as->lock);
-    
-    area = find_area_and_lock(as, address);
+    
+    as_area_t *area = find_area_and_lock(as, address);
     if (!area) {
         mutex_unlock(&as->lock);
-        interrupts_restore(ipl);
         return ENOENT;
     }
     
     if ((area->sh_info) || (area->backend != &anon_backend)) {
         /* Copying shared areas not supported yet */
@@ -810 +1041 @@
         mutex_unlock(&area->lock);
         mutex_unlock(&as->lock);
-        interrupts_restore(ipl);
         return ENOTSUP;
     }
-    
-    base = area->base;
-    
+    
     /*
      * Compute total number of used pages in the used_space B+tree
      */
-    used_pages = 0;
-    
+    size_t used_pages = 0;
+    link_t *cur;
+    
     for (cur = area->used_space.leaf_head.next;
         cur != &area->used_space.leaf_head; cur = cur->next) {
-        btree_node_t *node;
-        unsigned int i;
-        
-        node = list_get_instance(cur, btree_node_t, leaf_link);
-        for (i = 0; i < node->keys; i++) {
+        btree_node_t *node
+            = list_get_instance(cur, btree_node_t, leaf_link);
+        btree_key_t i;
+        
+        for (i = 0; i < node->keys; i++)
             used_pages += (size_t) node->value[i];
-        }
-    }
-    
+    }
+    
     /* An array for storing frame numbers */
-    old_frame = malloc(used_pages * sizeof(uintptr_t), 0);
-    
+    uintptr_t *old_frame = malloc(used_pages * sizeof(uintptr_t), 0);
+    
+    page_table_lock(as, false);
+    
     /*
      * Start TLB shootdown sequence.
      */
-    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
-    
+    ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
+        area->pages);
+    
     /*
      * Remove used pages from page tables and remember their frame
      * numbers.
      */
-    frame_idx = 0;
-    
+    size_t frame_idx = 0;
+    
     for (cur = area->used_space.leaf_head.next;
         cur != &area->used_space.leaf_head; cur = cur->next) {
-        btree_node_t *node;
-        unsigned int i;
-        
-        node = list_get_instance(cur, btree_node_t, leaf_link);
+        btree_node_t *node
+            = list_get_instance(cur, btree_node_t, leaf_link);
+        btree_key_t i;
+        
         for (i = 0; i < node->keys; i++) {
-            uintptr_t b = node->key[i];
-            size_t j;
-            pte_t *pte;
-            
-            for (j = 0; j < (size_t) node->value[i]; j++) {
-                page_table_lock(as, false);
-                pte = page_mapping_find(as, b + j * PAGE_SIZE);
-                ASSERT(pte && PTE_VALID(pte) &&
-                    PTE_PRESENT(pte));
+            uintptr_t ptr = node->key[i];
+            size_t size;
+            
+            for (size = 0; size < (size_t) node->value[i]; size++) {
+                pte_t *pte =
+                    page_mapping_find(as, ptr + (size << PAGE_WIDTH));
+                
+                ASSERT(pte);
+                ASSERT(PTE_VALID(pte));
+                ASSERT(PTE_PRESENT(pte));
+                
                 old_frame[frame_idx++] = PTE_GET_FRAME(pte);
                 
                 /* Remove old mapping */
-                page_mapping_remove(as, b + j * PAGE_SIZE);
-                page_table_unlock(as, false);
+                page_mapping_remove(as, ptr + (size << PAGE_WIDTH));
             }
         }
     }
     
     /*
      * Finish TLB shootdown sequence.
      */
-    
+    
     tlb_invalidate_pages(as->asid, area->base, area->pages);
     
@@ -882 +1114 @@
      */
     as_invalidate_translation_cache(as, area->base, area->pages);
-    tlb_shootdown_finalize();
-    
+    tlb_shootdown_finalize(ipl);
+    
+    page_table_unlock(as, false);
+    
     /*
      * Set the new flags.
      */
     area->flags = flags;
     /*
      * Map pages back in with new flags. This step is kept separate
@@ -895 +1129 @@
      */
     frame_idx = 0;
     
     for (cur = area->used_space.leaf_head.next;
         cur != &area->used_space.leaf_head; cur = cur->next) {
-        btree_node_t *node;
-        unsigned int i;
-        
-        node = list_get_instance(cur, btree_node_t, leaf_link);
+        btree_node_t *node
+            = list_get_instance(cur, btree_node_t, leaf_link);
+        btree_key_t i;
+        
         for (i = 0; i < node->keys; i++) {
-            uintptr_t b = node->key[i];
-            size_t j;
-            
-            for (j = 0; j < (size_t) node->value[i]; j++) {
+            uintptr_t ptr = node->key[i];
+            size_t size;
+            
+            for (size = 0; size < (size_t) node->value[i]; size++) {
                 page_table_lock(as, false);
                 
                 /* Insert the new mapping */
-                page_mapping_insert(as, b + j * PAGE_SIZE,
+                page_mapping_insert(as, ptr + (size << PAGE_WIDTH),
                     old_frame[frame_idx++], page_flags);
                 
                 page_table_unlock(as, false);
             }
         }
     }
     
     free(old_frame);
     
     mutex_unlock(&area->lock);
     mutex_unlock(&as->lock);
-    interrupts_restore(ipl);
-    
+    
     return 0;
 }
-
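as_area_change_flags() works in two passes: unmap everything while recording the backing frames into old_frame[], then map the identical frames back with the new flags. The control shape over plain arrays, as a sketch (no page tables involved, purely illustrative):

#include <stddef.h>
#include <stdint.h>

#define N_PAGES 4

static uintptr_t mapping_frame[N_PAGES];   /* stand-in for the page table */
static unsigned int mapping_flags[N_PAGES];

static void change_flags(unsigned int new_flags)
{
    uintptr_t old_frame[N_PAGES];
    size_t frame_idx = 0;

    /* Pass 1: remove mappings, remembering the frames. */
    for (size_t i = 0; i < N_PAGES; i++)
        old_frame[frame_idx++] = mapping_frame[i];

    /* Pass 2: insert the same frames again with the new flags. */
    frame_idx = 0;
    for (size_t i = 0; i < N_PAGES; i++) {
        mapping_frame[i] = old_frame[frame_idx++];
        mapping_flags[i] = new_flags;
    }
}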
 /** Handle page fault within the current address space.
  *
@@ -936 +1168 @@
  * Interrupts are assumed disabled.
  *
- * @param page   Faulting page.
- * @param access Access mode that caused the page fault (i.e.
- *               read/write/exec).
- * @param istate Pointer to the interrupted state.
- *
- * @return AS_PF_FAULT on page fault, AS_PF_OK on success or
- *         AS_PF_DEFER if the fault was caused by copy_to_uspace()
- *         or copy_from_uspace().
+ * @param page   Faulting page.
+ * @param access Access mode that caused the page fault (i.e.
+ *               read/write/exec).
+ * @param istate Pointer to the interrupted state.
+ *
+ * @return AS_PF_FAULT on page fault.
+ * @return AS_PF_OK on success.
+ * @return AS_PF_DEFER if the fault was caused by copy_to_uspace()
+ *         or copy_from_uspace().
+ *
  */
 int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
 {
-    pte_t *pte;
-    as_area_t *area;
-    
     if (!THREAD)
         return AS_PF_FAULT;
-    
-    ASSERT(AS);
-    
+    
+    if (!AS)
+        return AS_PF_FAULT;
+    
     mutex_lock(&AS->lock);
-    area = find_area_and_lock(AS, page);
+    as_area_t *area = find_area_and_lock(AS, page);
     if (!area) {
         /*
@@ -965 +1197 @@
         goto page_fault;
     }
     
     if (area->attributes & AS_AREA_ATTR_PARTIAL) {
         /*
@@ -973 +1205 @@
         mutex_unlock(&area->lock);
         mutex_unlock(&AS->lock);
-        goto page_fault;
-    }
-    
-    if (!area->backend || !area->backend->page_fault) {
+        goto page_fault;
+    }
+    
+    if ((!area->backend) || (!area->backend->page_fault)) {
         /*
          * The address space area is not backed by any backend
@@ -983 +1215 @@
         mutex_unlock(&area->lock);
         mutex_unlock(&AS->lock);
-        goto page_fault;
-    }
-    
+        goto page_fault;
+    }
+    
     page_table_lock(AS, false);
     
@@ -992 +1224 @@
      * we need to make sure the mapping has not been already inserted.
      */
+    pte_t *pte;
     if ((pte = page_mapping_find(AS, page))) {
         if (PTE_PRESENT(pte)) {
@@ -1019 +1252 @@
     mutex_unlock(&AS->lock);
     return AS_PF_OK;
-    
+    
 page_fault:
     if (THREAD->in_copy_from_uspace) {
@@ -1032 +1265 @@
         return AS_PF_FAULT;
     }
-    
+    
     return AS_PF_DEFER;
 }
@@ -1044 +1277 @@
  * When this function is enetered, no spinlocks may be held.
  *
- * @param old Old address space or NULL.
- * @param new New address space.
+ * @param old Old address space or NULL.
+ * @param new New address space.
+ *
  */
 void as_switch(as_t *old_as, as_t *new_as)
@@ -1051 +1285 @@
     DEADLOCK_PROBE_INIT(p_asidlock);
     preemption_disable();
+    
 retry:
     (void) interrupts_disable();
     if (!spinlock_trylock(&asidlock)) {
-        /* 
+        /*
          * Avoid deadlock with TLB shootdown.
          * We can enable interrupts here because
@@ -1065 +1300 @@
     }
     preemption_enable();
     
     /*
      * First, take care of the old address space.
-     */    
+     */
     if (old_as) {
         ASSERT(old_as->cpu_refcount);
-        if((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
+        
+        if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
             /*
              * The old address space is no longer active on
@@ -1079 +1315 @@
              */
             ASSERT(old_as->asid != ASID_INVALID);
+            
             list_append(&old_as->inactive_as_with_asid_link,
                 &inactive_as_with_asid_head);
         }
         
         /*
          * Perform architecture-specific tasks when the address space
@@ -1089 +1326 @@
         as_deinstall_arch(old_as);
     }
     
     /*
      * Second, prepare the new address space.
@@ -1099 +1336 @@
         new_as->asid = asid_get();
     }
+    
 #ifdef AS_PAGE_TABLE
     SET_PTL0_ADDRESS(new_as->genarch.page_table);
@@ -1108 +1346 @@
      */
     as_install_arch(new_as);
     
     spinlock_unlock(&asidlock);
@@ -1114 +1352 @@
 }
-/** Convert address space area flags to page flags.
- *
- * @param aflags Flags of some address space area.
- *
- * @return Flags to be passed to page_mapping_insert().
- */
-int area_flags_to_page_flags(int aflags)
-{
-    int flags;
-    
-    flags = PAGE_USER | PAGE_PRESENT;
-    
-    if (aflags & AS_AREA_READ)
-        flags |= PAGE_READ;
-    
-    if (aflags & AS_AREA_WRITE)
-        flags |= PAGE_WRITE;
-    
-    if (aflags & AS_AREA_EXEC)
-        flags |= PAGE_EXEC;
-    
-    if (aflags & AS_AREA_CACHEABLE)
-        flags |= PAGE_CACHEABLE;
-    
-    return flags;
-}
-
 /** Compute flags for virtual address translation subsytem.
  *
- * The address space area must be locked.
- * Interrupts must be disabled.
- *
- * @param a Address space area.
- *
- * @return Flags to be used in page_mapping_insert().
- */
-int as_area_get_flags(as_area_t *a)
-{
-    return area_flags_to_page_flags(a->flags);
+ * @param area Address space area.
+ *
+ * @return Flags to be used in page_mapping_insert().
+ *
+ */
+NO_TRACE unsigned int as_area_get_flags(as_area_t *area)
+{
+    ASSERT(mutex_locked(&area->lock));
+    
+    return area_flags_to_page_flags(area->flags);
 }
@@ -1160 +1371 @@
  * table.
  *
- * @param flags Flags saying whether the page table is for the kernel
- *              address space.
- *
- * @return First entry of the page table.
- */
-pte_t *page_table_create(int flags)
+ * @param flags Flags saying whether the page table is for the kernel
+ *              address space.
+ *
+ * @return First entry of the page table.
+ *
+ */
+NO_TRACE pte_t *page_table_create(unsigned int flags)
 {
     ASSERT(as_operations);
@@ -1177 +1389 @@
  * Destroy page table in architecture specific way.
  *
- * @param page_table Physical address of PTL0.
- */
-void page_table_destroy(pte_t *page_table)
+ * @param page_table Physical address of PTL0.
+ *
+ */
+NO_TRACE void page_table_destroy(pte_t *page_table)
 {
     ASSERT(as_operations);
@@ -1191 +1404 @@
  * This function should be called before any page_mapping_insert(),
  * page_mapping_remove() and page_mapping_find().
- * 
+ *
  * Locking order is such that address space areas must be locked
  * prior to this call. Address space can be locked prior to this
  * call in which case the lock argument is false.
  *
- * @param as   Address space.
- * @param lock If false, do not attempt to lock as->lock.
- */
-void page_table_lock(as_t *as, bool lock)
+ * @param as   Address space.
+ * @param lock If false, do not attempt to lock as->lock.
+ *
+ */
+NO_TRACE void page_table_lock(as_t *as, bool lock)
 {
     ASSERT(as_operations);
@@ -1209 +1423 @@
 /** Unlock page table.
  *
- * @param as     Address space.
- * @param unlock If false, do not attempt to unlock as->lock.
- */
-void page_table_unlock(as_t *as, bool unlock)
+ * @param as     Address space.
+ * @param unlock If false, do not attempt to unlock as->lock.
+ *
+ */
+NO_TRACE void page_table_unlock(as_t *as, bool unlock)
 {
     ASSERT(as_operations);
@@ -1220 +1435 @@
 }
 
-
-/** Find address space area and lock it.
- *
- * The address space must be locked and interrupts must be disabled.
- *
- * @param as Address space.
- * @param va Virtual address.
- *
- * @return Locked address space area containing va on success or
- *         NULL on failure.
- */
-as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
-{
-    as_area_t *a;
-    btree_node_t *leaf, *lnode;
-    unsigned int i;
-    
-    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
-    if (a) {
-        /* va is the base address of an address space area */
-        mutex_lock(&a->lock);
-        return a;
-    }
-    
-    /*
-     * Search the leaf node and the righmost record of its left neighbour
-     * to find out whether this is a miss or va belongs to an address
-     * space area found there.
-     */
-    
-    /* First, search the leaf node itself. */
-    for (i = 0; i < leaf->keys; i++) {
-        a = (as_area_t *) leaf->value[i];
-        mutex_lock(&a->lock);
-        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
-            return a;
-        }
-        mutex_unlock(&a->lock);
-    }
-    
-    /*
-     * Second, locate the left neighbour and test its last record.
-     * Because of its position in the B+tree, it must have base < va.
-     */
-    lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
-    if (lnode) {
-        a = (as_area_t *) lnode->value[lnode->keys - 1];
-        mutex_lock(&a->lock);
-        if (va < a->base + a->pages * PAGE_SIZE) {
-            return a;
-        }
-        mutex_unlock(&a->lock);
-    }
-    
-    return NULL;
-}
-
-/** Check area conflicts with other areas.
- *
- * The address space must be locked and interrupts must be disabled.
- *
- * @param as         Address space.
- * @param va         Starting virtual address of the area being tested.
- * @param size       Size of the area being tested.
- * @param avoid_area Do not touch this area.
- *
- * @return True if there is no conflict, false otherwise.
- */
-bool
-check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area)
-{
-    as_area_t *a;
-    btree_node_t *leaf, *node;
-    unsigned int i;
-    
-    /*
-     * We don't want any area to have conflicts with NULL page.
-     */
-    if (overlaps(va, size, NULL, PAGE_SIZE))
-        return false;
-    
-    /*
-     * The leaf node is found in O(log n), where n is proportional to
-     * the number of address space areas belonging to as.
-     * The check for conflicts is then attempted on the rightmost
-     * record in the left neighbour, the leftmost record in the right
-     * neighbour and all records in the leaf node itself.
-     */
-    
-    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
-        if (a != avoid_area)
-            return false;
-    }
-    
-    /* First, check the two border cases. */
-    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
-        a = (as_area_t *) node->value[node->keys - 1];
-        mutex_lock(&a->lock);
-        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
-            mutex_unlock(&a->lock);
-            return false;
-        }
-        mutex_unlock(&a->lock);
-    }
-    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
-    if (node) {
-        a = (as_area_t *) node->value[0];
-        mutex_lock(&a->lock);
-        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
-            mutex_unlock(&a->lock);
-            return false;
-        }
-        mutex_unlock(&a->lock);
-    }
-    
-    /* Second, check the leaf node. */
-    for (i = 0; i < leaf->keys; i++) {
-        a = (as_area_t *) leaf->value[i];
-        
-        if (a == avoid_area)
-            continue;
-        
-        mutex_lock(&a->lock);
-        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
-            mutex_unlock(&a->lock);
-            return false;
-        }
-        mutex_unlock(&a->lock);
-    }
-    
-    /*
-     * So far, the area does not conflict with other areas.
-     * Check if it doesn't conflict with kernel address space.
-     */
-    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
-        return !overlaps(va, size,
-            KERNEL_ADDRESS_SPACE_START,
-            KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
-    }
-    
-    return true;
+/** Test whether page tables are locked.
+ *
+ * @param as Address space where the page tables belong.
+ *
+ * @return True if the page tables belonging to the address soace
+ *         are locked, otherwise false.
+ */
+NO_TRACE bool page_table_locked(as_t *as)
+{
+    ASSERT(as_operations);
+    ASSERT(as_operations->page_table_locked);
+    
+    return as_operations->page_table_locked(as);
 }
 
 /** Return size of the address space area with given base.
  *
- * @param base Arbitrary address insede the address space area.
- *
- * @return Size of the address space area in bytes or zero if it
- *         does not exist.
+ * @param base Arbitrary address inside the address space area.
+ *
+ * @return Size of the address space area in bytes or zero if it
+ *         does not exist.
+ *
  */
 size_t as_area_get_size(uintptr_t base)
 {
-    ipl_t ipl;
-    as_area_t *src_area;
     size_t size;
-    
-    ipl = interrupts_disable();
-    src_area = find_area_and_lock(AS, base);
+    
+    page_table_lock(AS, true);
+    as_area_t *src_area = find_area_and_lock(AS, base);
+    
     if (src_area) {
-        size = src_area->pages * PAGE_SIZE;
+        size = src_area->pages << PAGE_WIDTH;
         mutex_unlock(&src_area->lock);
-    } else {
+    } else
         size = 0;
-    }
-    interrupts_restore(ipl);
+    
+    page_table_unlock(AS, true);
     return size;
 }
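Throughout the changeset, pages * PAGE_SIZE becomes pages << PAGE_WIDTH. The two are interchangeable whenever PAGE_SIZE == 1 << PAGE_WIDTH, i.e. for power-of-two page sizes; a one-line check:

#include <assert.h>
#include <stddef.h>

#define PAGE_WIDTH 12                /* assumed */
#define PAGE_SIZE  (1 << PAGE_WIDTH)

int main(void)
{
    size_t pages = 37;
    assert((pages << PAGE_WIDTH) == pages * PAGE_SIZE);
    return 0;
}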
@@ -1392 +1479 @@
 /** Mark portion of address space area as used.
  *
  * The address space area must be already locked.
  *
- * @param a     Address space area.
- * @param page  First page to be marked.
- * @param count Number of page to be marked.
- *
- * @return Zero on failure and non-zero on success.
- */
-int used_space_insert(as_area_t *a, uintptr_t page, size_t count)
-{
-    btree_node_t *leaf, *node;
-    size_t pages;
-    unsigned int i;
-    
+ * @param area  Address space area.
+ * @param page  First page to be marked.
+ * @param count Number of page to be marked.
+ *
+ * @return False on failure or true on success.
+ *
+ */
+bool used_space_insert(as_area_t *area, uintptr_t page, size_t count)
+{
+    ASSERT(mutex_locked(&area->lock));
     ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
     ASSERT(count);
-    
-    pages = (size_t) btree_search(&a->used_space, page, &leaf);
+    
+    btree_node_t *leaf;
+    size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
     if (pages) {
         /*
          * We hit the beginning of some used space.
          */
-        return 0;
-    }
-    
+        return false;
+    }
+    
     if (!leaf->keys) {
-        btree_insert(&a->used_space, page, (void *) count, leaf);
-        return 1;
-    }
-    
-    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
+        btree_insert(&area->used_space, page, (void *) count, leaf);
+        goto success;
+    }
+    
+    btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf);
     if (node) {
         uintptr_t left_pg = node->key[node->keys - 1];
@@ -1432 +1518 @@
          * the left neigbour and the first interval of the leaf.
          */
         
         if (page >= right_pg) {
             /* Do nothing. */
-        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
-            left_cnt * PAGE_SIZE)) {
+        } else if (overlaps(page, count << PAGE_WIDTH, left_pg,
+            left_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the left interval. */
-            return 0;
-        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
-            right_cnt * PAGE_SIZE)) {
+            return false;
+        } else if (overlaps(page, count << PAGE_WIDTH, right_pg,
+            right_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the right interval. */
-            return 0;
-        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
-            (page + count * PAGE_SIZE == right_pg)) {
+            return false;
+        } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
+            (page + (count << PAGE_WIDTH) == right_pg)) {
             /*
              * The interval can be added by merging the two already
@@ -1450 +1536 @@
              */
             node->value[node->keys - 1] += count + right_cnt;
-            btree_remove(&a->used_space, right_pg, leaf);
-            return 1;
-        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
-            /* 
+            btree_remove(&area->used_space, right_pg, leaf);
+            goto success;
+        } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
+            /*
              * The interval can be added by simply growing the left
              * interval.
              */
             node->value[node->keys - 1] += count;
-            return 1;
-        } else if (page + count * PAGE_SIZE == right_pg) {
+            goto success;
+        } else if (page + (count << PAGE_WIDTH) == right_pg) {
             /*
              * The interval can be addded by simply moving base of
@@ -1467 +1553 @@
             leaf->value[0] += count;
             leaf->key[0] = page;
-            return 1;
+            goto success;
         } else {
             /*
@@ -1473 +1559 @@
              * but cannot be merged with any of them.
              */
-            btree_insert(&a->used_space, page, (void *) count,
+            btree_insert(&area->used_space, page, (void *) count,
                 leaf);
-            return 1;
+            goto success;
         }
     } else if (page < leaf->key[0]) {
         uintptr_t right_pg = leaf->key[0];
         size_t right_cnt = (size_t) leaf->value[0];
         
         /*
          * Investigate the border case in which the left neighbour does
          * not exist but the interval fits from the left.
          */
-        
-        if (overlaps(page, count * PAGE_SIZE, right_pg,
-            right_cnt * PAGE_SIZE)) {
+        
+        if (overlaps(page, count << PAGE_WIDTH, right_pg,
+            right_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the right interval. */
-            return 0;
-        } else if (page + count * PAGE_SIZE == right_pg) {
+            return false;
+        } else if (page + (count << PAGE_WIDTH) == right_pg) {
             /*
              * The interval can be added by moving the base of the
@@ -1498 +1584 @@
             leaf->key[0] = page;
             leaf->value[0] += count;
-            return 1;
+            goto success;
         } else {
             /*
@@ -1504 +1590 @@
              * It must be added individually.
              */
-            btree_insert(&a->used_space, page, (void *) count,
+            btree_insert(&area->used_space, page, (void *) count,
                 leaf);
-            return 1;
-        }
-    }
-    
-    node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
+            goto success;
+        }
+    }
+    
+    node = btree_leaf_node_right_neighbour(&area->used_space, leaf);
     if (node) {
         uintptr_t left_pg = leaf->key[leaf->keys - 1];
@@ -1522 +1608 @@
          * the right neigbour and the last interval of the leaf.
          */
         
         if (page < left_pg) {
             /* Do nothing. */
-        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
-            left_cnt * PAGE_SIZE)) {
+        } else if (overlaps(page, count << PAGE_WIDTH, left_pg,
+            left_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the left interval. */
-            return 0;
-        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
-            right_cnt * PAGE_SIZE)) {
+            return false;
+        } else if (overlaps(page, count << PAGE_WIDTH, right_pg,
+            right_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the right interval. */
-            return 0;
-        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
-            (page + count * PAGE_SIZE == right_pg)) {
+            return false;
+        } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
+            (page + (count << PAGE_WIDTH) == right_pg)) {
             /*
              * The interval can be added by merging the two already
              * present intervals.
-             * */
+             */
             leaf->value[leaf->keys - 1] += count + right_cnt;
-            btree_remove(&a->used_space, right_pg, node);
-            return 1;
-        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
+            btree_remove(&area->used_space, right_pg, node);
+            goto success;
+        } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
             /*
              * The interval can be added by simply growing the left
              * interval.
-             * */
-            leaf->value[leaf->keys - 1] +=  count;
-            return 1;
-        } else if (page + count * PAGE_SIZE == right_pg) {
+             */
+            leaf->value[leaf->keys - 1] += count;
+            goto success;
+        } else if (page + (count << PAGE_WIDTH) == right_pg) {
             /*
              * The interval can be addded by simply moving base of
@@ -1557 +1643 @@
             node->value[0] += count;
             node->key[0] = page;
-            return 1;
+            goto success;
         } else {
             /*
@@ -1563 +1649 @@
              * but cannot be merged with any of them.
              */
-            btree_insert(&a->used_space, page, (void *) count,
+            btree_insert(&area->used_space, page, (void *) count,
                 leaf);
-            return 1;
+            goto success;
         }
     } else if (page >= leaf->key[leaf->keys - 1]) {
         uintptr_t left_pg = leaf->key[leaf->keys - 1];
         size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
         
         /*
          * Investigate the border case in which the right neighbour
          * does not exist but the interval fits from the right.
          */
-        
-        if (overlaps(page, count * PAGE_SIZE, left_pg,
-            left_cnt * PAGE_SIZE)) {
+        
+        if (overlaps(page, count << PAGE_WIDTH, left_pg,
+            left_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the left interval. */
-            return 0;
-        } else if (left_pg + left_cnt * PAGE_SIZE == page) {
+            return false;
+        } else if (left_pg + (left_cnt << PAGE_WIDTH) == page) {
             /*
              * The interval can be added by growing the left
@@ -1586 +1672 @@
              */
             leaf->value[leaf->keys - 1] += count;
-            return 1;
+            goto success;
         } else {
             /*
@@ -1592 +1678 @@
              * It must be added individually.
              */
-            btree_insert(&a->used_space, page, (void *) count,
+            btree_insert(&area->used_space, page, (void *) count,
                 leaf);
-            return 1;
+            goto success;
         }
     }
@@ -1603 +1689 @@
      * were already resolved.
      */
+    btree_key_t i;
     for (i = 1; i < leaf->keys; i++) {
         if (page < leaf->key[i]) {
@@ -1609 +1696 @@
             size_t left_cnt = (size_t) leaf->value[i - 1];
             size_t right_cnt = (size_t) leaf->value[i];
             
             /*
              * The interval fits between left_pg and right_pg.
              */
-            
-            if (overlaps(page, count * PAGE_SIZE, left_pg,
-                left_cnt * PAGE_SIZE)) {
+            
+            if (overlaps(page, count << PAGE_WIDTH, left_pg,
+                left_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval intersects with the left
                  * interval.
1621 1708 */ 1622 return 0;1623 } else if (overlaps(page, count * PAGE_SIZE, right_pg,1624 right_cnt * PAGE_SIZE)) {1709 return false; 1710 } else if (overlaps(page, count << PAGE_WIDTH, right_pg, 1711 right_cnt << PAGE_WIDTH)) { 1625 1712 /* 1626 1713 * The interval intersects with the right 1627 1714 * interval. 1628 1715 */ 1629 return 0;1630 } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&1631 (page + count * PAGE_SIZE== right_pg)) {1716 return false; 1717 } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) && 1718 (page + (count << PAGE_WIDTH) == right_pg)) { 1632 1719 /* 1633 1720 * The interval can be added by merging the two … … 1635 1722 */ 1636 1723 leaf->value[i - 1] += count + right_cnt; 1637 btree_remove(&a ->used_space, right_pg, leaf);1638 return 1;1639 } else if (page == left_pg + left_cnt * PAGE_SIZE) {1724 btree_remove(&area->used_space, right_pg, leaf); 1725 goto success; 1726 } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) { 1640 1727 /* 1641 1728 * The interval can be added by simply growing … … 1643 1730 */ 1644 1731 leaf->value[i - 1] += count; 1645 return 1;1646 } else if (page + count * PAGE_SIZE== right_pg) {1732 goto success; 1733 } else if (page + (count << PAGE_WIDTH) == right_pg) { 1647 1734 /* 1648 * The interval can be addded by simply moving1735 * The interval can be addded by simply moving 1649 1736 * base of the right interval down and 1650 1737 * increasing its size accordingly. 1651 */1738 */ 1652 1739 leaf->value[i] += count; 1653 1740 leaf->key[i] = page; 1654 return 1;1741 goto success; 1655 1742 } else { 1656 1743 /* … … 1659 1746 * them. 1660 1747 */ 1661 btree_insert(&a ->used_space, page,1748 btree_insert(&area->used_space, page, 1662 1749 (void *) count, leaf); 1663 return 1;1750 goto success; 1664 1751 } 1665 1752 } 1666 1753 } 1667 1668 panic("Inconsistency detected while adding %" PRIs " pages of used " 1669 "space at %p.", count, page); 1754 1755 panic("Inconsistency detected while adding %zu pages of used " 1756 "space at %p.", count, (void *) page); 1757 1758 success: 1759 area->resident += count; 1760 return true; 1670 1761 } 1671 1762 … … 1674 1765 * The address space area must be already locked. 1675 1766 * 1676 * @param a Address space area. 1677 * @param page First page to be marked. 1678 * @param count Number of page to be marked. 1679 * 1680 * @return Zero on failure and non-zero on success. 1681 */ 1682 int used_space_remove(as_area_t *a, uintptr_t page, size_t count) 1683 { 1684 btree_node_t *leaf, *node; 1685 size_t pages; 1686 unsigned int i; 1687 1767 * @param area Address space area. 1768 * @param page First page to be marked. 1769 * @param count Number of page to be marked. 1770 * 1771 * @return False on failure or true on success. 1772 * 1773 */ 1774 bool used_space_remove(as_area_t *area, uintptr_t page, size_t count) 1775 { 1776 ASSERT(mutex_locked(&area->lock)); 1688 1777 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); 1689 1778 ASSERT(count); 1690 1691 pages = (size_t) btree_search(&a->used_space, page, &leaf); 1779 1780 btree_node_t *leaf; 1781 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf); 1692 1782 if (pages) { 1693 1783 /* … … 1695 1785 */ 1696 1786 if (count > pages) { 1697 return 0;1787 return false; 1698 1788 } else if (count == pages) { 1699 btree_remove(&a ->used_space, page, leaf);1700 return 1;1789 btree_remove(&area->used_space, page, leaf); 1790 goto success; 1701 1791 } else { 1702 1792 /* … … 1704 1794 * Decrease its size and relocate its start address. 
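Another recurring change is control flow: branches that formerly did return 1 now goto success, so the new area->resident counter is maintained at a single point instead of being scattered across every merge case. A minimal sketch of that pattern, with area_t and mark_used() as hypothetical stand-ins:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stand-in for as_area_t's accounting field. */
    typedef struct {
        size_t resident;   /* pages currently marked as used */
    } area_t;

    /*
     * Every branch that used to "return 1" now jumps to one success
     * label, so the resident counter is updated in exactly one place
     * no matter which merge case fired.
     */
    static bool mark_used(area_t *area, size_t count, int merge_case)
    {
        switch (merge_case) {
        case 0:            /* overlap detected */
            return false;
        case 1:            /* merged with a neighbour */
            goto success;
        case 2:            /* inserted standalone */
            goto success;
        default:
            return false;
        }

    success:
        area->resident += count;   /* single accounting point */
        return true;
    }

    int main(void)
    {
        area_t a = { .resident = 0 };
        mark_used(&a, 4, 1);
        mark_used(&a, 2, 2);
        printf("resident pages: %zu\n", a.resident);   /* 6 */
        return 0;
    }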
1705 1795 */ 1796 btree_key_t i; 1706 1797 for (i = 0; i < leaf->keys; i++) { 1707 1798 if (leaf->key[i] == page) { 1708 leaf->key[i] += count * PAGE_SIZE;1799 leaf->key[i] += count << PAGE_WIDTH; 1709 1800 leaf->value[i] -= count; 1710 return 1;1801 goto success; 1711 1802 } 1712 1803 } 1804 1713 1805 goto error; 1714 1806 } 1715 1807 } 1716 1717 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);1718 if ( node && page < leaf->key[0]) {1808 1809 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf); 1810 if ((node) && (page < leaf->key[0])) { 1719 1811 uintptr_t left_pg = node->key[node->keys - 1]; 1720 1812 size_t left_cnt = (size_t) node->value[node->keys - 1]; 1721 1722 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,1723 count * PAGE_SIZE)) {1724 if (page + count * PAGE_SIZE==1725 left_pg + left_cnt * PAGE_SIZE) {1813 1814 if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page, 1815 count << PAGE_WIDTH)) { 1816 if (page + (count << PAGE_WIDTH) == 1817 left_pg + (left_cnt << PAGE_WIDTH)) { 1726 1818 /* 1727 1819 * The interval is contained in the rightmost … … 1731 1823 */ 1732 1824 node->value[node->keys - 1] -= count; 1733 return 1; 1734 } else if (page + count * PAGE_SIZE < 1735 left_pg + left_cnt*PAGE_SIZE) { 1736 size_t new_cnt; 1737 1825 goto success; 1826 } else if (page + (count << PAGE_WIDTH) < 1827 left_pg + (left_cnt << PAGE_WIDTH)) { 1738 1828 /* 1739 1829 * The interval is contained in the rightmost … … 1743 1833 * new interval. 1744 1834 */ 1745 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -1746 (page + count*PAGE_SIZE)) >> PAGE_WIDTH;1835 size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) - 1836 (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH; 1747 1837 node->value[node->keys - 1] -= count + new_cnt; 1748 btree_insert(&a ->used_space, page +1749 count * PAGE_SIZE, (void *) new_cnt, leaf);1750 return 1;1838 btree_insert(&area->used_space, page + 1839 (count << PAGE_WIDTH), (void *) new_cnt, leaf); 1840 goto success; 1751 1841 } 1752 1842 } 1753 return 0;1754 } else if (page < leaf->key[0]) {1755 return 0;1756 }1843 1844 return false; 1845 } else if (page < leaf->key[0]) 1846 return false; 1757 1847 1758 1848 if (page > leaf->key[leaf->keys - 1]) { 1759 1849 uintptr_t left_pg = leaf->key[leaf->keys - 1]; 1760 1850 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 1761 1762 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,1763 count * PAGE_SIZE)) {1764 if (page + count * PAGE_SIZE ==1765 left_pg + left_cnt * PAGE_SIZE) {1851 1852 if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page, 1853 count << PAGE_WIDTH)) { 1854 if (page + (count << PAGE_WIDTH) == 1855 left_pg + (left_cnt << PAGE_WIDTH)) { 1766 1856 /* 1767 1857 * The interval is contained in the rightmost … … 1770 1860 */ 1771 1861 leaf->value[leaf->keys - 1] -= count; 1772 return 1; 1773 } else if (page + count * PAGE_SIZE < left_pg + 1774 left_cnt * PAGE_SIZE) { 1775 size_t new_cnt; 1776 1862 goto success; 1863 } else if (page + (count << PAGE_WIDTH) < left_pg + 1864 (left_cnt << PAGE_WIDTH)) { 1777 1865 /* 1778 1866 * The interval is contained in the rightmost … … 1782 1870 * interval. 
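When used_space_remove() punches a hole strictly inside an existing interval, the interval is shrunk and a new interval is inserted for the surviving tail; new_cnt is the size of that tail in pages. The arithmetic, checked standalone under an assumed 4 KiB page:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_WIDTH  12   /* assumed page width for the example */

    int main(void)
    {
        /* An existing interval: 8 pages starting at 0x40000. */
        uintptr_t left_pg  = 0x40000;
        size_t    left_cnt = 8;

        /* Remove 2 pages from its middle, starting at 0x43000. */
        uintptr_t page  = 0x43000;
        size_t    count = 2;

        /* Pages that survive to the right of the hole; this is the
         * new_cnt expression from used_space_remove(). */
        size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -
            (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;

        /* The original interval shrinks by the hole plus the tail,
         * matching "leaf->value[...] -= count + new_cnt". */
        size_t head = left_cnt - (count + new_cnt);

        assert(new_cnt == 3);   /* pages 0x45..0x47 */
        assert(head == 3);      /* pages 0x40..0x42 */
        printf("head %zu pages, hole %zu pages, tail %zu pages\n",
            head, count, new_cnt);
        return 0;
    }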
1783 1871 */ 1784 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -1785 (page + count * PAGE_SIZE)) >> PAGE_WIDTH;1872 size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) - 1873 (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH; 1786 1874 leaf->value[leaf->keys - 1] -= count + new_cnt; 1787 btree_insert(&a ->used_space, page +1788 count * PAGE_SIZE, (void *) new_cnt, leaf);1789 return 1;1875 btree_insert(&area->used_space, page + 1876 (count << PAGE_WIDTH), (void *) new_cnt, leaf); 1877 goto success; 1790 1878 } 1791 1879 } 1792 return 0; 1793 } 1880 1881 return false; 1882 } 1794 1883 1795 1884 /* 1796 1885 * The border cases have been already resolved. 1797 * Now the interval can be only between intervals of the leaf. 1798 */ 1886 * Now the interval can be only between intervals of the leaf. 1887 */ 1888 btree_key_t i; 1799 1889 for (i = 1; i < leaf->keys - 1; i++) { 1800 1890 if (page < leaf->key[i]) { 1801 1891 uintptr_t left_pg = leaf->key[i - 1]; 1802 1892 size_t left_cnt = (size_t) leaf->value[i - 1]; 1803 1893 1804 1894 /* 1805 1895 * Now the interval is between intervals corresponding 1806 1896 * to (i - 1) and i. 1807 1897 */ 1808 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,1809 count * PAGE_SIZE)) {1810 if (page + count * PAGE_SIZE==1811 left_pg + left_cnt*PAGE_SIZE) {1898 if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page, 1899 count << PAGE_WIDTH)) { 1900 if (page + (count << PAGE_WIDTH) == 1901 left_pg + (left_cnt << PAGE_WIDTH)) { 1812 1902 /* 1813 1903 * The interval is contained in the … … 1817 1907 */ 1818 1908 leaf->value[i - 1] -= count; 1819 return 1; 1820 } else if (page + count * PAGE_SIZE < 1821 left_pg + left_cnt * PAGE_SIZE) { 1822 size_t new_cnt; 1823 1909 goto success; 1910 } else if (page + (count << PAGE_WIDTH) < 1911 left_pg + (left_cnt << PAGE_WIDTH)) { 1824 1912 /* 1825 1913 * The interval is contained in the … … 1829 1917 * also inserting a new interval. 1830 1918 */ 1831 new_cnt = ((left_pg +1832 left_cnt * PAGE_SIZE) -1833 (page + count * PAGE_SIZE)) >>1919 size_t new_cnt = ((left_pg + 1920 (left_cnt << PAGE_WIDTH)) - 1921 (page + (count << PAGE_WIDTH))) >> 1834 1922 PAGE_WIDTH; 1835 1923 leaf->value[i - 1] -= count + new_cnt; 1836 btree_insert(&a ->used_space, page +1837 count * PAGE_SIZE, (void *) new_cnt,1924 btree_insert(&area->used_space, page + 1925 (count << PAGE_WIDTH), (void *) new_cnt, 1838 1926 leaf); 1839 return 1;1927 goto success; 1840 1928 } 1841 1929 } 1842 return 0; 1843 } 1844 } 1845 1930 1931 return false; 1932 } 1933 } 1934 1846 1935 error: 1847 panic("Inconsistency detected while removing %" PRIs " pages of used " 1848 "space from %p.", count, page); 1849 } 1850 1851 /** Remove reference to address space area share info. 1852 * 1853 * If the reference count drops to 0, the sh_info is deallocated. 1854 * 1855 * @param sh_info Pointer to address space area share info. 1856 */ 1857 void sh_info_remove_reference(share_info_t *sh_info) 1858 { 1859 bool dealloc = false; 1860 1861 mutex_lock(&sh_info->lock); 1862 ASSERT(sh_info->refcount); 1863 if (--sh_info->refcount == 0) { 1864 dealloc = true; 1865 link_t *cur; 1866 1867 /* 1868 * Now walk carefully the pagemap B+tree and free/remove 1869 * reference from all frames found there. 
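Both used_space_insert() and used_space_remove() now document their locking contract with ASSERT(mutex_locked(&area->lock)) rather than relying on callers to remember it. Below is a userspace approximation of that precondition check; the kernel's mutex_locked() presumably inspects the mutex itself, whereas here a shadow flag stands in for it, and all the types are illustrative.

    #include <assert.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel mutex plus mutex_locked(). */
    typedef struct {
        pthread_mutex_t mtx;
        bool            held;   /* tracked manually for the assertion */
    } mutex_t;

    static void mutex_lock(mutex_t *m)
    {
        pthread_mutex_lock(&m->mtx);
        m->held = true;
    }

    static void mutex_unlock(mutex_t *m)
    {
        m->held = false;
        pthread_mutex_unlock(&m->mtx);
    }

    static bool mutex_locked(mutex_t *m)
    {
        return m->held;
    }

    typedef struct {
        mutex_t lock;
        size_t  resident;
    } area_t;

    /* Callers must hold area->lock, as used_space_remove() now asserts. */
    static bool remove_pages(area_t *area, size_t count)
    {
        assert(mutex_locked(&area->lock));
        if (count > area->resident)
            return false;
        area->resident -= count;   /* mirrors "area->resident -= count" */
        return true;
    }

    int main(void)
    {
        area_t a = { .lock = { PTHREAD_MUTEX_INITIALIZER, false },
            .resident = 8 };
        mutex_lock(&a.lock);
        bool ok = remove_pages(&a, 3);
        mutex_unlock(&a.lock);
        printf("ok=%d resident=%zu\n", ok, a.resident);
        return 0;
    }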
1870 */ 1871 for (cur = sh_info->pagemap.leaf_head.next; 1872 cur != &sh_info->pagemap.leaf_head; cur = cur->next) { 1873 btree_node_t *node; 1874 unsigned int i; 1875 1876 node = list_get_instance(cur, btree_node_t, leaf_link); 1877 for (i = 0; i < node->keys; i++) 1878 frame_free((uintptr_t) node->value[i]); 1879 } 1880 1881 } 1882 mutex_unlock(&sh_info->lock); 1883 1884 if (dealloc) { 1885 btree_destroy(&sh_info->pagemap); 1886 free(sh_info); 1887 } 1936 panic("Inconsistency detected while removing %zu pages of used " 1937 "space from %p.", count, (void *) page); 1938 1939 success: 1940 area->resident -= count; 1941 return true; 1888 1942 } 1889 1943 … … 1893 1947 1894 1948 /** Wrapper for as_area_create(). */ 1895 unative_t sys_as_area_create(uintptr_t address, size_t size,int flags)1949 sysarg_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags) 1896 1950 { 1897 1951 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, 1898 1952 AS_AREA_ATTR_NONE, &anon_backend, NULL)) 1899 return ( unative_t) address;1953 return (sysarg_t) address; 1900 1954 else 1901 return ( unative_t) -1;1955 return (sysarg_t) -1; 1902 1956 } 1903 1957 1904 1958 /** Wrapper for as_area_resize(). */ 1905 unative_t sys_as_area_resize(uintptr_t address, size_t size,int flags)1906 { 1907 return ( unative_t) as_area_resize(AS, address, size, 0);1959 sysarg_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags) 1960 { 1961 return (sysarg_t) as_area_resize(AS, address, size, 0); 1908 1962 } 1909 1963 1910 1964 /** Wrapper for as_area_change_flags(). */ 1911 unative_t sys_as_area_change_flags(uintptr_t address,int flags)1912 { 1913 return ( unative_t) as_area_change_flags(AS, flags, address);1965 sysarg_t sys_as_area_change_flags(uintptr_t address, unsigned int flags) 1966 { 1967 return (sysarg_t) as_area_change_flags(AS, flags, address); 1914 1968 } 1915 1969 1916 1970 /** Wrapper for as_area_destroy(). */ 1917 unative_t sys_as_area_destroy(uintptr_t address) 1918 { 1919 return (unative_t) as_area_destroy(AS, address); 1971 sysarg_t sys_as_area_destroy(uintptr_t address) 1972 { 1973 return (sysarg_t) as_area_destroy(AS, address); 1974 } 1975 1976 /** Return pointer to unmapped address space area 1977 * 1978 * @param base Lowest address bound. 1979 * @param size Requested size of the allocation. 1980 * 1981 * @return Pointer to the beginning of unmapped address space area. 1982 * 1983 */ 1984 sysarg_t sys_as_get_unmapped_area(uintptr_t base, size_t size) 1985 { 1986 if (size == 0) 1987 return 0; 1988 1989 /* 1990 * Make sure we allocate from page-aligned 1991 * address. Check for possible overflow in 1992 * each step. 1993 */ 1994 1995 size_t pages = SIZE2FRAMES(size); 1996 uintptr_t ret = 0; 1997 1998 /* 1999 * Find the lowest unmapped address aligned on the sz 2000 * boundary, not smaller than base and of the required size. 
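The new sys_as_get_unmapped_area() begins by normalising the request: the byte size is converted to page frames and the search base is rounded up to a page boundary, with a zero-size request short-circuited to 0. SIZE2FRAMES and ALIGN_UP are real kernel macros; the definitions below are plausible spellings for illustration only, with PAGE_WIDTH = 12 assumed:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_WIDTH  12
    #define PAGE_SIZE   (1 << PAGE_WIDTH)

    /* Illustrative definitions; the real ones live in kernel headers. */
    #define ALIGN_UP(addr, align) \
        (((addr) + ((align) - 1)) & ~((uintptr_t)(align) - 1))
    #define SIZE2FRAMES(size) \
        (((size) == 0) ? 0 : ((((size) - 1) >> PAGE_WIDTH) + 1))

    int main(void)
    {
        /* A 5000-byte request occupies two 4 KiB pages... */
        assert(SIZE2FRAMES(5000) == 2);
        /* ...and the search starts at the next page boundary at or
         * above base; an already aligned base is left untouched. */
        assert(ALIGN_UP((uintptr_t) 0x12345, PAGE_SIZE) == 0x13000);
        assert(ALIGN_UP((uintptr_t) 0x13000, PAGE_SIZE) == 0x13000);
        printf("frame and alignment arithmetic checks out\n");
        return 0;
    }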
2001 */ 2002 2003 mutex_lock(&AS->lock); 2004 2005 /* First check the base address itself */ 2006 uintptr_t addr = ALIGN_UP(base, PAGE_SIZE); 2007 if ((addr >= base) && 2008 (check_area_conflicts(AS, addr, pages, NULL))) 2009 ret = addr; 2010 2011 /* Eventually check the addresses behind each area */ 2012 link_t *cur; 2013 for (cur = AS->as_area_btree.leaf_head.next; 2014 (ret == 0) && (cur != &AS->as_area_btree.leaf_head); 2015 cur = cur->next) { 2016 btree_node_t *node = 2017 list_get_instance(cur, btree_node_t, leaf_link); 2018 2019 btree_key_t i; 2020 for (i = 0; (ret == 0) && (i < node->keys); i++) { 2021 as_area_t *area = (as_area_t *) node->value[i]; 2022 2023 mutex_lock(&area->lock); 2024 2025 uintptr_t addr = 2026 ALIGN_UP(area->base + (area->pages << PAGE_WIDTH), 2027 PAGE_SIZE); 2028 2029 if ((addr >= base) && (addr >= area->base) && 2030 (check_area_conflicts(AS, addr, pages, area))) 2031 ret = addr; 2032 2033 mutex_unlock(&area->lock); 2034 } 2035 } 2036 2037 mutex_unlock(&AS->lock); 2038 2039 return (sysarg_t) ret; 2040 } 2041 2042 /** Get list of adress space areas. 2043 * 2044 * @param as Address space. 2045 * @param obuf Place to save pointer to returned buffer. 2046 * @param osize Place to save size of returned buffer. 2047 * 2048 */ 2049 void as_get_area_info(as_t *as, as_area_info_t **obuf, size_t *osize) 2050 { 2051 mutex_lock(&as->lock); 2052 2053 /* First pass, count number of areas. */ 2054 2055 size_t area_cnt = 0; 2056 link_t *cur; 2057 2058 for (cur = as->as_area_btree.leaf_head.next; 2059 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 2060 btree_node_t *node = 2061 list_get_instance(cur, btree_node_t, leaf_link); 2062 area_cnt += node->keys; 2063 } 2064 2065 size_t isize = area_cnt * sizeof(as_area_info_t); 2066 as_area_info_t *info = malloc(isize, 0); 2067 2068 /* Second pass, record data. */ 2069 2070 size_t area_idx = 0; 2071 2072 for (cur = as->as_area_btree.leaf_head.next; 2073 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 2074 btree_node_t *node = 2075 list_get_instance(cur, btree_node_t, leaf_link); 2076 btree_key_t i; 2077 2078 for (i = 0; i < node->keys; i++) { 2079 as_area_t *area = node->value[i]; 2080 2081 ASSERT(area_idx < area_cnt); 2082 mutex_lock(&area->lock); 2083 2084 info[area_idx].start_addr = area->base; 2085 info[area_idx].size = FRAMES2SIZE(area->pages); 2086 info[area_idx].flags = area->flags; 2087 ++area_idx; 2088 2089 mutex_unlock(&area->lock); 2090 } 2091 } 2092 2093 mutex_unlock(&as->lock); 2094 2095 *obuf = info; 2096 *osize = isize; 1920 2097 } 1921 2098 1922 2099 /** Print out information about address space. 1923 2100 * 1924 * @param as Address space. 2101 * @param as Address space. 
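The new as_get_area_info() uses a classic two-pass snapshot under the address-space lock: count the areas first, allocate one buffer of exactly the right size, then walk again and fill it, so there is a single allocation and no resizing mid-walk. A simplified model over a flat array; the kernel walks B+tree leaves instead, and the types below are illustrative stand-ins (error handling elided):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-ins for the kernel's area types. */
    typedef struct {
        uintptr_t start_addr;
        size_t    size;
    } area_info_t;

    typedef struct {
        uintptr_t base;
        size_t    bytes;
    } area_t;

    static void get_area_info(const area_t *areas, size_t n,
        area_info_t **obuf, size_t *osize)
    {
        /* First pass: count entries (a tree walk in the kernel). */
        size_t area_cnt = n;

        size_t isize = area_cnt * sizeof(area_info_t);
        area_info_t *info = malloc(isize);

        /* Second pass: record data. */
        for (size_t i = 0; i < area_cnt; i++) {
            info[i].start_addr = areas[i].base;
            info[i].size = areas[i].bytes;
        }

        *obuf = info;
        *osize = isize;
    }

    int main(void)
    {
        area_t areas[] = { { 0x1000, 0x2000 }, { 0x8000, 0x1000 } };
        area_info_t *info;
        size_t isize;
        get_area_info(areas, 2, &info, &isize);
        printf("%zu bytes describing %zu areas\n",
            isize, isize / sizeof(*info));
        free(info);
        return 0;
    }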
2102 * 1925 2103 */ 1926 2104 void as_print(as_t *as) 1927 2105 { 1928 ipl_t ipl;1929 1930 ipl = interrupts_disable();1931 2106 mutex_lock(&as->lock); 1932 2107 1933 /* print out info about address space areas */2108 /* Print out info about address space areas */ 1934 2109 link_t *cur; 1935 2110 for (cur = as->as_area_btree.leaf_head.next; 1936 2111 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 1937 btree_node_t *node; 1938 1939 node = list_get_instance(cur, btree_node_t, leaf_link); 1940 1941 unsigned int i; 2112 btree_node_t *node 2113 = list_get_instance(cur, btree_node_t, leaf_link); 2114 btree_key_t i; 2115 1942 2116 for (i = 0; i < node->keys; i++) { 1943 2117 as_area_t *area = node->value[i]; 1944 2118 1945 2119 mutex_lock(&area->lock); 1946 printf("as_area: %p, base=%p, pages=%" PRIs 1947 " (%p - %p)\n", area, area->base, area->pages, 1948 area->base, area->base + FRAMES2SIZE(area->pages)); 2120 printf("as_area: %p, base=%p, pages=%zu" 2121 " (%p - %p)\n", area, (void *) area->base, 2122 area->pages, (void *) area->base, 2123 (void *) (area->base + FRAMES2SIZE(area->pages))); 1949 2124 mutex_unlock(&area->lock); 1950 2125 } … … 1952 2127 1953 2128 mutex_unlock(&as->lock); 1954 interrupts_restore(ipl);1955 2129 } 1956 2130