Changeset 46c20c8 in mainline for kernel/generic/src/mm
- Timestamp: 2010-11-26T20:08:10Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 45df59a
- Parents: fb150d78 (diff), ffdd2b9 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel/generic/src/mm
- Files: 9 edited
kernel/generic/src/mm/as.c
Changes in as.c relative to parent fb150d78 (r46c20c8):

- The copyright line is updated from "Copyright (c) 2001-2006 Jakub Jermar" to "Copyright (c) 2010 Jakub Jermar", and #include <arch/types.h> is replaced by #include <typedefs.h>.
- Documentation blocks throughout the file (as_operations, the as_slab cache, asidlock, inactive_as_with_asid_head, and most functions below) are reflowed with a trailing blank comment line.
- The static forward declarations of area_flags_to_page_flags(), find_area_and_lock(), check_area_conflicts() and sh_info_remove_reference() are removed; each function is now defined before its first use.
- as_constructor() becomes NO_TRACE static int as_constructor(void *obj, unsigned int flags) and folds the rc local into its initializer; as_destructor() becomes NO_TRACE static size_t as_destructor(void *obj).
- In as_init(), pinning the kernel address space is now expressed as as_hold(AS_KERNEL) instead of atomic_set(&AS_KERNEL->refcount, 1).
- as_create() takes unsigned int flags and initializes as_t *as directly from slab_alloc(as_slab, 0).
- as_destroy() additionally asserts as != AS on entry, declares its locals at first use (the ipl_t comes from interrupts_read() right before the asidlock retry loop), rewrites the B+tree teardown as a while (cond) loop, and drops a stray interrupts_restore() after the area walk. Two new public reference-counting helpers are introduced alongside it:

    /** Hold a reference to an address space.
     *
     * Holding a reference to an address space prevents destruction
     * of that address space.
     */
    NO_TRACE void as_hold(as_t *as)
    {
        atomic_inc(&as->refcount);
    }

    /** Release a reference to an address space.
     *
     * The last one to release a reference to an address space
     * destroys the address space.
     */
    NO_TRACE void as_release(as_t *as)
    {
        if (atomic_predec(&as->refcount) == 0)
            as_destroy(as);
    }
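The hold/release pair is the classic last-reference-destroys idiom. Below is a minimal user-space sketch of the same pattern using C11 atomics; object_t and its functions are hypothetical stand-ins, not HelenOS API. Note that the kernel's atomic_predec() returns the new value, whereas C11's atomic_fetch_sub() returns the old one, hence the comparison against 1:

    #include <stdatomic.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for as_t; only the reference count matters here. */
    typedef struct {
        atomic_size_t refcount;
    } object_t;

    static void object_destroy(object_t *obj)
    {
        free(obj);
    }

    /* Analogous to as_hold(): a held object cannot be destroyed. */
    static void object_hold(object_t *obj)
    {
        atomic_fetch_add(&obj->refcount, 1);
    }

    /* Analogous to as_release(): whoever drops the last reference destroys.
     * atomic_fetch_sub() returns the previous value, so old == 1 means the
     * count just reached zero. */
    static void object_release(object_t *obj)
    {
        if (atomic_fetch_sub(&obj->refcount, 1) == 1)
            object_destroy(obj);
    }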
- check_area_conflicts() moves up, becomes NO_TRACE static, and replaces its locking comment with ASSERT(mutex_locked(&as->lock)). The algorithm is intact: refuse any interval overlapping the NULL page (now spelled overlaps(va, size, (uintptr_t) NULL, PAGE_SIZE)), find the leaf node in O(log n), test the rightmost record of the left neighbour, the leftmost record of the right neighbour and every record in the leaf itself, and finally, unless KERNEL_ADDRESS_SPACE_SHADOWED, refuse overlap with the kernel address space. The locals are renamed (a becomes area) and declared at first use, with btree_key_t for the leaf index.
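check_area_conflicts() leans entirely on the overlaps() predicate plus the B+tree ordering: only the records adjacent to the candidate interval can collide with it, so a handful of local checks suffice. The predicate itself is outside this diff; a plausible definition for half-open intervals, stated here as an assumption, is:

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Assumed semantics: do [a, a + sa) and [b, b + sb) intersect? */
    static bool overlaps(uintptr_t a, size_t sa, uintptr_t b, size_t sb)
    {
        return (a < b + sb) && (b < a + sa);
    }

    int main(void)
    {
        assert(overlaps(0x1000, 0x2000, 0x2000, 0x1000));   /* proper overlap */
        assert(!overlaps(0x1000, 0x1000, 0x2000, 0x1000));  /* merely adjacent */
        return 0;
    }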
- as_area_create() takes unsigned int flags and attrs, renames its a local to area, and no longer disables interrupts around as->lock. Its validation is unchanged: base must be page-aligned, size non-zero, and writeable executable areas are refused ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE) yields NULL).
- find_area_and_lock() also moves up, becomes NO_TRACE static, asserts mutex_locked(&as->lock), and keeps its three-step lookup: an exact hit on an area base, a scan of the leaf node, then the last record of the left neighbour, which by B+tree ordering must have base < va.
- as_area_resize() takes unsigned int flags and sheds the interrupt-level plumbing; its error paths (ENOENT when no area contains address, ENOTSUP for areas backed by phys_backend or shared through sh_info, EPERM when the resize would leave zero pages) now only unlock the mutexes. The new page count is computed at first use as size_t pages = SIZE2FRAMES((address - area->base) + size).
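SIZE2FRAMES() is defined elsewhere in the kernel; the sketch below assumes the usual round-up semantics (a zero byte count maps to zero frames), which is what makes the EPERM check above catch a resize to nothing:

    #include <assert.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096  /* illustrative value */

    /* Assumed round-up conversion from a byte count to whole frames. */
    #define SIZE2FRAMES(size) \
        (((size) == 0) ? 0 : ((((size) - 1) / PAGE_SIZE) + 1))

    int main(void)
    {
        assert(SIZE2FRAMES(0) == 0);
        assert(SIZE2FRAMES(1) == 1);
        assert(SIZE2FRAMES(PAGE_SIZE) == 1);
        assert(SIZE2FRAMES(PAGE_SIZE + 1) == 2);
        return 0;
    }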
- The shrinking path is reorganized around a single page_table_lock(as, false) for the whole operation (previously each page locked and unlocked the page table individually), and tlb_shootdown_start() now returns an ipl_t that is handed to tlb_shootdown_finalize(ipl) at the end. Within the backward walk of the used_space leaf list, the locals b, c and j are renamed ptr, size and i, the combined assertion is split into ASSERT(pte), ASSERT(PTE_VALID(pte)) and ASSERT(PTE_PRESENT(pte)), and the panic strings are joined into single literals ("Cannot remove used space."). Intervals overlapping the removed range are still trimmed with used_space_remove() before each frame is released through the backend's frame_free() and page_mapping_remove(). The sequence closes with tlb_invalidate_pages(), as_invalidate_translation_cache() and page_table_unlock(as, false).
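The shrink path shows the bracketing this changeset makes consistent across the file: the page table lock is taken once around the whole operation, and the interrupt level returned by tlb_shootdown_start() travels to tlb_shootdown_finalize(). A skeleton condensed from the functions in this diff (the elided middle is whatever mapping surgery the caller needs):

    page_table_lock(as, false);

    /* Begin the shootdown; the interrupt level is captured for finalize. */
    ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, base, cnt);

    /* ... remove or insert mappings under the page table lock ... */

    /* Flush the local TLB and any software caches, then release the CPUs. */
    tlb_invalidate_pages(as->asid, base, cnt);
    as_invalidate_translation_cache(as, base, cnt);
    tlb_shootdown_finalize(ipl);

    page_table_unlock(as, false);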
- The growing path is unchanged in substance: check_area_conflicts(as, address, pages * PAGE_SIZE, area) gates the growth and EADDRNOTAVAIL is returned on conflict; the function then commits area->pages = pages and unlocks.
- sh_info_remove_reference() moves up and becomes NO_TRACE static. Under sh_info->lock it decrements the reference count and, on reaching zero, walks the pagemap leaf list calling frame_free() on every recorded frame; the pagemap B+tree is destroyed and the structure freed only after the lock is dropped.
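sh_info_remove_reference() follows a small but important pattern: the decision to deallocate is made under the lock, while the deallocation itself happens after the lock is dropped, so free() never runs while the structure's own mutex is held. Stripped to its skeleton:

    bool dealloc = false;

    mutex_lock(&sh_info->lock);
    if (--sh_info->refcount == 0) {
        dealloc = true;
        /* ... free resources reachable only through sh_info ... */
    }
    mutex_unlock(&sh_info->lock);

    if (dealloc) {
        btree_destroy(&sh_info->pagemap);
        free(sh_info);
    }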
- as_area_destroy() mirrors the new shrink-path structure: page_table_lock(as, false) around the teardown, ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages), one walk over the used_space B+tree (locals renamed to ptr and size, the PTE assertion split in three) removing each mapping and freeing backend frames, then tlb_invalidate_pages(), as_invalidate_translation_cache(), tlb_shootdown_finalize(ipl) and page_table_unlock(as, false). The rest is as before: destroy used_space, mark the area AS_AREA_ATTR_PARTIAL, drop the sh_info reference if any, remove the area from as_area_btree and return 0, with no interrupt-level handling left.
- as_area_share()'s documentation now lists each outcome on its own @return line: zero on success, ENOENT, EPERM,
ENOMEM and ENOTSUP.
- as_area_share() takes unsigned int dst_flags_mask and declares src_area, src_size, src_flags, src_backend, src_backend_data and sh_info at first use instead of up front, with the ipl_t plumbing removed. Behaviour is untouched: locate and lock the source area (ENOENT if absent), require a backend with a share() method (ENOTSUP), propagate AS_AREA_CACHEABLE from the source into the destination mask, verify acc_size and the flags mask (EPERM), then either create the share_info_t on first sharing (refcount 2, fresh pagemap B+tree, backend share() callback) or bump its reference count, and unlock the source.
- The destination is still created as as_area_create(dst_as, dst_flags_mask, src_size, dst_base, AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data); on failure the sh_info reference is dropped and ENOMEM returned, on success the AS_AREA_ATTR_PARTIAL attribute is cleared and sh_info attached under the destination locks before returning 0.
- as_area_check_access() becomes NO_TRACE and replaces its "must be locked prior to this call" comment with ASSERT(mutex_locked(&area->lock)); the flagmap-based test is unchanged.
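The flagmap array itself is unchanged context and therefore elided by the diff; a plausible shape, written here purely as an assumption, is a designated-initializer table from fault type to the area flag that must be present:

    /* Hypothetical reconstruction of the elided table, not part of this diff. */
    int flagmap[] = {
        [PF_ACCESS_READ] = AS_AREA_READ,
        [PF_ACCESS_WRITE] = AS_AREA_WRITE,
        [PF_ACCESS_EXEC] = AS_AREA_EXEC
    };

    /* The check then reduces to one line, as in the function above: */
    if (!(area->flags & flagmap[access]))
        return false;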
- area_flags_to_page_flags() moves up from its old spot after as_switch() and becomes NO_TRACE static unsigned int area_flags_to_page_flags(unsigned int aflags). The mapping is identical: start from PAGE_USER | PAGE_PRESENT and add PAGE_READ, PAGE_WRITE, PAGE_EXEC and PAGE_CACHEABLE for the corresponding AS_AREA_* bits.
- as_area_change_flags() takes unsigned int flags, computes page_flags up front, and keeps its guards: ENOENT when the area is missing, ENOTSUP for shared areas or any backend other than anon_backend ("Copying shared areas not supported yet"). It then counts the used pages over the used_space leaf list (btree_key_t indices), allocates the old_frame array, and opens the remap with page_table_lock(as, false) and ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages).
- The first pass walks used_space, asserts every PTE, saves PTE_GET_FRAME(pte) into old_frame[frame_idx++] and removes the old mapping; the shootdown then completes (tlb_invalidate_pages(), as_invalidate_translation_cache(), tlb_shootdown_finalize(ipl)), the page table is unlocked and area->flags = flags is set. The second pass walks used_space again and reinstates every page with page_mapping_insert(as, ptr + size * PAGE_SIZE, old_frame[frame_idx++], page_flags), taking the page table lock around each insertion. Keeping the passes separate guarantees the area is never reachable with the old and the new flags at once.
- as_area_change_flags() finishes by freeing old_frame, unlocking both mutexes and returning 0.
- as_page_fault()'s documentation gains one @return line per outcome (AS_PF_FAULT, AS_PF_OK, AS_PF_DEFER), and the former ASSERT(AS) is relaxed into an explicit if (!AS) return AS_PF_FAULT next to the existing !THREAD check. The bailouts to the page_fault label, for a missing area, an area still marked AS_AREA_ATTR_PARTIAL, or a backend without a page_fault() handler, are unchanged.
- Under page_table_lock(AS, false), the guard against two concurrent faults on the same address (the mapping may already have been inserted; if PTE_PRESENT, return AS_PF_OK) and the delegation to area->backend->page_fault(area, page, access) are unchanged; pte_t *pte is now declared at the check itself. The page_fault label still converts faults taken inside copy_from_uspace()/copy_to_uspace() into AS_PF_DEFER and everything else into AS_PF_FAULT.
- as_switch() is only reflowed: it still takes asidlock via a try-lock retry loop to avoid deadlock with TLB shootdown, appends a deactivated address space holding a valid ASID to inactive_as_with_asid_head, runs as_deinstall_arch() on the old space, allocates an ASID for the new space when needed, sets PTL0 and calls as_install_arch().
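Both as_destroy() and as_switch() take asidlock through the same careful sequence, which is worth spelling out: interrupts are re-enabled between failed attempts so that a TLB shootdown IPI from the current lock holder can still be serviced, which is exactly the deadlock the comments warn about. As it appears in the code (deadlock-probe bookkeeping elided):

    preemption_disable();
    retry:
        interrupts_disable();
        if (!spinlock_trylock(&asidlock)) {
            /*
             * Re-enable interrupts so that a pending TLB shootdown IPI
             * can be serviced; preemption stays disabled, so the thread
             * cannot migrate to another CPU in the meantime.
             */
            interrupts_enable();
            goto retry;
        }
        preemption_enable();
        /* ... work under asidlock ... */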
- The old definition of area_flags_to_page_flags() is deleted from this position (it moved before as_area_change_flags(), see above). as_area_get_flags() becomes NO_TRACE unsigned int as_area_get_flags(as_area_t *area), replacing its locking comment with ASSERT(mutex_locked(&area->lock)).
- The page table wrappers gain NO_TRACE and, where applicable, unsigned int parameters: page_table_create(unsigned int flags), page_table_destroy(), page_table_lock() and page_table_unlock(); each still asserts as_operations and the corresponding operation before delegating.
- The old bodies of find_area_and_lock() and check_area_conflicts() are deleted here; both were relocated towards the top of the file (see above). In their place a new wrapper is added:

    /** Test whether page tables are locked.
     *
     * @param as Address space where the page tables belong.
     *
     * @return True if the page tables belonging to the address space
     *         are locked, otherwise false.
     */
    NO_TRACE bool page_table_locked(as_t *as)
    {
        ASSERT(as_operations);
        ASSERT(as_operations->page_table_locked);

        return as_operations->page_table_locked(as);
    }
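page_table_locked() gives lower layers a way to turn the locking protocol into checkable assertions. A hypothetical use inside a mapping operation (page_mapping_find() is real HelenOS API, but this assertion is an illustration, not code from this changeset):

    /* Assumed precondition check before touching the page tables. */
    ASSERT(page_table_locked(as));
    pte_t *pte = page_mapping_find(as, page);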
- as_area_get_size() replaces interrupt disabling with page_table_lock(AS, true)/page_table_unlock(AS, true) around find_area_and_lock(); its doc comment also fixes the "insede" typo to "inside".
- used_space_insert() renames its parameter a to area and swaps the "must be already locked" comment for ASSERT(mutex_locked(&area->lock)). The algorithm is intact: a hit on the beginning of an existing interval fails with 0; an empty leaf takes the interval directly via btree_insert(); otherwise the left neighbour's rightmost record is examined first.
- The left-neighbour analysis distinguishes: intersection with either bordering record (fail), exact fit between both records (merge by growing the left record by count + right_cnt and btree_remove()-ing the right one), adjacency to the left record alone (grow it), adjacency to the right record alone (pull its base down and grow it), or no contact (insert a fresh record). The border case without a left neighbour and the mirrored right-neighbour analysis apply the same outcomes; only comment spacing and line breaks change.
1720 * 1586 1721 */ 1587 1722 leaf->value[leaf->keys - 1] += count; … … 1591 1726 * The interval doesn't adjoin with the left interval. 1592 1727 * It must be added individually. 1728 * 1593 1729 */ 1594 btree_insert(&a ->used_space, page, (void *) count,1730 btree_insert(&area->used_space, page, (void *) count, 1595 1731 leaf); 1596 1732 return 1; … … 1602 1738 * only between two other intervals of the leaf. The two border cases 1603 1739 * were already resolved. 1604 */ 1740 * 1741 */ 1742 btree_key_t i; 1605 1743 for (i = 1; i < leaf->keys; i++) { 1606 1744 if (page < leaf->key[i]) { … … 1609 1747 size_t left_cnt = (size_t) leaf->value[i - 1]; 1610 1748 size_t right_cnt = (size_t) leaf->value[i]; 1611 1749 1612 1750 /* 1613 1751 * The interval fits between left_pg and right_pg. 1752 * 1614 1753 */ 1615 1754 1616 1755 if (overlaps(page, count * PAGE_SIZE, left_pg, 1617 1756 left_cnt * PAGE_SIZE)) { … … 1619 1758 * The interval intersects with the left 1620 1759 * interval. 1760 * 1621 1761 */ 1622 1762 return 0; … … 1626 1766 * The interval intersects with the right 1627 1767 * interval. 1768 * 1628 1769 */ 1629 return 0; 1770 return 0; 1630 1771 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1631 1772 (page + count * PAGE_SIZE == right_pg)) { … … 1633 1774 * The interval can be added by merging the two 1634 1775 * already present intervals. 1776 * 1635 1777 */ 1636 1778 leaf->value[i - 1] += count + right_cnt; 1637 btree_remove(&a ->used_space, right_pg, leaf);1638 return 1; 1779 btree_remove(&area->used_space, right_pg, leaf); 1780 return 1; 1639 1781 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1640 1782 /* 1641 1783 * The interval can be added by simply growing 1642 1784 * the left interval. 1785 * 1643 1786 */ 1644 1787 leaf->value[i - 1] += count; … … 1646 1789 } else if (page + count * PAGE_SIZE == right_pg) { 1647 1790 /* 1648 * The interval can be addded by simply moving1791 * The interval can be addded by simply moving 1649 1792 * base of the right interval down and 1650 1793 * increasing its size accordingly. 1651 */ 1794 * 1795 */ 1652 1796 leaf->value[i] += count; 1653 1797 leaf->key[i] = page; … … 1658 1802 * intervals, but cannot be merged with any of 1659 1803 * them. 1804 * 1660 1805 */ 1661 btree_insert(&a ->used_space, page,1806 btree_insert(&area->used_space, page, 1662 1807 (void *) count, leaf); 1663 1808 return 1; … … 1665 1810 } 1666 1811 } 1667 1668 panic("Inconsistency detected while adding % " PRIs "pages of used "1669 "space at %p.", count, page);1812 1813 panic("Inconsistency detected while adding %zu pages of used " 1814 "space at %p.", count, (void *) page); 1670 1815 } 1671 1816 … … 1674 1819 * The address space area must be already locked. 1675 1820 * 1676 * @param a Address space area. 1677 * @param page First page to be marked. 1678 * @param count Number of page to be marked. 1679 * 1680 * @return Zero on failure and non-zero on success. 1681 */ 1682 int used_space_remove(as_area_t *a, uintptr_t page, size_t count) 1683 { 1684 btree_node_t *leaf, *node; 1685 size_t pages; 1686 unsigned int i; 1687 1821 * @param area Address space area. 1822 * @param page First page to be marked. 1823 * @param count Number of page to be marked. 1824 * 1825 * @return Zero on failure and non-zero on success. 
1826 * 1827 */ 1828 int used_space_remove(as_area_t *area, uintptr_t page, size_t count) 1829 { 1830 ASSERT(mutex_locked(&area->lock)); 1688 1831 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); 1689 1832 ASSERT(count); 1690 1691 pages = (size_t) btree_search(&a->used_space, page, &leaf); 1833 1834 btree_node_t *leaf; 1835 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf); 1692 1836 if (pages) { 1693 1837 /* 1694 1838 * We are lucky, page is the beginning of some interval. 1839 * 1695 1840 */ 1696 1841 if (count > pages) { 1697 1842 return 0; 1698 1843 } else if (count == pages) { 1699 btree_remove(&a ->used_space, page, leaf);1844 btree_remove(&area->used_space, page, leaf); 1700 1845 return 1; 1701 1846 } else { … … 1703 1848 * Find the respective interval. 1704 1849 * Decrease its size and relocate its start address. 1850 * 1705 1851 */ 1852 btree_key_t i; 1706 1853 for (i = 0; i < leaf->keys; i++) { 1707 1854 if (leaf->key[i] == page) { … … 1714 1861 } 1715 1862 } 1716 1717 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);1718 if ( node && page < leaf->key[0]) {1863 1864 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf); 1865 if ((node) && (page < leaf->key[0])) { 1719 1866 uintptr_t left_pg = node->key[node->keys - 1]; 1720 1867 size_t left_cnt = (size_t) node->value[node->keys - 1]; 1721 1868 1722 1869 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page, 1723 1870 count * PAGE_SIZE)) { … … 1729 1876 * removed by updating the size of the bigger 1730 1877 * interval. 1878 * 1731 1879 */ 1732 1880 node->value[node->keys - 1] -= count; … … 1734 1882 } else if (page + count * PAGE_SIZE < 1735 1883 left_pg + left_cnt*PAGE_SIZE) { 1736 size_t new_cnt;1737 1738 1884 /* 1739 1885 * The interval is contained in the rightmost … … 1742 1888 * the original interval and also inserting a 1743 1889 * new interval. 1890 * 1744 1891 */ 1745 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -1892 size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) - 1746 1893 (page + count*PAGE_SIZE)) >> PAGE_WIDTH; 1747 1894 node->value[node->keys - 1] -= count + new_cnt; 1748 btree_insert(&a ->used_space, page +1895 btree_insert(&area->used_space, page + 1749 1896 count * PAGE_SIZE, (void *) new_cnt, leaf); 1750 1897 return 1; … … 1752 1899 } 1753 1900 return 0; 1754 } else if (page < leaf->key[0]) {1901 } else if (page < leaf->key[0]) 1755 1902 return 0; 1756 }1757 1903 1758 1904 if (page > leaf->key[leaf->keys - 1]) { 1759 1905 uintptr_t left_pg = leaf->key[leaf->keys - 1]; 1760 1906 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 1761 1907 1762 1908 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page, 1763 1909 count * PAGE_SIZE)) { 1764 if (page + count * PAGE_SIZE == 1910 if (page + count * PAGE_SIZE == 1765 1911 left_pg + left_cnt * PAGE_SIZE) { 1766 1912 /* … … 1768 1914 * interval of the leaf and can be removed by 1769 1915 * updating the size of the bigger interval. 1916 * 1770 1917 */ 1771 1918 leaf->value[leaf->keys - 1] -= count; … … 1773 1920 } else if (page + count * PAGE_SIZE < left_pg + 1774 1921 left_cnt * PAGE_SIZE) { 1775 size_t new_cnt;1776 1777 1922 /* 1778 1923 * The interval is contained in the rightmost … … 1781 1926 * original interval and also inserting a new 1782 1927 * interval. 
1928 * 1783 1929 */ 1784 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -1930 size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) - 1785 1931 (page + count * PAGE_SIZE)) >> PAGE_WIDTH; 1786 1932 leaf->value[leaf->keys - 1] -= count + new_cnt; 1787 btree_insert(&a ->used_space, page +1933 btree_insert(&area->used_space, page + 1788 1934 count * PAGE_SIZE, (void *) new_cnt, leaf); 1789 1935 return 1; … … 1791 1937 } 1792 1938 return 0; 1793 } 1939 } 1794 1940 1795 1941 /* … … 1797 1943 * Now the interval can be only between intervals of the leaf. 1798 1944 */ 1945 btree_key_t i; 1799 1946 for (i = 1; i < leaf->keys - 1; i++) { 1800 1947 if (page < leaf->key[i]) { 1801 1948 uintptr_t left_pg = leaf->key[i - 1]; 1802 1949 size_t left_cnt = (size_t) leaf->value[i - 1]; 1803 1950 1804 1951 /* 1805 1952 * Now the interval is between intervals corresponding … … 1815 1962 * be removed by updating the size of 1816 1963 * the bigger interval. 1964 * 1817 1965 */ 1818 1966 leaf->value[i - 1] -= count; … … 1820 1968 } else if (page + count * PAGE_SIZE < 1821 1969 left_pg + left_cnt * PAGE_SIZE) { 1822 size_t new_cnt;1823 1824 1970 /* 1825 1971 * The interval is contained in the … … 1829 1975 * also inserting a new interval. 1830 1976 */ 1831 new_cnt = ((left_pg +1977 size_t new_cnt = ((left_pg + 1832 1978 left_cnt * PAGE_SIZE) - 1833 1979 (page + count * PAGE_SIZE)) >> 1834 1980 PAGE_WIDTH; 1835 1981 leaf->value[i - 1] -= count + new_cnt; 1836 btree_insert(&a ->used_space, page +1982 btree_insert(&area->used_space, page + 1837 1983 count * PAGE_SIZE, (void *) new_cnt, 1838 1984 leaf); … … 1843 1989 } 1844 1990 } 1845 1991 1846 1992 error: 1847 panic("Inconsistency detected while removing %" PRIs " pages of used " 1848 "space from %p.", count, page); 1849 } 1850 1851 /** Remove reference to address space area share info. 1852 * 1853 * If the reference count drops to 0, the sh_info is deallocated. 1854 * 1855 * @param sh_info Pointer to address space area share info. 1856 */ 1857 void sh_info_remove_reference(share_info_t *sh_info) 1858 { 1859 bool dealloc = false; 1860 1861 mutex_lock(&sh_info->lock); 1862 ASSERT(sh_info->refcount); 1863 if (--sh_info->refcount == 0) { 1864 dealloc = true; 1865 link_t *cur; 1866 1867 /* 1868 * Now walk carefully the pagemap B+tree and free/remove 1869 * reference from all frames found there. 1870 */ 1871 for (cur = sh_info->pagemap.leaf_head.next; 1872 cur != &sh_info->pagemap.leaf_head; cur = cur->next) { 1873 btree_node_t *node; 1874 unsigned int i; 1875 1876 node = list_get_instance(cur, btree_node_t, leaf_link); 1877 for (i = 0; i < node->keys; i++) 1878 frame_free((uintptr_t) node->value[i]); 1879 } 1880 1881 } 1882 mutex_unlock(&sh_info->lock); 1883 1884 if (dealloc) { 1885 btree_destroy(&sh_info->pagemap); 1886 free(sh_info); 1887 } 1993 panic("Inconsistency detected while removing %zu pages of used " 1994 "space from %p.", count, (void *) page); 1888 1995 } 1889 1996 … … 1893 2000 1894 2001 /** Wrapper for as_area_create(). */ 1895 unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)2002 unative_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags) 1896 2003 { 1897 2004 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, … … 1903 2010 1904 2011 /** Wrapper for as_area_resize(). 
*/ 1905 unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)2012 unative_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags) 1906 2013 { 1907 2014 return (unative_t) as_area_resize(AS, address, size, 0); … … 1909 2016 1910 2017 /** Wrapper for as_area_change_flags(). */ 1911 unative_t sys_as_area_change_flags(uintptr_t address, int flags)2018 unative_t sys_as_area_change_flags(uintptr_t address, unsigned int flags) 1912 2019 { 1913 2020 return (unative_t) as_area_change_flags(AS, flags, address); … … 1922 2029 /** Get list of adress space areas. 1923 2030 * 1924 * @param as Address space. 1925 * @param obuf Place to save pointer to returned buffer. 1926 * @param osize Place to save size of returned buffer. 2031 * @param as Address space. 2032 * @param obuf Place to save pointer to returned buffer. 2033 * @param osize Place to save size of returned buffer. 2034 * 1927 2035 */ 1928 2036 void as_get_area_info(as_t *as, as_area_info_t **obuf, size_t *osize) 1929 2037 { 1930 ipl_t ipl; 1931 size_t area_cnt, area_idx, i; 2038 mutex_lock(&as->lock); 2039 2040 /* First pass, count number of areas. */ 2041 2042 size_t area_cnt = 0; 1932 2043 link_t *cur; 1933 1934 as_area_info_t *info; 1935 size_t isize; 1936 1937 ipl = interrupts_disable(); 1938 mutex_lock(&as->lock); 1939 1940 /* First pass, count number of areas. */ 1941 1942 area_cnt = 0; 1943 2044 1944 2045 for (cur = as->as_area_btree.leaf_head.next; 1945 2046 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 1946 btree_node_t *node; 1947 1948 node = list_get_instance(cur, btree_node_t, leaf_link); 2047 btree_node_t *node = 2048 list_get_instance(cur, btree_node_t, leaf_link); 1949 2049 area_cnt += node->keys; 1950 2050 } 1951 1952 isize = area_cnt * sizeof(as_area_info_t);1953 info = malloc(isize, 0);1954 2051 2052 size_t isize = area_cnt * sizeof(as_area_info_t); 2053 as_area_info_t *info = malloc(isize, 0); 2054 1955 2055 /* Second pass, record data. */ 1956 1957 area_idx = 0;1958 2056 2057 size_t area_idx = 0; 2058 1959 2059 for (cur = as->as_area_btree.leaf_head.next; 1960 2060 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 1961 btree_node_t *node ;1962 1963 node = list_get_instance(cur, btree_node_t, leaf_link);1964 2061 btree_node_t *node = 2062 list_get_instance(cur, btree_node_t, leaf_link); 2063 btree_key_t i; 2064 1965 2065 for (i = 0; i < node->keys; i++) { 1966 2066 as_area_t *area = node->value[i]; 1967 2067 1968 2068 ASSERT(area_idx < area_cnt); 1969 2069 mutex_lock(&area->lock); 1970 2070 1971 2071 info[area_idx].start_addr = area->base; 1972 2072 info[area_idx].size = FRAMES2SIZE(area->pages); 1973 2073 info[area_idx].flags = area->flags; 1974 2074 ++area_idx; 1975 2075 1976 2076 mutex_unlock(&area->lock); 1977 2077 } 1978 2078 } 1979 2079 1980 2080 mutex_unlock(&as->lock); 1981 interrupts_restore(ipl); 1982 2081 1983 2082 *obuf = info; 1984 2083 *osize = isize; 1985 2084 } 1986 2085 1987 1988 2086 /** Print out information about address space. 1989 2087 * 1990 * @param as Address space. 2088 * @param as Address space. 
2089 * 1991 2090 */ 1992 2091 void as_print(as_t *as) 1993 2092 { 1994 ipl_t ipl;1995 1996 ipl = interrupts_disable();1997 2093 mutex_lock(&as->lock); 1998 2094 … … 2001 2097 for (cur = as->as_area_btree.leaf_head.next; 2002 2098 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 2003 btree_node_t *node; 2004 2005 node = list_get_instance(cur, btree_node_t, leaf_link); 2006 2007 unsigned int i; 2099 btree_node_t *node 2100 = list_get_instance(cur, btree_node_t, leaf_link); 2101 btree_key_t i; 2102 2008 2103 for (i = 0; i < node->keys; i++) { 2009 2104 as_area_t *area = node->value[i]; 2010 2105 2011 2106 mutex_lock(&area->lock); 2012 printf("as_area: %p, base=%p, pages=%" PRIs 2013 " (%p - %p)\n", area, area->base, area->pages, 2014 area->base, area->base + FRAMES2SIZE(area->pages)); 2107 printf("as_area: %p, base=%p, pages=%zu" 2108 " (%p - %p)\n", area, (void *) area->base, 2109 area->pages, (void *) area->base, 2110 (void *) (area->base + FRAMES2SIZE(area->pages))); 2015 2111 mutex_unlock(&area->lock); 2016 2112 } … … 2018 2114 2019 2115 mutex_unlock(&as->lock); 2020 interrupts_restore(ipl);2021 2116 } 2022 2117 -
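The used_space_insert()/used_space_remove() logic above keeps the area's used-space B+tree maximal: each branch first tests whether the incoming run of pages intersects an existing interval and only then decides to merge, grow, or insert. Every one of those guards reduces to a single half-open interval test. A minimal standalone sketch of that test, assuming half-open intervals (the kernel's own overlaps() helper lives in a shared header and may differ in detail):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* [s1, s1 + sz1) and [s2, s2 + sz2) intersect unless one of them
     * ends at or before the point where the other begins. */
    static bool overlaps_sketch(uintptr_t s1, size_t sz1,
        uintptr_t s2, size_t sz2)
    {
            return ((s1 < s2 + sz2) && (s2 < s1 + sz1));
    }

Under this test, the "adjoins" cases in used_space_insert() are precisely those where the test fails but page == left_pg + left_cnt * PAGE_SIZE or page + count * PAGE_SIZE == right_pg holds, which is what allows two intervals to be merged without overlap.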
kernel/generic/src/mm/backend_anon.c
rfb150d78 r46c20c8 47 47 #include <adt/btree.h> 48 48 #include <errno.h> 49 #include < arch/types.h>49 #include <typedefs.h> 50 50 #include <align.h> 51 51 #include <arch.h> … … 79 79 { 80 80 uintptr_t frame; 81 82 ASSERT(page_table_locked(AS)); 83 ASSERT(mutex_locked(&area->lock)); 81 84 82 85 if (!as_area_check_access(area, access)) … … 168 171 void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame) 169 172 { 173 ASSERT(page_table_locked(area->as)); 174 ASSERT(mutex_locked(&area->lock)); 175 170 176 frame_free(frame); 171 177 } … … 183 189 { 184 190 link_t *cur; 191 192 ASSERT(mutex_locked(&area->as->lock)); 193 ASSERT(mutex_locked(&area->lock)); 185 194 186 195 /* -
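The ASSERT(page_table_locked(AS)) and ASSERT(mutex_locked(&area->lock)) lines added throughout backend_anon.c turn the fault path's implicit locking protocol into checked preconditions. A hedged sketch of a conforming caller (hypothetical code for illustration; the real dispatch is done by the address space fault handler):

    /* Lock order: page tables first, then the area found under them,
     * mirroring as_area_get_size() in the as.c hunk above. */
    page_table_lock(AS, true);
    as_area_t *area = find_area_and_lock(AS, addr);
    if (area) {
            /* Both locks are held here, satisfying the backend ASSERTs. */
            int rc = area->backend->page_fault(area, addr, PF_ACCESS_READ);
            mutex_unlock(&area->lock);
            (void) rc;
    }
    page_table_unlock(AS, true);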
kernel/generic/src/mm/backend_elf.c
rfb150d78 r46c20c8 38 38 #include <lib/elf.h> 39 39 #include <debug.h> 40 #include < arch/types.h>40 #include <typedefs.h> 41 41 #include <mm/as.h> 42 42 #include <mm/frame.h> … … 85 85 size_t i; 86 86 bool dirty = false; 87 88 ASSERT(page_table_locked(AS)); 89 ASSERT(mutex_locked(&area->lock)); 87 90 88 91 if (!as_area_check_access(area, access)) … … 232 235 void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame) 233 236 { 234 elf_header_t *elf = area->backend_data.elf;235 237 elf_segment_header_t *entry = area->backend_data.segment; 236 uintptr_t base,start_anon;237 size_t i; 238 239 ASSERT( (page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) &&240 (page < entry->p_vaddr + entry->p_memsz)); 241 i = (page - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;242 base = (uintptr_t) (((void *) elf) +243 ALIGN_DOWN(entry->p_offset, FRAME_SIZE)); 238 uintptr_t start_anon; 239 240 ASSERT(page_table_locked(area->as)); 241 ASSERT(mutex_locked(&area->lock)); 242 243 ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)); 244 ASSERT(page < entry->p_vaddr + entry->p_memsz); 245 244 246 start_anon = entry->p_vaddr + entry->p_filesz; 245 247 … … 257 259 * lower part is backed by the ELF image and the upper is 258 260 * anonymous). In any case, a frame needs to be freed. 259 */ 261 */ 260 262 frame_free(frame); 261 263 } … … 267 269 * Otherwise only portions of the area that are not backed by the ELF image 268 270 * are put into the pagemap. 269 *270 * The address space and address space area must be locked prior to the call.271 271 * 272 272 * @param area Address space area. … … 278 278 btree_node_t *leaf, *node; 279 279 uintptr_t start_anon = entry->p_vaddr + entry->p_filesz; 280 281 ASSERT(mutex_locked(&area->as->lock)); 282 ASSERT(mutex_locked(&area->lock)); 280 283 281 284 /* -
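Both elf_frame_free() and elf_share() pivot on start_anon = entry->p_vaddr + entry->p_filesz: pages below that address are backed by data in the ELF image, while pages from start_anon up to p_vaddr + p_memsz (typically .bss) are anonymous. An illustrative helper capturing that split (hypothetical, not part of the changeset):

    #include <stdbool.h>

    static bool elf_page_is_anonymous(const elf_segment_header_t *entry,
        uintptr_t page)
    {
            /* Image bytes cover [p_vaddr, p_vaddr + p_filesz); the rest
             * of the segment, up to p_memsz, has no file data behind it. */
            return (page >= entry->p_vaddr + entry->p_filesz);
    }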
kernel/generic/src/mm/backend_phys.c
rfb150d78 r46c20c8 38 38 39 39 #include <debug.h> 40 #include < arch/types.h>40 #include <typedefs.h> 41 41 #include <mm/as.h> 42 42 #include <mm/page.h> … … 72 72 uintptr_t base = area->backend_data.base; 73 73 74 ASSERT(page_table_locked(AS)); 75 ASSERT(mutex_locked(&area->lock)); 76 74 77 if (!as_area_check_access(area, access)) 75 78 return AS_PF_FAULT; … … 93 96 void phys_share(as_area_t *area) 94 97 { 98 ASSERT(mutex_locked(&area->as->lock)); 99 ASSERT(mutex_locked(&area->lock)); 95 100 } 96 101 -
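The phys backend maps a physically contiguous region, so phys_page_fault() never allocates a frame: the frame backing a faulting page sits at a fixed offset from backend_data.base. A sketch of that arithmetic, assuming the helpers used elsewhere in this changeset (a simplified rendering, not the verbatim fault handler):

    uintptr_t base = area->backend_data.base;

    /* The offset of the page within the area equals its offset
     * within the physical region the area was created over. */
    page_mapping_insert(AS, addr, base + (addr - area->base),
        as_area_get_flags(area));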
kernel/generic/src/mm/buddy.c
rfb150d78 r46c20c8 41 41 #include <mm/buddy.h> 42 42 #include <mm/frame.h> 43 #include < arch/types.h>43 #include <typedefs.h> 44 44 #include <debug.h> 45 45 #include <print.h> -
kernel/generic/src/mm/frame.c
rfb150d78 r46c20c8 43 43 */ 44 44 45 #include < arch/types.h>45 #include <typedefs.h> 46 46 #include <mm/frame.h> 47 47 #include <mm/as.h> … … 66 66 * available. 67 67 */ 68 mutex_t mem_avail_mtx;69 condvar_t mem_avail_cv;70 s ize_t mem_avail_req = 0; /**< Number of frames requested. */71 s ize_t mem_avail_gen = 0; /**< Generation counter. */68 static mutex_t mem_avail_mtx; 69 static condvar_t mem_avail_cv; 70 static size_t mem_avail_req = 0; /**< Number of frames requested. */ 71 static size_t mem_avail_gen = 0; /**< Generation counter. */ 72 72 73 73 /********************/ … … 75 75 /********************/ 76 76 77 static inline size_t frame_index(zone_t *zone, frame_t *frame)77 NO_TRACE static inline size_t frame_index(zone_t *zone, frame_t *frame) 78 78 { 79 79 return (size_t) (frame - zone->frames); 80 80 } 81 81 82 static inline size_t frame_index_abs(zone_t *zone, frame_t *frame)82 NO_TRACE static inline size_t frame_index_abs(zone_t *zone, frame_t *frame) 83 83 { 84 84 return (size_t) (frame - zone->frames) + zone->base; 85 85 } 86 86 87 static inline bool frame_index_valid(zone_t *zone, size_t index)87 NO_TRACE static inline bool frame_index_valid(zone_t *zone, size_t index) 88 88 { 89 89 return (index < zone->count); 90 90 } 91 91 92 static inline size_t make_frame_index(zone_t *zone, frame_t *frame)92 NO_TRACE static inline size_t make_frame_index(zone_t *zone, frame_t *frame) 93 93 { 94 94 return (frame - zone->frames); … … 100 100 * 101 101 */ 102 static void frame_initialize(frame_t *frame)102 NO_TRACE static void frame_initialize(frame_t *frame) 103 103 { 104 104 frame->refcount = 1; … … 121 121 * 122 122 */ 123 static size_t zones_insert_zone(pfn_t base, size_t count) 123 NO_TRACE static size_t zones_insert_zone(pfn_t base, size_t count, 124 zone_flags_t flags) 124 125 { 125 126 if (zones.count + 1 == ZONES_MAX) { … … 131 132 for (i = 0; i < zones.count; i++) { 132 133 /* Check for overlap */ 133 if (overlaps(base, count, 134 zones.info[i].base, zones.info[i].count)) { 135 printf("Zones overlap!\n"); 134 if (overlaps(zones.info[i].base, zones.info[i].count, 135 base, count)) { 136 137 /* 138 * If the overlaping zones are of the same type 139 * and the new zone is completely within the previous 140 * one, then quietly ignore the new zone. 141 * 142 */ 143 144 if ((zones.info[i].flags != flags) || 145 (!iswithin(zones.info[i].base, zones.info[i].count, 146 base, count))) { 147 printf("Zone (%p, %p) overlaps " 148 "with previous zone (%p %p)!\n", 149 (void *) PFN2ADDR(base), (void *) PFN2ADDR(count), 150 (void *) PFN2ADDR(zones.info[i].base), 151 (void *) PFN2ADDR(zones.info[i].count)); 152 } 153 136 154 return (size_t) -1; 137 155 } … … 144 162 for (j = zones.count; j > i; j--) { 145 163 zones.info[j] = zones.info[j - 1]; 146 zones.info[j].buddy_system->data = 147 (void *) &zones.info[j - 1]; 164 if (zones.info[j].buddy_system != NULL) 165 zones.info[j].buddy_system->data = 166 (void *) &zones.info[j]; 148 167 } 149 168 … … 162 181 */ 163 182 #ifdef CONFIG_DEBUG 164 static size_t total_frames_free(void)183 NO_TRACE static size_t total_frames_free(void) 165 184 { 166 185 size_t total = 0; … … 171 190 return total; 172 191 } 173 #endif 192 #endif /* CONFIG_DEBUG */ 174 193 175 194 /** Find a zone with a given frames. 
… … 185 204 * 186 205 */ 187 size_t find_zone(pfn_t frame, size_t count, size_t hint)206 NO_TRACE size_t find_zone(pfn_t frame, size_t count, size_t hint) 188 207 { 189 208 if (hint >= zones.count) … … 199 218 if (i >= zones.count) 200 219 i = 0; 220 201 221 } while (i != hint); 202 222 … … 205 225 206 226 /** @return True if zone can allocate specified order */ 207 static bool zone_can_alloc(zone_t *zone, uint8_t order)227 NO_TRACE static bool zone_can_alloc(zone_t *zone, uint8_t order) 208 228 { 209 229 return (zone_flags_available(zone->flags) … … 221 241 * 222 242 */ 223 static size_t find_free_zone(uint8_t order, zone_flags_t flags, size_t hint) 243 NO_TRACE static size_t find_free_zone(uint8_t order, zone_flags_t flags, 244 size_t hint) 224 245 { 225 246 if (hint >= zones.count) … … 242 263 if (i >= zones.count) 243 264 i = 0; 265 244 266 } while (i != hint); 245 267 … … 260 282 * 261 283 */ 262 static link_t *zone_buddy_find_block(buddy_system_t *buddy, link_t *child,263 uint8_t order)284 NO_TRACE static link_t *zone_buddy_find_block(buddy_system_t *buddy, 285 link_t *child, uint8_t order) 264 286 { 265 287 frame_t *frame = list_get_instance(child, frame_t, buddy_link); … … 283 305 * 284 306 */ 285 static link_t *zone_buddy_find_buddy(buddy_system_t *buddy, link_t *block) 307 NO_TRACE static link_t *zone_buddy_find_buddy(buddy_system_t *buddy, 308 link_t *block) 286 309 { 287 310 frame_t *frame = list_get_instance(block, frame_t, buddy_link); … … 296 319 index = (frame_index(zone, frame)) + 297 320 (1 << frame->buddy_order); 298 } else { /* is_right */321 } else { /* is_right */ 299 322 index = (frame_index(zone, frame)) - 300 323 (1 << frame->buddy_order); … … 319 342 * 320 343 */ 321 static link_t *zone_buddy_bisect(buddy_system_t *buddy, link_t *block)344 NO_TRACE static link_t *zone_buddy_bisect(buddy_system_t *buddy, link_t *block) 322 345 { 323 346 frame_t *frame_l = list_get_instance(block, frame_t, buddy_link); … … 337 360 * 338 361 */ 339 static link_t *zone_buddy_coalesce(buddy_system_t *buddy, link_t *block_1,340 link_t *block_ 2)362 NO_TRACE static link_t *zone_buddy_coalesce(buddy_system_t *buddy, 363 link_t *block_1, link_t *block_2) 341 364 { 342 365 frame_t *frame1 = list_get_instance(block_1, frame_t, buddy_link); … … 353 376 * 354 377 */ 355 static void zone_buddy_set_order(buddy_system_t *buddy, link_t *block,378 NO_TRACE static void zone_buddy_set_order(buddy_system_t *buddy, link_t *block, 356 379 uint8_t order) 357 380 { … … 367 390 * 368 391 */ 369 static uint8_t zone_buddy_get_order(buddy_system_t *buddy, link_t *block) 392 NO_TRACE static uint8_t zone_buddy_get_order(buddy_system_t *buddy, 393 link_t *block) 370 394 { 371 395 return list_get_instance(block, frame_t, buddy_link)->buddy_order; … … 378 402 * 379 403 */ 380 static void zone_buddy_mark_busy(buddy_system_t *buddy, link_t *block)404 NO_TRACE static void zone_buddy_mark_busy(buddy_system_t *buddy, link_t *block) 381 405 { 382 406 list_get_instance(block, frame_t, buddy_link)->refcount = 1; … … 387 411 * @param buddy Buddy system. 388 412 * @param block Buddy system block. 
389 */ 390 static void zone_buddy_mark_available(buddy_system_t *buddy, link_t *block) 413 * 414 */ 415 NO_TRACE static void zone_buddy_mark_available(buddy_system_t *buddy, 416 link_t *block) 391 417 { 392 418 list_get_instance(block, frame_t, buddy_link)->refcount = 0; … … 419 445 * 420 446 */ 421 static pfn_t zone_frame_alloc(zone_t *zone, uint8_t order)447 NO_TRACE static pfn_t zone_frame_alloc(zone_t *zone, uint8_t order) 422 448 { 423 449 ASSERT(zone_flags_available(zone->flags)); … … 447 473 * 448 474 */ 449 static void zone_frame_free(zone_t *zone, size_t frame_idx)475 NO_TRACE static void zone_frame_free(zone_t *zone, size_t frame_idx) 450 476 { 451 477 ASSERT(zone_flags_available(zone->flags)); … … 468 494 469 495 /** Return frame from zone. */ 470 static frame_t *zone_get_frame(zone_t *zone, size_t frame_idx)496 NO_TRACE static frame_t *zone_get_frame(zone_t *zone, size_t frame_idx) 471 497 { 472 498 ASSERT(frame_idx < zone->count); … … 475 501 476 502 /** Mark frame in zone unavailable to allocation. */ 477 static void zone_mark_unavailable(zone_t *zone, size_t frame_idx)503 NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t frame_idx) 478 504 { 479 505 ASSERT(zone_flags_available(zone->flags)); … … 504 530 * 505 531 */ 506 static void zone_merge_internal(size_t z1, size_t z2, zone_t *old_z1, buddy_system_t *buddy) 532 NO_TRACE static void zone_merge_internal(size_t z1, size_t z2, zone_t *old_z1, 533 buddy_system_t *buddy) 507 534 { 508 535 ASSERT(zone_flags_available(zones.info[z1].flags)); … … 600 627 * 601 628 */ 602 static void return_config_frames(size_t znum, pfn_t pfn, size_t count)629 NO_TRACE static void return_config_frames(size_t znum, pfn_t pfn, size_t count) 603 630 { 604 631 ASSERT(zone_flags_available(zones.info[znum].flags)); … … 635 662 * 636 663 */ 637 static void zone_reduce_region(size_t znum, pfn_t frame_idx, size_t count) 664 NO_TRACE static void zone_reduce_region(size_t znum, pfn_t frame_idx, 665 size_t count) 638 666 { 639 667 ASSERT(zone_flags_available(zones.info[znum].flags)); … … 673 701 bool zone_merge(size_t z1, size_t z2) 674 702 { 675 ipl_t ipl = interrupts_disable(); 676 spinlock_lock(&zones.lock); 703 irq_spinlock_lock(&zones.lock, true); 677 704 678 705 bool ret = true; … … 737 764 for (i = z2 + 1; i < zones.count; i++) { 738 765 zones.info[i - 1] = zones.info[i]; 739 zones.info[i - 1].buddy_system->data = 740 (void *) &zones.info[i - 1]; 766 if (zones.info[i - 1].buddy_system != NULL) 767 zones.info[i - 1].buddy_system->data = 768 (void *) &zones.info[i - 1]; 741 769 } 742 770 … … 744 772 745 773 errout: 746 spinlock_unlock(&zones.lock); 747 interrupts_restore(ipl); 774 irq_spinlock_unlock(&zones.lock, true); 748 775 749 776 return ret; … … 777 804 * 778 805 */ 779 static void zone_construct(zone_t *zone, buddy_system_t *buddy, pfn_t start, size_t count, zone_flags_t flags) 806 NO_TRACE static void zone_construct(zone_t *zone, buddy_system_t *buddy, 807 pfn_t start, size_t count, zone_flags_t flags) 780 808 { 781 809 zone->base = start; … … 820 848 * 821 849 */ 822 uintptr_t zone_conf_size(size_t count)850 size_t zone_conf_size(size_t count) 823 851 { 824 852 return (count * sizeof(frame_t) + buddy_conf_size(fnzb(count))); … … 841 869 * 842 870 */ 843 size_t zone_create(pfn_t start, size_t count, pfn_t confframe, zone_flags_t flags)844 { 845 ipl_t ipl = interrupts_disable(); 846 spinlock_lock(&zones.lock);871 size_t zone_create(pfn_t start, size_t count, pfn_t confframe, 872 zone_flags_t flags) 873 { 874 
irq_spinlock_lock(&zones.lock, true); 847 875 848 876 if (zone_flags_available(flags)) { /* Create available zone */ … … 851 879 * the assert 852 880 */ 853 ASSERT(confframe != NULL);881 ASSERT(confframe != ADDR2PFN((uintptr_t ) NULL)); 854 882 855 883 /* If confframe is supposed to be inside our zone, then make sure … … 887 915 } 888 916 889 size_t znum = zones_insert_zone(start, count );917 size_t znum = zones_insert_zone(start, count, flags); 890 918 if (znum == (size_t) -1) { 891 spinlock_unlock(&zones.lock); 892 interrupts_restore(ipl); 919 irq_spinlock_unlock(&zones.lock, true); 893 920 return (size_t) -1; 894 921 } … … 905 932 } 906 933 907 spinlock_unlock(&zones.lock); 908 interrupts_restore(ipl); 934 irq_spinlock_unlock(&zones.lock, true); 909 935 910 936 return znum; … … 912 938 913 939 /* Non-available zone */ 914 size_t znum = zones_insert_zone(start, count );940 size_t znum = zones_insert_zone(start, count, flags); 915 941 if (znum == (size_t) -1) { 916 spinlock_unlock(&zones.lock); 917 interrupts_restore(ipl); 942 irq_spinlock_unlock(&zones.lock, true); 918 943 return (size_t) -1; 919 944 } 920 945 zone_construct(&zones.info[znum], NULL, start, count, flags); 921 946 922 spinlock_unlock(&zones.lock); 923 interrupts_restore(ipl); 947 irq_spinlock_unlock(&zones.lock, true); 924 948 925 949 return znum; … … 933 957 void frame_set_parent(pfn_t pfn, void *data, size_t hint) 934 958 { 935 ipl_t ipl = interrupts_disable(); 936 spinlock_lock(&zones.lock); 959 irq_spinlock_lock(&zones.lock, true); 937 960 938 961 size_t znum = find_zone(pfn, 1, hint); … … 943 966 pfn - zones.info[znum].base)->parent = data; 944 967 945 spinlock_unlock(&zones.lock); 946 interrupts_restore(ipl); 968 irq_spinlock_unlock(&zones.lock, true); 947 969 } 948 970 949 971 void *frame_get_parent(pfn_t pfn, size_t hint) 950 972 { 951 ipl_t ipl = interrupts_disable(); 952 spinlock_lock(&zones.lock); 973 irq_spinlock_lock(&zones.lock, true); 953 974 954 975 size_t znum = find_zone(pfn, 1, hint); … … 959 980 pfn - zones.info[znum].base)->parent; 960 981 961 spinlock_unlock(&zones.lock); 962 interrupts_restore(ipl); 982 irq_spinlock_unlock(&zones.lock, true); 963 983 964 984 return res; … … 977 997 { 978 998 size_t size = ((size_t) 1) << order; 979 ipl_t ipl;980 999 size_t hint = pzone ? 
(*pzone) : 0; 981 1000 982 1001 loop: 983 ipl = interrupts_disable(); 984 spinlock_lock(&zones.lock); 1002 irq_spinlock_lock(&zones.lock, true); 985 1003 986 1004 /* … … 993 1011 if it does not help, reclaim all */ 994 1012 if ((znum == (size_t) -1) && (!(flags & FRAME_NO_RECLAIM))) { 995 spinlock_unlock(&zones.lock); 996 interrupts_restore(ipl); 997 1013 irq_spinlock_unlock(&zones.lock, true); 998 1014 size_t freed = slab_reclaim(0); 999 1000 ipl = interrupts_disable(); 1001 spinlock_lock(&zones.lock); 1015 irq_spinlock_lock(&zones.lock, true); 1002 1016 1003 1017 if (freed > 0) … … 1006 1020 1007 1021 if (znum == (size_t) -1) { 1008 spinlock_unlock(&zones.lock); 1009 interrupts_restore(ipl); 1010 1022 irq_spinlock_unlock(&zones.lock, true); 1011 1023 freed = slab_reclaim(SLAB_RECLAIM_ALL); 1012 1013 ipl = interrupts_disable(); 1014 spinlock_lock(&zones.lock); 1024 irq_spinlock_lock(&zones.lock, true); 1015 1025 1016 1026 if (freed > 0) … … 1022 1032 if (znum == (size_t) -1) { 1023 1033 if (flags & FRAME_ATOMIC) { 1024 spinlock_unlock(&zones.lock); 1025 interrupts_restore(ipl); 1034 irq_spinlock_unlock(&zones.lock, true); 1026 1035 return NULL; 1027 1036 } … … 1031 1040 #endif 1032 1041 1033 spinlock_unlock(&zones.lock); 1034 interrupts_restore(ipl); 1042 irq_spinlock_unlock(&zones.lock, true); 1043 1044 if (!THREAD) 1045 panic("Cannot wait for memory to become available."); 1035 1046 1036 1047 /* … … 1039 1050 1040 1051 #ifdef CONFIG_DEBUG 1041 printf("Thread %" PRIu64 " waiting for % " PRIs "frames, "1042 "% " PRIs "available.\n", THREAD->tid, size, avail);1052 printf("Thread %" PRIu64 " waiting for %zu frames, " 1053 "%zu available.\n", THREAD->tid, size, avail); 1043 1054 #endif 1044 1055 … … 1066 1077 + zones.info[znum].base; 1067 1078 1068 spinlock_unlock(&zones.lock); 1069 interrupts_restore(ipl); 1079 irq_spinlock_unlock(&zones.lock, true); 1070 1080 1071 1081 if (pzone) … … 1089 1099 void frame_free(uintptr_t frame) 1090 1100 { 1091 ipl_t ipl = interrupts_disable(); 1092 spinlock_lock(&zones.lock); 1101 irq_spinlock_lock(&zones.lock, true); 1093 1102 1094 1103 /* … … 1096 1105 */ 1097 1106 pfn_t pfn = ADDR2PFN(frame); 1098 size_t znum = find_zone(pfn, 1, NULL);1107 size_t znum = find_zone(pfn, 1, 0); 1099 1108 1100 1109 ASSERT(znum != (size_t) -1); … … 1102 1111 zone_frame_free(&zones.info[znum], pfn - zones.info[znum].base); 1103 1112 1104 spinlock_unlock(&zones.lock); 1105 interrupts_restore(ipl); 1113 irq_spinlock_unlock(&zones.lock, true); 1106 1114 1107 1115 /* … … 1127 1135 * 1128 1136 */ 1129 void frame_reference_add(pfn_t pfn) 1130 { 1131 ipl_t ipl = interrupts_disable(); 1132 spinlock_lock(&zones.lock); 1137 NO_TRACE void frame_reference_add(pfn_t pfn) 1138 { 1139 irq_spinlock_lock(&zones.lock, true); 1133 1140 1134 1141 /* 1135 1142 * First, find host frame zone for addr. 1136 1143 */ 1137 size_t znum = find_zone(pfn, 1, NULL);1144 size_t znum = find_zone(pfn, 1, 0); 1138 1145 1139 1146 ASSERT(znum != (size_t) -1); … … 1141 1148 zones.info[znum].frames[pfn - zones.info[znum].base].refcount++; 1142 1149 1143 spinlock_unlock(&zones.lock);1144 interrupts_restore(ipl); 1145 } 1146 1147 /** Mark given range unavailable in frame zones. */ 1148 void frame_mark_unavailable(pfn_t start, size_t count) 1149 { 1150 ipl_t ipl = interrupts_disable(); 1151 spinlock_lock(&zones.lock);1150 irq_spinlock_unlock(&zones.lock, true); 1151 } 1152 1153 /** Mark given range unavailable in frame zones. 
1154 * 1155 */ 1156 NO_TRACE void frame_mark_unavailable(pfn_t start, size_t count) 1157 { 1158 irq_spinlock_lock(&zones.lock, true); 1152 1159 1153 1160 size_t i; … … 1161 1168 } 1162 1169 1163 spinlock_unlock(&zones.lock); 1164 interrupts_restore(ipl); 1165 } 1166 1167 /** Initialize physical memory management. */ 1170 irq_spinlock_unlock(&zones.lock, true); 1171 } 1172 1173 /** Initialize physical memory management. 1174 * 1175 */ 1168 1176 void frame_init(void) 1169 1177 { 1170 1178 if (config.cpu_active == 1) { 1171 1179 zones.count = 0; 1172 spinlock_initialize(&zones.lock, "zones.lock");1180 irq_spinlock_initialize(&zones.lock, "frame.zones.lock"); 1173 1181 mutex_initialize(&mem_avail_mtx, MUTEX_ACTIVE); 1174 1182 condvar_initialize(&mem_avail_cv); … … 1201 1209 } 1202 1210 1203 /** Return total size of all zones. */ 1204 uint64_t zone_total_size(void) 1205 { 1206 ipl_t ipl = interrupts_disable(); 1207 spinlock_lock(&zones.lock); 1211 /** Return total size of all zones. 1212 * 1213 */ 1214 uint64_t zones_total_size(void) 1215 { 1216 irq_spinlock_lock(&zones.lock, true); 1208 1217 1209 1218 uint64_t total = 0; … … 1212 1221 total += (uint64_t) FRAMES2SIZE(zones.info[i].count); 1213 1222 1214 spinlock_unlock(&zones.lock); 1215 interrupts_restore(ipl); 1223 irq_spinlock_unlock(&zones.lock, true); 1216 1224 1217 1225 return total; 1218 1226 } 1219 1227 1220 /** Prints list of zones. */ 1221 void zone_print_list(void) 1228 void zones_stats(uint64_t *total, uint64_t *unavail, uint64_t *busy, 1229 uint64_t *free) 1230 { 1231 ASSERT(total != NULL); 1232 ASSERT(unavail != NULL); 1233 ASSERT(busy != NULL); 1234 ASSERT(free != NULL); 1235 1236 irq_spinlock_lock(&zones.lock, true); 1237 1238 *total = 0; 1239 *unavail = 0; 1240 *busy = 0; 1241 *free = 0; 1242 1243 size_t i; 1244 for (i = 0; i < zones.count; i++) { 1245 *total += (uint64_t) FRAMES2SIZE(zones.info[i].count); 1246 1247 if (zone_flags_available(zones.info[i].flags)) { 1248 *busy += (uint64_t) FRAMES2SIZE(zones.info[i].busy_count); 1249 *free += (uint64_t) FRAMES2SIZE(zones.info[i].free_count); 1250 } else 1251 *unavail += (uint64_t) FRAMES2SIZE(zones.info[i].count); 1252 } 1253 1254 irq_spinlock_unlock(&zones.lock, true); 1255 } 1256 1257 /** Prints list of zones. 
1258 * 1259 */ 1260 void zones_print_list(void) 1222 1261 { 1223 1262 #ifdef __32_BITS__ 1224 printf("# base address frames flags free frames busy frames\n"); 1225 printf("-- ------------ ------------ -------- ------------ ------------\n"); 1263 printf("[nr] [base addr] [frames ] [flags ] [free frames ] [busy frames ]\n"); 1226 1264 #endif 1227 1265 1228 1266 #ifdef __64_BITS__ 1229 printf("# base address frames flags free frames busy frames\n"); 1230 printf("-- -------------------- ------------ -------- ------------ ------------\n"); 1267 printf("[nr] [base address ] [frames ] [flags ] [free frames ] [busy frames ]\n"); 1231 1268 #endif 1232 1269 … … 1244 1281 size_t i; 1245 1282 for (i = 0;; i++) { 1246 ipl_t ipl = interrupts_disable(); 1247 spinlock_lock(&zones.lock); 1283 irq_spinlock_lock(&zones.lock, true); 1248 1284 1249 1285 if (i >= zones.count) { 1250 spinlock_unlock(&zones.lock); 1251 interrupts_restore(ipl); 1286 irq_spinlock_unlock(&zones.lock, true); 1252 1287 break; 1253 1288 } … … 1259 1294 size_t busy_count = zones.info[i].busy_count; 1260 1295 1261 spinlock_unlock(&zones.lock); 1262 interrupts_restore(ipl); 1296 irq_spinlock_unlock(&zones.lock, true); 1263 1297 1264 1298 bool available = zone_flags_available(flags); 1265 1299 1266 printf("%- 2" PRIs, i);1300 printf("%-4zu", i); 1267 1301 1268 1302 #ifdef __32_BITS__ 1269 printf(" %10p",base);1303 printf(" %p", (void *) base); 1270 1304 #endif 1271 1305 1272 1306 #ifdef __64_BITS__ 1273 printf(" %18p",base);1307 printf(" %p", (void *) base); 1274 1308 #endif 1275 1309 1276 printf(" %12 " PRIs "%c%c%c ", count,1310 printf(" %12zu %c%c%c ", count, 1277 1311 available ? 'A' : ' ', 1278 1312 (flags & ZONE_RESERVED) ? 'R' : ' ', … … 1280 1314 1281 1315 if (available) 1282 printf("%1 2" PRIs " %12" PRIs,1316 printf("%14zu %14zu", 1283 1317 free_count, busy_count); 1284 1318 … … 1294 1328 void zone_print_one(size_t num) 1295 1329 { 1296 ipl_t ipl = interrupts_disable(); 1297 spinlock_lock(&zones.lock); 1330 irq_spinlock_lock(&zones.lock, true); 1298 1331 size_t znum = (size_t) -1; 1299 1332 … … 1307 1340 1308 1341 if (znum == (size_t) -1) { 1309 spinlock_unlock(&zones.lock); 1310 interrupts_restore(ipl); 1342 irq_spinlock_unlock(&zones.lock, true); 1311 1343 printf("Zone not found.\n"); 1312 1344 return; … … 1319 1351 size_t busy_count = zones.info[i].busy_count; 1320 1352 1321 spinlock_unlock(&zones.lock); 1322 interrupts_restore(ipl); 1353 irq_spinlock_unlock(&zones.lock, true); 1323 1354 1324 1355 bool available = zone_flags_available(flags); 1325 1356 1326 printf("Zone number: % " PRIs "\n", znum);1327 printf("Zone base address: %p\n", base);1328 printf("Zone size: % " PRIs " frames (%" PRIs "KiB)\n", count,1357 printf("Zone number: %zu\n", znum); 1358 printf("Zone base address: %p\n", (void *) base); 1359 printf("Zone size: %zu frames (%zu KiB)\n", count, 1329 1360 SIZE2KB(FRAMES2SIZE(count))); 1330 1361 printf("Zone flags: %c%c%c\n", … … 1334 1365 1335 1366 if (available) { 1336 printf("Allocated space: % " PRIs " frames (%" PRIs "KiB)\n",1367 printf("Allocated space: %zu frames (%zu KiB)\n", 1337 1368 busy_count, SIZE2KB(FRAMES2SIZE(busy_count))); 1338 printf("Available space: % " PRIs " frames (%" PRIs "KiB)\n",1369 printf("Available space: %zu frames (%zu KiB)\n", 1339 1370 free_count, SIZE2KB(FRAMES2SIZE(free_count))); 1340 1371 } -
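The dominant mechanical change in frame.c is the switch from manually pairing interrupts_disable()/interrupts_restore() with a plain spinlock to the irq_spinlock API, which folds the interrupt state into the lock operation itself. Side by side, as the diff applies it to zones.lock:

    /* Before the merge: */
    ipl_t ipl = interrupts_disable();
    spinlock_lock(&zones.lock);
    /* ... critical section ... */
    spinlock_unlock(&zones.lock);
    interrupts_restore(ipl);

    /* After: the second argument asks the lock to disable and
     * restore interrupts on its own. */
    irq_spinlock_lock(&zones.lock, true);
    /* ... critical section ... */
    irq_spinlock_unlock(&zones.lock, true);

Besides being shorter, this removes the possibility of unlocking without restoring the saved interrupt level on an early-return or error path.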
kernel/generic/src/mm/page.c
rfb150d78 r46c20c8 33 33 /** 34 34 * @file 35 * @brief Virtual Address Translation subsystem.35 * @brief Virtual Address Translation subsystem. 36 36 * 37 37 * This file contains code for creating, destroying and searching 38 38 * mappings between virtual addresses and physical addresses. 39 39 * Functions here are mere wrappers that call the real implementation. 40 * They however, define the single interface. 40 * They however, define the single interface. 41 * 41 42 */ 42 43 … … 55 56 * will do an implicit serialization by virtue of running the TLB shootdown 56 57 * interrupt handler. 58 * 57 59 */ 58 60 … … 63 65 #include <mm/frame.h> 64 66 #include <arch/barrier.h> 65 #include < arch/types.h>67 #include <typedefs.h> 66 68 #include <arch/asm.h> 67 69 #include <memstr.h> … … 83 85 * of page boundaries. 84 86 * 85 * @param s Address of the structure. 86 * @param size Size of the structure. 87 * @param addr Address of the structure. 88 * @param size Size of the structure. 89 * 87 90 */ 88 void map_structure(uintptr_t s, size_t size)91 void map_structure(uintptr_t addr, size_t size) 89 92 { 90 int i, cnt, length; 91 92 length = size + (s - (s & ~(PAGE_SIZE - 1))); 93 cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0); 94 93 size_t length = size + (addr - (addr & ~(PAGE_SIZE - 1))); 94 size_t cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0); 95 96 size_t i; 95 97 for (i = 0; i < cnt; i++) 96 page_mapping_insert(AS_KERNEL, s+ i * PAGE_SIZE,97 s+ i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE);98 98 page_mapping_insert(AS_KERNEL, addr + i * PAGE_SIZE, 99 addr + i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE); 100 99 101 /* Repel prefetched accesses to the old mapping. */ 100 102 memory_barrier(); … … 106 108 * using flags. Allocate and setup any missing page tables. 107 109 * 108 * The page table must be locked and interrupts must be disabled. 110 * @param as Address space to wich page belongs. 111 * @param page Virtual address of the page to be mapped. 112 * @param frame Physical address of memory frame to which the mapping is 113 * done. 114 * @param flags Flags to be used for mapping. 109 115 * 110 * @param as Address space to wich page belongs.111 * @param page Virtual address of the page to be mapped.112 * @param frame Physical address of memory frame to which the mapping is113 * done.114 * @param flags Flags to be used for mapping.115 116 */ 116 void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags) 117 NO_TRACE void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, 118 unsigned int flags) 117 119 { 120 ASSERT(page_table_locked(as)); 121 118 122 ASSERT(page_mapping_operations); 119 123 ASSERT(page_mapping_operations->mapping_insert); 120 124 121 125 page_mapping_operations->mapping_insert(as, page, frame, flags); 122 126 … … 131 135 * this call visible. 132 136 * 133 * The page table must be locked and interrupts must be disabled. 137 * @param as Address space to wich page belongs. 138 * @param page Virtual address of the page to be demapped. 
134 139 * 135 * @param as Address space to wich page belongs.136 * @param page Virtual address of the page to be demapped.137 140 */ 138 void page_mapping_remove(as_t *as, uintptr_t page)141 NO_TRACE void page_mapping_remove(as_t *as, uintptr_t page) 139 142 { 143 ASSERT(page_table_locked(as)); 144 140 145 ASSERT(page_mapping_operations); 141 146 ASSERT(page_mapping_operations->mapping_remove); 142 147 143 148 page_mapping_operations->mapping_remove(as, page); 144 149 145 150 /* Repel prefetched accesses to the old mapping. */ 146 151 memory_barrier(); … … 151 156 * Find mapping for virtual page. 152 157 * 153 * The page table must be locked and interrupts must be disabled. 158 * @param as Address space to wich page belongs. 159 * @param page Virtual page. 154 160 * 155 * @ param as Address space to wich page belongs.156 * @param page Virtual page.161 * @return NULL if there is no such mapping; requested mapping 162 * otherwise. 157 163 * 158 * @return NULL if there is no such mapping; requested mapping159 * otherwise.160 164 */ 161 pte_t *page_mapping_find(as_t *as, uintptr_t page)165 NO_TRACE pte_t *page_mapping_find(as_t *as, uintptr_t page) 162 166 { 167 ASSERT(page_table_locked(as)); 168 163 169 ASSERT(page_mapping_operations); 164 170 ASSERT(page_mapping_operations->mapping_find); 165 171 166 172 return page_mapping_operations->mapping_find(as, page); 167 173 } -
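With page_mapping_insert(), page_mapping_remove() and page_mapping_find() now asserting page_table_locked(as), every mapping operation is explicitly bracketed by the page table lock, in line with the as.c changes above. A usage sketch (hypothetical caller, flags borrowed from map_structure()):

    page_table_lock(as, true);
    page_mapping_insert(as, page, frame, PAGE_NOT_CACHEABLE | PAGE_WRITE);
    pte_t *pte = page_mapping_find(as, page);
    /* ... inspect or update the PTE while the lock is still held ... */
    page_table_unlock(as, true);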
kernel/generic/src/mm/slab.c
rfb150d78 r46c20c8 33 33 /** 34 34 * @file 35 * @brief Slab allocator.35 * @brief Slab allocator. 36 36 * 37 37 * The slab allocator is closely modelled after OpenSolaris slab allocator. … … 50 50 * 51 51 * The slab allocator supports per-CPU caches ('magazines') to facilitate 52 * good SMP scaling. 52 * good SMP scaling. 53 53 * 54 54 * When a new object is being allocated, it is first checked, if it is … … 65 65 * thrashing when somebody is allocating/deallocating 1 item at the magazine 66 66 * size boundary. LIFO order is enforced, which should avoid fragmentation 67 * as much as possible. 68 * 67 * as much as possible. 68 * 69 69 * Every cache contains list of full slabs and list of partially full slabs. 70 70 * Empty slabs are immediately freed (thrashing will be avoided because 71 * of magazines). 71 * of magazines). 72 72 * 73 73 * The slab information structure is kept inside the data area, if possible. … … 95 95 * 96 96 * @todo 97 * it might be good to add granularity of locks even to slab level,97 * It might be good to add granularity of locks even to slab level, 98 98 * we could then try_spinlock over all partial slabs and thus improve 99 * scalability even on slab level 99 * scalability even on slab level. 100 * 100 101 */ 101 102 … … 114 115 #include <macros.h> 115 116 116 SPINLOCK_INITIALIZE(slab_cache_lock);117 IRQ_SPINLOCK_STATIC_INITIALIZE(slab_cache_lock); 117 118 static LIST_INITIALIZE(slab_cache_list); 118 119 119 120 /** Magazine cache */ 120 121 static slab_cache_t mag_cache; 122 121 123 /** Cache for cache descriptors */ 122 124 static slab_cache_t slab_cache_cache; 125 123 126 /** Cache for external slab descriptors 124 127 * This time we want per-cpu cache, so do not make it static … … 128 131 */ 129 132 static slab_cache_t *slab_extern_cache; 133 130 134 /** Caches for malloc */ 131 135 static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1]; 132 static char *malloc_names[] = { 136 137 static const char *malloc_names[] = { 133 138 "malloc-16", 134 139 "malloc-32", … … 154 159 /** Slab descriptor */ 155 160 typedef struct { 156 slab_cache_t *cache; /**< Pointer to parent cache. */157 link_t link; /**< List of full/partial slabs. */158 void *start; /**< Start address of first available item. */159 size_t available; /**< Count of available items in this slab. */160 size_t nextavail; /**< The index of next available item. */161 slab_cache_t *cache; /**< Pointer to parent cache. */ 162 link_t link; /**< List of full/partial slabs. */ 163 void *start; /**< Start address of first available item. */ 164 size_t available; /**< Count of available items in this slab. */ 165 size_t nextavail; /**< The index of next available item. 
*/ 161 166 } slab_t; 162 167 163 168 #ifdef CONFIG_DEBUG 164 static int _slab_initialized = 0;169 static unsigned int _slab_initialized = 0; 165 170 #endif 166 171 167 172 /**************************************/ 168 173 /* Slab allocation functions */ 169 170 /** 171 * Allocate frames for slab space and initialize 172 * 173 */ 174 static slab_t *slab_space_alloc(slab_cache_t *cache, int flags) 175 { 176 void *data; 174 /**************************************/ 175 176 /** Allocate frames for slab space and initialize 177 * 178 */ 179 NO_TRACE static slab_t *slab_space_alloc(slab_cache_t *cache, 180 unsigned int flags) 181 { 182 183 184 size_t zone = 0; 185 186 void *data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone); 187 if (!data) { 188 return NULL; 189 } 190 177 191 slab_t *slab; 178 192 size_t fsize; 179 unsigned int i; 180 size_t zone = 0; 181 182 data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone); 183 if (!data) { 184 return NULL; 185 } 193 186 194 if (!(cache->flags & SLAB_CACHE_SLINSIDE)) { 187 195 slab = slab_alloc(slab_extern_cache, flags); … … 196 204 197 205 /* Fill in slab structures */ 198 for (i = 0; i < ((unsigned int) 1 << cache->order); i++) 206 size_t i; 207 for (i = 0; i < ((size_t) 1 << cache->order); i++) 199 208 frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone); 200 209 201 210 slab->start = data; 202 211 slab->available = cache->objects; 203 212 slab->nextavail = 0; 204 213 slab->cache = cache; 205 214 206 215 for (i = 0; i < cache->objects; i++) 207 *(( int *) (slab->start + i*cache->size)) = i + 1;208 216 *((size_t *) (slab->start + i * cache->size)) = i + 1; 217 209 218 atomic_inc(&cache->allocated_slabs); 210 219 return slab; 211 220 } 212 221 213 /** 214 * Deallocate space associated with slab 222 /** Deallocate space associated with slab 215 223 * 216 224 * @return number of freed frames 217 */ 218 static size_t slab_space_free(slab_cache_t *cache, slab_t *slab) 225 * 226 */ 227 NO_TRACE static size_t slab_space_free(slab_cache_t *cache, slab_t *slab) 219 228 { 220 229 frame_free(KA2PA(slab->start)); 221 if (! 
(cache->flags & SLAB_CACHE_SLINSIDE))230 if (!(cache->flags & SLAB_CACHE_SLINSIDE)) 222 231 slab_free(slab_extern_cache, slab); 223 232 224 233 atomic_dec(&cache->allocated_slabs); 225 234 226 return 1 << cache->order;235 return (1 << cache->order); 227 236 } 228 237 229 238 /** Map object to slab structure */ 230 static slab_t *obj2slab(void *obj)239 NO_TRACE static slab_t *obj2slab(void *obj) 231 240 { 232 241 return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0); 233 242 } 234 243 235 /****************** ********************/244 /******************/ 236 245 /* Slab functions */ 237 238 239 /** 240 * Return object to slab and call a destructor 246 /******************/ 247 248 /** Return object to slab and call a destructor 241 249 * 242 250 * @param slab If the caller knows directly slab of the object, otherwise NULL 243 251 * 244 252 * @return Number of freed pages 245 * /246 static size_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab) 247 { 248 int freed = 0; 249 253 * 254 */ 255 NO_TRACE static size_t slab_obj_destroy(slab_cache_t *cache, void *obj, 256 slab_t *slab) 257 { 250 258 if (!slab) 251 259 slab = obj2slab(obj); 252 260 253 261 ASSERT(slab->cache == cache); 254 262 263 size_t freed = 0; 264 255 265 if (cache->destructor) 256 266 freed = cache->destructor(obj); … … 258 268 spinlock_lock(&cache->slablock); 259 269 ASSERT(slab->available < cache->objects); 260 261 *(( int *)obj) = slab->nextavail;270 271 *((size_t *) obj) = slab->nextavail; 262 272 slab->nextavail = (obj - slab->start) / cache->size; 263 273 slab->available++; 264 274 265 275 /* Move it to correct list */ 266 276 if (slab->available == cache->objects) { … … 268 278 list_remove(&slab->link); 269 279 spinlock_unlock(&cache->slablock); 270 280 271 281 return freed + slab_space_free(cache, slab); 272 273 282 } else if (slab->available == 1) { 274 283 /* It was in full, move to partial */ … … 276 285 list_prepend(&slab->link, &cache->partial_slabs); 277 286 } 287 278 288 spinlock_unlock(&cache->slablock); 279 289 return freed; 280 290 } 281 291 282 /** 283 * Take new object from slab or create new if needed 292 /** Take new object from slab or create new if needed 284 293 * 285 294 * @return Object address or null 286 */ 287 static void *slab_obj_create(slab_cache_t *cache, int flags) 288 { 295 * 296 */ 297 NO_TRACE static void *slab_obj_create(slab_cache_t *cache, unsigned int flags) 298 { 299 spinlock_lock(&cache->slablock); 300 289 301 slab_t *slab; 290 void *obj; 291 292 spinlock_lock(&cache->slablock); 293 302 294 303 if (list_empty(&cache->partial_slabs)) { 295 /* Allow recursion and reclaiming 304 /* 305 * Allow recursion and reclaiming 296 306 * - this should work, as the slab control structures 297 307 * are small and do not need to allocate with anything 298 308 * other than frame_alloc when they are allocating, 299 309 * that's why we should get recursion at most 1-level deep 310 * 300 311 */ 301 312 spinlock_unlock(&cache->slablock); … … 303 314 if (!slab) 304 315 return NULL; 316 305 317 spinlock_lock(&cache->slablock); 306 318 } else { … … 309 321 list_remove(&slab->link); 310 322 } 311 obj = slab->start + slab->nextavail * cache->size; 312 slab->nextavail = *((int *)obj); 323 324 void *obj = slab->start + slab->nextavail * cache->size; 325 slab->nextavail = *((size_t *) obj); 313 326 slab->available--; 314 327 315 328 if (!slab->available) 316 329 list_prepend(&slab->link, &cache->full_slabs); 317 330 else 318 331 list_prepend(&slab->link, &cache->partial_slabs); 319 332 320 333 
spinlock_unlock(&cache->slablock); 321 322 if ( cache->constructor && cache->constructor(obj, flags)) {334 335 if ((cache->constructor) && (cache->constructor(obj, flags))) { 323 336 /* Bad, bad, construction failed */ 324 337 slab_obj_destroy(cache, obj, slab); 325 338 return NULL; 326 339 } 340 327 341 return obj; 328 342 } 329 343 330 /**************************** **********/344 /****************************/ 331 345 /* CPU-Cache slab functions */ 332 333 /** 334 * Finds a full magazine in cache, takes it from list 335 * and returns it 336 * 337 * @param first If true, return first, else last mag 338 */ 339 static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, int first) 346 /****************************/ 347 348 /** Find a full magazine in cache, take it from list and return it 349 * 350 * @param first If true, return first, else last mag. 351 * 352 */ 353 NO_TRACE static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, 354 bool first) 340 355 { 341 356 slab_magazine_t *mag = NULL; 342 357 link_t *cur; 343 358 344 359 spinlock_lock(&cache->maglock); 345 360 if (!list_empty(&cache->magazines)) { … … 348 363 else 349 364 cur = cache->magazines.prev; 365 350 366 mag = list_get_instance(cur, slab_magazine_t, link); 351 367 list_remove(&mag->link); 352 368 atomic_dec(&cache->magazine_counter); 353 369 } 370 354 371 spinlock_unlock(&cache->maglock); 355 372 return mag; 356 373 } 357 374 358 /** Prepend magazine to magazine list in cache */ 359 static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag) 375 /** Prepend magazine to magazine list in cache 376 * 377 */ 378 NO_TRACE static void put_mag_to_cache(slab_cache_t *cache, 379 slab_magazine_t *mag) 360 380 { 361 381 spinlock_lock(&cache->maglock); 362 382 363 383 list_prepend(&mag->link, &cache->magazines); 364 384 atomic_inc(&cache->magazine_counter); … … 367 387 } 368 388 369 /** 370 * Free all objects in magazine and free memory associated with magazine 389 /** Free all objects in magazine and free memory associated with magazine 371 390 * 372 391 * @return Number of freed pages 373 */ 374 static size_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag) 375 { 376 unsigned int i; 392 * 393 */ 394 NO_TRACE static size_t magazine_destroy(slab_cache_t *cache, 395 slab_magazine_t *mag) 396 { 397 size_t i; 377 398 size_t frames = 0; 378 399 379 400 for (i = 0; i < mag->busy; i++) { 380 401 frames += slab_obj_destroy(cache, mag->objs[i], NULL); … … 383 404 384 405 slab_free(&mag_cache, mag); 385 406 386 407 return frames; 387 408 } 388 409 389 /** 390 * Find full magazine, set it as current and return it 391 * 392 * Assume cpu_magazine lock is held 393 */ 394 static slab_magazine_t *get_full_current_mag(slab_cache_t *cache) 395 { 396 slab_magazine_t *cmag, *lastmag, *newmag; 397 398 cmag = cache->mag_cache[CPU->id].current; 399 lastmag = cache->mag_cache[CPU->id].last; 410 /** Find full magazine, set it as current and return it 411 * 412 */ 413 NO_TRACE static slab_magazine_t *get_full_current_mag(slab_cache_t *cache) 414 { 415 slab_magazine_t *cmag = cache->mag_cache[CPU->id].current; 416 slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last; 417 418 ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock)); 419 400 420 if (cmag) { /* First try local CPU magazines */ 401 421 if (cmag->busy) 402 422 return cmag; 403 404 if ( lastmag && lastmag->busy) {423 424 if ((lastmag) && (lastmag->busy)) { 405 425 cache->mag_cache[CPU->id].current = lastmag; 406 426 cache->mag_cache[CPU->id].last = cmag; … … 408 
428 } 409 429 } 430 410 431 /* Local magazines are empty, import one from magazine list */ 411 newmag = get_mag_from_cache(cache, 1);432 slab_magazine_t *newmag = get_mag_from_cache(cache, 1); 412 433 if (!newmag) 413 434 return NULL; 414 435 415 436 if (lastmag) 416 437 magazine_destroy(cache, lastmag); 417 438 418 439 cache->mag_cache[CPU->id].last = cmag; 419 440 cache->mag_cache[CPU->id].current = newmag; 441 420 442 return newmag; 421 443 } 422 444 423 /** 424 * Try to find object in CPU-cache magazines 445 /** Try to find object in CPU-cache magazines 425 446 * 426 447 * @return Pointer to object or NULL if not available 427 */ 428 static void *magazine_obj_get(slab_cache_t *cache) 429 { 430 slab_magazine_t *mag; 431 void *obj; 432 448 * 449 */ 450 NO_TRACE static void *magazine_obj_get(slab_cache_t *cache) 451 { 433 452 if (!CPU) 434 453 return NULL; 435 454 436 455 spinlock_lock(&cache->mag_cache[CPU->id].lock); 437 438 mag = get_full_current_mag(cache);456 457 slab_magazine_t *mag = get_full_current_mag(cache); 439 458 if (!mag) { 440 459 spinlock_unlock(&cache->mag_cache[CPU->id].lock); 441 460 return NULL; 442 461 } 443 obj = mag->objs[--mag->busy]; 462 463 void *obj = mag->objs[--mag->busy]; 444 464 spinlock_unlock(&cache->mag_cache[CPU->id].lock); 465 445 466 atomic_dec(&cache->cached_objs); 446 467 … … 448 469 } 449 470 450 /** 451 * Assure that the current magazine is empty, return pointer to it, or NULL if 452 * no empty magazine is available and cannot be allocated 453 * 454 * Assume mag_cache[CPU->id].lock is held 455 * 456 * We have 2 magazines bound to processor. 457 * First try the current. 458 * If full, try the last. 459 * If full, put to magazines list. 460 * allocate new, exchange last & current 461 * 462 */ 463 static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache) 464 { 465 slab_magazine_t *cmag,*lastmag,*newmag; 466 467 cmag = cache->mag_cache[CPU->id].current; 468 lastmag = cache->mag_cache[CPU->id].last; 469 471 /** Assure that the current magazine is empty, return pointer to it, 472 * or NULL if no empty magazine is available and cannot be allocated 473 * 474 * We have 2 magazines bound to processor. 475 * First try the current. 476 * If full, try the last. 477 * If full, put to magazines list. 478 * 479 */ 480 NO_TRACE static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache) 481 { 482 slab_magazine_t *cmag = cache->mag_cache[CPU->id].current; 483 slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last; 484 485 ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock)); 486 470 487 if (cmag) { 471 488 if (cmag->busy < cmag->size) 472 489 return cmag; 473 if (lastmag && lastmag->busy < lastmag->size) { 490 491 if ((lastmag) && (lastmag->busy < lastmag->size)) { 474 492 cache->mag_cache[CPU->id].last = cmag; 475 493 cache->mag_cache[CPU->id].current = lastmag; … … 477 495 } 478 496 } 497 479 498 /* current | last are full | nonexistent, allocate new */ 480 /* We do not want to sleep just because of caching */ 481 /* Especially we do not want reclaiming to start, as 482 * this would deadlock */ 483 newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM); 499 500 /* 501 * We do not want to sleep just because of caching, 502 * especially we do not want reclaiming to start, as 503 * this would deadlock. 
-/**
- * Put object into CPU-cache magazine
- *
- * @return 0 - success, -1 - could not get memory
- */
-static int magazine_obj_put(slab_cache_t *cache, void *obj)
-{
-	slab_magazine_t *mag;
-
+/** Put object into CPU-cache magazine
+ *
+ * @return 0 on success, -1 on no memory
+ *
+ */
+NO_TRACE static int magazine_obj_put(slab_cache_t *cache, void *obj)
+{
 	if (!CPU)
 		return -1;

 	spinlock_lock(&cache->mag_cache[CPU->id].lock);
-
-	mag = make_empty_current_mag(cache);
+
+	slab_magazine_t *mag = make_empty_current_mag(cache);
 	if (!mag) {
 		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
…
 	}

 	mag->objs[mag->busy++] = obj;

 	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
+
 	atomic_inc(&cache->cached_objs);
+
 	return 0;
 }

-
-/**************************************/
+/************************/
 /* Slab cache functions */
-
-/** Return number of objects that fit in certain cache size */
-static unsigned int comp_objects(slab_cache_t *cache)
+/************************/
+
+/** Return number of objects that fit in certain cache size
+ *
+ */
+NO_TRACE static size_t comp_objects(slab_cache_t *cache)
 {
 	if (cache->flags & SLAB_CACHE_SLINSIDE)
-		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) /
-		    cache->size;
-	else
+		return ((PAGE_SIZE << cache->order)
+		    - sizeof(slab_t)) / cache->size;
+	else
 		return (PAGE_SIZE << cache->order) / cache->size;
 }

-/** Return wasted space in slab */
-static unsigned int badness(slab_cache_t *cache)
-{
-	unsigned int objects;
-	unsigned int ssize;
-
-	objects = comp_objects(cache);
-	ssize = PAGE_SIZE << cache->order;
+/** Return wasted space in slab
+ *
+ */
+NO_TRACE static size_t badness(slab_cache_t *cache)
+{
+	size_t objects = comp_objects(cache);
+	size_t ssize = PAGE_SIZE << cache->order;
+
 	if (cache->flags & SLAB_CACHE_SLINSIDE)
 		ssize -= sizeof(slab_t);
+
 	return ssize - objects * cache->size;
 }

-/**
- * Initialize mag_cache structure in slab cache
- */
-static void make_magcache(slab_cache_t *cache)
-{
-	unsigned int i;
-
+/** Initialize mag_cache structure in slab cache
+ *
+ */
+NO_TRACE static bool make_magcache(slab_cache_t *cache)
+{
 	ASSERT(_slab_initialized >= 2);

 	cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,
-	    0);
+	    FRAME_ATOMIC);
+	if (!cache->mag_cache)
+		return false;
+
+	size_t i;
 	for (i = 0; i < config.cpu_count; i++) {
 		memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
 		spinlock_initialize(&cache->mag_cache[i].lock,
-		    "slab_maglock_cpu");
-	}
-}
+		    "slab.cache.mag_cache[].lock");
+	}
+
+	return true;
+}
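To see what comp_objects() and badness() compute, assume a PAGE_SIZE of 4096 and a 32-byte slab_t stored inside an order-0 slab of 96-byte objects: (4096 - 32) / 96 = 42 objects fit, and 4064 - 42 * 96 = 32 bytes are wasted. The standalone check below mirrors that arithmetic; the page size and the stand-in slab_t are assumptions for the example, not the kernel's real values:

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096  /* assumed for the example */

    /* Stand-in for the real slab_t: 32 bytes on an LP64 target. */
    typedef struct { void *pad[4]; } slab_t;

    static size_t comp_objects(size_t order, size_t size, int slab_inside)
    {
        size_t ssize = PAGE_SIZE << order;
        if (slab_inside)
            ssize -= sizeof(slab_t);
        return ssize / size;
    }

    static size_t badness(size_t order, size_t size, int slab_inside)
    {
        size_t ssize = PAGE_SIZE << order;
        if (slab_inside)
            ssize -= sizeof(slab_t);
        return ssize - comp_objects(order, size, slab_inside) * size;
    }

    int main(void)
    {
        /* 96-byte objects, order-0 slab, slab_t kept inside the slab. */
        printf("objects per slab: %zu, wasted bytes: %zu\n",
            comp_objects(0, 96, 1), badness(0, 96, 1));
        return 0;
    }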
-/** Initialize allocated memory as a slab cache */
-static void
-_slab_cache_create(slab_cache_t *cache, char *name, size_t size, size_t align,
-    int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
-    int flags)
-{
-	int pages;
-	ipl_t ipl;
-
+/** Initialize allocated memory as a slab cache
+ *
+ */
+NO_TRACE static void _slab_cache_create(slab_cache_t *cache, const char *name,
+    size_t size, size_t align, int (*constructor)(void *obj,
+    unsigned int kmflag), size_t (*destructor)(void *obj), unsigned int flags)
+{
 	memsetb(cache, sizeof(*cache), 0);
 	cache->name = name;

 	if (align < sizeof(unative_t))
 		align = sizeof(unative_t);
+
 	size = ALIGN_UP(size, align);

 	cache->size = size;
-
 	cache->constructor = constructor;
 	cache->destructor = destructor;
 	cache->flags = flags;

 	list_initialize(&cache->full_slabs);
 	list_initialize(&cache->partial_slabs);
 	list_initialize(&cache->magazines);
-	spinlock_initialize(&cache->slablock, "slab_lock");
-	spinlock_initialize(&cache->maglock, "slab_maglock");
+
+	spinlock_initialize(&cache->slablock, "slab.cache.slablock");
+	spinlock_initialize(&cache->maglock, "slab.cache.maglock");
+
 	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
-		make_magcache(cache);
-
+		(void) make_magcache(cache);
+
 	/* Compute slab sizes, object counts in slabs etc. */
 	if (cache->size < SLAB_INSIDE_SIZE)
 		cache->flags |= SLAB_CACHE_SLINSIDE;

 	/* Minimum slab order */
-	pages = SIZE2FRAMES(cache->size);
+	size_t pages = SIZE2FRAMES(cache->size);
+
 	/* We need the 2^order >= pages */
 	if (pages == 1)
…
 	else
 		cache->order = fnzb(pages - 1) + 1;

-	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
+	while (badness(cache) > SLAB_MAX_BADNESS(cache))
 		cache->order += 1;
-	}
+
 	cache->objects = comp_objects(cache);
+
 	/* If info fits in, put it inside */
 	if (badness(cache) > sizeof(slab_t))
 		cache->flags |= SLAB_CACHE_SLINSIDE;

 	/* Add cache to cache list */
-	ipl = interrupts_disable();
-	spinlock_lock(&slab_cache_lock);
-
+	irq_spinlock_lock(&slab_cache_lock, true);
 	list_append(&cache->link, &slab_cache_list);
-
-	spinlock_unlock(&slab_cache_lock);
-	interrupts_restore(ipl);
-}
-
-/** Create slab cache */
-slab_cache_t *
-slab_cache_create(char *name, size_t size, size_t align,
-    int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
-    int flags)
-{
-	slab_cache_t *cache;
-
-	cache = slab_alloc(&slab_cache_cache, 0);
+	irq_spinlock_unlock(&slab_cache_lock, true);
+}
+
+/** Create slab cache
+ *
+ */
+slab_cache_t *slab_cache_create(const char *name, size_t size, size_t align,
+    int (*constructor)(void *obj, unsigned int kmflag),
+    size_t (*destructor)(void *obj), unsigned int flags)
+{
+	slab_cache_t *cache = slab_alloc(&slab_cache_cache, 0);
 	_slab_cache_create(cache, name, size, align, constructor, destructor,
 	    flags);
+
 	return cache;
 }
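_slab_cache_create() derives the slab order from the object size: SIZE2FRAMES() rounds the size up to whole pages, fnzb() (first non-zero bit) rounds the page count up to a power of two, and the badness() loop afterwards may still grow the order to keep waste under SLAB_MAX_BADNESS. A hedged sketch of the rounding step alone, with a portable fnzb() substitute and an assumed 4 KiB page:

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096  /* assumed */

    /* Index of the most significant non-zero bit, like the kernel's fnzb(). */
    static unsigned fnzb(size_t arg)
    {
        unsigned n = 0;
        while (arg >>= 1)
            n++;
        return n;
    }

    /* Minimum slab order: smallest 2^order pages that hold one object. */
    static unsigned min_order(size_t obj_size)
    {
        size_t pages = (obj_size + PAGE_SIZE - 1) / PAGE_SIZE;  /* SIZE2FRAMES */
        return (pages == 1) ? 0 : fnzb(pages - 1) + 1;
    }

    int main(void)
    {
        printf("order for 100 B: %u\n", min_order(100));     /* 0: 1 page  */
        printf("order for 5000 B: %u\n", min_order(5000));   /* 1: 2 pages */
        printf("order for 20000 B: %u\n", min_order(20000)); /* 3: 5 -> 8  */
        return 0;
    }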
-/**
- * Reclaim space occupied by objects that are already free
+/** Reclaim space occupied by objects that are already free
  *
  * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
+ *
  * @return Number of freed pages
- */
-static size_t _slab_reclaim(slab_cache_t *cache, int flags)
-{
-	unsigned int i;
+ *
+ */
+NO_TRACE static size_t _slab_reclaim(slab_cache_t *cache, unsigned int flags)
+{
+	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
+		return 0; /* Nothing to do */
+
+	/*
+	 * We count up to original magazine count to avoid
+	 * endless loop
+	 */
+	atomic_count_t magcount = atomic_get(&cache->magazine_counter);
+
 	slab_magazine_t *mag;
 	size_t frames = 0;
-	int magcount;
-
-	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
-		return 0; /* Nothing to do */
-
-	/* We count up to original magazine count to avoid
-	 * endless loop
-	 */
-	magcount = atomic_get(&cache->magazine_counter);
-	while (magcount-- && (mag=get_mag_from_cache(cache, 0))) {
-		frames += magazine_destroy(cache,mag);
-		if (!(flags & SLAB_RECLAIM_ALL) && frames)
+
+	while ((magcount--) && (mag = get_mag_from_cache(cache, 0))) {
+		frames += magazine_destroy(cache, mag);
+		if ((!(flags & SLAB_RECLAIM_ALL)) && (frames))
 			break;
 	}
…
 		/* Free cpu-bound magazines */
 		/* Destroy CPU magazines */
+		size_t i;
 		for (i = 0; i < config.cpu_count; i++) {
 			spinlock_lock(&cache->mag_cache[i].lock);
+
 			mag = cache->mag_cache[i].current;
 			if (mag)
…
 				frames += magazine_destroy(cache, mag);
 			cache->mag_cache[i].last = NULL;

 			spinlock_unlock(&cache->mag_cache[i].lock);
 		}
 	}

 	return frames;
 }

-/** Check that there are no slabs and remove cache from system */
+/** Check that there are no slabs and remove cache from system
+ *
+ */
 void slab_cache_destroy(slab_cache_t *cache)
 {
-	ipl_t ipl;
-
-	/* First remove cache from link, so that we don't need
+	/*
+	 * First remove cache from link, so that we don't need
 	 * to disable interrupts later
+	 *
 	 */
-
-	ipl = interrupts_disable();
-	spinlock_lock(&slab_cache_lock);
-
+	irq_spinlock_lock(&slab_cache_lock, true);
 	list_remove(&cache->link);
-
-	spinlock_unlock(&slab_cache_lock);
-	interrupts_restore(ipl);
-
-	/* Do not lock anything, we assume the software is correct and
-	 * does not touch the cache when it decides to destroy it */
+	irq_spinlock_unlock(&slab_cache_lock, true);
+
+	/*
+	 * Do not lock anything, we assume the software is correct and
+	 * does not touch the cache when it decides to destroy it
+	 *
+	 */

 	/* Destroy all magazines */
 	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

 	/* All slabs must be empty */
-	if (!list_empty(&cache->full_slabs) ||
-	    !list_empty(&cache->partial_slabs))
+	if ((!list_empty(&cache->full_slabs)) ||
+	    (!list_empty(&cache->partial_slabs)))
 		panic("Destroying cache that is not empty.");

 	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
 		free(cache->mag_cache);
+
 	slab_free(&slab_cache_cache, cache);
 }

-/** Allocate new object from cache - if no flags given, always returns memory */
-void *slab_alloc(slab_cache_t *cache, int flags)
-{
-	ipl_t ipl;
+/** Allocate new object from cache - if no flags given, always returns memory
+ *
+ */
+void *slab_alloc(slab_cache_t *cache, unsigned int flags)
+{
+	/* Disable interrupts to avoid deadlocks with interrupt handlers */
+	ipl_t ipl = interrupts_disable();
+
 	void *result = NULL;

-	/* Disable interrupts to avoid deadlocks with interrupt handlers */
-	ipl = interrupts_disable();
-
-	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
+	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
 		result = magazine_obj_get(cache);
-	}
+
 	if (!result)
 		result = slab_obj_create(cache, flags);

 	interrupts_restore(ipl);

 	if (result)
 		atomic_inc(&cache->allocated_objs);

 	return result;
 }
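slab_alloc() is a two-level path: the per-CPU magazine is consulted first, and only a miss reaches slab_obj_create(). The shape of that fallback, modelled in plain C (the *_model functions are placeholders standing in for the kernel calls, not real APIs):

    #include <stdlib.h>

    /* Stand-ins for the kernel's magazine and slab layers. */
    static void *magazine_obj_get_model(void) { return NULL; /* cache miss */ }
    static void *slab_obj_create_model(void)  { return malloc(64); }

    /* The slab_alloc() shape: try the per-CPU magazine first and only
     * fall back to carving a fresh object out of a slab on a miss. */
    static void *alloc_model(int nomagazine)
    {
        void *result = NULL;

        if (!nomagazine)
            result = magazine_obj_get_model();

        if (!result)
            result = slab_obj_create_model();

        return result;
    }

    int main(void)
    {
        void *obj = alloc_model(0);  /* magazine miss, served by slab layer */
        free(obj);
        return 0;
    }

The same shape reappears inverted in _slab_free() below: cache into a magazine when possible, fall back to the slab otherwise.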
-/** Return object to cache, use slab if known */
-static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
-{
-	ipl_t ipl;
-
-	ipl = interrupts_disable();
+/** Return object to cache, use slab if known
+ *
+ */
+NO_TRACE static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
+{
+	ipl_t ipl = interrupts_disable();
+
 	if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
-	    magazine_obj_put(cache, obj)) {
+	    (magazine_obj_put(cache, obj)))
 		slab_obj_destroy(cache, obj, slab);
-
-	}
+
 	interrupts_restore(ipl);
 	atomic_dec(&cache->allocated_objs);
 }

-/** Return slab object to cache */
+/** Return slab object to cache
+ *
+ */
 void slab_free(slab_cache_t *cache, void *obj)
 {
…
 }

-/* Go through all caches and reclaim what is possible */
-size_t slab_reclaim(int flags)
-{
-	slab_cache_t *cache;
+/** Go through all caches and reclaim what is possible
+ *
+ * Interrupts must be disabled before calling this function,
+ * otherwise memory allocation from interrupts can deadlock.
+ *
+ */
+size_t slab_reclaim(unsigned int flags)
+{
+	irq_spinlock_lock(&slab_cache_lock, false);
+
+	size_t frames = 0;
 	link_t *cur;
-	size_t frames = 0;
-
-	spinlock_lock(&slab_cache_lock);
-
-	/* TODO: Add assert, that interrupts are disabled, otherwise
-	 * memory allocation from interrupts can deadlock.
-	 */
-
 	for (cur = slab_cache_list.next; cur != &slab_cache_list;
 	    cur = cur->next) {
-		cache = list_get_instance(cur, slab_cache_t, link);
+		slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
 		frames += _slab_reclaim(cache, flags);
 	}
-
-	spinlock_unlock(&slab_cache_lock);
+
+	irq_spinlock_unlock(&slab_cache_lock, false);
+
 	return frames;
 }
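On the way back, _slab_free() prefers caching: only when magazine_obj_put() reports failure (no empty magazine could be found or allocated) does the object go straight back to its slab. A small standalone model of that decision; all names here are illustrative:

    #include <stdio.h>

    /* Model of magazine_obj_put(): 0 on success, -1 when no empty
     * magazine could be found or allocated. */
    static int mag_put_model(void *obj, int have_space)
    {
        (void) obj;
        return have_space ? 0 : -1;
    }

    static void obj_destroy_model(void *obj)
    {
        printf("returned %p to its slab\n", obj);
    }

    /* The _slab_free() decision: cache in a magazine when possible,
     * otherwise hand the object straight back to its slab. */
    static void free_model(void *obj, int nomagazine, int have_space)
    {
        if (nomagazine || mag_put_model(obj, have_space))
            obj_destroy_model(obj);
    }

    int main(void)
    {
        int x;
        free_model(&x, 0, 0);  /* no empty magazine: destroyed directly */
        free_model(&x, 0, 1);  /* cached in a magazine: nothing printed */
        return 0;
    }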
-
-/* Print list of slabs */
+/* Print list of slabs
+ *
+ */
 void slab_print_list(void)
 {
-	int skip = 0;
-
-	printf("slab name        size     pages  obj/pg slabs  cached allocated"
-	    " ctl\n");
-	printf("---------------- -------- ------ ------ ------ ------ ---------"
-	    " ---\n");
-
+	printf("[slab name       ] [size  ] [pages ] [obj/pg] [slabs ]"
+	    " [cached] [alloc ] [ctl]\n");
+
+	size_t skip = 0;
 	while (true) {
-		slab_cache_t *cache;
-		link_t *cur;
-		ipl_t ipl;
-		int i;
-
 		/*
 		 * We must not hold the slab_cache_lock spinlock when printing
…
 		 * statistics.
 		 */
-
-		ipl = interrupts_disable();
-		spinlock_lock(&slab_cache_lock);
-
+		irq_spinlock_lock(&slab_cache_lock, true);
+
+		link_t *cur;
+		size_t i;
 		for (i = 0, cur = slab_cache_list.next;
-		    i < skip && cur != &slab_cache_list;
-		    i++, cur = cur->next)
-			;
-
+		    (i < skip) && (cur != &slab_cache_list);
+		    i++, cur = cur->next);
+
 		if (cur == &slab_cache_list) {
-			spinlock_unlock(&slab_cache_lock);
-			interrupts_restore(ipl);
+			irq_spinlock_unlock(&slab_cache_lock, true);
 			break;
 		}

 		skip++;
-
-		cache = list_get_instance(cur, slab_cache_t, link);
-
-		char *name = cache->name;
+
+		slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
+
+		const char *name = cache->name;
 		uint8_t order = cache->order;
 		size_t size = cache->size;
-		unsigned int objects = cache->objects;
+		size_t objects = cache->objects;
 		long allocated_slabs = atomic_get(&cache->allocated_slabs);
 		long cached_objs = atomic_get(&cache->cached_objs);
 		long allocated_objs = atomic_get(&cache->allocated_objs);
-		int flags = cache->flags;
-
-		spinlock_unlock(&slab_cache_lock);
-		interrupts_restore(ipl);
-
-		printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n",
+		unsigned int flags = cache->flags;
+
+		irq_spinlock_unlock(&slab_cache_lock, true);
+
+		printf("%-18s %8zu %8u %8zu %8ld %8ld %8ld %-5s\n",
 		    name, size, (1 << order), objects, allocated_slabs,
 		    cached_objs, allocated_objs,
…
 void slab_cache_init(void)
 {
-	int i, size;
-
 	/* Initialize magazine cache */
 	_slab_cache_create(&mag_cache, "slab_magazine",
…
 	    sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE |
 	    SLAB_CACHE_SLINSIDE);
+
 	/* Initialize slab_cache cache */
 	_slab_cache_create(&slab_cache_cache, "slab_cache",
 	    sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL,
 	    SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
+
 	/* Initialize external slab cache */
 	slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0,
 	    NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

 	/* Initialize structures for malloc */
+	size_t i;
+	size_t size;
+
 	for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
 	    i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
…
 	    NULL, NULL, SLAB_CACHE_MAGDEFERRED);
 	}
-#ifdef CONFIG_DEBUG
+
+#ifdef CONFIG_DEBUG
 	_slab_initialized = 1;
 #endif
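slab_cache_init() sizes the malloc caches as one cache per power of two between 1 << SLAB_MIN_MALLOC_W and 1 << SLAB_MAX_MALLOC_W. Assuming bounds of 4 and 18 purely for illustration (the real values are kernel configuration constants), the loop generates this series of cache sizes:

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative bounds; the kernel's SLAB_MIN_MALLOC_W and
     * SLAB_MAX_MALLOC_W are configuration constants. */
    #define MIN_W 4
    #define MAX_W 18

    int main(void)
    {
        /* One cache per power of two, as in slab_cache_init(). */
        int i;
        size_t size;
        for (i = 0, size = (1 << MIN_W);
            i < (MAX_W - MIN_W + 1);
            i++, size <<= 1)
            printf("malloc cache %d: %zu bytes\n", i, size);
        return 0;
    }

This prints 16, 32, 64, ... up to 256 KiB under the assumed bounds: every kernel malloc() request is rounded up to the nearest such size.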
…
  *
  * Kernel calls this function, when it knows the real number of
- * processors.
- * Allocate slab for cpucache and enable it on all existing
- * slabs that are SLAB_CACHE_MAGDEFERRED
+ * processors. Allocate slab for cpucache and enable it on all
+ * existing slabs that are SLAB_CACHE_MAGDEFERRED
+ *
  */
 void slab_enable_cpucache(void)
 {
-	link_t *cur;
-	slab_cache_t *s;
-
 #ifdef CONFIG_DEBUG
 	_slab_initialized = 2;
 #endif
-
-	spinlock_lock(&slab_cache_lock);
-
+
+	irq_spinlock_lock(&slab_cache_lock, false);
+
+	link_t *cur;
 	for (cur = slab_cache_list.next; cur != &slab_cache_list;
-	    cur = cur->next) {
-		s = list_get_instance(cur, slab_cache_t, link);
-		if ((s->flags & SLAB_CACHE_MAGDEFERRED) !=
+	    cur = cur->next) {
+		slab_cache_t *slab = list_get_instance(cur, slab_cache_t, link);
+		if ((slab->flags & SLAB_CACHE_MAGDEFERRED) !=
 		    SLAB_CACHE_MAGDEFERRED)
 			continue;
-		make_magcache(s);
-		s->flags &= ~SLAB_CACHE_MAGDEFERRED;
-	}
-
-	spinlock_unlock(&slab_cache_lock);
-}
-
-/**************************************/
-/* kalloc/kfree functions */
-void *malloc(unsigned int size, int flags)
+
+		(void) make_magcache(slab);
+		slab->flags &= ~SLAB_CACHE_MAGDEFERRED;
+	}
+
+	irq_spinlock_unlock(&slab_cache_lock, false);
+}
+
+void *malloc(size_t size, unsigned int flags)
 {
 	ASSERT(_slab_initialized);
…
 	if (size < (1 << SLAB_MIN_MALLOC_W))
 		size = (1 << SLAB_MIN_MALLOC_W);
-
-	int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
-
+
+	uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
+
 	return slab_alloc(malloc_caches[idx], flags);
 }

-void *realloc(void *ptr, unsigned int size, int flags)
+void *realloc(void *ptr, size_t size, unsigned int flags)
 {
 	ASSERT(_slab_initialized);
…
 	if (size < (1 << SLAB_MIN_MALLOC_W))
 		size = (1 << SLAB_MIN_MALLOC_W);
-	int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
+	uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

 	new_ptr = slab_alloc(malloc_caches[idx], flags);
…
 	if (!ptr)
 		return;

 	slab_t *slab = obj2slab(ptr);
 	_slab_free(slab->cache, ptr, slab);
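The index computation in malloc() picks the smallest power-of-two cache that fits the request: fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1. A standalone check of that mapping, again assuming SLAB_MIN_MALLOC_W is 4 (16-byte minimum) purely for illustration:

    #include <stdio.h>
    #include <stddef.h>

    #define MIN_MALLOC_W 4  /* assumed value for illustration */

    /* Portable stand-in for fnzb(): most significant non-zero bit. */
    static unsigned fnzb(size_t arg)
    {
        unsigned n = 0;
        while (arg >>= 1)
            n++;
        return n;
    }

    /* Map a request size to its power-of-two cache index, as malloc()
     * above does with fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1. */
    static unsigned malloc_idx(size_t size)
    {
        if (size < (1 << MIN_MALLOC_W))
            size = (1 << MIN_MALLOC_W);
        return fnzb(size - 1) - MIN_MALLOC_W + 1;
    }

    int main(void)
    {
        printf("%u %u %u\n", malloc_idx(16), malloc_idx(17), malloc_idx(64));
        /* 16 -> cache 0 (16 B), 17 -> cache 1 (32 B), 64 -> cache 2 (64 B) */
        return 0;
    }

The size - 1 trick makes exact powers of two land in their own cache instead of the next one up.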
kernel/generic/src/mm/tlb.c
rfb150d78 r46c20c8

 /**
  * @file
- * @brief	Generic TLB shootdown algorithm.
+ * @brief Generic TLB shootdown algorithm.
  *
  * The algorithm implemented here is based on the CMU TLB shootdown
…
 #include <cpu.h>

-/**
- * This lock is used for synchronisation between sender and
- * recipients of TLB shootdown message. It must be acquired
- * before CPU structure lock.
- */
-SPINLOCK_INITIALIZE(tlblock);
-
 void tlb_init(void)
 {
…
 #ifdef CONFIG_SMP

+/**
+ * This lock is used for synchronisation between sender and
+ * recipients of TLB shootdown message. It must be acquired
+ * before CPU structure lock.
+ *
+ */
+IRQ_SPINLOCK_STATIC_INITIALIZE(tlblock);
+
 /** Send TLB shootdown message.
  *
  * ... to all other processors.
  *
- * This function must be called with interrupts disabled.
- *
  * @param type Type describing scope of shootdown.
  * @param asid Address space, if required by type.
  * @param page Virtual page address, if required by type.
  * @param count Number of pages, if required by type.
+ *
+ * @return The interrupt priority level as it existed prior to this call.
+ *
  */
-void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid,
+ipl_t tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid,
     uintptr_t page, size_t count)
 {
-	unsigned int i;
-
-	CPU->tlb_active = 0;
-	spinlock_lock(&tlblock);
+	ipl_t ipl = interrupts_disable();
+	CPU->tlb_active = false;
+	irq_spinlock_lock(&tlblock, false);

+	size_t i;
 	for (i = 0; i < config.cpu_count; i++) {
-		cpu_t *cpu;
-
 		if (i == CPU->id)
 			continue;
-
-		cpu = &cpus[i];
-		spinlock_lock(&cpu->lock);
+
+		cpu_t *cpu = &cpus[i];
+
+		irq_spinlock_lock(&cpu->lock, false);
 		if (cpu->tlb_messages_count == TLB_MESSAGE_QUEUE_LEN) {
 			/*
…
 			cpu->tlb_messages[idx].count = count;
 		}
-		spinlock_unlock(&cpu->lock);
+		irq_spinlock_unlock(&cpu->lock, false);
 	}

 	tlb_shootdown_ipi_send();
-busy_wait:
-	for (i = 0; i < config.cpu_count; i++)
+
+busy_wait:
+	for (i = 0; i < config.cpu_count; i++) {
 		if (cpus[i].tlb_active)
 			goto busy_wait;
+	}
+
+	return ipl;
 }

-/** Finish TLB shootdown sequence. */
-void tlb_shootdown_finalize(void)
+/** Finish TLB shootdown sequence.
+ *
+ * @param ipl Previous interrupt priority level.
+ *
+ */
+void tlb_shootdown_finalize(ipl_t ipl)
 {
-	spinlock_unlock(&tlblock);
-	CPU->tlb_active = 1;
+	irq_spinlock_unlock(&tlblock, false);
+	CPU->tlb_active = true;
+	interrupts_restore(ipl);
 }

…
 }
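The signature change shifts responsibility for the interrupt state: callers no longer disable interrupts themselves, they keep the ipl_t that tlb_shootdown_start() returns and hand it to tlb_shootdown_finalize(). Schematically, a caller under the new convention looks like this; asid and page are placeholders for values the real caller already holds, and the fragment is an illustration, not code from the changeset:

    /* Schematic caller, not code from this changeset. Interrupts are
     * disabled inside tlb_shootdown_start() and restored by
     * tlb_shootdown_finalize(). */
    ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, asid, page, 1);

    /* Every other CPU is now parked in tlb_shootdown_ipi_recv(); change
     * the mapping and flush the local TLB while nothing else runs. */
    tlb_invalidate_pages(asid, page, 1);

    tlb_shootdown_finalize(ipl);

Folding the interrupt handling into the start/finalize pair removes the old, easily violated precondition that the caller must already have interrupts disabled.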
-/** Receive TLB shootdown message. */
+/** Receive TLB shootdown message.
+ *
+ */
 void tlb_shootdown_ipi_recv(void)
 {
-	tlb_invalidate_type_t type;
-	asid_t asid;
-	uintptr_t page;
-	size_t count;
-	unsigned int i;
-
 	ASSERT(CPU);

-	CPU->tlb_active = 0;
-	spinlock_lock(&tlblock);
-	spinlock_unlock(&tlblock);
+	CPU->tlb_active = false;
+	irq_spinlock_lock(&tlblock, false);
+	irq_spinlock_unlock(&tlblock, false);

-	spinlock_lock(&CPU->lock);
+	irq_spinlock_lock(&CPU->lock, false);
 	ASSERT(CPU->tlb_messages_count <= TLB_MESSAGE_QUEUE_LEN);

+	size_t i;
 	for (i = 0; i < CPU->tlb_messages_count; CPU->tlb_messages_count--) {
-		type = CPU->tlb_messages[i].type;
-		asid = CPU->tlb_messages[i].asid;
-		page = CPU->tlb_messages[i].page;
-		count = CPU->tlb_messages[i].count;
-
+		tlb_invalidate_type_t type = CPU->tlb_messages[i].type;
+		asid_t asid = CPU->tlb_messages[i].asid;
+		uintptr_t page = CPU->tlb_messages[i].page;
+		size_t count = CPU->tlb_messages[i].count;
+
 		switch (type) {
 		case TLB_INVL_ALL:
…
 			break;
 		case TLB_INVL_PAGES:
-			ASSERT(count);
+			ASSERT(count);
 			tlb_invalidate_pages(asid, page, count);
 			break;
…
 			break;
 		}
+
 		if (type == TLB_INVL_ALL)
 			break;
 	}

-	spinlock_unlock(&CPU->lock);
-	CPU->tlb_active = 1;
+	irq_spinlock_unlock(&CPU->lock, false);
+	CPU->tlb_active = true;
 }
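tlb_shootdown_ipi_recv() drains the per-CPU message queue and stops early when it meets a TLB_INVL_ALL entry, since a full flush makes any remaining selective flushes redundant. A simplified, standalone model of that early-exit drain; the enum and queue are stand-ins, and the kernel's real loop additionally decrements tlb_messages_count under CPU->lock:

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative message types, modelled on the TLB_INVL_* constants. */
    typedef enum { INVL_ALL, INVL_ASID, INVL_PAGES } invl_t;

    /* Drain queued shootdown messages; a full flush subsumes the rest. */
    static void drain(const invl_t *msgs, size_t count)
    {
        size_t i;
        for (i = 0; i < count; i++) {
            if (msgs[i] == INVL_ALL) {
                printf("flush whole TLB\n");
                break;  /* everything is already gone */
            }
            printf("flush selectively\n");
        }
    }

    int main(void)
    {
        invl_t q[] = { INVL_PAGES, INVL_ALL, INVL_ASID };
        drain(q, 3);  /* second message ends the drain early */
        return 0;
    }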
