Changeset da1bafb in mainline for kernel/generic/src/mm
- Timestamp: 2010-05-24T18:57:31Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 0095368
- Parents: 666f492
- Location: kernel/generic/src/mm
- Files: 5 edited
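The as.c hunk below consistently retypes flag and attribute parameters from int to unsigned int (as_create(), as_area_create(), as_area_resize(), as_area_share(), as_area_change_flags(), area_flags_to_page_flags(), page_table_create() and the sys_* wrappers), changes the as_destructor() return type to size_t, and renames terse locals such as a, b and c to area, ptr and size. A minimal caller-side sketch of the retyped interface follows; the AS_AREA_ATTR_NONE attribute constant and the use of a NULL backend are illustrative assumptions, not part of this changeset:

```c
#include <mm/as.h>	/* as_area_create(), AS, AS_AREA_* flags */

/* Map a read/write, cacheable area of `size` bytes at `base` into the
 * current address space. After this changeset the `flags` and `attrs`
 * arguments of as_area_create() are unsigned int. AS_AREA_ATTR_NONE and
 * the NULL backend (no backend data) are assumptions for illustration. */
static as_area_t *map_rw_area(uintptr_t base, size_t size)
{
	unsigned int flags = AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE;

	return as_area_create(AS, flags, size, base, AS_AREA_ATTR_NONE,
	    NULL, NULL);
}
```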
kernel/generic/src/mm/as.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief Address space related functions.35 * @brief Address space related functions. 36 36 * 37 37 * This file contains address space manipulation functions. … … 86 86 * Each architecture decides what functions will be used to carry out 87 87 * address space operations such as creating or locking page tables. 88 * 88 89 */ 89 90 as_operations_t *as_operations = NULL; … … 91 92 /** 92 93 * Slab for as_t objects. 94 * 93 95 */ 94 96 static slab_cache_t *as_slab; … … 100 102 * - as->asid for each as of the as_t type 101 103 * - asids_allocated counter 104 * 102 105 */ 103 106 SPINLOCK_INITIALIZE(asidlock); … … 106 109 * This list contains address spaces that are not active on any 107 110 * processor and that have valid ASID. 111 * 108 112 */ 109 113 LIST_INITIALIZE(inactive_as_with_asid_head); … … 112 116 as_t *AS_KERNEL = NULL; 113 117 114 static int area_flags_to_page_flags(int);118 static unsigned int area_flags_to_page_flags(unsigned int); 115 119 static as_area_t *find_area_and_lock(as_t *, uintptr_t); 116 120 static bool check_area_conflicts(as_t *, uintptr_t, size_t, as_area_t *); 117 121 static void sh_info_remove_reference(share_info_t *); 118 122 119 static int as_constructor(void *obj, int flags)123 static int as_constructor(void *obj, unsigned int flags) 120 124 { 121 125 as_t *as = (as_t *) obj; 122 int rc; 123 126 124 127 link_initialize(&as->inactive_as_with_asid_link); 125 128 mutex_initialize(&as->lock, MUTEX_PASSIVE); 126 129 127 rc = as_constructor_arch(as, flags);130 int rc = as_constructor_arch(as, flags); 128 131 129 132 return rc; 130 133 } 131 134 132 static int as_destructor(void *obj)135 static size_t as_destructor(void *obj) 133 136 { 134 137 as_t *as = (as_t *) obj; 135 136 138 return as_destructor_arch(as); 137 139 } … … 141 143 { 142 144 as_arch_init(); 143 145 144 146 as_slab = slab_cache_create("as_slab", sizeof(as_t), 0, 145 147 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED); … … 157 159 /** Create address space. 158 160 * 159 * @param flags Flags that influence the way in wich the address space 160 * is created. 161 */ 162 as_t *as_create(int flags) 163 { 164 as_t *as; 165 166 as = (as_t *) slab_alloc(as_slab, 0); 161 * @param flags Flags that influence the way in wich the address 162 * space is created. 163 * 164 */ 165 as_t *as_create(unsigned int flags) 166 { 167 as_t *as = (as_t *) slab_alloc(as_slab, 0); 167 168 (void) as_create_arch(as, 0); 168 169 … … 176 177 atomic_set(&as->refcount, 0); 177 178 as->cpu_refcount = 0; 179 178 180 #ifdef AS_PAGE_TABLE 179 181 as->genarch.page_table = page_table_create(flags); … … 192 194 * We know that we don't hold any spinlock. 193 195 * 194 * @param as Address space to be destroyed. 196 * @param as Address space to be destroyed. 197 * 195 198 */ 196 199 void as_destroy(as_t *as) 197 200 { 198 ipl_t ipl;199 bool cond;200 201 DEADLOCK_PROBE_INIT(p_asidlock); 201 202 … … 214 215 * disabled to prevent nested context switches. We also depend on the 215 216 * fact that so far no spinlocks are held. 
217 * 216 218 */ 217 219 preemption_disable(); 218 ipl = interrupts_read(); 220 ipl_t ipl = interrupts_read(); 221 219 222 retry: 220 223 interrupts_disable(); … … 224 227 goto retry; 225 228 } 226 preemption_enable(); /* Interrupts disabled, enable preemption */ 227 if (as->asid != ASID_INVALID && as != AS_KERNEL) { 229 230 /* Interrupts disabled, enable preemption */ 231 preemption_enable(); 232 233 if ((as->asid != ASID_INVALID) && (as != AS_KERNEL)) { 228 234 if (as->cpu_refcount == 0) 229 235 list_remove(&as->inactive_as_with_asid_link); 236 230 237 asid_put(as->asid); 231 238 } 239 232 240 spinlock_unlock(&asidlock); 233 241 234 242 /* 235 243 * Destroy address space areas of the address space. 236 244 * The B+tree must be walked carefully because it is 237 245 * also being destroyed. 238 * /239 for (cond = true; cond; ) {240 btree_node_t *node;241 246 * 247 */ 248 bool cond = true; 249 while (cond) { 242 250 ASSERT(!list_empty(&as->as_area_btree.leaf_head)); 243 node = list_get_instance(as->as_area_btree.leaf_head.next, 251 252 btree_node_t *node = 253 list_get_instance(as->as_area_btree.leaf_head.next, 244 254 btree_node_t, leaf_link); 245 246 if ((cond = node->keys)) {255 256 if ((cond = node->keys)) 247 257 as_area_destroy(as, node->key[0]); 248 } 249 } 250 258 } 259 251 260 btree_destroy(&as->as_area_btree); 261 252 262 #ifdef AS_PAGE_TABLE 253 263 page_table_destroy(as->genarch.page_table); … … 255 265 page_table_destroy(NULL); 256 266 #endif 257 267 258 268 interrupts_restore(ipl); 259 269 260 270 slab_free(as_slab, as); 261 271 } … … 266 276 * space. 267 277 * 268 * @param a Address space to be held. 278 * @param as Address space to be held. 279 * 269 280 */ 270 281 void as_hold(as_t *as) … … 278 289 * space. 279 290 * 280 * @param a Address space to be released. 291 * @param asAddress space to be released. 292 * 281 293 */ 282 294 void as_release(as_t *as) … … 290 302 * The created address space area is added to the target address space. 291 303 * 292 * @param as Target address space. 293 * @param flags Flags of the area memory. 294 * @param size Size of area. 295 * @param base Base address of area. 296 * @param attrs Attributes of the area. 297 * @param backend Address space area backend. NULL if no backend is used. 298 * @param backend_data NULL or a pointer to an array holding two void *. 299 * 300 * @return Address space area on success or NULL on failure. 301 */ 302 as_area_t * 303 as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs, 304 mem_backend_t *backend, mem_backend_data_t *backend_data) 305 { 306 ipl_t ipl; 307 as_area_t *a; 308 304 * @param as Target address space. 305 * @param flags Flags of the area memory. 306 * @param size Size of area. 307 * @param base Base address of area. 308 * @param attrs Attributes of the area. 309 * @param backend Address space area backend. NULL if no backend is used. 310 * @param backend_data NULL or a pointer to an array holding two void *. 311 * 312 * @return Address space area on success or NULL on failure. 313 * 314 */ 315 as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size, 316 uintptr_t base, unsigned int attrs, mem_backend_t *backend, 317 mem_backend_data_t *backend_data) 318 { 309 319 if (base % PAGE_SIZE) 310 320 return NULL; 311 321 312 322 if (!size) 313 323 return NULL; 314 324 315 325 /* Writeable executable areas are not supported. 
*/ 316 326 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE)) 317 327 return NULL; 318 328 319 ipl = interrupts_disable();329 ipl_t ipl = interrupts_disable(); 320 330 mutex_lock(&as->lock); 321 331 … … 326 336 } 327 337 328 a = (as_area_t *) malloc(sizeof(as_area_t), 0); 329 330 mutex_initialize(&a->lock, MUTEX_PASSIVE); 331 332 a->as = as; 333 a->flags = flags; 334 a->attributes = attrs; 335 a->pages = SIZE2FRAMES(size); 336 a->base = base; 337 a->sh_info = NULL; 338 a->backend = backend; 338 as_area_t *area = (as_area_t *) malloc(sizeof(as_area_t), 0); 339 340 mutex_initialize(&area->lock, MUTEX_PASSIVE); 341 342 area->as = as; 343 area->flags = flags; 344 area->attributes = attrs; 345 area->pages = SIZE2FRAMES(size); 346 area->base = base; 347 area->sh_info = NULL; 348 area->backend = backend; 349 339 350 if (backend_data) 340 a ->backend_data = *backend_data;351 area->backend_data = *backend_data; 341 352 else 342 memsetb(&a->backend_data, sizeof(a->backend_data), 0); 343 344 btree_create(&a->used_space); 345 346 btree_insert(&as->as_area_btree, base, (void *) a, NULL); 347 353 memsetb(&area->backend_data, sizeof(area->backend_data), 0); 354 355 btree_create(&area->used_space); 356 btree_insert(&as->as_area_btree, base, (void *) area, NULL); 357 348 358 mutex_unlock(&as->lock); 349 359 interrupts_restore(ipl); 350 351 return a ;360 361 return area; 352 362 } 353 363 354 364 /** Find address space area and change it. 355 365 * 356 * @param as Address space. 357 * @param address Virtual address belonging to the area to be changed. 358 * Must be page-aligned. 359 * @param size New size of the virtual memory block starting at 360 * address. 361 * @param flags Flags influencing the remap operation. Currently unused. 362 * 363 * @return Zero on success or a value from @ref errno.h otherwise. 364 */ 365 int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags) 366 { 367 as_area_t *area; 368 ipl_t ipl; 369 size_t pages; 370 371 ipl = interrupts_disable(); 366 * @param as Address space. 367 * @param address Virtual address belonging to the area to be changed. 368 * Must be page-aligned. 369 * @param size New size of the virtual memory block starting at 370 * address. 371 * @param flags Flags influencing the remap operation. Currently unused. 372 * 373 * @return Zero on success or a value from @ref errno.h otherwise. 374 * 375 */ 376 int as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags) 377 { 378 ipl_t ipl = interrupts_disable(); 372 379 mutex_lock(&as->lock); 373 380 374 381 /* 375 382 * Locate the area. 376 */ 377 area = find_area_and_lock(as, address); 383 * 384 */ 385 as_area_t *area = find_area_and_lock(as, address); 378 386 if (!area) { 379 387 mutex_unlock(&as->lock); … … 381 389 return ENOENT; 382 390 } 383 391 384 392 if (area->backend == &phys_backend) { 385 393 /* 386 394 * Remapping of address space areas associated 387 395 * with memory mapped devices is not supported. 396 * 388 397 */ 389 398 mutex_unlock(&area->lock); … … 392 401 return ENOTSUP; 393 402 } 403 394 404 if (area->sh_info) { 395 405 /* 396 * Remapping of shared address space areas 406 * Remapping of shared address space areas 397 407 * is not supported. 408 * 398 409 */ 399 410 mutex_unlock(&area->lock); … … 402 413 return ENOTSUP; 403 414 } 404 405 pages = SIZE2FRAMES((address - area->base) + size);415 416 size_t pages = SIZE2FRAMES((address - area->base) + size); 406 417 if (!pages) { 407 418 /* 408 419 * Zero size address space areas are not allowed. 
420 * 409 421 */ 410 422 mutex_unlock(&area->lock); … … 415 427 416 428 if (pages < area->pages) { 417 bool cond;418 429 uintptr_t start_free = area->base + pages * PAGE_SIZE; 419 430 420 431 /* 421 432 * Shrinking the area. 422 433 * No need to check for overlaps. 423 */ 424 434 * 435 */ 436 425 437 page_table_lock(as, false); 426 438 427 439 /* 428 440 * Start TLB shootdown sequence. 441 * 429 442 */ 430 443 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base + 431 444 pages * PAGE_SIZE, area->pages - pages); 432 445 433 446 /* 434 447 * Remove frames belonging to used space starting from … … 437 450 * is also the right way to remove part of the used_space 438 451 * B+tree leaf list. 439 * /440 for (cond = true; cond;) {441 btree_node_t *node;442 452 * 453 */ 454 bool cond = true; 455 while (cond) { 443 456 ASSERT(!list_empty(&area->used_space.leaf_head)); 444 node = 457 458 btree_node_t *node = 445 459 list_get_instance(area->used_space.leaf_head.prev, 446 460 btree_node_t, leaf_link); 461 447 462 if ((cond = (bool) node->keys)) { 448 uintptr_t b= node->key[node->keys - 1];449 size_t c=463 uintptr_t ptr = node->key[node->keys - 1]; 464 size_t size = 450 465 (size_t) node->value[node->keys - 1]; 451 unsigned int i = 0;452 453 if (overlaps( b, c* PAGE_SIZE, area->base,466 size_t i = 0; 467 468 if (overlaps(ptr, size * PAGE_SIZE, area->base, 454 469 pages * PAGE_SIZE)) { 455 470 456 if ( b + c* PAGE_SIZE <= start_free) {471 if (ptr + size * PAGE_SIZE <= start_free) { 457 472 /* 458 473 * The whole interval fits 459 474 * completely in the resized 460 475 * address space area. 476 * 461 477 */ 462 478 break; 463 479 } 464 480 465 481 /* 466 482 * Part of the interval corresponding 467 483 * to b and c overlaps with the resized 468 484 * address space area. 485 * 469 486 */ 470 471 cond = false; /* we are almost done */ 472 i = (start_free - b) >> PAGE_WIDTH; 487 488 /* We are almost done */ 489 cond = false; 490 i = (start_free - ptr) >> PAGE_WIDTH; 473 491 if (!used_space_remove(area, start_free, 474 c - i)) 475 panic("Cannot remove used " 476 "space."); 492 size - i)) 493 panic("Cannot remove used space."); 477 494 } else { 478 495 /* … … 480 497 * completely removed. 481 498 */ 482 if (!used_space_remove(area, b, c)) 483 panic("Cannot remove used " 484 "space."); 499 if (!used_space_remove(area, ptr, size)) 500 panic("Cannot remove used space."); 485 501 } 486 487 for (; i < c; i++) { 488 pte_t *pte; 489 490 pte = page_mapping_find(as, b + 502 503 for (; i < size; i++) { 504 pte_t *pte = page_mapping_find(as, ptr + 491 505 i * PAGE_SIZE); 492 ASSERT(pte && PTE_VALID(pte) && 493 PTE_PRESENT(pte)); 494 if (area->backend && 495 area->backend->frame_free) { 506 507 ASSERT(pte); 508 ASSERT(PTE_VALID(pte)); 509 ASSERT(PTE_PRESENT(pte)); 510 511 if ((area->backend) && 512 (area->backend->frame_free)) { 496 513 area->backend->frame_free(area, 497 b+ i * PAGE_SIZE,514 ptr + i * PAGE_SIZE, 498 515 PTE_GET_FRAME(pte)); 499 516 } 500 page_mapping_remove(as, b + 517 518 page_mapping_remove(as, ptr + 501 519 i * PAGE_SIZE); 502 520 } 503 521 } 504 522 } 505 523 506 524 /* 507 525 * Finish TLB shootdown sequence. 508 */ 509 526 * 527 */ 528 510 529 tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE, 511 530 area->pages - pages); 512 531 513 532 /* 514 533 * Invalidate software translation caches (e.g. TSB on sparc64). 
534 * 515 535 */ 516 536 as_invalidate_translation_cache(as, area->base + 517 537 pages * PAGE_SIZE, area->pages - pages); 518 538 tlb_shootdown_finalize(); 519 539 520 540 page_table_unlock(as, false); 521 522 541 } else { 523 542 /* 524 543 * Growing the area. 525 544 * Check for overlaps with other address space areas. 545 * 526 546 */ 527 547 if (!check_area_conflicts(as, address, pages * PAGE_SIZE, 528 548 area)) { 529 549 mutex_unlock(&area->lock); 530 mutex_unlock(&as->lock); 550 mutex_unlock(&as->lock); 531 551 interrupts_restore(ipl); 532 552 return EADDRNOTAVAIL; 533 553 } 534 } 535 554 } 555 536 556 area->pages = pages; 537 557 … … 539 559 mutex_unlock(&as->lock); 540 560 interrupts_restore(ipl); 541 561 542 562 return 0; 543 563 } … … 545 565 /** Destroy address space area. 546 566 * 547 * @param as Address space. 548 * @param address Address within the area to be deleted. 549 * 550 * @return Zero on success or a value from @ref errno.h on failure. 567 * @param as Address space. 568 * @param address Address within the area to be deleted. 569 * 570 * @return Zero on success or a value from @ref errno.h on failure. 571 * 551 572 */ 552 573 int as_area_destroy(as_t *as, uintptr_t address) 553 574 { 554 as_area_t *area; 555 uintptr_t base; 556 link_t *cur; 557 ipl_t ipl; 558 559 ipl = interrupts_disable(); 575 ipl_t ipl = interrupts_disable(); 560 576 mutex_lock(&as->lock); 561 562 a rea = find_area_and_lock(as, address);577 578 as_area_t *area = find_area_and_lock(as, address); 563 579 if (!area) { 564 580 mutex_unlock(&as->lock); … … 566 582 return ENOENT; 567 583 } 568 569 base = area->base;570 584 585 uintptr_t base = area->base; 586 571 587 page_table_lock(as, false); 572 588 573 589 /* 574 590 * Start TLB shootdown sequence. 575 591 */ 576 592 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages); 577 593 578 594 /* 579 595 * Visit only the pages mapped by used_space B+tree. 580 596 */ 597 link_t *cur; 581 598 for (cur = area->used_space.leaf_head.next; 582 599 cur != &area->used_space.leaf_head; cur = cur->next) { 583 600 btree_node_t *node; 584 unsigned int i;601 btree_key_t i; 585 602 586 603 node = list_get_instance(cur, btree_node_t, leaf_link); 587 604 for (i = 0; i < node->keys; i++) { 588 uintptr_t b = node->key[i]; 589 size_t j; 590 pte_t *pte; 605 uintptr_t ptr = node->key[i]; 606 size_t size; 591 607 592 for (j = 0; j < (size_t) node->value[i]; j++) { 593 pte = page_mapping_find(as, b + j * PAGE_SIZE); 594 ASSERT(pte && PTE_VALID(pte) && 595 PTE_PRESENT(pte)); 596 if (area->backend && 597 area->backend->frame_free) { 598 area->backend->frame_free(area, b + 599 j * PAGE_SIZE, PTE_GET_FRAME(pte)); 608 for (size = 0; size < (size_t) node->value[i]; size++) { 609 pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE); 610 611 ASSERT(pte); 612 ASSERT(PTE_VALID(pte)); 613 ASSERT(PTE_PRESENT(pte)); 614 615 if ((area->backend) && 616 (area->backend->frame_free)) { 617 area->backend->frame_free(area, 618 ptr + size * PAGE_SIZE, PTE_GET_FRAME(pte)); 600 619 } 601 page_mapping_remove(as, b + j * PAGE_SIZE); 620 621 page_mapping_remove(as, ptr + size * PAGE_SIZE); 602 622 } 603 623 } 604 624 } 605 625 606 626 /* 607 627 * Finish TLB shootdown sequence. 608 */ 609 628 * 629 */ 630 610 631 tlb_invalidate_pages(as->asid, area->base, area->pages); 611 632 612 633 /* 613 634 * Invalidate potential software translation caches (e.g. TSB on 614 635 * sparc64). 
636 * 615 637 */ 616 638 as_invalidate_translation_cache(as, area->base, area->pages); 617 639 tlb_shootdown_finalize(); 618 640 619 641 page_table_unlock(as, false); 620 642 621 643 btree_destroy(&area->used_space); 622 644 623 645 area->attributes |= AS_AREA_ATTR_PARTIAL; 624 646 625 647 if (area->sh_info) 626 648 sh_info_remove_reference(area->sh_info); 627 649 628 650 mutex_unlock(&area->lock); 629 651 630 652 /* 631 653 * Remove the empty area from address space. 654 * 632 655 */ 633 656 btree_remove(&as->as_area_btree, base, NULL); … … 647 670 * sh_info of the source area. The process of duplicating the 648 671 * mapping is done through the backend share function. 649 * 650 * @param src_as Pointer to source address space.651 * @param src_base Base address of the source address space area.652 * @param acc_size Expected size of the source area.653 * @param dst_as Pointer to destination address space.654 * @param dst_base Target base address.672 * 673 * @param src_as Pointer to source address space. 674 * @param src_base Base address of the source address space area. 675 * @param acc_size Expected size of the source area. 676 * @param dst_as Pointer to destination address space. 677 * @param dst_base Target base address. 655 678 * @param dst_flags_mask Destination address space area flags mask. 656 679 * 657 * @return Zero on success or ENOENT if there is no such task or if 658 * there is no such address space area, EPERM if there was 659 * a problem in accepting the area or ENOMEM if there was a 660 * problem in allocating destination address space area. 661 * ENOTSUP is returned if the address space area backend 662 * does not support sharing. 680 * @return Zero on success. 681 * @return ENOENT if there is no such task or such address space. 682 * @return EPERM if there was a problem in accepting the area. 683 * @return ENOMEM if there was a problem in allocating destination 684 * address space area. 685 * @return ENOTSUP if the address space area backend does not support 686 * sharing. 687 * 663 688 */ 664 689 int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size, 665 as_t *dst_as, uintptr_t dst_base, int dst_flags_mask) 666 { 667 ipl_t ipl; 668 int src_flags; 669 size_t src_size; 670 as_area_t *src_area, *dst_area; 671 share_info_t *sh_info; 672 mem_backend_t *src_backend; 673 mem_backend_data_t src_backend_data; 674 675 ipl = interrupts_disable(); 690 as_t *dst_as, uintptr_t dst_base, unsigned int dst_flags_mask) 691 { 692 ipl_t ipl = interrupts_disable(); 676 693 mutex_lock(&src_as->lock); 677 src_area = find_area_and_lock(src_as, src_base);694 as_area_t *src_area = find_area_and_lock(src_as, src_base); 678 695 if (!src_area) { 679 696 /* 680 697 * Could not find the source address space area. 698 * 681 699 */ 682 700 mutex_unlock(&src_as->lock); … … 684 702 return ENOENT; 685 703 } 686 687 if ( !src_area->backend || !src_area->backend->share) {704 705 if ((!src_area->backend) || (!src_area->backend->share)) { 688 706 /* 689 707 * There is no backend or the backend does not 690 708 * know how to share the area. 
709 * 691 710 */ 692 711 mutex_unlock(&src_area->lock); … … 696 715 } 697 716 698 s rc_size = src_area->pages * PAGE_SIZE;699 src_flags = src_area->flags;700 src_backend = src_area->backend;701 src_backend_data = src_area->backend_data;702 717 size_t src_size = src_area->pages * PAGE_SIZE; 718 unsigned int src_flags = src_area->flags; 719 mem_backend_t *src_backend = src_area->backend; 720 mem_backend_data_t src_backend_data = src_area->backend_data; 721 703 722 /* Share the cacheable flag from the original mapping */ 704 723 if (src_flags & AS_AREA_CACHEABLE) 705 724 dst_flags_mask |= AS_AREA_CACHEABLE; 706 707 if ( src_size != acc_size||708 ( src_flags & dst_flags_mask) != dst_flags_mask) {725 726 if ((src_size != acc_size) || 727 ((src_flags & dst_flags_mask) != dst_flags_mask)) { 709 728 mutex_unlock(&src_area->lock); 710 729 mutex_unlock(&src_as->lock); … … 712 731 return EPERM; 713 732 } 714 733 715 734 /* 716 735 * Now we are committed to sharing the area. 717 736 * First, prepare the area for sharing. 718 737 * Then it will be safe to unlock it. 719 */ 720 sh_info = src_area->sh_info; 738 * 739 */ 740 share_info_t *sh_info = src_area->sh_info; 721 741 if (!sh_info) { 722 742 sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0); … … 725 745 btree_create(&sh_info->pagemap); 726 746 src_area->sh_info = sh_info; 747 727 748 /* 728 749 * Call the backend to setup sharing. 750 * 729 751 */ 730 752 src_area->backend->share(src_area); … … 734 756 mutex_unlock(&sh_info->lock); 735 757 } 736 758 737 759 mutex_unlock(&src_area->lock); 738 760 mutex_unlock(&src_as->lock); 739 761 740 762 /* 741 763 * Create copy of the source address space area. … … 745 767 * The flags of the source area are masked against dst_flags_mask 746 768 * to support sharing in less privileged mode. 747 */ 748 dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base, 749 AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data); 769 * 770 */ 771 as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size, 772 dst_base, AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data); 750 773 if (!dst_area) { 751 774 /* … … 757 780 return ENOMEM; 758 781 } 759 782 760 783 /* 761 784 * Now the destination address space area has been 762 785 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL 763 786 * attribute and set the sh_info. 764 */ 765 mutex_lock(&dst_as->lock); 787 * 788 */ 789 mutex_lock(&dst_as->lock); 766 790 mutex_lock(&dst_area->lock); 767 791 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL; 768 792 dst_area->sh_info = sh_info; 769 793 mutex_unlock(&dst_area->lock); 770 mutex_unlock(&dst_as->lock); 771 794 mutex_unlock(&dst_as->lock); 795 772 796 interrupts_restore(ipl); 773 797 … … 779 803 * The address space area must be locked prior to this call. 780 804 * 781 * @param area Address space area. 782 * @param access Access mode. 783 * 784 * @return False if access violates area's permissions, true 785 * otherwise. 805 * @param area Address space area. 806 * @param access Access mode. 807 * 808 * @return False if access violates area's permissions, true 809 * otherwise. 
810 * 786 811 */ 787 812 bool as_area_check_access(as_area_t *area, pf_access_t access) … … 792 817 [PF_ACCESS_EXEC] = AS_AREA_EXEC 793 818 }; 794 819 795 820 if (!(area->flags & flagmap[access])) 796 821 return false; … … 813 838 * 814 839 */ 815 int as_area_change_flags(as_t *as, int flags, uintptr_t address) 816 { 817 as_area_t *area; 818 link_t *cur; 819 ipl_t ipl; 820 int page_flags; 821 uintptr_t *old_frame; 822 size_t frame_idx; 823 size_t used_pages; 824 840 int as_area_change_flags(as_t *as, unsigned int flags, uintptr_t address) 841 { 825 842 /* Flags for the new memory mapping */ 826 page_flags = area_flags_to_page_flags(flags);827 828 ipl = interrupts_disable();843 unsigned int page_flags = area_flags_to_page_flags(flags); 844 845 ipl_t ipl = interrupts_disable(); 829 846 mutex_lock(&as->lock); 830 831 a rea = find_area_and_lock(as, address);847 848 as_area_t *area = find_area_and_lock(as, address); 832 849 if (!area) { 833 850 mutex_unlock(&as->lock); … … 835 852 return ENOENT; 836 853 } 837 854 838 855 if ((area->sh_info) || (area->backend != &anon_backend)) { 839 856 /* Copying shared areas not supported yet */ … … 844 861 return ENOTSUP; 845 862 } 846 863 847 864 /* 848 865 * Compute total number of used pages in the used_space B+tree 849 */ 850 used_pages = 0; 851 866 * 867 */ 868 size_t used_pages = 0; 869 link_t *cur; 870 852 871 for (cur = area->used_space.leaf_head.next; 853 872 cur != &area->used_space.leaf_head; cur = cur->next) { 854 btree_node_t *node ;855 unsigned int i;856 857 node = list_get_instance(cur, btree_node_t, leaf_link);858 for (i = 0; i < node->keys; i++) {873 btree_node_t *node 874 = list_get_instance(cur, btree_node_t, leaf_link); 875 btree_key_t i; 876 877 for (i = 0; i < node->keys; i++) 859 878 used_pages += (size_t) node->value[i]; 860 } 861 } 862 879 } 880 863 881 /* An array for storing frame numbers */ 864 old_frame = malloc(used_pages * sizeof(uintptr_t), 0);865 882 uintptr_t *old_frame = malloc(used_pages * sizeof(uintptr_t), 0); 883 866 884 page_table_lock(as, false); 867 885 868 886 /* 869 887 * Start TLB shootdown sequence. 888 * 870 889 */ 871 890 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages); 872 891 873 892 /* 874 893 * Remove used pages from page tables and remember their frame 875 894 * numbers. 876 */ 877 frame_idx = 0; 878 895 * 896 */ 897 size_t frame_idx = 0; 898 879 899 for (cur = area->used_space.leaf_head.next; 880 900 cur != &area->used_space.leaf_head; cur = cur->next) { 881 btree_node_t *node ;882 unsigned int i;883 884 node = list_get_instance(cur, btree_node_t, leaf_link);901 btree_node_t *node 902 = list_get_instance(cur, btree_node_t, leaf_link); 903 btree_key_t i; 904 885 905 for (i = 0; i < node->keys; i++) { 886 uintptr_t b = node->key[i]; 887 size_t j; 888 pte_t *pte; 906 uintptr_t ptr = node->key[i]; 907 size_t size; 889 908 890 for (j = 0; j < (size_t) node->value[i]; j++) { 891 pte = page_mapping_find(as, b + j * PAGE_SIZE); 892 ASSERT(pte && PTE_VALID(pte) && 893 PTE_PRESENT(pte)); 909 for (size = 0; size < (size_t) node->value[i]; size++) { 910 pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE); 911 912 ASSERT(pte); 913 ASSERT(PTE_VALID(pte)); 914 ASSERT(PTE_PRESENT(pte)); 915 894 916 old_frame[frame_idx++] = PTE_GET_FRAME(pte); 895 917 896 918 /* Remove old mapping */ 897 page_mapping_remove(as, b + j* PAGE_SIZE);919 page_mapping_remove(as, ptr + size * PAGE_SIZE); 898 920 } 899 921 } 900 922 } 901 923 902 924 /* 903 925 * Finish TLB shootdown sequence. 
904 */ 905 926 * 927 */ 928 906 929 tlb_invalidate_pages(as->asid, area->base, area->pages); 907 930 … … 909 932 * Invalidate potential software translation caches (e.g. TSB on 910 933 * sparc64). 934 * 911 935 */ 912 936 as_invalidate_translation_cache(as, area->base, area->pages); 913 937 tlb_shootdown_finalize(); 914 938 915 939 page_table_unlock(as, false); 916 940 917 941 /* 918 942 * Set the new flags. 919 943 */ 920 944 area->flags = flags; 921 945 922 946 /* 923 947 * Map pages back in with new flags. This step is kept separate … … 926 950 */ 927 951 frame_idx = 0; 928 952 929 953 for (cur = area->used_space.leaf_head.next; 930 954 cur != &area->used_space.leaf_head; cur = cur->next) { 931 btree_node_t *node ;932 unsigned int i;933 934 node = list_get_instance(cur, btree_node_t, leaf_link);955 btree_node_t *node 956 = list_get_instance(cur, btree_node_t, leaf_link); 957 btree_key_t i; 958 935 959 for (i = 0; i < node->keys; i++) { 936 uintptr_t b= node->key[i];937 size_t j;960 uintptr_t ptr = node->key[i]; 961 size_t size; 938 962 939 for ( j = 0; j < (size_t) node->value[i]; j++) {963 for (size = 0; size < (size_t) node->value[i]; size++) { 940 964 page_table_lock(as, false); 941 965 942 966 /* Insert the new mapping */ 943 page_mapping_insert(as, b + j* PAGE_SIZE,967 page_mapping_insert(as, ptr + size * PAGE_SIZE, 944 968 old_frame[frame_idx++], page_flags); 945 969 946 970 page_table_unlock(as, false); 947 971 } 948 972 } 949 973 } 950 974 951 975 free(old_frame); 952 976 953 977 mutex_unlock(&area->lock); 954 978 mutex_unlock(&as->lock); 955 979 interrupts_restore(ipl); 956 980 957 981 return 0; 958 982 } 959 960 983 961 984 /** Handle page fault within the current address space. … … 967 990 * Interrupts are assumed disabled. 968 991 * 969 * @param page Faulting page. 970 * @param access Access mode that caused the page fault (i.e. 971 * read/write/exec). 972 * @param istate Pointer to the interrupted state. 973 * 974 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or 975 * AS_PF_DEFER if the fault was caused by copy_to_uspace() 976 * or copy_from_uspace(). 992 * @param page Faulting page. 993 * @param access Access mode that caused the page fault (i.e. 994 * read/write/exec). 995 * @param istate Pointer to the interrupted state. 996 * 997 * @return AS_PF_FAULT on page fault. 998 * @return AS_PF_OK on success. 999 * @return AS_PF_DEFER if the fault was caused by copy_to_uspace() 1000 * or copy_from_uspace(). 1001 * 977 1002 */ 978 1003 int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate) 979 1004 { 980 pte_t *pte;981 as_area_t *area;982 983 1005 if (!THREAD) 984 1006 return AS_PF_FAULT; … … 988 1010 989 1011 mutex_lock(&AS->lock); 990 a rea = find_area_and_lock(AS, page);1012 as_area_t *area = find_area_and_lock(AS, page); 991 1013 if (!area) { 992 1014 /* 993 1015 * No area contained mapping for 'page'. 994 1016 * Signal page fault to low-level handler. 1017 * 995 1018 */ 996 1019 mutex_unlock(&AS->lock); 997 1020 goto page_fault; 998 1021 } 999 1022 1000 1023 if (area->attributes & AS_AREA_ATTR_PARTIAL) { 1001 1024 /* … … 1005 1028 mutex_unlock(&area->lock); 1006 1029 mutex_unlock(&AS->lock); 1007 goto page_fault; 1008 } 1009 1010 if ( !area->backend || !area->backend->page_fault) {1030 goto page_fault; 1031 } 1032 1033 if ((!area->backend) || (!area->backend->page_fault)) { 1011 1034 /* 1012 1035 * The address space area is not backed by any backend 1013 1036 * or the backend cannot handle page faults. 
1037 * 1014 1038 */ 1015 1039 mutex_unlock(&area->lock); 1016 1040 mutex_unlock(&AS->lock); 1017 goto page_fault; 1018 } 1019 1041 goto page_fault; 1042 } 1043 1020 1044 page_table_lock(AS, false); 1021 1045 … … 1023 1047 * To avoid race condition between two page faults on the same address, 1024 1048 * we need to make sure the mapping has not been already inserted. 1025 */ 1049 * 1050 */ 1051 pte_t *pte; 1026 1052 if ((pte = page_mapping_find(AS, page))) { 1027 1053 if (PTE_PRESENT(pte)) { … … 1039 1065 /* 1040 1066 * Resort to the backend page fault handler. 1067 * 1041 1068 */ 1042 1069 if (area->backend->page_fault(area, page, access) != AS_PF_OK) { … … 1051 1078 mutex_unlock(&AS->lock); 1052 1079 return AS_PF_OK; 1053 1080 1054 1081 page_fault: 1055 1082 if (THREAD->in_copy_from_uspace) { … … 1064 1091 return AS_PF_FAULT; 1065 1092 } 1066 1093 1067 1094 return AS_PF_DEFER; 1068 1095 } … … 1076 1103 * When this function is enetered, no spinlocks may be held. 1077 1104 * 1078 * @param old Old address space or NULL. 1079 * @param new New address space. 1105 * @param old Old address space or NULL. 1106 * @param new New address space. 1107 * 1080 1108 */ 1081 1109 void as_switch(as_t *old_as, as_t *new_as) … … 1083 1111 DEADLOCK_PROBE_INIT(p_asidlock); 1084 1112 preemption_disable(); 1113 1085 1114 retry: 1086 1115 (void) interrupts_disable(); 1087 1116 if (!spinlock_trylock(&asidlock)) { 1088 /* 1117 /* 1089 1118 * Avoid deadlock with TLB shootdown. 1090 1119 * We can enable interrupts here because 1091 1120 * preemption is disabled. We should not be 1092 1121 * holding any other lock. 1122 * 1093 1123 */ 1094 1124 (void) interrupts_enable(); … … 1097 1127 } 1098 1128 preemption_enable(); 1099 1129 1100 1130 /* 1101 1131 * First, take care of the old address space. 1102 */ 1132 */ 1103 1133 if (old_as) { 1104 1134 ASSERT(old_as->cpu_refcount); 1105 if((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) { 1135 1136 if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) { 1106 1137 /* 1107 1138 * The old address space is no longer active on … … 1109 1140 * list of inactive address spaces with assigned 1110 1141 * ASID. 1142 * 1111 1143 */ 1112 1144 ASSERT(old_as->asid != ASID_INVALID); 1145 1113 1146 list_append(&old_as->inactive_as_with_asid_link, 1114 1147 &inactive_as_with_asid_head); 1115 1148 } 1116 1149 1117 1150 /* 1118 1151 * Perform architecture-specific tasks when the address space 1119 1152 * is being removed from the CPU. 1153 * 1120 1154 */ 1121 1155 as_deinstall_arch(old_as); 1122 1156 } 1123 1157 1124 1158 /* 1125 1159 * Second, prepare the new address space. 1160 * 1126 1161 */ 1127 1162 if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) { … … 1131 1166 new_as->asid = asid_get(); 1132 1167 } 1168 1133 1169 #ifdef AS_PAGE_TABLE 1134 1170 SET_PTL0_ADDRESS(new_as->genarch.page_table); … … 1138 1174 * Perform architecture-specific steps. 1139 1175 * (e.g. write ASID to hardware register etc.) 1176 * 1140 1177 */ 1141 1178 as_install_arch(new_as); 1142 1179 1143 1180 spinlock_unlock(&asidlock); 1144 1181 … … 1148 1185 /** Convert address space area flags to page flags. 1149 1186 * 1150 * @param aflags Flags of some address space area. 1151 * 1152 * @return Flags to be passed to page_mapping_insert(). 1153 */ 1154 int area_flags_to_page_flags(int aflags) 1155 { 1156 int flags; 1157 1158 flags = PAGE_USER | PAGE_PRESENT; 1187 * @param aflags Flags of some address space area. 1188 * 1189 * @return Flags to be passed to page_mapping_insert(). 
1190 * 1191 */ 1192 unsigned int area_flags_to_page_flags(unsigned int aflags) 1193 { 1194 unsigned int flags = PAGE_USER | PAGE_PRESENT; 1159 1195 1160 1196 if (aflags & AS_AREA_READ) … … 1169 1205 if (aflags & AS_AREA_CACHEABLE) 1170 1206 flags |= PAGE_CACHEABLE; 1171 1207 1172 1208 return flags; 1173 1209 } … … 1178 1214 * Interrupts must be disabled. 1179 1215 * 1180 * @param a Address space area. 1181 * 1182 * @return Flags to be used in page_mapping_insert(). 1183 */ 1184 int as_area_get_flags(as_area_t *a) 1185 { 1186 return area_flags_to_page_flags(a->flags); 1216 * @param area Address space area. 1217 * 1218 * @return Flags to be used in page_mapping_insert(). 1219 * 1220 */ 1221 unsigned int as_area_get_flags(as_area_t *area) 1222 { 1223 return area_flags_to_page_flags(area->flags); 1187 1224 } 1188 1225 … … 1192 1229 * table. 1193 1230 * 1194 * @param flags Flags saying whether the page table is for the kernel 1195 * address space. 1196 * 1197 * @return First entry of the page table. 1198 */ 1199 pte_t *page_table_create(int flags) 1231 * @param flags Flags saying whether the page table is for the kernel 1232 * address space. 1233 * 1234 * @return First entry of the page table. 1235 * 1236 */ 1237 pte_t *page_table_create(unsigned int flags) 1200 1238 { 1201 1239 ASSERT(as_operations); … … 1209 1247 * Destroy page table in architecture specific way. 1210 1248 * 1211 * @param page_table Physical address of PTL0. 1249 * @param page_table Physical address of PTL0. 1250 * 1212 1251 */ 1213 1252 void page_table_destroy(pte_t *page_table) … … 1223 1262 * This function should be called before any page_mapping_insert(), 1224 1263 * page_mapping_remove() and page_mapping_find(). 1225 * 1264 * 1226 1265 * Locking order is such that address space areas must be locked 1227 1266 * prior to this call. Address space can be locked prior to this 1228 1267 * call in which case the lock argument is false. 1229 1268 * 1230 * @param as Address space. 1231 * @param lock If false, do not attempt to lock as->lock. 1269 * @param as Address space. 1270 * @param lock If false, do not attempt to lock as->lock. 1271 * 1232 1272 */ 1233 1273 void page_table_lock(as_t *as, bool lock) … … 1241 1281 /** Unlock page table. 1242 1282 * 1243 * @param as Address space. 1244 * @param unlock If false, do not attempt to unlock as->lock. 1283 * @param as Address space. 1284 * @param unlock If false, do not attempt to unlock as->lock. 1285 * 1245 1286 */ 1246 1287 void page_table_unlock(as_t *as, bool unlock) … … 1257 1298 * The address space must be locked and interrupts must be disabled. 1258 1299 * 1259 * @param as Address space. 1260 * @param va Virtual address. 1261 * 1262 * @return Locked address space area containing va on success or 1263 * NULL on failure. 1300 * @param as Address space. 1301 * @param va Virtual address. 1302 * 1303 * @return Locked address space area containing va on success or 1304 * NULL on failure. 
1305 * 1264 1306 */ 1265 1307 as_area_t *find_area_and_lock(as_t *as, uintptr_t va) 1266 1308 { 1267 as_area_t *a; 1268 btree_node_t *leaf, *lnode; 1269 unsigned int i; 1270 1271 a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); 1272 if (a) { 1309 btree_node_t *leaf; 1310 as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); 1311 if (area) { 1273 1312 /* va is the base address of an address space area */ 1274 mutex_lock(&a ->lock);1275 return a ;1313 mutex_lock(&area->lock); 1314 return area; 1276 1315 } 1277 1316 … … 1280 1319 * to find out whether this is a miss or va belongs to an address 1281 1320 * space area found there. 1321 * 1282 1322 */ 1283 1323 1284 1324 /* First, search the leaf node itself. */ 1325 btree_key_t i; 1326 1285 1327 for (i = 0; i < leaf->keys; i++) { 1286 a = (as_area_t *) leaf->value[i]; 1287 mutex_lock(&a->lock); 1288 if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) { 1289 return a; 1290 } 1291 mutex_unlock(&a->lock); 1292 } 1293 1328 area = (as_area_t *) leaf->value[i]; 1329 1330 mutex_lock(&area->lock); 1331 1332 if ((area->base <= va) && (va < area->base + area->pages * PAGE_SIZE)) 1333 return area; 1334 1335 mutex_unlock(&area->lock); 1336 } 1337 1294 1338 /* 1295 1339 * Second, locate the left neighbour and test its last record. 1296 1340 * Because of its position in the B+tree, it must have base < va. 1297 */ 1298 lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 1341 * 1342 */ 1343 btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 1299 1344 if (lnode) { 1300 a = (as_area_t *) lnode->value[lnode->keys - 1]; 1301 mutex_lock(&a->lock); 1302 if (va < a->base + a->pages * PAGE_SIZE) { 1303 return a; 1304 } 1305 mutex_unlock(&a->lock); 1306 } 1307 1345 area = (as_area_t *) lnode->value[lnode->keys - 1]; 1346 1347 mutex_lock(&area->lock); 1348 1349 if (va < area->base + area->pages * PAGE_SIZE) 1350 return area; 1351 1352 mutex_unlock(&area->lock); 1353 } 1354 1308 1355 return NULL; 1309 1356 } … … 1313 1360 * The address space must be locked and interrupts must be disabled. 1314 1361 * 1315 * @param as Address space. 1316 * @param va Starting virtual address of the area being tested. 1317 * @param size Size of the area being tested. 1318 * @param avoid_area Do not touch this area. 1319 * 1320 * @return True if there is no conflict, false otherwise. 1321 */ 1322 bool 1323 check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area) 1324 { 1325 as_area_t *a; 1326 btree_node_t *leaf, *node; 1327 unsigned int i; 1328 1362 * @param as Address space. 1363 * @param va Starting virtual address of the area being tested. 1364 * @param size Size of the area being tested. 1365 * @param avoid_area Do not touch this area. 1366 * 1367 * @return True if there is no conflict, false otherwise. 1368 * 1369 */ 1370 bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, 1371 as_area_t *avoid_area) 1372 { 1329 1373 /* 1330 1374 * We don't want any area to have conflicts with NULL page. 1375 * 1331 1376 */ 1332 1377 if (overlaps(va, size, NULL, PAGE_SIZE)) … … 1339 1384 * record in the left neighbour, the leftmost record in the right 1340 1385 * neighbour and all records in the leaf node itself. 
1341 */ 1342 1343 if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) { 1344 if (a != avoid_area) 1386 * 1387 */ 1388 btree_node_t *leaf; 1389 as_area_t *area = 1390 (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); 1391 if (area) { 1392 if (area != avoid_area) 1345 1393 return false; 1346 1394 } 1347 1395 1348 1396 /* First, check the two border cases. */ 1349 if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) { 1350 a = (as_area_t *) node->value[node->keys - 1]; 1351 mutex_lock(&a->lock); 1352 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { 1353 mutex_unlock(&a->lock); 1397 btree_node_t *node = 1398 btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 1399 if (node) { 1400 area = (as_area_t *) node->value[node->keys - 1]; 1401 1402 mutex_lock(&area->lock); 1403 1404 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 1405 mutex_unlock(&area->lock); 1354 1406 return false; 1355 1407 } 1356 mutex_unlock(&a->lock); 1357 } 1408 1409 mutex_unlock(&area->lock); 1410 } 1411 1358 1412 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf); 1359 1413 if (node) { 1360 a = (as_area_t *) node->value[0]; 1361 mutex_lock(&a->lock); 1362 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { 1363 mutex_unlock(&a->lock); 1414 area = (as_area_t *) node->value[0]; 1415 1416 mutex_lock(&area->lock); 1417 1418 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 1419 mutex_unlock(&area->lock); 1364 1420 return false; 1365 1421 } 1366 mutex_unlock(&a->lock); 1422 1423 mutex_unlock(&area->lock); 1367 1424 } 1368 1425 1369 1426 /* Second, check the leaf node. */ 1427 btree_key_t i; 1370 1428 for (i = 0; i < leaf->keys; i++) { 1371 a = (as_area_t *) leaf->value[i];1372 1373 if (a == avoid_area)1429 area = (as_area_t *) leaf->value[i]; 1430 1431 if (area == avoid_area) 1374 1432 continue; 1375 1376 mutex_lock(&a->lock); 1377 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { 1378 mutex_unlock(&a->lock); 1433 1434 mutex_lock(&area->lock); 1435 1436 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 1437 mutex_unlock(&area->lock); 1379 1438 return false; 1380 1439 } 1381 mutex_unlock(&a->lock); 1382 } 1383 1440 1441 mutex_unlock(&area->lock); 1442 } 1443 1384 1444 /* 1385 1445 * So far, the area does not conflict with other areas. 1386 1446 * Check if it doesn't conflict with kernel address space. 1387 */ 1447 * 1448 */ 1388 1449 if (!KERNEL_ADDRESS_SPACE_SHADOWED) { 1389 return !overlaps(va, size, 1450 return !overlaps(va, size, 1390 1451 KERNEL_ADDRESS_SPACE_START, 1391 1452 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START); 1392 1453 } 1393 1454 1394 1455 return true; 1395 1456 } … … 1397 1458 /** Return size of the address space area with given base. 1398 1459 * 1399 * @param base Arbitrary address insede the address space area. 1400 * 1401 * @return Size of the address space area in bytes or zero if it 1402 * does not exist. 1460 * @param base Arbitrary address insede the address space area. 1461 * 1462 * @return Size of the address space area in bytes or zero if it 1463 * does not exist. 
1464 * 1403 1465 */ 1404 1466 size_t as_area_get_size(uintptr_t base) 1405 1467 { 1406 ipl_t ipl;1407 as_area_t *src_area;1408 1468 size_t size; 1409 1410 ipl = interrupts_disable(); 1411 src_area = find_area_and_lock(AS, base); 1469 1470 ipl_t ipl = interrupts_disable(); 1471 as_area_t *src_area = find_area_and_lock(AS, base); 1472 1412 1473 if (src_area) { 1413 1474 size = src_area->pages * PAGE_SIZE; 1414 1475 mutex_unlock(&src_area->lock); 1415 } else {1476 } else 1416 1477 size = 0; 1417 }1478 1418 1479 interrupts_restore(ipl); 1419 1480 return size; … … 1424 1485 * The address space area must be already locked. 1425 1486 * 1426 * @param a Address space area. 1427 * @param page First page to be marked. 1428 * @param count Number of page to be marked. 1429 * 1430 * @return Zero on failure and non-zero on success. 1431 */ 1432 int used_space_insert(as_area_t *a, uintptr_t page, size_t count) 1433 { 1434 btree_node_t *leaf, *node; 1435 size_t pages; 1436 unsigned int i; 1437 1487 * @param area Address space area. 1488 * @param page First page to be marked. 1489 * @param count Number of page to be marked. 1490 * 1491 * @return Zero on failure and non-zero on success. 1492 * 1493 */ 1494 int used_space_insert(as_area_t *area, uintptr_t page, size_t count) 1495 { 1438 1496 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); 1439 1497 ASSERT(count); 1440 1441 pages = (size_t) btree_search(&a->used_space, page, &leaf); 1498 1499 btree_node_t *leaf; 1500 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf); 1442 1501 if (pages) { 1443 1502 /* 1444 1503 * We hit the beginning of some used space. 1504 * 1445 1505 */ 1446 1506 return 0; 1447 1507 } 1448 1508 1449 1509 if (!leaf->keys) { 1450 btree_insert(&a ->used_space, page, (void *) count, leaf);1510 btree_insert(&area->used_space, page, (void *) count, leaf); 1451 1511 return 1; 1452 1512 } 1453 1454 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);1513 1514 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf); 1455 1515 if (node) { 1456 1516 uintptr_t left_pg = node->key[node->keys - 1]; … … 1463 1523 * somewhere between the rightmost interval of 1464 1524 * the left neigbour and the first interval of the leaf. 1465 */ 1466 1525 * 1526 */ 1527 1467 1528 if (page >= right_pg) { 1468 1529 /* Do nothing. */ … … 1474 1535 right_cnt * PAGE_SIZE)) { 1475 1536 /* The interval intersects with the right interval. */ 1476 return 0; 1537 return 0; 1477 1538 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1478 1539 (page + count * PAGE_SIZE == right_pg)) { … … 1480 1541 * The interval can be added by merging the two already 1481 1542 * present intervals. 1543 * 1482 1544 */ 1483 1545 node->value[node->keys - 1] += count + right_cnt; 1484 btree_remove(&a ->used_space, right_pg, leaf);1485 return 1; 1546 btree_remove(&area->used_space, right_pg, leaf); 1547 return 1; 1486 1548 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1487 /* 1549 /* 1488 1550 * The interval can be added by simply growing the left 1489 1551 * interval. 1552 * 1490 1553 */ 1491 1554 node->value[node->keys - 1] += count; … … 1496 1559 * the right interval down and increasing its size 1497 1560 * accordingly. 1561 * 1498 1562 */ 1499 1563 leaf->value[0] += count; … … 1504 1568 * The interval is between both neigbouring intervals, 1505 1569 * but cannot be merged with any of them. 
1570 * 1506 1571 */ 1507 btree_insert(&a ->used_space, page, (void *) count,1572 btree_insert(&area->used_space, page, (void *) count, 1508 1573 leaf); 1509 1574 return 1; … … 1512 1577 uintptr_t right_pg = leaf->key[0]; 1513 1578 size_t right_cnt = (size_t) leaf->value[0]; 1514 1579 1515 1580 /* 1516 1581 * Investigate the border case in which the left neighbour does 1517 1582 * not exist but the interval fits from the left. 1518 */ 1519 1583 * 1584 */ 1585 1520 1586 if (overlaps(page, count * PAGE_SIZE, right_pg, 1521 1587 right_cnt * PAGE_SIZE)) { … … 1527 1593 * right interval down and increasing its size 1528 1594 * accordingly. 1595 * 1529 1596 */ 1530 1597 leaf->key[0] = page; … … 1535 1602 * The interval doesn't adjoin with the right interval. 1536 1603 * It must be added individually. 1604 * 1537 1605 */ 1538 btree_insert(&a ->used_space, page, (void *) count,1606 btree_insert(&area->used_space, page, (void *) count, 1539 1607 leaf); 1540 1608 return 1; 1541 1609 } 1542 1610 } 1543 1544 node = btree_leaf_node_right_neighbour(&a ->used_space, leaf);1611 1612 node = btree_leaf_node_right_neighbour(&area->used_space, leaf); 1545 1613 if (node) { 1546 1614 uintptr_t left_pg = leaf->key[leaf->keys - 1]; … … 1553 1621 * somewhere between the leftmost interval of 1554 1622 * the right neigbour and the last interval of the leaf. 1555 */ 1556 1623 * 1624 */ 1625 1557 1626 if (page < left_pg) { 1558 1627 /* Do nothing. */ … … 1564 1633 right_cnt * PAGE_SIZE)) { 1565 1634 /* The interval intersects with the right interval. */ 1566 return 0; 1635 return 0; 1567 1636 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1568 1637 (page + count * PAGE_SIZE == right_pg)) { … … 1570 1639 * The interval can be added by merging the two already 1571 1640 * present intervals. 1572 * */ 1641 * 1642 */ 1573 1643 leaf->value[leaf->keys - 1] += count + right_cnt; 1574 btree_remove(&a ->used_space, right_pg, node);1575 return 1; 1644 btree_remove(&area->used_space, right_pg, node); 1645 return 1; 1576 1646 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1577 1647 /* 1578 1648 * The interval can be added by simply growing the left 1579 1649 * interval. 1580 * */ 1650 * 1651 */ 1581 1652 leaf->value[leaf->keys - 1] += count; 1582 1653 return 1; … … 1586 1657 * the right interval down and increasing its size 1587 1658 * accordingly. 1659 * 1588 1660 */ 1589 1661 node->value[0] += count; … … 1594 1666 * The interval is between both neigbouring intervals, 1595 1667 * but cannot be merged with any of them. 1668 * 1596 1669 */ 1597 btree_insert(&a ->used_space, page, (void *) count,1670 btree_insert(&area->used_space, page, (void *) count, 1598 1671 leaf); 1599 1672 return 1; … … 1602 1675 uintptr_t left_pg = leaf->key[leaf->keys - 1]; 1603 1676 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 1604 1677 1605 1678 /* 1606 1679 * Investigate the border case in which the right neighbour 1607 1680 * does not exist but the interval fits from the right. 1608 */ 1609 1681 * 1682 */ 1683 1610 1684 if (overlaps(page, count * PAGE_SIZE, left_pg, 1611 1685 left_cnt * PAGE_SIZE)) { … … 1616 1690 * The interval can be added by growing the left 1617 1691 * interval. 1692 * 1618 1693 */ 1619 1694 leaf->value[leaf->keys - 1] += count; … … 1623 1698 * The interval doesn't adjoin with the left interval. 1624 1699 * It must be added individually. 
1700 * 1625 1701 */ 1626 btree_insert(&a ->used_space, page, (void *) count,1702 btree_insert(&area->used_space, page, (void *) count, 1627 1703 leaf); 1628 1704 return 1; … … 1634 1710 * only between two other intervals of the leaf. The two border cases 1635 1711 * were already resolved. 1636 */ 1712 * 1713 */ 1714 btree_key_t i; 1637 1715 for (i = 1; i < leaf->keys; i++) { 1638 1716 if (page < leaf->key[i]) { … … 1641 1719 size_t left_cnt = (size_t) leaf->value[i - 1]; 1642 1720 size_t right_cnt = (size_t) leaf->value[i]; 1643 1721 1644 1722 /* 1645 1723 * The interval fits between left_pg and right_pg. 1724 * 1646 1725 */ 1647 1726 1648 1727 if (overlaps(page, count * PAGE_SIZE, left_pg, 1649 1728 left_cnt * PAGE_SIZE)) { … … 1651 1730 * The interval intersects with the left 1652 1731 * interval. 1732 * 1653 1733 */ 1654 1734 return 0; … … 1658 1738 * The interval intersects with the right 1659 1739 * interval. 1740 * 1660 1741 */ 1661 return 0; 1742 return 0; 1662 1743 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1663 1744 (page + count * PAGE_SIZE == right_pg)) { … … 1665 1746 * The interval can be added by merging the two 1666 1747 * already present intervals. 1748 * 1667 1749 */ 1668 1750 leaf->value[i - 1] += count + right_cnt; 1669 btree_remove(&a ->used_space, right_pg, leaf);1670 return 1; 1751 btree_remove(&area->used_space, right_pg, leaf); 1752 return 1; 1671 1753 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1672 1754 /* 1673 1755 * The interval can be added by simply growing 1674 1756 * the left interval. 1757 * 1675 1758 */ 1676 1759 leaf->value[i - 1] += count; … … 1678 1761 } else if (page + count * PAGE_SIZE == right_pg) { 1679 1762 /* 1680 * The interval can be addded by simply moving1763 * The interval can be addded by simply moving 1681 1764 * base of the right interval down and 1682 1765 * increasing its size accordingly. 1683 */ 1766 * 1767 */ 1684 1768 leaf->value[i] += count; 1685 1769 leaf->key[i] = page; … … 1690 1774 * intervals, but cannot be merged with any of 1691 1775 * them. 1776 * 1692 1777 */ 1693 btree_insert(&a ->used_space, page,1778 btree_insert(&area->used_space, page, 1694 1779 (void *) count, leaf); 1695 1780 return 1; … … 1697 1782 } 1698 1783 } 1699 1784 1700 1785 panic("Inconsistency detected while adding %" PRIs " pages of used " 1701 1786 "space at %p.", count, page); … … 1706 1791 * The address space area must be already locked. 1707 1792 * 1708 * @param a Address space area. 1709 * @param page First page to be marked. 1710 * @param count Number of page to be marked. 1711 * 1712 * @return Zero on failure and non-zero on success. 1713 */ 1714 int used_space_remove(as_area_t *a, uintptr_t page, size_t count) 1715 { 1716 btree_node_t *leaf, *node; 1717 size_t pages; 1718 unsigned int i; 1719 1793 * @param area Address space area. 1794 * @param page First page to be marked. 1795 * @param count Number of page to be marked. 1796 * 1797 * @return Zero on failure and non-zero on success. 1798 * 1799 */ 1800 int used_space_remove(as_area_t *area, uintptr_t page, size_t count) 1801 { 1720 1802 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); 1721 1803 ASSERT(count); 1722 1723 pages = (size_t) btree_search(&a->used_space, page, &leaf); 1804 1805 btree_node_t *leaf; 1806 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf); 1724 1807 if (pages) { 1725 1808 /* 1726 1809 * We are lucky, page is the beginning of some interval. 
1810 * 1727 1811 */ 1728 1812 if (count > pages) { 1729 1813 return 0; 1730 1814 } else if (count == pages) { 1731 btree_remove(&a ->used_space, page, leaf);1815 btree_remove(&area->used_space, page, leaf); 1732 1816 return 1; 1733 1817 } else { … … 1735 1819 * Find the respective interval. 1736 1820 * Decrease its size and relocate its start address. 1821 * 1737 1822 */ 1823 btree_key_t i; 1738 1824 for (i = 0; i < leaf->keys; i++) { 1739 1825 if (leaf->key[i] == page) { … … 1746 1832 } 1747 1833 } 1748 1749 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);1750 if ( node && page < leaf->key[0]) {1834 1835 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf); 1836 if ((node) && (page < leaf->key[0])) { 1751 1837 uintptr_t left_pg = node->key[node->keys - 1]; 1752 1838 size_t left_cnt = (size_t) node->value[node->keys - 1]; 1753 1839 1754 1840 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page, 1755 1841 count * PAGE_SIZE)) { … … 1761 1847 * removed by updating the size of the bigger 1762 1848 * interval. 1849 * 1763 1850 */ 1764 1851 node->value[node->keys - 1] -= count; … … 1766 1853 } else if (page + count * PAGE_SIZE < 1767 1854 left_pg + left_cnt*PAGE_SIZE) { 1768 size_t new_cnt;1769 1770 1855 /* 1771 1856 * The interval is contained in the rightmost … … 1774 1859 * the original interval and also inserting a 1775 1860 * new interval. 1861 * 1776 1862 */ 1777 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -1863 size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) - 1778 1864 (page + count*PAGE_SIZE)) >> PAGE_WIDTH; 1779 1865 node->value[node->keys - 1] -= count + new_cnt; 1780 btree_insert(&a ->used_space, page +1866 btree_insert(&area->used_space, page + 1781 1867 count * PAGE_SIZE, (void *) new_cnt, leaf); 1782 1868 return 1; … … 1784 1870 } 1785 1871 return 0; 1786 } else if (page < leaf->key[0]) {1872 } else if (page < leaf->key[0]) 1787 1873 return 0; 1788 }1789 1874 1790 1875 if (page > leaf->key[leaf->keys - 1]) { 1791 1876 uintptr_t left_pg = leaf->key[leaf->keys - 1]; 1792 1877 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 1793 1878 1794 1879 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page, 1795 1880 count * PAGE_SIZE)) { 1796 if (page + count * PAGE_SIZE == 1881 if (page + count * PAGE_SIZE == 1797 1882 left_pg + left_cnt * PAGE_SIZE) { 1798 1883 /* … … 1800 1885 * interval of the leaf and can be removed by 1801 1886 * updating the size of the bigger interval. 1887 * 1802 1888 */ 1803 1889 leaf->value[leaf->keys - 1] -= count; … … 1805 1891 } else if (page + count * PAGE_SIZE < left_pg + 1806 1892 left_cnt * PAGE_SIZE) { 1807 size_t new_cnt;1808 1809 1893 /* 1810 1894 * The interval is contained in the rightmost … … 1813 1897 * original interval and also inserting a new 1814 1898 * interval. 1899 * 1815 1900 */ 1816 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -1901 size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) - 1817 1902 (page + count * PAGE_SIZE)) >> PAGE_WIDTH; 1818 1903 leaf->value[leaf->keys - 1] -= count + new_cnt; 1819 btree_insert(&a ->used_space, page +1904 btree_insert(&area->used_space, page + 1820 1905 count * PAGE_SIZE, (void *) new_cnt, leaf); 1821 1906 return 1; … … 1823 1908 } 1824 1909 return 0; 1825 } 1910 } 1826 1911 1827 1912 /* … … 1829 1914 * Now the interval can be only between intervals of the leaf. 
1830 1915 */ 1916 btree_key_t i; 1831 1917 for (i = 1; i < leaf->keys - 1; i++) { 1832 1918 if (page < leaf->key[i]) { 1833 1919 uintptr_t left_pg = leaf->key[i - 1]; 1834 1920 size_t left_cnt = (size_t) leaf->value[i - 1]; 1835 1921 1836 1922 /* 1837 1923 * Now the interval is between intervals corresponding … … 1847 1933 * be removed by updating the size of 1848 1934 * the bigger interval. 1935 * 1849 1936 */ 1850 1937 leaf->value[i - 1] -= count; … … 1852 1939 } else if (page + count * PAGE_SIZE < 1853 1940 left_pg + left_cnt * PAGE_SIZE) { 1854 size_t new_cnt;1855 1856 1941 /* 1857 1942 * The interval is contained in the … … 1861 1946 * also inserting a new interval. 1862 1947 */ 1863 new_cnt = ((left_pg +1948 size_t new_cnt = ((left_pg + 1864 1949 left_cnt * PAGE_SIZE) - 1865 1950 (page + count * PAGE_SIZE)) >> 1866 1951 PAGE_WIDTH; 1867 1952 leaf->value[i - 1] -= count + new_cnt; 1868 btree_insert(&a ->used_space, page +1953 btree_insert(&area->used_space, page + 1869 1954 count * PAGE_SIZE, (void *) new_cnt, 1870 1955 leaf); … … 1875 1960 } 1876 1961 } 1877 1962 1878 1963 error: 1879 1964 panic("Inconsistency detected while removing %" PRIs " pages of used " … … 1885 1970 * If the reference count drops to 0, the sh_info is deallocated. 1886 1971 * 1887 * @param sh_info Pointer to address space area share info. 1972 * @param sh_info Pointer to address space area share info. 1973 * 1888 1974 */ 1889 1975 void sh_info_remove_reference(share_info_t *sh_info) 1890 1976 { 1891 1977 bool dealloc = false; 1892 1978 1893 1979 mutex_lock(&sh_info->lock); 1894 1980 ASSERT(sh_info->refcount); 1981 1895 1982 if (--sh_info->refcount == 0) { 1896 1983 dealloc = true; … … 1903 1990 for (cur = sh_info->pagemap.leaf_head.next; 1904 1991 cur != &sh_info->pagemap.leaf_head; cur = cur->next) { 1905 btree_node_t *node; 1906 unsigned int i; 1992 btree_node_t *node 1993 = list_get_instance(cur, btree_node_t, leaf_link); 1994 btree_key_t i; 1907 1995 1908 node = list_get_instance(cur, btree_node_t, leaf_link); 1909 for (i = 0; i < node->keys; i++) 1996 for (i = 0; i < node->keys; i++) 1910 1997 frame_free((uintptr_t) node->value[i]); 1911 1998 } … … 1925 2012 1926 2013 /** Wrapper for as_area_create(). */ 1927 unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)2014 unative_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags) 1928 2015 { 1929 2016 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, … … 1935 2022 1936 2023 /** Wrapper for as_area_resize(). */ 1937 unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)2024 unative_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags) 1938 2025 { 1939 2026 return (unative_t) as_area_resize(AS, address, size, 0); … … 1941 2028 1942 2029 /** Wrapper for as_area_change_flags(). */ 1943 unative_t sys_as_area_change_flags(uintptr_t address, int flags)2030 unative_t sys_as_area_change_flags(uintptr_t address, unsigned int flags) 1944 2031 { 1945 2032 return (unative_t) as_area_change_flags(AS, flags, address); … … 1954 2041 /** Get list of adress space areas. 1955 2042 * 1956 * @param as Address space. 1957 * @param obuf Place to save pointer to returned buffer. 1958 * @param osize Place to save size of returned buffer. 2043 * @param as Address space. 2044 * @param obuf Place to save pointer to returned buffer. 2045 * @param osize Place to save size of returned buffer. 
2046 * 1959 2047 */ 1960 2048 void as_get_area_info(as_t *as, as_area_info_t **obuf, size_t *osize) 1961 2049 { 1962 ipl_t ipl; 1963 size_t area_cnt, area_idx, i; 2050 ipl_t ipl = interrupts_disable(); 2051 mutex_lock(&as->lock); 2052 2053 /* First pass, count number of areas. */ 2054 2055 size_t area_cnt = 0; 1964 2056 link_t *cur; 1965 1966 as_area_info_t *info; 1967 size_t isize; 1968 1969 ipl = interrupts_disable(); 1970 mutex_lock(&as->lock); 1971 1972 /* First pass, count number of areas. */ 1973 1974 area_cnt = 0; 1975 2057 1976 2058 for (cur = as->as_area_btree.leaf_head.next; 1977 2059 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 1978 btree_node_t *node; 1979 1980 node = list_get_instance(cur, btree_node_t, leaf_link); 2060 btree_node_t *node = 2061 list_get_instance(cur, btree_node_t, leaf_link); 1981 2062 area_cnt += node->keys; 1982 2063 } 1983 1984 isize = area_cnt * sizeof(as_area_info_t);1985 info = malloc(isize, 0);1986 2064 2065 size_t isize = area_cnt * sizeof(as_area_info_t); 2066 as_area_info_t *info = malloc(isize, 0); 2067 1987 2068 /* Second pass, record data. */ 1988 1989 area_idx = 0;1990 2069 2070 size_t area_idx = 0; 2071 1991 2072 for (cur = as->as_area_btree.leaf_head.next; 1992 2073 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 1993 btree_node_t *node ;1994 1995 node = list_get_instance(cur, btree_node_t, leaf_link);1996 2074 btree_node_t *node = 2075 list_get_instance(cur, btree_node_t, leaf_link); 2076 btree_key_t i; 2077 1997 2078 for (i = 0; i < node->keys; i++) { 1998 2079 as_area_t *area = node->value[i]; 1999 2080 2000 2081 ASSERT(area_idx < area_cnt); 2001 2082 mutex_lock(&area->lock); 2002 2083 2003 2084 info[area_idx].start_addr = area->base; 2004 2085 info[area_idx].size = FRAMES2SIZE(area->pages); 2005 2086 info[area_idx].flags = area->flags; 2006 2087 ++area_idx; 2007 2088 2008 2089 mutex_unlock(&area->lock); 2009 2090 } 2010 2091 } 2011 2092 2012 2093 mutex_unlock(&as->lock); 2013 2094 interrupts_restore(ipl); 2014 2095 2015 2096 *obuf = info; 2016 2097 *osize = isize; 2017 2098 } 2018 2099 2019 2020 2100 /** Print out information about address space. 2021 2101 * 2022 * @param as Address space. 2102 * @param as Address space. 2103 * 2023 2104 */ 2024 2105 void as_print(as_t *as) 2025 2106 { 2026 ipl_t ipl; 2027 2028 ipl = interrupts_disable(); 2107 ipl_t ipl = interrupts_disable(); 2029 2108 mutex_lock(&as->lock); 2030 2109 … … 2033 2112 for (cur = as->as_area_btree.leaf_head.next; 2034 2113 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 2035 btree_node_t *node; 2036 2037 node = list_get_instance(cur, btree_node_t, leaf_link); 2038 2039 unsigned int i; 2114 btree_node_t *node 2115 = list_get_instance(cur, btree_node_t, leaf_link); 2116 btree_key_t i; 2117 2040 2118 for (i = 0; i < node->keys; i++) { 2041 2119 as_area_t *area = node->value[i]; 2042 2120 2043 2121 mutex_lock(&area->lock); 2044 2122 printf("as_area: %p, base=%p, pages=%" PRIs -
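
The used_space hunks above all implement one small case analysis: a newly used run of pages either extends the interval to its left, extends the interval to its right, bridges the gap between the two (merging them), or becomes a new interval of its own. The following stand-alone sketch models the same bookkeeping with a plain sorted array instead of the kernel's B+tree; the names (ivl_t, ivl_insert) are invented for the illustration and overlap checking is omitted.

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE  4096
#define MAX_IVLS   32

/* One used-space interval: 'count' pages starting at address 'page'. */
typedef struct {
	size_t page;
	size_t count;
} ivl_t;

static ivl_t ivls[MAX_IVLS];
static size_t ivl_cnt;

/* Record [page, page + count * PAGE_SIZE) as used, merging with the
 * neighbouring intervals as described in the comments above. */
static int ivl_insert(size_t page, size_t count)
{
	size_t i = 0;
	while ((i < ivl_cnt) && (ivls[i].page < page))
		i++;
	
	int merge_left = (i > 0) &&
	    (ivls[i - 1].page + ivls[i - 1].count * PAGE_SIZE == page);
	int merge_right = (i < ivl_cnt) &&
	    (page + count * PAGE_SIZE == ivls[i].page);
	
	if ((merge_left) && (merge_right)) {
		/* The new run bridges the gap: fold the right interval
		   into the left one and drop it. */
		ivls[i - 1].count += count + ivls[i].count;
		for (size_t j = i; j + 1 < ivl_cnt; j++)
			ivls[j] = ivls[j + 1];
		ivl_cnt--;
	} else if (merge_left) {
		/* Simply grow the left interval. */
		ivls[i - 1].count += count;
	} else if (merge_right) {
		/* Move the base of the right interval down. */
		ivls[i].page = page;
		ivls[i].count += count;
	} else {
		/* No neighbour to merge with: insert a new interval. */
		if (ivl_cnt == MAX_IVLS)
			return 0;
		for (size_t j = ivl_cnt; j > i; j--)
			ivls[j] = ivls[j - 1];
		ivls[i].page = page;
		ivls[i].count = count;
		ivl_cnt++;
	}
	
	return 1;
}

int main(void)
{
	ivl_insert(0x10000, 4);
	ivl_insert(0x20000, 2);
	ivl_insert(0x10000 + 4 * PAGE_SIZE, 1);  /* grows the first interval */
	
	for (size_t i = 0; i < ivl_cnt; i++)
		printf("%#zx: %zu pages\n", ivls[i].page, ivls[i].count);
	
	return 0;
}

The kernel version performs the identical case split, only against B+tree leaves and their neighbouring nodes, which is why used_space_insert() and used_space_remove() above are dominated by neighbour lookups rather than by the arithmetic itself.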
kernel/generic/src/mm/frame.c
r666f492 rda1bafb 66 66 * available. 67 67 */ 68 mutex_t mem_avail_mtx;69 condvar_t mem_avail_cv;70 s ize_t mem_avail_req = 0; /**< Number of frames requested. */71 s ize_t mem_avail_gen = 0; /**< Generation counter. */68 static mutex_t mem_avail_mtx; 69 static condvar_t mem_avail_cv; 70 static size_t mem_avail_req = 0; /**< Number of frames requested. */ 71 static size_t mem_avail_gen = 0; /**< Generation counter. */ 72 72 73 73 /********************/ … … 171 171 return total; 172 172 } 173 #endif 173 #endif /* CONFIG_DEBUG */ 174 174 175 175 /** Find a zone with a given frames. … … 199 199 if (i >= zones.count) 200 200 i = 0; 201 201 202 } while (i != hint); 202 203 … … 242 243 if (i >= zones.count) 243 244 i = 0; 245 244 246 } while (i != hint); 245 247 … … 296 298 index = (frame_index(zone, frame)) + 297 299 (1 << frame->buddy_order); 298 } else { /* is_right */300 } else { /* is_right */ 299 301 index = (frame_index(zone, frame)) - 300 302 (1 << frame->buddy_order); … … 673 675 bool zone_merge(size_t z1, size_t z2) 674 676 { 675 ipl_t ipl = interrupts_disable(); 676 spinlock_lock(&zones.lock); 677 irq_spinlock_lock(&zones.lock, true); 677 678 678 679 bool ret = true; … … 744 745 745 746 errout: 746 spinlock_unlock(&zones.lock); 747 interrupts_restore(ipl); 747 irq_spinlock_unlock(&zones.lock, true); 748 748 749 749 return ret; … … 777 777 * 778 778 */ 779 static void zone_construct(zone_t *zone, buddy_system_t *buddy, pfn_t start, size_t count, zone_flags_t flags) 779 static void zone_construct(zone_t *zone, buddy_system_t *buddy, pfn_t start, 780 size_t count, zone_flags_t flags) 780 781 { 781 782 zone->base = start; … … 841 842 * 842 843 */ 843 size_t zone_create(pfn_t start, size_t count, pfn_t confframe, zone_flags_t flags)844 { 845 ipl_t ipl = interrupts_disable(); 846 spinlock_lock(&zones.lock);844 size_t zone_create(pfn_t start, size_t count, pfn_t confframe, 845 zone_flags_t flags) 846 { 847 irq_spinlock_lock(&zones.lock, true); 847 848 848 849 if (zone_flags_available(flags)) { /* Create available zone */ … … 889 890 size_t znum = zones_insert_zone(start, count); 890 891 if (znum == (size_t) -1) { 891 spinlock_unlock(&zones.lock); 892 interrupts_restore(ipl); 892 irq_spinlock_unlock(&zones.lock, true); 893 893 return (size_t) -1; 894 894 } … … 905 905 } 906 906 907 spinlock_unlock(&zones.lock); 908 interrupts_restore(ipl); 907 irq_spinlock_unlock(&zones.lock, true); 909 908 910 909 return znum; … … 914 913 size_t znum = zones_insert_zone(start, count); 915 914 if (znum == (size_t) -1) { 916 spinlock_unlock(&zones.lock); 917 interrupts_restore(ipl); 915 irq_spinlock_unlock(&zones.lock, true); 918 916 return (size_t) -1; 919 917 } 920 918 zone_construct(&zones.info[znum], NULL, start, count, flags); 921 919 922 spinlock_unlock(&zones.lock); 923 interrupts_restore(ipl); 920 irq_spinlock_unlock(&zones.lock, true); 924 921 925 922 return znum; … … 933 930 void frame_set_parent(pfn_t pfn, void *data, size_t hint) 934 931 { 935 ipl_t ipl = interrupts_disable(); 936 spinlock_lock(&zones.lock); 932 irq_spinlock_lock(&zones.lock, true); 937 933 938 934 size_t znum = find_zone(pfn, 1, hint); … … 943 939 pfn - zones.info[znum].base)->parent = data; 944 940 945 spinlock_unlock(&zones.lock); 946 interrupts_restore(ipl); 941 irq_spinlock_unlock(&zones.lock, true); 947 942 } 948 943 949 944 void *frame_get_parent(pfn_t pfn, size_t hint) 950 945 { 951 ipl_t ipl = interrupts_disable(); 952 spinlock_lock(&zones.lock); 946 irq_spinlock_lock(&zones.lock, true); 953 947 954 948 size_t znum = 
find_zone(pfn, 1, hint); … … 959 953 pfn - zones.info[znum].base)->parent; 960 954 961 spinlock_unlock(&zones.lock); 962 interrupts_restore(ipl); 955 irq_spinlock_unlock(&zones.lock, true); 963 956 964 957 return res; … … 977 970 { 978 971 size_t size = ((size_t) 1) << order; 979 ipl_t ipl;980 972 size_t hint = pzone ? (*pzone) : 0; 981 973 982 974 loop: 983 ipl = interrupts_disable(); 984 spinlock_lock(&zones.lock); 975 irq_spinlock_lock(&zones.lock, true); 985 976 986 977 /* … … 993 984 if it does not help, reclaim all */ 994 985 if ((znum == (size_t) -1) && (!(flags & FRAME_NO_RECLAIM))) { 995 spinlock_unlock(&zones.lock); 996 interrupts_restore(ipl); 997 986 irq_spinlock_unlock(&zones.lock, true); 998 987 size_t freed = slab_reclaim(0); 999 1000 ipl = interrupts_disable(); 1001 spinlock_lock(&zones.lock); 988 irq_spinlock_lock(&zones.lock, true); 1002 989 1003 990 if (freed > 0) … … 1006 993 1007 994 if (znum == (size_t) -1) { 1008 spinlock_unlock(&zones.lock); 1009 interrupts_restore(ipl); 1010 995 irq_spinlock_unlock(&zones.lock, true); 1011 996 freed = slab_reclaim(SLAB_RECLAIM_ALL); 1012 1013 ipl = interrupts_disable(); 1014 spinlock_lock(&zones.lock); 997 irq_spinlock_lock(&zones.lock, true); 1015 998 1016 999 if (freed > 0) … … 1022 1005 if (znum == (size_t) -1) { 1023 1006 if (flags & FRAME_ATOMIC) { 1024 spinlock_unlock(&zones.lock); 1025 interrupts_restore(ipl); 1007 irq_spinlock_unlock(&zones.lock, true); 1026 1008 return NULL; 1027 1009 } … … 1031 1013 #endif 1032 1014 1033 spinlock_unlock(&zones.lock); 1034 interrupts_restore(ipl); 1035 1015 irq_spinlock_unlock(&zones.lock, true); 1016 1036 1017 if (!THREAD) 1037 1018 panic("Cannot wait for memory to become available."); … … 1069 1050 + zones.info[znum].base; 1070 1051 1071 spinlock_unlock(&zones.lock); 1072 interrupts_restore(ipl); 1052 irq_spinlock_unlock(&zones.lock, true); 1073 1053 1074 1054 if (pzone) … … 1092 1072 void frame_free(uintptr_t frame) 1093 1073 { 1094 ipl_t ipl = interrupts_disable(); 1095 spinlock_lock(&zones.lock); 1074 irq_spinlock_lock(&zones.lock, true); 1096 1075 1097 1076 /* … … 1105 1084 zone_frame_free(&zones.info[znum], pfn - zones.info[znum].base); 1106 1085 1107 spinlock_unlock(&zones.lock); 1108 interrupts_restore(ipl); 1086 irq_spinlock_unlock(&zones.lock, true); 1109 1087 1110 1088 /* … … 1132 1110 void frame_reference_add(pfn_t pfn) 1133 1111 { 1134 ipl_t ipl = interrupts_disable(); 1135 spinlock_lock(&zones.lock); 1112 irq_spinlock_lock(&zones.lock, true); 1136 1113 1137 1114 /* … … 1144 1121 zones.info[znum].frames[pfn - zones.info[znum].base].refcount++; 1145 1122 1146 spinlock_unlock(&zones.lock); 1147 interrupts_restore(ipl); 1148 } 1149 1150 /** Mark given range unavailable in frame zones. */ 1123 irq_spinlock_unlock(&zones.lock, true); 1124 } 1125 1126 /** Mark given range unavailable in frame zones. 1127 * 1128 */ 1151 1129 void frame_mark_unavailable(pfn_t start, size_t count) 1152 1130 { 1153 ipl_t ipl = interrupts_disable(); 1154 spinlock_lock(&zones.lock); 1131 irq_spinlock_lock(&zones.lock, true); 1155 1132 1156 1133 size_t i; … … 1164 1141 } 1165 1142 1166 spinlock_unlock(&zones.lock); 1167 interrupts_restore(ipl); 1168 } 1169 1170 /** Initialize physical memory management. */ 1143 irq_spinlock_unlock(&zones.lock, true); 1144 } 1145 1146 /** Initialize physical memory management. 
1147 * 1148 */ 1171 1149 void frame_init(void) 1172 1150 { 1173 1151 if (config.cpu_active == 1) { 1174 1152 zones.count = 0; 1175 spinlock_initialize(&zones.lock, "zones.lock");1153 irq_spinlock_initialize(&zones.lock, "frame.zones.lock"); 1176 1154 mutex_initialize(&mem_avail_mtx, MUTEX_ACTIVE); 1177 1155 condvar_initialize(&mem_avail_cv); … … 1204 1182 } 1205 1183 1206 /** Return total size of all zones. */ 1184 /** Return total size of all zones. 1185 * 1186 */ 1207 1187 uint64_t zones_total_size(void) 1208 1188 { 1209 ipl_t ipl = interrupts_disable(); 1210 spinlock_lock(&zones.lock); 1189 irq_spinlock_lock(&zones.lock, true); 1211 1190 1212 1191 uint64_t total = 0; … … 1215 1194 total += (uint64_t) FRAMES2SIZE(zones.info[i].count); 1216 1195 1217 spinlock_unlock(&zones.lock); 1218 interrupts_restore(ipl); 1196 irq_spinlock_unlock(&zones.lock, true); 1219 1197 1220 1198 return total; … … 1229 1207 ASSERT(free != NULL); 1230 1208 1231 ipl_t ipl = interrupts_disable(); 1232 spinlock_lock(&zones.lock); 1209 irq_spinlock_lock(&zones.lock, true); 1233 1210 1234 1211 *total = 0; … … 1248 1225 } 1249 1226 1250 spinlock_unlock(&zones.lock); 1251 interrupts_restore(ipl); 1252 } 1253 1254 /** Prints list of zones. */ 1227 irq_spinlock_unlock(&zones.lock, true); 1228 } 1229 1230 /** Prints list of zones. 1231 * 1232 */ 1255 1233 void zones_print_list(void) 1256 1234 { … … 1278 1256 size_t i; 1279 1257 for (i = 0;; i++) { 1280 ipl_t ipl = interrupts_disable(); 1281 spinlock_lock(&zones.lock); 1258 irq_spinlock_lock(&zones.lock, true); 1282 1259 1283 1260 if (i >= zones.count) { 1284 spinlock_unlock(&zones.lock); 1285 interrupts_restore(ipl); 1261 irq_spinlock_unlock(&zones.lock, true); 1286 1262 break; 1287 1263 } … … 1293 1269 size_t busy_count = zones.info[i].busy_count; 1294 1270 1295 spinlock_unlock(&zones.lock); 1296 interrupts_restore(ipl); 1271 irq_spinlock_unlock(&zones.lock, true); 1297 1272 1298 1273 bool available = zone_flags_available(flags); … … 1328 1303 void zone_print_one(size_t num) 1329 1304 { 1330 ipl_t ipl = interrupts_disable(); 1331 spinlock_lock(&zones.lock); 1305 irq_spinlock_lock(&zones.lock, true); 1332 1306 size_t znum = (size_t) -1; 1333 1307 … … 1341 1315 1342 1316 if (znum == (size_t) -1) { 1343 spinlock_unlock(&zones.lock); 1344 interrupts_restore(ipl); 1317 irq_spinlock_unlock(&zones.lock, true); 1345 1318 printf("Zone not found.\n"); 1346 1319 return; … … 1353 1326 size_t busy_count = zones.info[i].busy_count; 1354 1327 1355 spinlock_unlock(&zones.lock); 1356 interrupts_restore(ipl); 1328 irq_spinlock_unlock(&zones.lock, true); 1357 1329 1358 1330 bool available = zone_flags_available(flags); -
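
Almost every frame.c hunk in this changeset is the same mechanical substitution: the explicit interrupts_disable()/spinlock_lock(&zones.lock) pair, and its mirror image on the unlock path, becomes a single irq_spinlock_lock(&zones.lock, true) / irq_spinlock_unlock(&zones.lock, true) call. The sketch below is only a stand-alone model of that idiom: the calling convention matches the diff, but the internals of irq_spinlock_t and the stubbed primitives are assumptions made so the example compiles on its own.

#include <stdbool.h>
#include <stdio.h>

typedef bool ipl_t;

static ipl_t interrupts_disable(void) { return true; }     /* stub */
static void interrupts_restore(ipl_t ipl) { (void) ipl; }  /* stub */

typedef struct { int locked; } spinlock_t;

static void spinlock_lock(spinlock_t *lock) { lock->locked = 1; }
static void spinlock_unlock(spinlock_t *lock) { lock->locked = 0; }

/* Guessed layout for illustration: a spinlock plus the saved ipl_t. */
typedef struct {
	spinlock_t lock;
	ipl_t ipl;
} irq_spinlock_t;

/* When the second argument is true, save and disable interrupts first. */
static void irq_spinlock_lock(irq_spinlock_t *lock, bool irq_dis)
{
	if (irq_dis)
		lock->ipl = interrupts_disable();
	spinlock_lock(&lock->lock);
}

static void irq_spinlock_unlock(irq_spinlock_t *lock, bool irq_res)
{
	spinlock_unlock(&lock->lock);
	if (irq_res)
		interrupts_restore(lock->ipl);
}

static irq_spinlock_t zones_lock;   /* stands in for zones.lock */

int main(void)
{
	/* Old idiom: ipl = interrupts_disable(); spinlock_lock(&zones.lock);
	 * New idiom used throughout frame.c after this changeset: */
	irq_spinlock_lock(&zones_lock, true);
	puts("zones locked with interrupts disabled");
	irq_spinlock_unlock(&zones_lock, true);
	
	return 0;
}

One practical benefit of the combined primitive is that the saved interrupt state travels with the lock it protects, so an unlock path cannot accidentally restore a stale ipl_t saved somewhere else.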
kernel/generic/src/mm/page.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief Virtual Address Translation subsystem.35 * @brief Virtual Address Translation subsystem. 36 36 * 37 37 * This file contains code for creating, destroying and searching … … 39 39 * Functions here are mere wrappers that call the real implementation. 40 40 * They however, define the single interface. 41 * 41 42 */ 42 43 … … 55 56 * will do an implicit serialization by virtue of running the TLB shootdown 56 57 * interrupt handler. 58 * 57 59 */ 58 60 … … 83 85 * of page boundaries. 84 86 * 85 * @param s Address of the structure. 86 * @param size Size of the structure. 87 * @param addr Address of the structure. 88 * @param size Size of the structure. 89 * 87 90 */ 88 void map_structure(uintptr_t s, size_t size)91 void map_structure(uintptr_t addr, size_t size) 89 92 { 90 int i, cnt, length; 91 92 length = size + (s - (s & ~(PAGE_SIZE - 1))); 93 cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0); 94 93 size_t length = size + (addr - (addr & ~(PAGE_SIZE - 1))); 94 size_t cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0); 95 96 size_t i; 95 97 for (i = 0; i < cnt; i++) 96 page_mapping_insert(AS_KERNEL, s+ i * PAGE_SIZE,97 s+ i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE);98 98 page_mapping_insert(AS_KERNEL, addr + i * PAGE_SIZE, 99 addr + i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE); 100 99 101 /* Repel prefetched accesses to the old mapping. */ 100 102 memory_barrier(); … … 108 110 * The page table must be locked and interrupts must be disabled. 109 111 * 110 * @param as Address space to wich page belongs. 111 * @param page Virtual address of the page to be mapped. 112 * @param frame Physical address of memory frame to which the mapping is 113 * done. 114 * @param flags Flags to be used for mapping. 112 * @param as Address space to wich page belongs. 113 * @param page Virtual address of the page to be mapped. 114 * @param frame Physical address of memory frame to which the mapping is 115 * done. 116 * @param flags Flags to be used for mapping. 117 * 115 118 */ 116 void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags) 119 void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, 120 unsigned int flags) 117 121 { 118 122 ASSERT(page_mapping_operations); … … 133 137 * The page table must be locked and interrupts must be disabled. 134 138 * 135 * @param as Address space to wich page belongs. 136 * @param page Virtual address of the page to be demapped. 139 * @param as Address space to wich page belongs. 140 * @param page Virtual address of the page to be demapped. 141 * 137 142 */ 138 143 void page_mapping_remove(as_t *as, uintptr_t page) … … 142 147 143 148 page_mapping_operations->mapping_remove(as, page); 144 149 145 150 /* Repel prefetched accesses to the old mapping. */ 146 151 memory_barrier(); … … 153 158 * The page table must be locked and interrupts must be disabled. 154 159 * 155 * @param as Address space to wich page belongs.156 * @param page Virtual page.160 * @param as Address space to wich page belongs. 161 * @param page Virtual page. 157 162 * 158 * @return NULL if there is no such mapping; requested mapping 159 * otherwise. 163 * @return NULL if there is no such mapping; requested mapping 164 * otherwise. 165 * 160 166 */ 161 167 pte_t *page_mapping_find(as_t *as, uintptr_t page) … … 163 169 ASSERT(page_mapping_operations); 164 170 ASSERT(page_mapping_operations->mapping_find); 165 171 166 172 return page_mapping_operations->mapping_find(as, page); 167 173 } -
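
map_structure() above needs the number of pages covered by a structure that may start anywhere inside a page. The computation is just the two visible lines of arithmetic; the stand-alone check below (PAGE_SIZE 4096 and the helper name pages_covering are assumptions of the sketch) shows the two boundary cases worth keeping in mind.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* Number of pages covering 'size' bytes starting at 'addr', mirroring the
 * length/cnt computation in map_structure() above. */
static size_t pages_covering(uintptr_t addr, size_t size)
{
	size_t length = size + (addr - (addr & ~((uintptr_t) PAGE_SIZE - 1)));
	return length / PAGE_SIZE + (length % PAGE_SIZE > 0);
}

int main(void)
{
	/* 100 bytes starting 20 bytes before a page boundary span 2 pages. */
	printf("%zu\n", pages_covering(0x1000 - 20, 100));    /* prints 2 */
	
	/* A page-aligned, page-sized structure spans exactly 1 page. */
	printf("%zu\n", pages_covering(0x2000, PAGE_SIZE));   /* prints 1 */
	
	return 0;
}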
kernel/generic/src/mm/slab.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief Slab allocator.35 * @brief Slab allocator. 36 36 * 37 37 * The slab allocator is closely modelled after OpenSolaris slab allocator. … … 50 50 * 51 51 * The slab allocator supports per-CPU caches ('magazines') to facilitate 52 * good SMP scaling. 52 * good SMP scaling. 53 53 * 54 54 * When a new object is being allocated, it is first checked, if it is … … 65 65 * thrashing when somebody is allocating/deallocating 1 item at the magazine 66 66 * size boundary. LIFO order is enforced, which should avoid fragmentation 67 * as much as possible. 68 * 67 * as much as possible. 68 * 69 69 * Every cache contains list of full slabs and list of partially full slabs. 70 70 * Empty slabs are immediately freed (thrashing will be avoided because 71 * of magazines). 71 * of magazines). 72 72 * 73 73 * The slab information structure is kept inside the data area, if possible. … … 95 95 * 96 96 * @todo 97 * it might be good to add granularity of locks even to slab level,97 * It might be good to add granularity of locks even to slab level, 98 98 * we could then try_spinlock over all partial slabs and thus improve 99 * scalability even on slab level 99 * scalability even on slab level. 100 * 100 101 */ 101 102 … … 114 115 #include <macros.h> 115 116 116 SPINLOCK_INITIALIZE(slab_cache_lock);117 IRQ_SPINLOCK_STATIC_INITIALIZE(slab_cache_lock); 117 118 static LIST_INITIALIZE(slab_cache_list); 118 119 119 120 /** Magazine cache */ 120 121 static slab_cache_t mag_cache; 122 121 123 /** Cache for cache descriptors */ 122 124 static slab_cache_t slab_cache_cache; 125 123 126 /** Cache for external slab descriptors 124 127 * This time we want per-cpu cache, so do not make it static … … 128 131 */ 129 132 static slab_cache_t *slab_extern_cache; 133 130 134 /** Caches for malloc */ 131 135 static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1]; 136 132 137 static const char *malloc_names[] = { 133 138 "malloc-16", … … 154 159 /** Slab descriptor */ 155 160 typedef struct { 156 slab_cache_t *cache; /**< Pointer to parent cache. */157 link_t link; /**< List of full/partial slabs. */158 void *start; /**< Start address of first available item. */159 size_t available; /**< Count of available items in this slab. */160 size_t nextavail; /**< The index of next available item. */161 slab_cache_t *cache; /**< Pointer to parent cache. */ 162 link_t link; /**< List of full/partial slabs. */ 163 void *start; /**< Start address of first available item. */ 164 size_t available; /**< Count of available items in this slab. */ 165 size_t nextavail; /**< The index of next available item. 
*/ 161 166 } slab_t; 162 167 163 168 #ifdef CONFIG_DEBUG 164 static int _slab_initialized = 0;169 static unsigned int _slab_initialized = 0; 165 170 #endif 166 171 167 172 /**************************************/ 168 173 /* Slab allocation functions */ 169 170 /** 171 * Allocate frames for slab space and initialize 172 * 173 */ 174 static slab_t *slab_space_alloc(slab_cache_t *cache, int flags) 175 { 176 void *data; 174 /**************************************/ 175 176 /** Allocate frames for slab space and initialize 177 * 178 */ 179 static slab_t *slab_space_alloc(slab_cache_t *cache, unsigned int flags) 180 { 181 182 183 size_t zone = 0; 184 185 void *data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone); 186 if (!data) { 187 return NULL; 188 } 189 177 190 slab_t *slab; 178 191 size_t fsize; 179 unsigned int i; 180 size_t zone = 0; 181 182 data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone); 183 if (!data) { 184 return NULL; 185 } 192 186 193 if (!(cache->flags & SLAB_CACHE_SLINSIDE)) { 187 194 slab = slab_alloc(slab_extern_cache, flags); … … 196 203 197 204 /* Fill in slab structures */ 198 for (i = 0; i < ((unsigned int) 1 << cache->order); i++) 205 size_t i; 206 for (i = 0; i < ((size_t) 1 << cache->order); i++) 199 207 frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone); 200 208 201 209 slab->start = data; 202 210 slab->available = cache->objects; 203 211 slab->nextavail = 0; 204 212 slab->cache = cache; 205 213 206 214 for (i = 0; i < cache->objects; i++) 207 *(( int *) (slab->start + i*cache->size)) = i + 1;208 215 *((size_t *) (slab->start + i * cache->size)) = i + 1; 216 209 217 atomic_inc(&cache->allocated_slabs); 210 218 return slab; 211 219 } 212 220 213 /** 214 * Deallocate space associated with slab 221 /** Deallocate space associated with slab 215 222 * 216 223 * @return number of freed frames 224 * 217 225 */ 218 226 static size_t slab_space_free(slab_cache_t *cache, slab_t *slab) 219 227 { 220 228 frame_free(KA2PA(slab->start)); 221 if (! 
(cache->flags & SLAB_CACHE_SLINSIDE))229 if (!(cache->flags & SLAB_CACHE_SLINSIDE)) 222 230 slab_free(slab_extern_cache, slab); 223 231 224 232 atomic_dec(&cache->allocated_slabs); 225 233 226 return 1 << cache->order;234 return (1 << cache->order); 227 235 } 228 236 229 237 /** Map object to slab structure */ 230 static slab_t * obj2slab(void *obj)238 static slab_t *obj2slab(void *obj) 231 239 { 232 240 return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0); 233 241 } 234 242 235 /****************** ********************/243 /******************/ 236 244 /* Slab functions */ 237 238 239 /** 240 * Return object to slab and call a destructor 245 /******************/ 246 247 /** Return object to slab and call a destructor 241 248 * 242 249 * @param slab If the caller knows directly slab of the object, otherwise NULL 243 250 * 244 251 * @return Number of freed pages 252 * 245 253 */ 246 254 static size_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab) 247 255 { 248 int freed = 0;249 250 256 if (!slab) 251 257 slab = obj2slab(obj); 252 258 253 259 ASSERT(slab->cache == cache); 254 260 261 size_t freed = 0; 262 255 263 if (cache->destructor) 256 264 freed = cache->destructor(obj); … … 258 266 spinlock_lock(&cache->slablock); 259 267 ASSERT(slab->available < cache->objects); 260 261 *(( int *)obj) = slab->nextavail;268 269 *((size_t *) obj) = slab->nextavail; 262 270 slab->nextavail = (obj - slab->start) / cache->size; 263 271 slab->available++; 264 272 265 273 /* Move it to correct list */ 266 274 if (slab->available == cache->objects) { … … 268 276 list_remove(&slab->link); 269 277 spinlock_unlock(&cache->slablock); 270 278 271 279 return freed + slab_space_free(cache, slab); 272 273 280 } else if (slab->available == 1) { 274 281 /* It was in full, move to partial */ … … 276 283 list_prepend(&slab->link, &cache->partial_slabs); 277 284 } 285 278 286 spinlock_unlock(&cache->slablock); 279 287 return freed; 280 288 } 281 289 282 /** 283 * Take new object from slab or create new if needed 290 /** Take new object from slab or create new if needed 284 291 * 285 292 * @return Object address or null 293 * 286 294 */ 287 295 static void *slab_obj_create(slab_cache_t *cache, int flags) 288 296 { 297 spinlock_lock(&cache->slablock); 298 289 299 slab_t *slab; 290 void *obj; 291 292 spinlock_lock(&cache->slablock); 293 300 294 301 if (list_empty(&cache->partial_slabs)) { 295 /* Allow recursion and reclaiming 302 /* 303 * Allow recursion and reclaiming 296 304 * - this should work, as the slab control structures 297 305 * are small and do not need to allocate with anything 298 306 * other than frame_alloc when they are allocating, 299 307 * that's why we should get recursion at most 1-level deep 308 * 300 309 */ 301 310 spinlock_unlock(&cache->slablock); … … 303 312 if (!slab) 304 313 return NULL; 314 305 315 spinlock_lock(&cache->slablock); 306 316 } else { … … 309 319 list_remove(&slab->link); 310 320 } 311 obj = slab->start + slab->nextavail * cache->size; 312 slab->nextavail = *((int *)obj); 321 322 void *obj = slab->start + slab->nextavail * cache->size; 323 slab->nextavail = *((size_t *) obj); 313 324 slab->available--; 314 325 315 326 if (!slab->available) 316 327 list_prepend(&slab->link, &cache->full_slabs); 317 328 else 318 329 list_prepend(&slab->link, &cache->partial_slabs); 319 330 320 331 spinlock_unlock(&cache->slablock); 321 322 if ( cache->constructor && cache->constructor(obj, flags)) {332 333 if ((cache->constructor) && (cache->constructor(obj, flags))) { 323 334 /* 
Bad, bad, construction failed */ 324 335 slab_obj_destroy(cache, obj, slab); 325 336 return NULL; 326 337 } 338 327 339 return obj; 328 340 } 329 341 330 /**************************** **********/342 /****************************/ 331 343 /* CPU-Cache slab functions */ 332 333 /** 334 * Finds a full magazine in cache, takes it from list335 * and returns it336 * 337 * @param first If true, return first, else last mag338 */ 339 static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, intfirst)344 /****************************/ 345 346 /** Find a full magazine in cache, take it from list and return it 347 * 348 * @param first If true, return first, else last mag. 349 * 350 */ 351 static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, bool first) 340 352 { 341 353 slab_magazine_t *mag = NULL; 342 354 link_t *cur; 343 355 344 356 spinlock_lock(&cache->maglock); 345 357 if (!list_empty(&cache->magazines)) { … … 348 360 else 349 361 cur = cache->magazines.prev; 362 350 363 mag = list_get_instance(cur, slab_magazine_t, link); 351 364 list_remove(&mag->link); 352 365 atomic_dec(&cache->magazine_counter); 353 366 } 367 354 368 spinlock_unlock(&cache->maglock); 355 369 return mag; 356 370 } 357 371 358 /** Prepend magazine to magazine list in cache */ 372 /** Prepend magazine to magazine list in cache 373 * 374 */ 359 375 static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag) 360 376 { 361 377 spinlock_lock(&cache->maglock); 362 378 363 379 list_prepend(&mag->link, &cache->magazines); 364 380 atomic_inc(&cache->magazine_counter); … … 367 383 } 368 384 369 /** 370 * Free all objects in magazine and free memory associated with magazine 385 /** Free all objects in magazine and free memory associated with magazine 371 386 * 372 387 * @return Number of freed pages 388 * 373 389 */ 374 390 static size_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag) 375 391 { 376 unsigned int i;392 size_t i; 377 393 size_t frames = 0; 378 394 379 395 for (i = 0; i < mag->busy; i++) { 380 396 frames += slab_obj_destroy(cache, mag->objs[i], NULL); … … 383 399 384 400 slab_free(&mag_cache, mag); 385 401 386 402 return frames; 387 403 } 388 404 389 /** 390 * Find full magazine, set it as current and return it 405 /** Find full magazine, set it as current and return it 391 406 * 392 407 * Assume cpu_magazine lock is held 408 * 393 409 */ 394 410 static slab_magazine_t *get_full_current_mag(slab_cache_t *cache) 395 411 { 396 slab_magazine_t *cmag, *lastmag, *newmag; 397 398 cmag = cache->mag_cache[CPU->id].current; 399 lastmag = cache->mag_cache[CPU->id].last; 412 slab_magazine_t *cmag = cache->mag_cache[CPU->id].current; 413 slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last; 414 400 415 if (cmag) { /* First try local CPU magazines */ 401 416 if (cmag->busy) 402 417 return cmag; 403 404 if ( lastmag && lastmag->busy) {418 419 if ((lastmag) && (lastmag->busy)) { 405 420 cache->mag_cache[CPU->id].current = lastmag; 406 421 cache->mag_cache[CPU->id].last = cmag; … … 408 423 } 409 424 } 425 410 426 /* Local magazines are empty, import one from magazine list */ 411 newmag = get_mag_from_cache(cache, 1);427 slab_magazine_t *newmag = get_mag_from_cache(cache, 1); 412 428 if (!newmag) 413 429 return NULL; 414 430 415 431 if (lastmag) 416 432 magazine_destroy(cache, lastmag); 417 433 418 434 cache->mag_cache[CPU->id].last = cmag; 419 435 cache->mag_cache[CPU->id].current = newmag; 436 420 437 return newmag; 421 438 } 422 439 423 /** 424 * Try to find object in CPU-cache magazines 440 
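
The per-CPU layer above keeps exactly two magazines, current and last: allocation prefers current, falls back to swapping in last if it still holds objects, and only then imports a full magazine from the shared list. The sketch below models just that preference order; it is deliberately simplified (the depot call is a stub and the kernel's destruction of the stale magazine is left out), and the helper names are invented.

#include <stddef.h>
#include <stdio.h>

#define SLAB_MAG_SIZE 4

typedef struct {
	size_t busy;                  /* number of cached objects */
	void *objs[SLAB_MAG_SIZE];
} slab_magazine_t;

typedef struct {
	slab_magazine_t *current;
	slab_magazine_t *last;
} cpu_mag_cache_t;

/* Stub standing in for importing a full magazine from the shared list. */
static slab_magazine_t *get_mag_from_depot(void)
{
	return NULL;
}

/* Preference order modelled after get_full_current_mag():
 * current first, then last (swapping the two), then the shared depot. */
static slab_magazine_t *get_full_mag(cpu_mag_cache_t *cc)
{
	if ((cc->current) && (cc->current->busy))
		return cc->current;
	
	if ((cc->last) && (cc->last->busy)) {
		slab_magazine_t *tmp = cc->current;
		cc->current = cc->last;
		cc->last = tmp;
		return cc->current;
	}
	
	slab_magazine_t *fresh = get_mag_from_depot();
	if (fresh) {
		cc->last = cc->current;
		cc->current = fresh;
	}
	
	return fresh;
}

int main(void)
{
	int x = 0;
	int y = 0;
	
	slab_magazine_t a = { .busy = 0 };
	slab_magazine_t b = { .busy = 2, .objs = { &x, &y } };
	cpu_mag_cache_t cc = { .current = &a, .last = &b };
	
	slab_magazine_t *mag = get_full_mag(&cc);
	printf("got magazine with %zu object(s)\n", mag ? mag->busy : (size_t) 0);
	
	return 0;
}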
/** Try to find object in CPU-cache magazines 425 441 * 426 442 * @return Pointer to object or NULL if not available 443 * 427 444 */ 428 445 static void *magazine_obj_get(slab_cache_t *cache) 429 446 { 430 slab_magazine_t *mag;431 void *obj;432 433 447 if (!CPU) 434 448 return NULL; 435 449 436 450 spinlock_lock(&cache->mag_cache[CPU->id].lock); 437 438 mag = get_full_current_mag(cache);451 452 slab_magazine_t *mag = get_full_current_mag(cache); 439 453 if (!mag) { 440 454 spinlock_unlock(&cache->mag_cache[CPU->id].lock); 441 455 return NULL; 442 456 } 443 obj = mag->objs[--mag->busy]; 457 458 void *obj = mag->objs[--mag->busy]; 444 459 spinlock_unlock(&cache->mag_cache[CPU->id].lock); 460 445 461 atomic_dec(&cache->cached_objs); 446 462 … … 448 464 } 449 465 450 /** 451 * Assure that the current magazine is empty, return pointer to it, or NULL if 452 * no empty magazine is available and cannot be allocated 466 /** Assure that the current magazine is empty, return pointer to it, 467 * or NULL if no empty magazine is available and cannot be allocated 453 468 * 454 469 * Assume mag_cache[CPU->id].lock is held 455 470 * 456 * We have 2 magazines bound to processor. 457 * First try the current. 458 * If full, try the last. 459 * If full, put to magazines list. 460 * allocate new, exchange last & current 471 * We have 2 magazines bound to processor. 472 * First try the current. 473 * If full, try the last. 474 * If full, put to magazines list. 461 475 * 462 476 */ 463 477 static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache) 464 478 { 465 slab_magazine_t *cmag,*lastmag,*newmag; 466 467 cmag = cache->mag_cache[CPU->id].current; 468 lastmag = cache->mag_cache[CPU->id].last; 469 479 slab_magazine_t *cmag = cache->mag_cache[CPU->id].current; 480 slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last; 481 470 482 if (cmag) { 471 483 if (cmag->busy < cmag->size) 472 484 return cmag; 473 if (lastmag && lastmag->busy < lastmag->size) { 485 486 if ((lastmag) && (lastmag->busy < lastmag->size)) { 474 487 cache->mag_cache[CPU->id].last = cmag; 475 488 cache->mag_cache[CPU->id].current = lastmag; … … 477 490 } 478 491 } 492 479 493 /* current | last are full | nonexistent, allocate new */ 480 /* We do not want to sleep just because of caching */ 481 /* Especially we do not want reclaiming to start, as 482 * this would deadlock */ 483 newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM); 494 495 /* 496 * We do not want to sleep just because of caching, 497 * especially we do not want reclaiming to start, as 498 * this would deadlock. 
499 * 500 */ 501 slab_magazine_t *newmag = slab_alloc(&mag_cache, 502 FRAME_ATOMIC | FRAME_NO_RECLAIM); 484 503 if (!newmag) 485 504 return NULL; 505 486 506 newmag->size = SLAB_MAG_SIZE; 487 507 newmag->busy = 0; 488 508 489 509 /* Flush last to magazine list */ 490 510 if (lastmag) 491 511 put_mag_to_cache(cache, lastmag); 492 512 493 513 /* Move current as last, save new as current */ 494 cache->mag_cache[CPU->id].last = cmag; 495 cache->mag_cache[CPU->id].current = newmag; 496 514 cache->mag_cache[CPU->id].last = cmag; 515 cache->mag_cache[CPU->id].current = newmag; 516 497 517 return newmag; 498 518 } 499 519 500 /** 501 * Put object into CPU-cache magazine502 * 503 * @return 0 - success, -1 - could not get memory520 /** Put object into CPU-cache magazine 521 * 522 * @return 0 on success, -1 on no memory 523 * 504 524 */ 505 525 static int magazine_obj_put(slab_cache_t *cache, void *obj) 506 526 { 507 slab_magazine_t *mag;508 509 527 if (!CPU) 510 528 return -1; 511 529 512 530 spinlock_lock(&cache->mag_cache[CPU->id].lock); 513 514 mag = make_empty_current_mag(cache);531 532 slab_magazine_t *mag = make_empty_current_mag(cache); 515 533 if (!mag) { 516 534 spinlock_unlock(&cache->mag_cache[CPU->id].lock); … … 519 537 520 538 mag->objs[mag->busy++] = obj; 521 539 522 540 spinlock_unlock(&cache->mag_cache[CPU->id].lock); 541 523 542 atomic_inc(&cache->cached_objs); 543 524 544 return 0; 525 545 } 526 546 527 528 /**************************************/ 547 /************************/ 529 548 /* Slab cache functions */ 530 531 /** Return number of objects that fit in certain cache size */ 532 static unsigned int comp_objects(slab_cache_t *cache) 549 /************************/ 550 551 /** Return number of objects that fit in certain cache size 552 * 553 */ 554 static size_t comp_objects(slab_cache_t *cache) 533 555 { 534 556 if (cache->flags & SLAB_CACHE_SLINSIDE) 535 return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) /536 cache->size;537 else 557 return ((PAGE_SIZE << cache->order) 558 - sizeof(slab_t)) / cache->size; 559 else 538 560 return (PAGE_SIZE << cache->order) / cache->size; 539 561 } 540 562 541 /** Return wasted space in slab */542 static unsigned int badness(slab_cache_t *cache) 543 { 544 unsigned int objects; 545 unsigned int ssize; 546 547 objects = comp_objects(cache);548 ssize = PAGE_SIZE << cache->order;563 /** Return wasted space in slab 564 * 565 */ 566 static size_t badness(slab_cache_t *cache) 567 { 568 size_t objects = comp_objects(cache); 569 size_t ssize = PAGE_SIZE << cache->order; 570 549 571 if (cache->flags & SLAB_CACHE_SLINSIDE) 550 572 ssize -= sizeof(slab_t); 573 551 574 return ssize - objects * cache->size; 552 575 } 553 576 554 /** 555 * Initialize mag_cache structure in slab cache577 /** Initialize mag_cache structure in slab cache 578 * 556 579 */ 557 580 static bool make_magcache(slab_cache_t *cache) 558 581 { 559 unsigned int i;560 561 582 ASSERT(_slab_initialized >= 2); 562 583 563 584 cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count, 564 585 FRAME_ATOMIC); 565 586 if (!cache->mag_cache) 566 587 return false; 567 588 589 size_t i; 568 590 for (i = 0; i < config.cpu_count; i++) { 569 591 memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0); 570 592 spinlock_initialize(&cache->mag_cache[i].lock, 571 "slab_maglock_cpu"); 572 } 593 "slab.cache.mag_cache[].lock"); 594 } 595 573 596 return true; 574 597 } 575 598 576 /** Initialize allocated memory as a slab cache */ 599 /** Initialize allocated memory as a slab cache 600 * 
601 */ 577 602 static void _slab_cache_create(slab_cache_t *cache, const char *name, 578 size_t size, size_t align, int (*constructor)(void *obj, int kmflag), 579 int (*destructor)(void *obj), int flags) 580 { 581 int pages; 582 ipl_t ipl; 583 603 size_t size, size_t align, int (*constructor)(void *obj, 604 unsigned int kmflag), size_t (*destructor)(void *obj), unsigned int flags) 605 { 584 606 memsetb(cache, sizeof(*cache), 0); 585 607 cache->name = name; 586 608 587 609 if (align < sizeof(unative_t)) 588 610 align = sizeof(unative_t); 611 589 612 size = ALIGN_UP(size, align); 590 613 591 614 cache->size = size; 592 593 615 cache->constructor = constructor; 594 616 cache->destructor = destructor; 595 617 cache->flags = flags; 596 618 597 619 list_initialize(&cache->full_slabs); 598 620 list_initialize(&cache->partial_slabs); 599 621 list_initialize(&cache->magazines); 600 spinlock_initialize(&cache->slablock, "slab_lock"); 601 spinlock_initialize(&cache->maglock, "slab_maglock"); 622 623 spinlock_initialize(&cache->slablock, "slab.cache.slablock"); 624 spinlock_initialize(&cache->maglock, "slab.cache.maglock"); 625 602 626 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) 603 627 (void) make_magcache(cache); 604 628 605 629 /* Compute slab sizes, object counts in slabs etc. */ 606 630 if (cache->size < SLAB_INSIDE_SIZE) 607 631 cache->flags |= SLAB_CACHE_SLINSIDE; 608 632 609 633 /* Minimum slab order */ 610 pages = SIZE2FRAMES(cache->size); 634 size_t pages = SIZE2FRAMES(cache->size); 635 611 636 /* We need the 2^order >= pages */ 612 637 if (pages == 1) … … 614 639 else 615 640 cache->order = fnzb(pages - 1) + 1; 616 617 while (badness(cache) > SLAB_MAX_BADNESS(cache)) {641 642 while (badness(cache) > SLAB_MAX_BADNESS(cache)) 618 643 cache->order += 1; 619 }644 620 645 cache->objects = comp_objects(cache); 646 621 647 /* If info fits in, put it inside */ 622 648 if (badness(cache) > sizeof(slab_t)) 623 649 cache->flags |= SLAB_CACHE_SLINSIDE; 624 650 625 651 /* Add cache to cache list */ 626 ipl = interrupts_disable(); 627 spinlock_lock(&slab_cache_lock); 628 652 irq_spinlock_lock(&slab_cache_lock, true); 629 653 list_append(&cache->link, &slab_cache_list); 630 631 spinlock_unlock(&slab_cache_lock); 632 interrupts_restore(ipl); 633 } 634 635 /** Create slab cache*/654 irq_spinlock_unlock(&slab_cache_lock, true); 655 } 656 657 /** Create slab cache 658 * 659 */ 636 660 slab_cache_t *slab_cache_create(const char *name, size_t size, size_t align, 637 int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj), 638 int flags) 639 { 640 slab_cache_t *cache; 641 642 cache = slab_alloc(&slab_cache_cache, 0); 661 int (*constructor)(void *obj, unsigned int kmflag), 662 size_t (*destructor)(void *obj), unsigned int flags) 663 { 664 slab_cache_t *cache = slab_alloc(&slab_cache_cache, 0); 643 665 _slab_cache_create(cache, name, size, align, constructor, destructor, 644 666 flags); 667 645 668 return cache; 646 669 } 647 670 648 /** 649 * Reclaim space occupied by objects that are already free 671 /** Reclaim space occupied by objects that are already free 650 672 * 651 673 * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing 674 * 652 675 * @return Number of freed pages 653 */ 654 static size_t _slab_reclaim(slab_cache_t *cache, int flags) 655 { 656 unsigned int i; 676 * 677 */ 678 static size_t _slab_reclaim(slab_cache_t *cache, unsigned int flags) 679 { 680 if (cache->flags & SLAB_CACHE_NOMAGAZINE) 681 return 0; /* Nothing to do */ 682 683 /* 684 * We count up to original 
magazine count to avoid 685 * endless loop 686 */ 687 atomic_count_t magcount = atomic_get(&cache->magazine_counter); 688 657 689 slab_magazine_t *mag; 658 690 size_t frames = 0; 659 int magcount; 660 661 if (cache->flags & SLAB_CACHE_NOMAGAZINE) 662 return 0; /* Nothing to do */ 663 664 /* We count up to original magazine count to avoid 665 * endless loop 666 */ 667 magcount = atomic_get(&cache->magazine_counter); 668 while (magcount-- && (mag=get_mag_from_cache(cache, 0))) { 669 frames += magazine_destroy(cache,mag); 670 if (!(flags & SLAB_RECLAIM_ALL) && frames) 691 692 while ((magcount--) && (mag = get_mag_from_cache(cache, 0))) { 693 frames += magazine_destroy(cache, mag); 694 if ((!(flags & SLAB_RECLAIM_ALL)) && (frames)) 671 695 break; 672 696 } … … 675 699 /* Free cpu-bound magazines */ 676 700 /* Destroy CPU magazines */ 701 size_t i; 677 702 for (i = 0; i < config.cpu_count; i++) { 678 703 spinlock_lock(&cache->mag_cache[i].lock); 679 704 680 705 mag = cache->mag_cache[i].current; 681 706 if (mag) … … 687 712 frames += magazine_destroy(cache, mag); 688 713 cache->mag_cache[i].last = NULL; 689 714 690 715 spinlock_unlock(&cache->mag_cache[i].lock); 691 716 } 692 717 } 693 718 694 719 return frames; 695 720 } 696 721 697 /** Check that there are no slabs and remove cache from system */ 722 /** Check that there are no slabs and remove cache from system 723 * 724 */ 698 725 void slab_cache_destroy(slab_cache_t *cache) 699 726 { 700 ipl_t ipl; 701 702 /* First remove cache from link, so that we don't need 727 /* 728 * First remove cache from link, so that we don't need 703 729 * to disable interrupts later 730 * 704 731 */ 705 706 ipl = interrupts_disable(); 707 spinlock_lock(&slab_cache_lock); 708 732 irq_spinlock_lock(&slab_cache_lock, true); 709 733 list_remove(&cache->link); 710 711 spinlock_unlock(&slab_cache_lock); 712 interrupts_restore(ipl); 713 714 /* Do not lock anything, we assume the software is correct and 715 * does not touch the cache when it decides to destroy it */ 734 irq_spinlock_unlock(&slab_cache_lock, true); 735 736 /* 737 * Do not lock anything, we assume the software is correct and 738 * does not touch the cache when it decides to destroy it 739 * 740 */ 716 741 717 742 /* Destroy all magazines */ 718 743 _slab_reclaim(cache, SLAB_RECLAIM_ALL); 719 744 720 745 /* All slabs must be empty */ 721 if ( !list_empty(&cache->full_slabs) ||722 !list_empty(&cache->partial_slabs))746 if ((!list_empty(&cache->full_slabs)) || 747 (!list_empty(&cache->partial_slabs))) 723 748 panic("Destroying cache that is not empty."); 724 749 725 750 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) 726 751 free(cache->mag_cache); 752 727 753 slab_free(&slab_cache_cache, cache); 728 754 } 729 755 730 /** Allocate new object from cache - if no flags given, always returns memory */ 731 void *slab_alloc(slab_cache_t *cache, int flags) 732 { 733 ipl_t ipl; 756 /** Allocate new object from cache - if no flags given, always returns memory 757 * 758 */ 759 void *slab_alloc(slab_cache_t *cache, unsigned int flags) 760 { 761 /* Disable interrupts to avoid deadlocks with interrupt handlers */ 762 ipl_t ipl = interrupts_disable(); 763 734 764 void *result = NULL; 735 765 736 /* Disable interrupts to avoid deadlocks with interrupt handlers */ 737 ipl = interrupts_disable(); 738 739 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) { 766 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) 740 767 result = magazine_obj_get(cache); 741 }768 742 769 if (!result) 743 770 result = slab_obj_create(cache, flags); 744 771 
745 772 interrupts_restore(ipl); 746 773 747 774 if (result) 748 775 atomic_inc(&cache->allocated_objs); 749 776 750 777 return result; 751 778 } 752 779 753 /** Return object to cache, use slab if known */ 780 /** Return object to cache, use slab if known 781 * 782 */ 754 783 static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab) 755 784 { 756 ipl_t ipl; 757 758 ipl = interrupts_disable(); 759 785 ipl_t ipl = interrupts_disable(); 786 760 787 if ((cache->flags & SLAB_CACHE_NOMAGAZINE) || 761 magazine_obj_put(cache, obj)) {788 (magazine_obj_put(cache, obj))) 762 789 slab_obj_destroy(cache, obj, slab); 763 764 } 790 765 791 interrupts_restore(ipl); 766 792 atomic_dec(&cache->allocated_objs); 767 793 } 768 794 769 /** Return slab object to cache */ 795 /** Return slab object to cache 796 * 797 */ 770 798 void slab_free(slab_cache_t *cache, void *obj) 771 799 { … … 773 801 } 774 802 775 /* Go through all caches and reclaim what is possible */ 776 size_t slab_reclaim(int flags) 777 { 778 slab_cache_t *cache; 803 /** Go through all caches and reclaim what is possible 804 * 805 * Interrupts must be disabled before calling this function, 806 * otherwise memory allocation from interrupts can deadlock. 807 * 808 */ 809 size_t slab_reclaim(unsigned int flags) 810 { 811 irq_spinlock_lock(&slab_cache_lock, false); 812 813 size_t frames = 0; 779 814 link_t *cur; 780 size_t frames = 0;781 782 spinlock_lock(&slab_cache_lock);783 784 /* TODO: Add assert, that interrupts are disabled, otherwise785 * memory allocation from interrupts can deadlock.786 */787 788 815 for (cur = slab_cache_list.next; cur != &slab_cache_list; 789 816 cur = cur->next) { 790 cache = list_get_instance(cur, slab_cache_t, link);817 slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link); 791 818 frames += _slab_reclaim(cache, flags); 792 819 } 793 794 spinlock_unlock(&slab_cache_lock);795 820 821 irq_spinlock_unlock(&slab_cache_lock, false); 822 796 823 return frames; 797 824 } 798 825 799 800 /* Print list of slabs */ 826 /* Print list of slabs 827 * 828 */ 801 829 void slab_print_list(void) 802 830 { 803 int skip = 0; 804 805 printf("slab name size pages obj/pg slabs cached allocated" 831 printf("slab name size pages obj/pg slabs cached allocated" 806 832 " ctl\n"); 807 printf("---------------- -------- ------ ------ ------ ------ ---------"833 printf("---------------- -------- ------ -------- ------ ------ ---------" 808 834 " ---\n"); 809 835 836 size_t skip = 0; 810 837 while (true) { 811 slab_cache_t *cache;812 link_t *cur;813 ipl_t ipl;814 int i;815 816 838 /* 817 839 * We must not hold the slab_cache_lock spinlock when printing … … 836 858 * statistics. 
837 859 */ 838 839 ipl = interrupts_disable(); 840 spinlock_lock(&slab_cache_lock); 841 860 861 irq_spinlock_lock(&slab_cache_lock, true); 862 863 link_t *cur; 864 size_t i; 842 865 for (i = 0, cur = slab_cache_list.next; 843 i < skip && cur != &slab_cache_list; 844 i++, cur = cur->next) 845 ; 846 866 (i < skip) && (cur != &slab_cache_list); 867 i++, cur = cur->next); 868 847 869 if (cur == &slab_cache_list) { 848 spinlock_unlock(&slab_cache_lock); 849 interrupts_restore(ipl); 870 irq_spinlock_unlock(&slab_cache_lock, true); 850 871 break; 851 872 } 852 873 853 874 skip++; 854 855 cache = list_get_instance(cur, slab_cache_t, link);856 875 876 slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link); 877 857 878 const char *name = cache->name; 858 879 uint8_t order = cache->order; 859 880 size_t size = cache->size; 860 unsigned int objects = cache->objects;881 size_t objects = cache->objects; 861 882 long allocated_slabs = atomic_get(&cache->allocated_slabs); 862 883 long cached_objs = atomic_get(&cache->cached_objs); 863 884 long allocated_objs = atomic_get(&cache->allocated_objs); 864 int flags = cache->flags; 865 866 spinlock_unlock(&slab_cache_lock); 867 interrupts_restore(ipl); 868 869 printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n", 885 unsigned int flags = cache->flags; 886 887 irq_spinlock_unlock(&slab_cache_lock, true); 888 889 printf("%-16s %8" PRIs " %6u %8" PRIs " %6ld %6ld %9ld %-3s\n", 870 890 name, size, (1 << order), objects, allocated_slabs, 871 891 cached_objs, allocated_objs, … … 876 896 void slab_cache_init(void) 877 897 { 878 int i, size;879 880 898 /* Initialize magazine cache */ 881 899 _slab_cache_create(&mag_cache, "slab_magazine", … … 883 901 sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE | 884 902 SLAB_CACHE_SLINSIDE); 903 885 904 /* Initialize slab_cache cache */ 886 905 _slab_cache_create(&slab_cache_cache, "slab_cache", 887 906 sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL, 888 907 SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE); 908 889 909 /* Initialize external slab cache */ 890 910 slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0, 891 911 NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED); 892 912 893 913 /* Initialize structures for malloc */ 914 size_t i; 915 size_t size; 916 894 917 for (i = 0, size = (1 << SLAB_MIN_MALLOC_W); 895 918 i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1); … … 898 921 NULL, NULL, SLAB_CACHE_MAGDEFERRED); 899 922 } 923 900 924 #ifdef CONFIG_DEBUG 901 925 _slab_initialized = 1; … … 906 930 * 907 931 * Kernel calls this function, when it knows the real number of 908 * processors. 909 * Allocate slab for cpucache and enable it on all existing910 * slabs that are SLAB_CACHE_MAGDEFERRED932 * processors. 
Allocate slab for cpucache and enable it on all 933 * existing slabs that are SLAB_CACHE_MAGDEFERRED 934 * 911 935 */ 912 936 void slab_enable_cpucache(void) 913 937 { 914 link_t *cur;915 slab_cache_t *s;916 917 938 #ifdef CONFIG_DEBUG 918 939 _slab_initialized = 2; 919 940 #endif 920 921 spinlock_lock(&slab_cache_lock); 922 941 942 irq_spinlock_lock(&slab_cache_lock, false); 943 944 link_t *cur; 923 945 for (cur = slab_cache_list.next; cur != &slab_cache_list; 924 cur = cur->next) {925 s = list_get_instance(cur, slab_cache_t, link);926 if ((s ->flags & SLAB_CACHE_MAGDEFERRED) !=946 cur = cur->next) { 947 slab_cache_t *slab = list_get_instance(cur, slab_cache_t, link); 948 if ((slab->flags & SLAB_CACHE_MAGDEFERRED) != 927 949 SLAB_CACHE_MAGDEFERRED) 928 950 continue; 929 (void) make_magcache(s); 930 s->flags &= ~SLAB_CACHE_MAGDEFERRED; 931 } 932 933 spinlock_unlock(&slab_cache_lock); 934 } 935 936 /**************************************/ 937 /* kalloc/kfree functions */ 938 void *malloc(unsigned int size, int flags) 951 952 (void) make_magcache(slab); 953 slab->flags &= ~SLAB_CACHE_MAGDEFERRED; 954 } 955 956 irq_spinlock_unlock(&slab_cache_lock, false); 957 } 958 959 void *malloc(size_t size, unsigned int flags) 939 960 { 940 961 ASSERT(_slab_initialized); … … 943 964 if (size < (1 << SLAB_MIN_MALLOC_W)) 944 965 size = (1 << SLAB_MIN_MALLOC_W); 945 946 int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;947 966 967 uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1; 968 948 969 return slab_alloc(malloc_caches[idx], flags); 949 970 } 950 971 951 void *realloc(void *ptr, unsigned int size,int flags)972 void *realloc(void *ptr, size_t size, unsigned int flags) 952 973 { 953 974 ASSERT(_slab_initialized); … … 959 980 if (size < (1 << SLAB_MIN_MALLOC_W)) 960 981 size = (1 << SLAB_MIN_MALLOC_W); 961 int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;982 uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1; 962 983 963 984 new_ptr = slab_alloc(malloc_caches[idx], flags); … … 980 1001 if (!ptr) 981 1002 return; 982 1003 983 1004 slab_t *slab = obj2slab(ptr); 984 1005 _slab_free(slab->cache, ptr, slab); -
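
malloc() above maps a request onto one of the power-of-two caches with fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1. The stand-alone check below models fnzb() with a portable loop and assumes SLAB_MIN_MALLOC_W is 4 (inferred from "malloc-16" being the smallest cache); both are assumptions of the sketch, not definitions taken from the kernel headers.

#include <stddef.h>
#include <stdio.h>

#define SLAB_MIN_MALLOC_W 4   /* 2^4 = 16 bytes; assumption, see above */

/* Portable model of fnzb(): index of the most significant set bit. */
static unsigned int fnzb(size_t arg)
{
	unsigned int n = 0;
	while (arg >>= 1)
		n++;
	return n;
}

/* Which power-of-two cache serves a request of 'size' bytes? */
static unsigned int malloc_cache_index(size_t size)
{
	if (size < (1 << SLAB_MIN_MALLOC_W))
		size = (1 << SLAB_MIN_MALLOC_W);
	
	return fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
}

int main(void)
{
	printf("%u %u %u %u\n",
	    malloc_cache_index(1),     /* 0: "malloc-16" */
	    malloc_cache_index(16),    /* 0: "malloc-16" */
	    malloc_cache_index(17),    /* 1: "malloc-32" */
	    malloc_cache_index(100));  /* 3: "malloc-128" */
	
	return 0;
}

Subtracting one before taking the highest set bit is what rounds a request up to the next size class instead of down: 17 bytes lands in "malloc-32", while exactly 16 stays in "malloc-16".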
kernel/generic/src/mm/tlb.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief Generic TLB shootdown algorithm.35 * @brief Generic TLB shootdown algorithm. 36 36 * 37 37 * The algorithm implemented here is based on the CMU TLB shootdown … … 53 53 #include <cpu.h> 54 54 55 /**56 * This lock is used for synchronisation between sender and57 * recipients of TLB shootdown message. It must be acquired58 * before CPU structure lock.59 */60 SPINLOCK_INITIALIZE(tlblock);61 62 55 void tlb_init(void) 63 56 { … … 66 59 67 60 #ifdef CONFIG_SMP 61 62 /** 63 * This lock is used for synchronisation between sender and 64 * recipients of TLB shootdown message. It must be acquired 65 * before CPU structure lock. 66 * 67 */ 68 IRQ_SPINLOCK_STATIC_INITIALIZE(tlblock); 68 69 69 70 /** Send TLB shootdown message. … … 78 79 * @param page Virtual page address, if required by type. 79 80 * @param count Number of pages, if required by type. 81 * 80 82 */ 81 83 void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid, 82 84 uintptr_t page, size_t count) 83 85 { 84 unsigned int i; 85 86 CPU->tlb_active = 0; 87 spinlock_lock(&tlblock); 86 CPU->tlb_active = false; 87 irq_spinlock_lock(&tlblock, false); 88 88 89 size_t i; 89 90 for (i = 0; i < config.cpu_count; i++) { 90 91 cpu_t *cpu; … … 92 93 if (i == CPU->id) 93 94 continue; 94 95 95 96 cpu = &cpus[i]; 96 spinlock_lock(&cpu->lock);97 irq_spinlock_lock(&cpu->lock, false); 97 98 if (cpu->tlb_messages_count == TLB_MESSAGE_QUEUE_LEN) { 98 99 /* … … 115 116 cpu->tlb_messages[idx].count = count; 116 117 } 117 spinlock_unlock(&cpu->lock);118 irq_spinlock_unlock(&cpu->lock, false); 118 119 } 119 120 120 121 tlb_shootdown_ipi_send(); 121 122 busy_wait: 122 123 busy_wait: 123 124 for (i = 0; i < config.cpu_count; i++) 124 125 if (cpus[i].tlb_active) … … 126 127 } 127 128 128 /** Finish TLB shootdown sequence. */ 129 /** Finish TLB shootdown sequence. 130 * 131 */ 129 132 void tlb_shootdown_finalize(void) 130 133 { 131 spinlock_unlock(&tlblock);132 CPU->tlb_active = 1;134 irq_spinlock_unlock(&tlblock, false); 135 CPU->tlb_active = true; 133 136 } 134 137 … … 138 141 } 139 142 140 /** Receive TLB shootdown message. */ 143 /** Receive TLB shootdown message. 
144 * 145 */ 141 146 void tlb_shootdown_ipi_recv(void) 142 147 { 143 tlb_invalidate_type_t type;144 asid_t asid;145 uintptr_t page;146 size_t count;147 unsigned int i;148 149 148 ASSERT(CPU); 150 149 151 CPU->tlb_active = 0;152 spinlock_lock(&tlblock);153 spinlock_unlock(&tlblock);150 CPU->tlb_active = false; 151 irq_spinlock_lock(&tlblock, false); 152 irq_spinlock_unlock(&tlblock, false); 154 153 155 spinlock_lock(&CPU->lock);154 irq_spinlock_lock(&CPU->lock, false); 156 155 ASSERT(CPU->tlb_messages_count <= TLB_MESSAGE_QUEUE_LEN); 157 156 157 size_t i; 158 158 for (i = 0; i < CPU->tlb_messages_count; CPU->tlb_messages_count--) { 159 t ype = CPU->tlb_messages[i].type;160 asid = CPU->tlb_messages[i].asid;161 page = CPU->tlb_messages[i].page;162 count = CPU->tlb_messages[i].count;163 159 tlb_invalidate_type_t type = CPU->tlb_messages[i].type; 160 asid_t asid = CPU->tlb_messages[i].asid; 161 uintptr_t page = CPU->tlb_messages[i].page; 162 size_t count = CPU->tlb_messages[i].count; 163 164 164 switch (type) { 165 165 case TLB_INVL_ALL: … … 170 170 break; 171 171 case TLB_INVL_PAGES: 172 ASSERT(count);172 ASSERT(count); 173 173 tlb_invalidate_pages(asid, page, count); 174 174 break; … … 177 177 break; 178 178 } 179 179 180 if (type == TLB_INVL_ALL) 180 181 break; 181 182 } 182 183 183 spinlock_unlock(&CPU->lock);184 CPU->tlb_active = 1;184 irq_spinlock_unlock(&CPU->lock, false); 185 CPU->tlb_active = true; 185 186 } 186 187
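
On the receiving side, tlb_shootdown_ipi_recv() above drains the per-CPU queue and stops early once a TLB_INVL_ALL message has been handled, since anything finer-grained after a full flush is redundant. The model below keeps only the two message types visible in the hunk (the ASID case and the sender's overflow handling are elided there) and stubs the architecture-specific invalidation calls.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef size_t asid_t;

typedef enum {
	TLB_INVL_ALL,    /* invalidate everything */
	TLB_INVL_PAGES   /* invalidate a range of pages */
} tlb_invalidate_type_t;

typedef struct {
	tlb_invalidate_type_t type;
	asid_t asid;
	uintptr_t page;
	size_t count;
} tlb_message_t;

/* Stubs standing in for the architecture-specific invalidation hooks. */
static void tlb_invalidate_all(void)
{
	puts("invalidate all");
}

static void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t count)
{
	printf("invalidate %zu page(s) at %#lx (asid %zu)\n",
	    count, (unsigned long) page, asid);
}

/* Drain a message queue the way tlb_shootdown_ipi_recv() does: once an
 * ALL-invalidation has been processed, the remaining messages carry no
 * extra information, so the loop stops early. */
static void drain(tlb_message_t *msgs, size_t count)
{
	size_t i;
	for (i = 0; i < count; i++) {
		switch (msgs[i].type) {
		case TLB_INVL_ALL:
			tlb_invalidate_all();
			break;
		case TLB_INVL_PAGES:
			tlb_invalidate_pages(msgs[i].asid, msgs[i].page,
			    msgs[i].count);
			break;
		}
		
		if (msgs[i].type == TLB_INVL_ALL)
			break;
	}
}

int main(void)
{
	tlb_message_t msgs[] = {
		{ TLB_INVL_PAGES, 1, 0x40000, 2 },
		{ TLB_INVL_ALL,   0, 0,       0 },
		{ TLB_INVL_PAGES, 1, 0x80000, 1 }   /* never reached */
	};
	
	drain(msgs, sizeof(msgs) / sizeof(msgs[0]));
	return 0;
}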