Changeset a35b458 in mainline for kernel/generic/src/mm/slab.c
- Timestamp:
- 2018-03-02T20:10:49Z
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- f1380b7
- Parents:
- 3061bc1
- git-author:
- Jiří Zárevúcky <zarevucky.jiri@…> (2018-02-28 17:38:31)
- git-committer:
- Jiří Zárevúcky <zarevucky.jiri@…> (2018-03-02 20:10:49)
- File:
- kernel/generic/src/mm/slab.c (1 edited)
kernel/generic/src/mm/slab.c
r3061bc1 → ra35b458

{
    size_t zone = 0;

    uintptr_t data_phys =
        frame_alloc_generic(cache->frames, flags, 0, &zone);
    if (!data_phys)
        return NULL;

    void *data = (void *) PA2KA(data_phys);

    slab_t *slab;
    size_t fsize;

    if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
        slab = slab_alloc(slab_extern_cache, flags);
…
        slab = data + fsize - sizeof(*slab);
    }

    /* Fill in slab structures */
    size_t i;
    for (i = 0; i < cache->frames; i++)
        frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone);

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = 0;
    slab->cache = cache;

    for (i = 0; i < cache->objects; i++)
        *((size_t *) (slab->start + i * cache->size)) = i + 1;

    atomic_inc(&cache->allocated_slabs);
    return slab;
…
    if (!(cache->flags & SLAB_CACHE_SLINSIDE))
        slab_free(slab_extern_cache, slab);

    atomic_dec(&cache->allocated_slabs);

    return cache->frames;
}
…
    if (!slab)
        slab = obj2slab(obj);

    assert(slab->cache == cache);

    size_t freed = 0;

    if (cache->destructor)
        freed = cache->destructor(obj);

    irq_spinlock_lock(&cache->slablock, true);
    assert(slab->available < cache->objects);

    *((size_t *) obj) = slab->nextavail;
    slab->nextavail = (obj - slab->start) / cache->size;
    slab->available++;

    /* Move it to correct list */
    if (slab->available == cache->objects) {
…
        list_remove(&slab->link);
        irq_spinlock_unlock(&cache->slablock, true);

        return freed + slab_space_free(cache, slab);
    } else if (slab->available == 1) {
…
        list_prepend(&slab->link, &cache->partial_slabs);
    }

    irq_spinlock_unlock(&cache->slablock, true);
    return freed;
…
{
    irq_spinlock_lock(&cache->slablock, true);

    slab_t *slab;

    if (list_empty(&cache->partial_slabs)) {
        /*
…
        if (!slab)
            return NULL;

        irq_spinlock_lock(&cache->slablock, true);
    } else {
…
        list_remove(&slab->link);
    }

    void *obj = slab->start + slab->nextavail * cache->size;
    slab->nextavail = *((size_t *) obj);
    slab->available--;

    if (!slab->available)
        list_prepend(&slab->link, &cache->full_slabs);
    else
        list_prepend(&slab->link, &cache->partial_slabs);

    irq_spinlock_unlock(&cache->slablock, true);

    if ((cache->constructor) && (cache->constructor(obj, flags) != EOK)) {
        /* Bad, bad, construction failed */
…
        return NULL;
    }

    return obj;
}
…
    slab_magazine_t *mag = NULL;
    link_t *cur;

    irq_spinlock_lock(&cache->maglock, true);
    if (!list_empty(&cache->magazines)) {
…
        else
            cur = list_last(&cache->magazines);

        mag = list_get_instance(cur, slab_magazine_t, link);
        list_remove(&mag->link);
…
{
    irq_spinlock_lock(&cache->maglock, true);

    list_prepend(&mag->link, &cache->magazines);
    atomic_inc(&cache->magazine_counter);

    irq_spinlock_unlock(&cache->maglock, true);
}
…
    size_t i;
    size_t frames = 0;

    for (i = 0; i < mag->busy; i++) {
        frames += slab_obj_destroy(cache, mag->objs[i], NULL);
        atomic_dec(&cache->cached_objs);
    }

    slab_free(&mag_cache, mag);

    return frames;
}
…
    slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
    slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;

    assert(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock));

    if (cmag) { /* First try local CPU magazines */
        if (cmag->busy)
            return cmag;

        if ((lastmag) && (lastmag->busy)) {
            cache->mag_cache[CPU->id].current = lastmag;
…
        }
    }

    /* Local magazines are empty, import one from magazine list */
    slab_magazine_t *newmag = get_mag_from_cache(cache, 1);
    if (!newmag)
        return NULL;

    if (lastmag)
        magazine_destroy(cache, lastmag);

    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;

    return newmag;
}
…
    if (!CPU)
        return NULL;

    irq_spinlock_lock(&cache->mag_cache[CPU->id].lock, true);

    slab_magazine_t *mag = get_full_current_mag(cache);
    if (!mag) {
…
        return NULL;
    }

    void *obj = mag->objs[--mag->busy];
    irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);

    atomic_dec(&cache->cached_objs);

    return obj;
}
…
    slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
    slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;

    assert(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock));

    if (cmag) {
        if (cmag->busy < cmag->size)
            return cmag;

        if ((lastmag) && (lastmag->busy < lastmag->size)) {
            cache->mag_cache[CPU->id].last = cmag;
…
        }
    }

    /* current | last are full | nonexistent, allocate new */

    /*
     * We do not want to sleep just because of caching,
…
    if (!newmag)
        return NULL;

    newmag->size = SLAB_MAG_SIZE;
    newmag->busy = 0;

    /* Flush last to magazine list */
    if (lastmag)
        put_mag_to_cache(cache, lastmag);

    /* Move current as last, save new as current */
    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;

    return newmag;
}
…
    if (!CPU)
        return -1;

    irq_spinlock_lock(&cache->mag_cache[CPU->id].lock, true);

    slab_magazine_t *mag = make_empty_current_mag(cache);
    if (!mag) {
…
        return -1;
    }

    mag->objs[mag->busy++] = obj;

    irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);

    atomic_inc(&cache->cached_objs);

    return 0;
}
…
    size_t objects = comp_objects(cache);
    size_t ssize = FRAMES2SIZE(cache->frames);

    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);

    return ssize - objects * cache->size;
}
…
{
    assert(_slab_initialized >= 2);

    cache->mag_cache = slab_alloc(&slab_mag_cache, FRAME_ATOMIC);
    if (!cache->mag_cache)
        return false;

    size_t i;
    for (i = 0; i < config.cpu_count; i++) {
…
            "slab.cache.mag_cache[].lock");
    }

    return true;
}
…
{
    assert(size > 0);

    memsetb(cache, sizeof(*cache), 0);
    cache->name = name;

    if (align < sizeof(sysarg_t))
        align = sizeof(sysarg_t);

    size = ALIGN_UP(size, align);

    cache->size = size;
    cache->constructor = constructor;
    cache->destructor = destructor;
    cache->flags = flags;

    list_initialize(&cache->full_slabs);
    list_initialize(&cache->partial_slabs);
    list_initialize(&cache->magazines);

    irq_spinlock_initialize(&cache->slablock, "slab.cache.slablock");
    irq_spinlock_initialize(&cache->maglock, "slab.cache.maglock");

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        (void) make_magcache(cache);

    /* Compute slab sizes, object counts in slabs etc. */
    if (cache->size < SLAB_INSIDE_SIZE)
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Minimum slab frames */
    cache->frames = SIZE2FRAMES(cache->size);

    while (badness(cache) > SLAB_MAX_BADNESS(cache))
        cache->frames <<= 1;

    cache->objects = comp_objects(cache);

    /* If info fits in, put it inside */
    if (badness(cache) > sizeof(slab_t))
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Add cache to cache list */
    irq_spinlock_lock(&slab_cache_lock, true);
…
    _slab_cache_create(cache, name, size, align, constructor, destructor,
        flags);

    return cache;
}
…
    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
        return 0; /* Nothing to do */

    /*
     * We count up to original magazine count to avoid
…
     */
    atomic_count_t magcount = atomic_get(&cache->magazine_counter);

    slab_magazine_t *mag;
    size_t frames = 0;

    while ((magcount--) && (mag = get_mag_from_cache(cache, 0))) {
        frames += magazine_destroy(cache, mag);
…
            break;
    }

    if (flags & SLAB_RECLAIM_ALL) {
        /* Free cpu-bound magazines */
…
        for (i = 0; i < config.cpu_count; i++) {
            irq_spinlock_lock(&cache->mag_cache[i].lock, true);

            mag = cache->mag_cache[i].current;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].current = NULL;

            mag = cache->mag_cache[i].last;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].last = NULL;

            irq_spinlock_unlock(&cache->mag_cache[i].lock, true);
        }
    }

    return frames;
}
…
{
    ipl_t ipl = interrupts_disable();

    if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
        (magazine_obj_put(cache, obj)))
        slab_obj_destroy(cache, obj, slab);

    interrupts_restore(ipl);
    atomic_dec(&cache->allocated_objs);
…
    list_remove(&cache->link);
    irq_spinlock_unlock(&slab_cache_lock, true);

    /*
     * Do not lock anything, we assume the software is correct and
…
     *
     */

    /* Destroy all magazines */
    _slab_reclaim(cache, SLAB_RECLAIM_ALL);

    /* All slabs must be empty */
    if ((!list_empty(&cache->full_slabs)) ||
        (!list_empty(&cache->partial_slabs)))
        panic("Destroying cache that is not empty.");

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
        slab_t *mag_slab = obj2slab(cache->mag_cache);
        _slab_free(mag_slab->cache, cache->mag_cache, mag_slab);
    }

    slab_free(&slab_cache_cache, cache);
}
…
    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    ipl_t ipl = interrupts_disable();

    void *result = NULL;

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        result = magazine_obj_get(cache);

    if (!result)
        result = slab_obj_create(cache, flags);

    interrupts_restore(ipl);

    if (result)
        atomic_inc(&cache->allocated_objs);

    return result;
}
…
{
    irq_spinlock_lock(&slab_cache_lock, true);

    size_t frames = 0;
    list_foreach(slab_cache_list, link, slab_cache_t, cache) {
        frames += _slab_reclaim(cache, flags);
    }

    irq_spinlock_unlock(&slab_cache_lock, true);

    return frames;
}
…
    printf("[cache name ] [size ] [pages ] [obj/pg] [slabs ]"
        " [cached] [alloc ] [ctl]\n");

    size_t skip = 0;
    while (true) {
…
         * statistics.
         */

        irq_spinlock_lock(&slab_cache_lock, true);

        link_t *cur;
        size_t i;
        for (i = 0, cur = slab_cache_list.head.next;
            (i < skip) && (cur != &slab_cache_list.head);
            i++, cur = cur->next);

        if (cur == &slab_cache_list.head) {
            irq_spinlock_unlock(&slab_cache_lock, true);
            break;
        }

        skip++;

        slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);

        const char *name = cache->name;
        size_t frames = cache->frames;
…
        long allocated_objs = atomic_get(&cache->allocated_objs);
        unsigned int flags = cache->flags;

        irq_spinlock_unlock(&slab_cache_lock, true);

        printf("%-18s %8zu %8zu %8zu %8ld %8ld %8ld %-5s\n",
            name, size, frames, objects, allocated_slabs,
…
        sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE |
        SLAB_CACHE_SLINSIDE);

    /* Initialize slab_cache cache */
    _slab_cache_create(&slab_cache_cache, "slab_cache_cache",
        sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL,
        SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);

    /* Initialize external slab cache */
    slab_extern_cache = slab_cache_create("slab_t", sizeof(slab_t), 0,
        NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

    /* Initialize structures for malloc */
    size_t i;
    size_t size;

    for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
        i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
…
            NULL, NULL, SLAB_CACHE_MAGDEFERRED);
    }

#ifdef CONFIG_DEBUG
    _slab_initialized = 1;
…
    _slab_initialized = 2;
#endif

    _slab_cache_create(&slab_mag_cache, "slab_mag_cache",
        sizeof(slab_mag_cache_t) * config.cpu_count, sizeof(uintptr_t),
        NULL, NULL, SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);

    irq_spinlock_lock(&slab_cache_lock, false);

    list_foreach(slab_cache_list, link, slab_cache_t, slab) {
        if ((slab->flags & SLAB_CACHE_MAGDEFERRED) !=
            SLAB_CACHE_MAGDEFERRED)
            continue;

        (void) make_magcache(slab);
        slab->flags &= ~SLAB_CACHE_MAGDEFERRED;
    }

    irq_spinlock_unlock(&slab_cache_lock, false);
}
…
    assert(_slab_initialized);
    assert(size <= (1 << SLAB_MAX_MALLOC_W));

    if (size < (1 << SLAB_MIN_MALLOC_W))
        size = (1 << SLAB_MIN_MALLOC_W);

    uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

    return slab_alloc(malloc_caches[idx], flags);
}
…
    assert(_slab_initialized);
    assert(size <= (1 << SLAB_MAX_MALLOC_W));

    void *new_ptr;

    if (size > 0) {
        if (size < (1 << SLAB_MIN_MALLOC_W))
            size = (1 << SLAB_MIN_MALLOC_W);
        uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

        new_ptr = slab_alloc(malloc_caches[idx], flags);
    } else
        new_ptr = NULL;

    if ((new_ptr != NULL) && (ptr != NULL)) {
        slab_t *slab = obj2slab(ptr);
        memcpy(new_ptr, ptr, min(size, slab->cache->size));
    }

    if (ptr != NULL)
        free(ptr);

    return new_ptr;
}
…
    if (!ptr)
        return;

    slab_t *slab = obj2slab(ptr);
    _slab_free(slab->cache, ptr, slab);
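The hunks above touch the public entry points of the kernel slab allocator: slab_cache_create(), slab_alloc(), slab_free() and the malloc()/realloc()/free() wrappers built on top of them. As a rough illustration of how a kernel subsystem might consume this API, here is a minimal sketch. It is not part of the changeset: the example_t type, the cache name, the header path and the zero alignment/flag values are assumptions.

#include <mm/slab.h>   /* assumed location of the slab cache API */
#include <stdint.h>

/* Hypothetical object type managed by its own cache. */
typedef struct {
    uint32_t id;
    uint32_t state;
} example_t;

static slab_cache_t *example_cache;

static void example_cache_init(void)
{
    /*
     * No constructor or destructor, default alignment (0) and no special
     * cache flags (0 is assumed to be an acceptable "no flags" value).
     */
    example_cache = slab_cache_create("example_t", sizeof(example_t), 0,
        NULL, NULL, 0);
}

static example_t *example_new(void)
{
    /*
     * Allocation flags are forwarded to the frame allocator when a new
     * slab has to be obtained; 0 is assumed to request default behaviour.
     */
    return (example_t *) slab_alloc(example_cache, 0);
}

static void example_delete(example_t *obj)
{
    /* Return the object to its cache, preferably via a CPU magazine. */
    slab_free(example_cache, obj);
}

Per the slab_alloc() hunk above, an allocation first tries the per-CPU magazine layer (magazine_obj_get()) and only falls back to carving a fresh object out of a slab (slab_obj_create()) when no cached object is available; the free path is symmetric, going through magazine_obj_put() before slab_obj_destroy().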