source: mainline/kernel/generic/src/mm/slab.c@ bcd4dd4

Last change on this file since bcd4dd4 was b60615bd, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

Modify kernel malloc()

This new implementation places the allocation size in front of the allocated
object, instead of relying on the slab allocator being able to determine the
source slab cache for an object. This should improve scalability and help reduce
the complexity of the memory management subsystem (further changes coming).

The drawback is that small malloc() allocations consume more memory; however, this
can be mitigated by switching to an API where the caller provides the known object
size at deallocation (most callers know it either statically or from a length they
have to remember anyway).
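
A minimal sketch of the size-prefix scheme described above, for illustration
only: raw_alloc()/raw_free() are hypothetical stand-ins for the underlying
allocator (not the actual kernel API), and a real implementation would also pad
the header up to the required allocation alignment.

#include <stddef.h>

void *raw_alloc(size_t size);            /* hypothetical backing allocator */
void raw_free(void *ptr, size_t size);

void *prefixed_malloc(size_t size)
{
	/* Reserve one extra word in front of the object for the size. */
	size_t total = size + sizeof(size_t);
	size_t *hdr = raw_alloc(total);
	if (!hdr)
		return NULL;

	*hdr = total;      /* remember the allocation size */
	return hdr + 1;    /* the caller's pointer starts after the header */
}

void prefixed_free(void *obj)
{
	if (!obj)
		return;

	size_t *hdr = (size_t *) obj - 1;
	raw_free(hdr, *hdr);   /* size recovered from the header */
}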

  • Property mode set to 100644
File size: 24.8 KB
1/*
2 * Copyright (c) 2006 Ondrej Palkovsky
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup kernel_generic_mm
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Slab allocator.
36 *
37 * The slab allocator is closely modelled after the OpenSolaris slab allocator.
38 * @see http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
39 *
40 * with the following exceptions:
41 * @li empty slabs are deallocated immediately
42 * (in Linux they are kept in a linked list, in Solaris ???)
43 * @li empty magazines are deallocated when not needed
44 * (in Solaris they are held in a linked list in the slab cache)
45 *
46 * The following features are not currently supported but would be easy to add:
47 * @li cache coloring
48 * @li dynamic magazine growing (different magazine sizes are already
49 * supported, but we would need to adjust allocation strategy)
50 *
51 * The slab allocator supports per-CPU caches ('magazines') to facilitate
52 * good SMP scaling.
53 *
54 * When a new object is being allocated, the allocator first checks whether
55 * it is available in a CPU-bound magazine. If it is not found there, it is
56 * allocated from a CPU-shared slab: if a partially full one is found,
57 * it is used, otherwise a new one is allocated.
58 *
59 * When an object is being deallocated, it is put into a CPU-bound magazine.
60 * If there is no such magazine, a new one is allocated (if this fails,
61 * the object is deallocated into its slab). If the magazine is full, it is
62 * put onto the CPU-shared list of magazines and a new one is allocated.
63 *
64 * The CPU-bound magazine is actually a pair of magazines in order to avoid
65 * thrashing when somebody is allocating/deallocating 1 item at the magazine
66 * size boundary. LIFO order is enforced, which should avoid fragmentation
67 * as much as possible.
68 *
69 * Every cache contains a list of full slabs and a list of partially full slabs.
70 * Empty slabs are immediately freed (thrashing will be avoided because
71 * of magazines).
72 *
73 * The slab information structure is kept inside the data area, if possible.
74 * A cache can be marked as not using magazines. This is used
75 * only for slab-related caches to avoid deadlocks and infinite recursion
76 * (the slab allocator uses itself for allocating all its control structures).
77 *
78 * The slab allocator allocates a lot of space and does not free it. When
79 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
80 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
81 * releases slabs from the CPU-shared magazine list until at least one slab
82 * is deallocated in each cache (this algorithm should probably change).
83 * The brutal reclaim removes all cached objects, even from CPU-bound
84 * magazines.
85 *
86 * @todo
87 * For better CPU-scaling the magazine allocation strategy should
88 * be extended. Currently, if the cache does not have a magazine, it asks
89 * the non-CPU-cached magazine cache to provide one. It might be feasible
90 * to add a CPU-cached magazine cache (which would allocate its magazines
91 * from the non-CPU-cached magazine cache). This would provide a nice per-CPU
92 * buffer. The other possibility is to use the per-cache
93 * 'empty-magazine-list', which decreases contention for the single
94 * system-wide magazine cache.
95 *
96 * @todo
97 * It might be good to add lock granularity even at the slab level;
98 * we could then try_spinlock over all partial slabs and thus improve
99 * scalability even at the slab level.
100 *
101 */
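/*
 * Illustrative usage sketch (not part of the original file): creating a
 * subsystem-private cache for a hypothetical foo_t structure and using the
 * public API defined below. foo_t, foo_cache and foo_subsystem_init are
 * made-up names; the slab_cache_create()/slab_alloc()/slab_free() calls
 * match the functions implemented in this file.
 *
 *   static slab_cache_t *foo_cache;
 *
 *   void foo_subsystem_init(void)
 *   {
 *       foo_cache = slab_cache_create("foo_t", sizeof(foo_t), 0,
 *           NULL, NULL, 0);
 *   }
 *
 *   foo_t *foo = slab_alloc(foo_cache, FRAME_ATOMIC);  // may return NULL
 *   if (foo != NULL) {
 *       ... use foo ...
 *       slab_free(foo_cache, foo);
 *   }
 */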
102
103#include <assert.h>
104#include <errno.h>
105#include <synch/spinlock.h>
106#include <mm/slab.h>
107#include <adt/list.h>
108#include <mem.h>
109#include <align.h>
110#include <mm/frame.h>
111#include <config.h>
112#include <stdio.h>
113#include <arch.h>
114#include <panic.h>
115#include <bitops.h>
116#include <macros.h>
117#include <cpu.h>
118#include <stdlib.h>
119
120IRQ_SPINLOCK_STATIC_INITIALIZE(slab_cache_lock);
121static LIST_INITIALIZE(slab_cache_list);
122
123/** Magazine cache */
124static slab_cache_t mag_cache;
125
126/** Cache for cache descriptors */
127static slab_cache_t slab_cache_cache;
128
129/** Cache for per-CPU magazines of caches */
130static slab_cache_t slab_mag_cache;
131
132/** Cache for external slab descriptors
133 * This time we want a per-CPU cache, so the cache descriptor is not
134 * allocated statically - using the slab allocator for internal slab
135 * structures will not deadlock, as all slab structures are 'small' and the
136 * control structures of their caches do not require further allocation
137 */
138static slab_cache_t *slab_extern_cache;
139
140/** Slab descriptor */
141typedef struct {
142 slab_cache_t *cache; /**< Pointer to parent cache. */
143 link_t link; /**< List of full/partial slabs. */
144 void *start; /**< Start address of first available item. */
145 size_t available; /**< Count of available items in this slab. */
146 size_t nextavail; /**< The index of next available item. */
147} slab_t;
148
149#ifdef CONFIG_DEBUG
150static unsigned int _slab_initialized = 0;
151#endif
152
153/**************************************/
154/* Slab allocation functions */
155/**************************************/
156
157/** Allocate frames for slab space and initialize
158 *
159 */
160NO_TRACE static slab_t *slab_space_alloc(slab_cache_t *cache,
161 unsigned int flags)
162{
163 size_t zone = 0;
164
165 uintptr_t data_phys =
166 frame_alloc_generic(cache->frames, FRAME_LOWMEM | flags, 0, &zone);
167 if (!data_phys)
168 return NULL;
169
170 void *data = (void *) PA2KA(data_phys);
171
172 slab_t *slab;
173 size_t fsize;
174
175 if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
176 slab = slab_alloc(slab_extern_cache, flags);
177 if (!slab) {
178 frame_free(KA2PA(data), cache->frames);
179 return NULL;
180 }
181 } else {
182 fsize = FRAMES2SIZE(cache->frames);
183 slab = data + fsize - sizeof(*slab);
184 }
185
186 /* Fill in slab structures */
187 size_t i;
188 for (i = 0; i < cache->frames; i++)
189 frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone);
190
191 slab->start = data;
192 slab->available = cache->objects;
193 slab->nextavail = 0;
194 slab->cache = cache;
195
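	/*
	 * Build the embedded free list: while an object is free, its first
	 * word holds the index of the next free object, so slot i initially
	 * points to slot i + 1.
	 */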
196 for (i = 0; i < cache->objects; i++)
197 *((size_t *) (slab->start + i * cache->size)) = i + 1;
198
199 atomic_inc(&cache->allocated_slabs);
200 return slab;
201}
202
203/** Deallocate space associated with slab
204 *
205 * @return number of freed frames
206 *
207 */
208NO_TRACE static size_t slab_space_free(slab_cache_t *cache, slab_t *slab)
209{
210 frame_free(KA2PA(slab->start), slab->cache->frames);
211 if (!(cache->flags & SLAB_CACHE_SLINSIDE))
212 slab_free(slab_extern_cache, slab);
213
214 atomic_dec(&cache->allocated_slabs);
215
216 return cache->frames;
217}
218
219/** Map object to slab structure */
220NO_TRACE static slab_t *obj2slab(void *obj)
221{
222 return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
223}
224
225/******************/
226/* Slab functions */
227/******************/
228
229/** Return object to slab and call a destructor
230 *
231 * @param slab The object's slab if the caller knows it directly, otherwise NULL
232 *
233 * @return Number of freed pages
234 *
235 */
236NO_TRACE static size_t slab_obj_destroy(slab_cache_t *cache, void *obj,
237 slab_t *slab)
238{
239 if (!slab)
240 slab = obj2slab(obj);
241
242 assert(slab->cache == cache);
243
244 size_t freed = 0;
245
246 if (cache->destructor)
247 freed = cache->destructor(obj);
248
249 irq_spinlock_lock(&cache->slablock, true);
250 assert(slab->available < cache->objects);
251
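	/*
	 * Return the object to the slab's embedded free list: store the old
	 * head index inside the object and make the object the new head.
	 */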
252 *((size_t *) obj) = slab->nextavail;
253 slab->nextavail = (obj - slab->start) / cache->size;
254 slab->available++;
255
256 /* Move it to correct list */
257 if (slab->available == cache->objects) {
258 /* Free associated memory */
259 list_remove(&slab->link);
260 irq_spinlock_unlock(&cache->slablock, true);
261
262 return freed + slab_space_free(cache, slab);
263 } else if (slab->available == 1) {
264 /* It was in full, move to partial */
265 list_remove(&slab->link);
266 list_prepend(&slab->link, &cache->partial_slabs);
267 }
268
269 irq_spinlock_unlock(&cache->slablock, true);
270 return freed;
271}
272
273/** Take a new object from a slab, creating a new slab if needed
274 *
275 * @return Object address or NULL
276 *
277 */
278NO_TRACE static void *slab_obj_create(slab_cache_t *cache, unsigned int flags)
279{
280 irq_spinlock_lock(&cache->slablock, true);
281
282 slab_t *slab;
283
284 if (list_empty(&cache->partial_slabs)) {
285 /*
286 * Allow recursion and reclaiming
287 * - this should work, as the slab control structures
288 * are small and do not need to allocate with anything
289 * other than frame_alloc when they are allocating;
290 * that is why recursion is at most one level deep.
291 *
292 */
293 irq_spinlock_unlock(&cache->slablock, true);
294 slab = slab_space_alloc(cache, flags);
295 if (!slab)
296 return NULL;
297
298 irq_spinlock_lock(&cache->slablock, true);
299 } else {
300 slab = list_get_instance(list_first(&cache->partial_slabs),
301 slab_t, link);
302 list_remove(&slab->link);
303 }
304
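	/* Pop the next free object off the slab's embedded free list. */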
305 void *obj = slab->start + slab->nextavail * cache->size;
306 slab->nextavail = *((size_t *) obj);
307 slab->available--;
308
309 if (!slab->available)
310 list_prepend(&slab->link, &cache->full_slabs);
311 else
312 list_prepend(&slab->link, &cache->partial_slabs);
313
314 irq_spinlock_unlock(&cache->slablock, true);
315
316 if ((cache->constructor) && (cache->constructor(obj, flags) != EOK)) {
317 /* Bad, bad, construction failed */
318 slab_obj_destroy(cache, obj, slab);
319 return NULL;
320 }
321
322 return obj;
323}
324
325/****************************/
326/* CPU-Cache slab functions */
327/****************************/
328
329/** Find a full magazine in the cache, take it off the list and return it
330 *
331 * @param first If true, return the first magazine, else the last one.
332 *
333 */
334NO_TRACE static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache,
335 bool first)
336{
337 slab_magazine_t *mag = NULL;
338 link_t *cur;
339
340 irq_spinlock_lock(&cache->maglock, true);
341 if (!list_empty(&cache->magazines)) {
342 if (first)
343 cur = list_first(&cache->magazines);
344 else
345 cur = list_last(&cache->magazines);
346
347 mag = list_get_instance(cur, slab_magazine_t, link);
348 list_remove(&mag->link);
349 atomic_dec(&cache->magazine_counter);
350 }
351 irq_spinlock_unlock(&cache->maglock, true);
352
353 return mag;
354}
355
356/** Prepend magazine to magazine list in cache
357 *
358 */
359NO_TRACE static void put_mag_to_cache(slab_cache_t *cache,
360 slab_magazine_t *mag)
361{
362 irq_spinlock_lock(&cache->maglock, true);
363
364 list_prepend(&mag->link, &cache->magazines);
365 atomic_inc(&cache->magazine_counter);
366
367 irq_spinlock_unlock(&cache->maglock, true);
368}
369
370/** Free all objects in magazine and free memory associated with magazine
371 *
372 * @return Number of freed pages
373 *
374 */
375NO_TRACE static size_t magazine_destroy(slab_cache_t *cache,
376 slab_magazine_t *mag)
377{
378 size_t i;
379 size_t frames = 0;
380
381 for (i = 0; i < mag->busy; i++) {
382 frames += slab_obj_destroy(cache, mag->objs[i], NULL);
383 atomic_dec(&cache->cached_objs);
384 }
385
386 slab_free(&mag_cache, mag);
387
388 return frames;
389}
390
391/** Find full magazine, set it as current and return it
392 *
393 */
394NO_TRACE static slab_magazine_t *get_full_current_mag(slab_cache_t *cache)
395{
396 slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
397 slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
398
399 assert(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock));
400
401 if (cmag) { /* First try local CPU magazines */
402 if (cmag->busy)
403 return cmag;
404
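		/*
		 * The current magazine is empty; swap roles with the last
		 * magazine if it still holds objects.
		 */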
405 if ((lastmag) && (lastmag->busy)) {
406 cache->mag_cache[CPU->id].current = lastmag;
407 cache->mag_cache[CPU->id].last = cmag;
408 return lastmag;
409 }
410 }
411
412 /* Local magazines are empty, import one from magazine list */
413 slab_magazine_t *newmag = get_mag_from_cache(cache, 1);
414 if (!newmag)
415 return NULL;
416
417 if (lastmag)
418 magazine_destroy(cache, lastmag);
419
420 cache->mag_cache[CPU->id].last = cmag;
421 cache->mag_cache[CPU->id].current = newmag;
422
423 return newmag;
424}
425
426/** Try to find object in CPU-cache magazines
427 *
428 * @return Pointer to object or NULL if not available
429 *
430 */
431NO_TRACE static void *magazine_obj_get(slab_cache_t *cache)
432{
433 if (!CPU)
434 return NULL;
435
436 irq_spinlock_lock(&cache->mag_cache[CPU->id].lock, true);
437
438 slab_magazine_t *mag = get_full_current_mag(cache);
439 if (!mag) {
440 irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
441 return NULL;
442 }
443
444 void *obj = mag->objs[--mag->busy];
445 irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
446
447 atomic_dec(&cache->cached_objs);
448
449 return obj;
450}
451
452/** Ensure that the current magazine has free space, return a pointer to it,
453 * or NULL if no such magazine is available and a new one cannot be allocated
454 *
455 * We have two magazines bound to each processor.
456 * First try the current one.
457 * If it is full, try the last one.
458 * If that is full too, put it onto the magazine list and allocate a new one.
459 *
460 */
461NO_TRACE static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache)
462{
463 slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
464 slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
465
466 assert(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock));
467
468 if (cmag) {
469 if (cmag->busy < cmag->size)
470 return cmag;
471
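		/*
		 * The current magazine is full; switch to the last magazine
		 * if it still has free slots.
		 */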
472 if ((lastmag) && (lastmag->busy < lastmag->size)) {
473 cache->mag_cache[CPU->id].last = cmag;
474 cache->mag_cache[CPU->id].current = lastmag;
475 return lastmag;
476 }
477 }
478
479 /* current | last are full | nonexistent, allocate new */
480
481 /*
482 * We do not want to sleep just because of caching,
483 * especially we do not want reclaiming to start, as
484 * this would deadlock.
485 *
486 */
487 slab_magazine_t *newmag = slab_alloc(&mag_cache,
488 FRAME_ATOMIC | FRAME_NO_RECLAIM);
489 if (!newmag)
490 return NULL;
491
492 newmag->size = SLAB_MAG_SIZE;
493 newmag->busy = 0;
494
495 /* Flush last to magazine list */
496 if (lastmag)
497 put_mag_to_cache(cache, lastmag);
498
499 /* Move current as last, save new as current */
500 cache->mag_cache[CPU->id].last = cmag;
501 cache->mag_cache[CPU->id].current = newmag;
502
503 return newmag;
504}
505
506/** Put object into CPU-cache magazine
507 *
508 * @return 0 on success, -1 on no memory
509 *
510 */
511NO_TRACE static int magazine_obj_put(slab_cache_t *cache, void *obj)
512{
513 if (!CPU)
514 return -1;
515
516 irq_spinlock_lock(&cache->mag_cache[CPU->id].lock, true);
517
518 slab_magazine_t *mag = make_empty_current_mag(cache);
519 if (!mag) {
520 irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
521 return -1;
522 }
523
524 mag->objs[mag->busy++] = obj;
525
526 irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
527
528 atomic_inc(&cache->cached_objs);
529
530 return 0;
531}
532
533/************************/
534/* Slab cache functions */
535/************************/
536
537/** Return the number of objects that fit into one slab of the given cache
538 *
539 */
540NO_TRACE static size_t comp_objects(slab_cache_t *cache)
541{
542 if (cache->flags & SLAB_CACHE_SLINSIDE)
543 return (FRAMES2SIZE(cache->frames) - sizeof(slab_t)) /
544 cache->size;
545 else
546 return FRAMES2SIZE(cache->frames) / cache->size;
547}
548
549/** Return wasted space in slab
550 *
551 */
552NO_TRACE static size_t badness(slab_cache_t *cache)
553{
554 size_t objects = comp_objects(cache);
555 size_t ssize = FRAMES2SIZE(cache->frames);
556
557 if (cache->flags & SLAB_CACHE_SLINSIDE)
558 ssize -= sizeof(slab_t);
559
560 return ssize - objects * cache->size;
561}
562
563/** Initialize mag_cache structure in slab cache
564 *
565 */
566NO_TRACE static bool make_magcache(slab_cache_t *cache)
567{
568 assert(_slab_initialized >= 2);
569
570 cache->mag_cache = slab_alloc(&slab_mag_cache, FRAME_ATOMIC);
571 if (!cache->mag_cache)
572 return false;
573
574 size_t i;
575 for (i = 0; i < config.cpu_count; i++) {
576 memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
577 irq_spinlock_initialize(&cache->mag_cache[i].lock,
578 "slab.cache.mag_cache[].lock");
579 }
580
581 return true;
582}
583
584/** Initialize allocated memory as a slab cache
585 *
586 */
587NO_TRACE static void _slab_cache_create(slab_cache_t *cache, const char *name,
588 size_t size, size_t align, errno_t (*constructor)(void *obj,
589 unsigned int kmflag), size_t (*destructor)(void *obj), unsigned int flags)
590{
591 assert(size > 0);
592
593 memsetb(cache, sizeof(*cache), 0);
594 cache->name = name;
595
596 if (align < sizeof(sysarg_t))
597 align = sizeof(sysarg_t);
598
599 size = ALIGN_UP(size, align);
600
601 cache->size = size;
602 cache->constructor = constructor;
603 cache->destructor = destructor;
604 cache->flags = flags;
605
606 list_initialize(&cache->full_slabs);
607 list_initialize(&cache->partial_slabs);
608 list_initialize(&cache->magazines);
609
610 irq_spinlock_initialize(&cache->slablock, "slab.cache.slablock");
611 irq_spinlock_initialize(&cache->maglock, "slab.cache.maglock");
612
613 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
614 (void) make_magcache(cache);
615
616 /* Compute slab sizes, object counts in slabs etc. */
617 if (cache->size < SLAB_INSIDE_SIZE)
618 cache->flags |= SLAB_CACHE_SLINSIDE;
619
620 /* Minimum slab frames */
621 cache->frames = SIZE2FRAMES(cache->size);
622
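	/*
	 * Grow the slab by doubling its frame count until the wasted space
	 * per slab (badness) drops below the per-cache limit.
	 */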
623 while (badness(cache) > SLAB_MAX_BADNESS(cache))
624 cache->frames <<= 1;
625
626 cache->objects = comp_objects(cache);
627
628 /* If the slab descriptor fits into the otherwise wasted space, keep it inside the slab */
629 if (badness(cache) > sizeof(slab_t))
630 cache->flags |= SLAB_CACHE_SLINSIDE;
631
632 /* Add cache to cache list */
633 irq_spinlock_lock(&slab_cache_lock, true);
634 list_append(&cache->link, &slab_cache_list);
635 irq_spinlock_unlock(&slab_cache_lock, true);
636}
637
638/** Create slab cache
639 *
640 */
641slab_cache_t *slab_cache_create(const char *name, size_t size, size_t align,
642 errno_t (*constructor)(void *obj, unsigned int kmflag),
643 size_t (*destructor)(void *obj), unsigned int flags)
644{
645 slab_cache_t *cache = slab_alloc(&slab_cache_cache, FRAME_ATOMIC);
646 if (!cache)
647 panic("Not enough memory to allocate slab cache %s.", name);
648
649 _slab_cache_create(cache, name, size, align, constructor, destructor,
650 flags);
651
652 return cache;
653}
654
655/** Reclaim space occupied by objects that are already free
656 *
657 * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
658 *
659 * @return Number of freed pages
660 *
661 */
662NO_TRACE static size_t _slab_reclaim(slab_cache_t *cache, unsigned int flags)
663{
664 if (cache->flags & SLAB_CACHE_NOMAGAZINE)
665 return 0; /* Nothing to do */
666
667 /*
668 * We count up to original magazine count to avoid
669 * endless loop
670 */
671 size_t magcount = atomic_load(&cache->magazine_counter);
672
673 slab_magazine_t *mag;
674 size_t frames = 0;
675
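	/*
	 * Destroy magazines from the shared list; unless SLAB_RECLAIM_ALL is
	 * set, stop as soon as at least one frame has actually been freed.
	 */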
676 while ((magcount--) && (mag = get_mag_from_cache(cache, 0))) {
677 frames += magazine_destroy(cache, mag);
678 if ((!(flags & SLAB_RECLAIM_ALL)) && (frames))
679 break;
680 }
681
682 if (flags & SLAB_RECLAIM_ALL) {
683 /* Free cpu-bound magazines */
684 /* Destroy CPU magazines */
685 size_t i;
686 for (i = 0; i < config.cpu_count; i++) {
687 irq_spinlock_lock(&cache->mag_cache[i].lock, true);
688
689 mag = cache->mag_cache[i].current;
690 if (mag)
691 frames += magazine_destroy(cache, mag);
692 cache->mag_cache[i].current = NULL;
693
694 mag = cache->mag_cache[i].last;
695 if (mag)
696 frames += magazine_destroy(cache, mag);
697 cache->mag_cache[i].last = NULL;
698
699 irq_spinlock_unlock(&cache->mag_cache[i].lock, true);
700 }
701 }
702
703 return frames;
704}
705
706/** Return object to cache, use slab if known
707 *
708 */
709NO_TRACE static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
710{
711 if (!obj)
712 return;
713
714 ipl_t ipl = interrupts_disable();
715
716 if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
717 (magazine_obj_put(cache, obj)))
718 slab_obj_destroy(cache, obj, slab);
719
720 interrupts_restore(ipl);
721 atomic_dec(&cache->allocated_objs);
722}
723
724/** Check that there are no slabs and remove cache from system
725 *
726 */
727void slab_cache_destroy(slab_cache_t *cache)
728{
729 /*
730 * First remove the cache from the cache list, so that we don't need
731 * to disable interrupts later.
732 *
733 */
734 irq_spinlock_lock(&slab_cache_lock, true);
735 list_remove(&cache->link);
736 irq_spinlock_unlock(&slab_cache_lock, true);
737
738 /*
739 * Do not lock anything, we assume the software is correct and
740 * does not touch the cache when it decides to destroy it
741 *
742 */
743
744 /* Destroy all magazines */
745 _slab_reclaim(cache, SLAB_RECLAIM_ALL);
746
747 /* All slabs must be empty */
748 if ((!list_empty(&cache->full_slabs)) ||
749 (!list_empty(&cache->partial_slabs)))
750 panic("Destroying cache that is not empty.");
751
752 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE) && cache->mag_cache) {
753 slab_free(&slab_mag_cache, cache->mag_cache);
754 }
755
756 slab_free(&slab_cache_cache, cache);
757}
758
759/** Allocate new object from cache - if no flags given, always returns memory
760 *
761 */
762void *slab_alloc(slab_cache_t *cache, unsigned int flags)
763{
764 /* Disable interrupts to avoid deadlocks with interrupt handlers */
765 ipl_t ipl = interrupts_disable();
766
767 void *result = NULL;
768
769 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
770 result = magazine_obj_get(cache);
771
772 if (!result)
773 result = slab_obj_create(cache, flags);
774
775 interrupts_restore(ipl);
776
777 if (result)
778 atomic_inc(&cache->allocated_objs);
779
780 return result;
781}
782
783/** Return slab object to cache
784 *
785 */
786void slab_free(slab_cache_t *cache, void *obj)
787{
788 _slab_free(cache, obj, NULL);
789}
790
791/** Go through all caches and reclaim what is possible */
792size_t slab_reclaim(unsigned int flags)
793{
794 irq_spinlock_lock(&slab_cache_lock, true);
795
796 size_t frames = 0;
797 list_foreach(slab_cache_list, link, slab_cache_t, cache) {
798 frames += _slab_reclaim(cache, flags);
799 }
800
801 irq_spinlock_unlock(&slab_cache_lock, true);
802
803 return frames;
804}
805
806/* Print list of caches */
807void slab_print_list(void)
808{
809 printf("[cache name ] [size ] [pages ] [obj/pg] [slabs ]"
810 " [cached] [alloc ] [ctl]\n");
811
812 size_t skip = 0;
813 while (true) {
814 /*
815 * We must not hold the slab_cache_lock spinlock when printing
816 * the statistics. Otherwise we can easily deadlock if the print
817 * needs to allocate memory.
818 *
819 * Therefore, we walk through the slab cache list, skipping some
820 * amount of already processed caches during each iteration and
821 * gathering statistics about the first unprocessed cache. For
822 * the sake of printing the statistics, we release the
823 * slab_cache_lock and reacquire it afterwards. Then the walk
824 * starts again.
825 *
826 * This limits both the efficiency and also accuracy of the
827 * obtained statistics. The efficiency is decreased because the
828 * time complexity of the algorithm is quadratic instead of
829 * linear. The accuracy is impacted because we drop the lock
830 * after processing one cache. If there is someone else
831 * manipulating the cache list, we might omit an arbitrary
832 * number of caches or process one cache multiple times.
833 * However, this is acceptable, since the output is only
834 * statistics.
835 */
836
837 irq_spinlock_lock(&slab_cache_lock, true);
838
839 link_t *cur = slab_cache_list.head.next;
840 size_t i = 0;
841 while (i < skip && cur != &slab_cache_list.head) {
842 i++;
843 cur = cur->next;
844 }
845
846 if (cur == &slab_cache_list.head) {
847 irq_spinlock_unlock(&slab_cache_lock, true);
848 break;
849 }
850
851 skip++;
852
853 slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
854
855 const char *name = cache->name;
856 size_t frames = cache->frames;
857 size_t size = cache->size;
858 size_t objects = cache->objects;
859 long allocated_slabs = atomic_load(&cache->allocated_slabs);
860 long cached_objs = atomic_load(&cache->cached_objs);
861 long allocated_objs = atomic_load(&cache->allocated_objs);
862 unsigned int flags = cache->flags;
863
864 irq_spinlock_unlock(&slab_cache_lock, true);
865
866 printf("%-18s %8zu %8zu %8zu %8ld %8ld %8ld %-5s\n",
867 name, size, frames, objects, allocated_slabs,
868 cached_objs, allocated_objs,
869 flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
870 }
871}
872
873void slab_cache_init(void)
874{
875 /* Initialize magazine cache */
876 _slab_cache_create(&mag_cache, "slab_magazine_t",
877 sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void *),
878 sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE |
879 SLAB_CACHE_SLINSIDE);
880
881 /* Initialize slab_cache cache */
882 _slab_cache_create(&slab_cache_cache, "slab_cache_cache",
883 sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL,
884 SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
885
886 /* Initialize external slab cache */
887 slab_extern_cache = slab_cache_create("slab_t", sizeof(slab_t), 0,
888 NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
889
890#ifdef CONFIG_DEBUG
891 _slab_initialized = 1;
892#endif
893}
894
895/** Enable cpu_cache
896 *
897 * The kernel calls this function when it knows the real number of
898 * processors. Allocate the per-CPU magazine structures and enable them on
899 * all existing caches that are marked SLAB_CACHE_MAGDEFERRED
900 *
901 */
902void slab_enable_cpucache(void)
903{
904#ifdef CONFIG_DEBUG
905 _slab_initialized = 2;
906#endif
907
908 _slab_cache_create(&slab_mag_cache, "slab_mag_cache",
909 sizeof(slab_mag_cache_t) * config.cpu_count, sizeof(uintptr_t),
910 NULL, NULL, SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
911
912 irq_spinlock_lock(&slab_cache_lock, false);
913
914 list_foreach(slab_cache_list, link, slab_cache_t, slab) {
915 if ((slab->flags & SLAB_CACHE_MAGDEFERRED) !=
916 SLAB_CACHE_MAGDEFERRED)
917 continue;
918
919 (void) make_magcache(slab);
920 slab->flags &= ~SLAB_CACHE_MAGDEFERRED;
921 }
922
923 irq_spinlock_unlock(&slab_cache_lock, false);
924}
925
926/** @}
927 */