source: mainline/generic/src/mm/slab.c@ 2e9eae2

Last change on this file was 2e9eae2, checked in by Ondrej Palkovsky <ondrap@…>, 19 years ago

Changed interface of frame_alloc/free to use address of frame instead of the pfn.
This makes it impossible to use >4GB of memory on 32-bit machines, but who cares…

/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Slab allocator.
 *
 * The slab allocator is closely modelled after the OpenSolaris slab allocator.
 * @see http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 * @li empty slabs are deallocated immediately
 *     (in Linux they are kept in a linked list, in Solaris ???)
 * @li empty magazines are deallocated when not needed
 *     (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy to add:
 * @li cache coloring
 * @li dynamic magazine growing (different magazine sizes are already
 *     supported, but we would need to adjust the allocation strategy)
 *
 * The slab allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, the allocator first checks whether
 * it is available in a CPU-bound magazine. If it is not found there, it is
 * allocated from a CPU-shared slab; if a partially full slab is found, it
 * is used, otherwise a new one is allocated.
 *
 * When an object is being deallocated, it is put into a CPU-bound magazine.
 * If there is no such magazine, a new one is allocated (if the allocation
 * fails, the object is deallocated into the slab directly). If the magazine
 * is full, it is put onto the CPU-shared list of magazines and a new one is
 * allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines in order to avoid
 * thrashing when somebody is allocating/deallocating one item at the magazine
 * size boundary. LIFO order is enforced, which should avoid fragmentation
 * as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty slabs are immediately freed (thrashing is avoided because
 * of the magazines).
 *
 * The slab information structure is kept inside the data area, if possible.
 * A cache can be marked to never use magazines. This is used only for
 * slab-related caches, to avoid deadlocks and infinite recursion (the slab
 * allocator uses itself for allocating all of its control structures).
 *
 * The slab allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the CPU-shared magazine list until at least one slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from CPU-bound
 * magazines.
 *
 * @todo
 * For better CPU scaling the magazine allocation strategy should be
 * extended. Currently, if the cache does not have a magazine, it asks
 * the non-CPU-cached magazine cache to provide one. It might be feasible
 * to add a CPU-cached magazine cache (which would allocate its magazines
 * from the non-CPU-cached magazine cache). This would provide a nice
 * per-CPU buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which decreases contention on the single
 * system-wide magazine cache.
 *
 * @todo
 * It might be good to add lock granularity at the slab level as well;
 * we could then try_spinlock over all partial slabs and thus improve
 * scalability even at the slab level.
 */
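
/*
 * Minimal usage sketch of the public interface defined below. The type
 * example_t, the example_* function names and the zero flag values are
 * hypothetical; slab_cache_create(), slab_alloc() and slab_free() are
 * the real entry points.
 *
 * @code
 * typedef struct {
 *         int id;
 * } example_t;
 *
 * static slab_cache_t *example_cache;
 *
 * void example_init(void)
 * {
 *         example_cache = slab_cache_create("example_t", sizeof(example_t),
 *                                           0, NULL, NULL, 0);
 * }
 *
 * void example_use(void)
 * {
 *         example_t *e = slab_alloc(example_cache, 0);
 *         e->id = 1;
 *         slab_free(example_cache, e);
 * }
 * @endcode
 */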

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <adt/list.h>
#include <memstr.h>
#include <align.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;
/** Cache for external slab descriptors
 * This time we want a per-CPU cache, so do not make it static.
 * Using slab for internal slab structures will not deadlock,
 * as all slab structures are 'small' - the control structures of
 * their caches do not require further allocation.
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
char *malloc_names[] = {
        "malloc-16","malloc-32","malloc-64","malloc-128",
        "malloc-256","malloc-512","malloc-1K","malloc-2K",
        "malloc-4K","malloc-8K","malloc-16K","malloc-32K",
        "malloc-64K","malloc-128K","malloc-256K"
};

/** Slab descriptor */
typedef struct {
        slab_cache_t *cache;    /**< Pointer to parent cache. */
        link_t link;            /**< Link to list of full/partial slabs. */
        void *start;            /**< Start address of first available item. */
        count_t available;      /**< Count of available items in this slab. */
        index_t nextavail;      /**< Index of the next available item. */
} slab_t;

#ifdef CONFIG_DEBUG
static int _slab_initialized = 0;
#endif

/**************************************/
/* Slab allocation functions */

/**
 * Allocate frames for slab space and initialize them.
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
        void *data;
        slab_t *slab;
        size_t fsize;
        int i;
        int status;
        int zone = 0;

        data = frame_alloc_rc_zone(cache->order, FRAME_KA | flags, &status, &zone);
        if (status != FRAME_OK) {
                return NULL;
        }
        if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
                slab = slab_alloc(slab_extern_cache, flags);
                if (!slab) {
                        frame_free(KA2PA(data));
                        return NULL;
                }
        } else {
                fsize = (PAGE_SIZE << cache->order);
                slab = data + fsize - sizeof(*slab);
        }

        /* Fill in slab structures */
        for (i = 0; i < (1 << cache->order); i++)
                frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone);

        slab->start = data;
        slab->available = cache->objects;
        slab->nextavail = 0;
        slab->cache = cache;

        /* Build the implicit free list: each free object holds the index
         * of the next free object. */
        for (i = 0; i < cache->objects; i++)
                *((int *) (slab->start + i*cache->size)) = i + 1;

        atomic_inc(&cache->allocated_slabs);
        return slab;
}
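
/*
 * Sketch of the free-list encoding built in slab_space_alloc(), for a
 * hypothetical cache with four objects per slab: after initialization,
 * nextavail = 0 and the stored indices are obj[0] = 1, obj[1] = 2,
 * obj[2] = 3, obj[3] = 4. slab_obj_create() hands out obj[nextavail]
 * and loads the value stored there as the new nextavail;
 * slab_obj_destroy() stores the old nextavail into the freed object and
 * makes that object's index the new nextavail, so free objects form a
 * LIFO stack threaded through the slab itself.
 */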

/**
 * Deallocate space associated with a slab.
 *
 * @return Number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
        frame_free(KA2PA(slab->start));
        if (! (cache->flags & SLAB_CACHE_SLINSIDE))
                slab_free(slab_extern_cache, slab);

        atomic_dec(&cache->allocated_slabs);

        return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
        return (slab_t *)frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
}

/**************************************/
/* Slab functions */


/**
 * Return an object to its slab and call the destructor.
 *
 * @param slab The object's slab if the caller knows it directly, otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                                slab_t *slab)
{
        int freed = 0;

        if (!slab)
                slab = obj2slab(obj);

        ASSERT(slab->cache == cache);

        if (cache->destructor)
                freed = cache->destructor(obj);

        spinlock_lock(&cache->slablock);
        ASSERT(slab->available < cache->objects);

        *((int *)obj) = slab->nextavail;
        slab->nextavail = (obj - slab->start)/cache->size;
        slab->available++;

        /* Move it to the correct list */
        if (slab->available == cache->objects) {
                /* Free associated memory */
                list_remove(&slab->link);
                spinlock_unlock(&cache->slablock);

                return freed + slab_space_free(cache, slab);

        } else if (slab->available == 1) {
                /* It was in full, move it to partial */
                list_remove(&slab->link);
                list_prepend(&slab->link, &cache->partial_slabs);
        }
        spinlock_unlock(&cache->slablock);
        return freed;
}

/**
 * Take a new object from a slab, or create a new slab if needed.
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
        slab_t *slab;
        void *obj;

        spinlock_lock(&cache->slablock);

        if (list_empty(&cache->partial_slabs)) {
                /* Allow recursion and reclaiming
                 * - this should work, as the slab control structures
                 *   are small and do not need to allocate with anything
                 *   other than frame_alloc when they are allocating,
                 *   so recursion is at most one level deep
                 */
                spinlock_unlock(&cache->slablock);
                slab = slab_space_alloc(cache, flags);
                if (!slab)
                        return NULL;
                spinlock_lock(&cache->slablock);
        } else {
                slab = list_get_instance(cache->partial_slabs.next,
                                         slab_t,
                                         link);
                list_remove(&slab->link);
        }
        obj = slab->start + slab->nextavail * cache->size;
        slab->nextavail = *((int *)obj);
        slab->available--;

        if (! slab->available)
                list_prepend(&slab->link, &cache->full_slabs);
        else
                list_prepend(&slab->link, &cache->partial_slabs);

        spinlock_unlock(&cache->slablock);

        if (cache->constructor && cache->constructor(obj, flags)) {
                /* Bad, bad, construction failed */
                slab_obj_destroy(cache, obj, slab);
                return NULL;
        }
        return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Find a full magazine in the cache, take it from the list
 * and return it.
 *
 * @param first If true, return the first magazine, otherwise the last
 */
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
                                            int first)
{
        slab_magazine_t *mag = NULL;
        link_t *cur;

        spinlock_lock(&cache->maglock);
        if (!list_empty(&cache->magazines)) {
                if (first)
                        cur = cache->magazines.next;
                else
                        cur = cache->magazines.prev;
                mag = list_get_instance(cur, slab_magazine_t, link);
                list_remove(&mag->link);
                atomic_dec(&cache->magazine_counter);
        }
        spinlock_unlock(&cache->maglock);
        return mag;
}

/** Prepend a magazine to the magazine list in the cache */
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
{
        spinlock_lock(&cache->maglock);

        list_prepend(&mag->link, &cache->magazines);
        atomic_inc(&cache->magazine_counter);

        spinlock_unlock(&cache->maglock);
}

/**
 * Free all objects in the magazine and free the memory associated with it.
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
                                slab_magazine_t *mag)
{
        int i;
        count_t frames = 0;

        for (i = 0; i < mag->busy; i++) {
                frames += slab_obj_destroy(cache, mag->objs[i], NULL);
                atomic_dec(&cache->cached_objs);
        }

        slab_free(&mag_cache, mag);

        return frames;
}

/**
 * Find a full magazine, set it as current and return it.
 *
 * Assumes the cpu_magazine lock is held.
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
        slab_magazine_t *cmag, *lastmag, *newmag;

        cmag = cache->mag_cache[CPU->id].current;
        lastmag = cache->mag_cache[CPU->id].last;
        if (cmag) { /* First try local CPU magazines */
                if (cmag->busy)
                        return cmag;

                if (lastmag && lastmag->busy) {
                        cache->mag_cache[CPU->id].current = lastmag;
                        cache->mag_cache[CPU->id].last = cmag;
                        return lastmag;
                }
        }
        /* Local magazines are empty, import one from the magazine list */
        newmag = get_mag_from_cache(cache, 1);
        if (!newmag)
                return NULL;

        if (lastmag)
                magazine_destroy(cache, lastmag);

        cache->mag_cache[CPU->id].last = cmag;
        cache->mag_cache[CPU->id].current = newmag;
        return newmag;
}

/**
 * Try to find an object in the CPU-cache magazines.
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
        slab_magazine_t *mag;
        void *obj;

        if (!CPU)
                return NULL;

        spinlock_lock(&cache->mag_cache[CPU->id].lock);

        mag = get_full_current_mag(cache);
        if (!mag) {
                spinlock_unlock(&cache->mag_cache[CPU->id].lock);
                return NULL;
        }
        obj = mag->objs[--mag->busy];
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        atomic_dec(&cache->cached_objs);

        return obj;
}

/**
 * Ensure that the current magazine has room for another object; return
 * a pointer to it, or NULL if no non-full magazine is available and one
 * cannot be allocated.
 *
 * Assumes mag_cache[CPU->id].lock is held.
 *
 * We have two magazines bound to each processor:
 * First try the current one.
 * If it is full, try the last one.
 * If that is full too, put it onto the magazine list,
 * allocate a new one and exchange last & current.
 *
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
        slab_magazine_t *cmag, *lastmag, *newmag;

        cmag = cache->mag_cache[CPU->id].current;
        lastmag = cache->mag_cache[CPU->id].last;

        if (cmag) {
                if (cmag->busy < cmag->size)
                        return cmag;
                if (lastmag && lastmag->busy < lastmag->size) {
                        cache->mag_cache[CPU->id].last = cmag;
                        cache->mag_cache[CPU->id].current = lastmag;
                        return lastmag;
                }
        }
        /* current & last are full or nonexistent, allocate a new one */
        /* We do not want to sleep just because of caching;
         * especially we do not want reclaiming to start, as
         * this would deadlock */
        newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
        if (!newmag)
                return NULL;
        newmag->size = SLAB_MAG_SIZE;
        newmag->busy = 0;

        /* Flush the last magazine to the magazine list */
        if (lastmag)
                put_mag_to_cache(cache, lastmag);

        /* Move current to last, save new as current */
        cache->mag_cache[CPU->id].last = cmag;
        cache->mag_cache[CPU->id].current = newmag;

        return newmag;
}
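
/*
 * Worked trace of the two-magazine scheme above, assuming SLAB_MAG_SIZE
 * is 4 (the real value is defined in mm/slab.h). On a free:
 *
 *   current = 3/4 full, last = 4/4 full -> use current
 *   current = 4/4 full, last = 2/4 full -> swap current and last, use it
 *   current = 4/4 full, last = 4/4 full -> last goes to cache->magazines,
 *                                          current becomes last, a new
 *                                          empty magazine becomes current
 *
 * Keeping a pair of magazines means that a caller alternating a single
 * allocation and deallocation right at a magazine boundary does not move
 * a magazine to and from the shared list on every call.
 */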

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
        slab_magazine_t *mag;

        if (!CPU)
                return -1;

        spinlock_lock(&cache->mag_cache[CPU->id].lock);

        mag = make_empty_current_mag(cache);
        if (!mag) {
                spinlock_unlock(&cache->mag_cache[CPU->id].lock);
                return -1;
        }

        mag->objs[mag->busy++] = obj;

        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        atomic_inc(&cache->cached_objs);
        return 0;
}


/**************************************/
/* Slab cache functions */

/** Return the number of objects that fit into a slab of the given cache */
static int comp_objects(slab_cache_t *cache)
{
        if (cache->flags & SLAB_CACHE_SLINSIDE)
                return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
        else
                return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return the wasted space in a slab */
static int badness(slab_cache_t *cache)
{
        int objects;
        int ssize;

        objects = comp_objects(cache);
        ssize = PAGE_SIZE << cache->order;
        if (cache->flags & SLAB_CACHE_SLINSIDE)
                ssize -= sizeof(slab_t);
        return ssize - objects*cache->size;
}
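
/*
 * Worked example, assuming PAGE_SIZE is 4096 and sizeof(slab_t) is 32
 * (both are architecture-dependent): for a 96-byte object in an order-0
 * cache with SLAB_CACHE_SLINSIDE, comp_objects() returns
 * (4096 - 32) / 96 = 42 and badness() returns (4096 - 32) - 42*96 = 32
 * wasted bytes per slab.
 */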

/**
 * Initialize the mag_cache structure in a slab cache.
 */
static void make_magcache(slab_cache_t *cache)
{
        int i;

        ASSERT(_slab_initialized >= 2);

        cache->mag_cache = malloc(sizeof(slab_mag_cache_t)*config.cpu_count, 0);
        for (i = 0; i < config.cpu_count; i++) {
                memsetb((__address)&cache->mag_cache[i],
                        sizeof(cache->mag_cache[i]), 0);
                spinlock_initialize(&cache->mag_cache[i].lock,
                                    "slab_maglock_cpu");
        }
}

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
                   char *name,
                   size_t size,
                   size_t align,
                   int (*constructor)(void *obj, int kmflag),
                   int (*destructor)(void *obj),
                   int flags)
{
        int pages;
        ipl_t ipl;

        memsetb((__address)cache, sizeof(*cache), 0);
        cache->name = name;

        if (align < sizeof(__native))
                align = sizeof(__native);
        size = ALIGN_UP(size, align);

        cache->size = size;

        cache->constructor = constructor;
        cache->destructor = destructor;
        cache->flags = flags;

        list_initialize(&cache->full_slabs);
        list_initialize(&cache->partial_slabs);
        list_initialize(&cache->magazines);
        spinlock_initialize(&cache->slablock, "slab_lock");
        spinlock_initialize(&cache->maglock, "slab_maglock");
        if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
                make_magcache(cache);

        /* Compute slab sizes, object counts in slabs etc. */
        if (cache->size < SLAB_INSIDE_SIZE)
                cache->flags |= SLAB_CACHE_SLINSIDE;

        /* Minimum slab order */
        pages = SIZE2FRAMES(cache->size);
        /* We need 2^order >= pages */
        if (pages == 1)
                cache->order = 0;
        else
                cache->order = fnzb(pages - 1) + 1;

        while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
                cache->order += 1;
        }
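        /*
         * Worked example of the order computation above, assuming
         * PAGE_SIZE is 4096: a 10000-byte object needs
         * SIZE2FRAMES(10000) = 3 frames, so the initial order is
         * fnzb(2) + 1 = 2 (four pages); the loop above then grows the
         * order only while the wasted space exceeds
         * SLAB_MAX_BADNESS(cache).
         */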
        cache->objects = comp_objects(cache);
        /* If the slab info fits in, put it inside */
        if (badness(cache) > sizeof(slab_t))
                cache->flags |= SLAB_CACHE_SLINSIDE;

        /* Add cache to cache list */
        ipl = interrupts_disable();
        spinlock_lock(&slab_cache_lock);

        list_append(&cache->link, &slab_cache_list);

        spinlock_unlock(&slab_cache_lock);
        interrupts_restore(ipl);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
                                 size_t size,
                                 size_t align,
                                 int (*constructor)(void *obj, int kmflag),
                                 int (*destructor)(void *obj),
                                 int flags)
{
        slab_cache_t *cache;

        cache = slab_alloc(&slab_cache_cache, 0);
        _slab_cache_create(cache, name, size, align, constructor, destructor,
                           flags);
        return cache;
}

/**
 * Reclaim space occupied by objects that are already free.
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
        int i;
        slab_magazine_t *mag;
        count_t frames = 0;
        int magcount;

        if (cache->flags & SLAB_CACHE_NOMAGAZINE)
                return 0; /* Nothing to do */

        /* We count down from the original magazine count to avoid
         * an endless loop
         */
        magcount = atomic_get(&cache->magazine_counter);
        while (magcount-- && (mag = get_mag_from_cache(cache, 0))) {
                frames += magazine_destroy(cache, mag);
                if (!(flags & SLAB_RECLAIM_ALL) && frames)
                        break;
        }

        if (flags & SLAB_RECLAIM_ALL) {
                /* Destroy CPU-bound magazines */
                for (i = 0; i < config.cpu_count; i++) {
                        spinlock_lock(&cache->mag_cache[i].lock);

                        mag = cache->mag_cache[i].current;
                        if (mag)
                                frames += magazine_destroy(cache, mag);
                        cache->mag_cache[i].current = NULL;

                        mag = cache->mag_cache[i].last;
                        if (mag)
                                frames += magazine_destroy(cache, mag);
                        cache->mag_cache[i].last = NULL;

                        spinlock_unlock(&cache->mag_cache[i].lock);
                }
        }

        return frames;
}

/** Check that there are no slabs left and remove the cache from the system */
void slab_cache_destroy(slab_cache_t *cache)
{
        ipl_t ipl;

        /* First remove the cache from the cache list, so that we don't
         * need to disable interrupts later
         */

        ipl = interrupts_disable();
        spinlock_lock(&slab_cache_lock);

        list_remove(&cache->link);

        spinlock_unlock(&slab_cache_lock);
        interrupts_restore(ipl);

        /* Do not lock anything, we assume the software is correct and
         * does not touch the cache when it decides to destroy it */

        /* Destroy all magazines */
        _slab_reclaim(cache, SLAB_RECLAIM_ALL);

        /* All slabs must be empty */
        if (!list_empty(&cache->full_slabs)
            || !list_empty(&cache->partial_slabs))
                panic("Destroying cache that is not empty.");

        if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
                free(cache->mag_cache);
        slab_free(&slab_cache_cache, cache);
}

/** Allocate a new object from the cache - if no flags are given,
 *  always return memory
 */
void * slab_alloc(slab_cache_t *cache, int flags)
{
        ipl_t ipl;
        void *result = NULL;

        /* Disable interrupts to avoid deadlocks with interrupt handlers */
        ipl = interrupts_disable();

        if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
                result = magazine_obj_get(cache);
        }
        if (!result)
                result = slab_obj_create(cache, flags);

        interrupts_restore(ipl);

        if (result)
                atomic_inc(&cache->allocated_objs);

        return result;
}

/** Return an object to the cache, use the slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
        ipl_t ipl;

        ipl = interrupts_disable();

        if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
            || magazine_obj_put(cache, obj)) {

                slab_obj_destroy(cache, obj, slab);

        }
        interrupts_restore(ipl);
        atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
        _slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
        slab_cache_t *cache;
        link_t *cur;
        count_t frames = 0;

        spinlock_lock(&slab_cache_lock);

        /* TODO: Add an assert that interrupts are disabled, otherwise
         * memory allocation from interrupts can deadlock.
         */

        for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
                cache = list_get_instance(cur, slab_cache_t, link);
                frames += _slab_reclaim(cache, flags);
        }

        spinlock_unlock(&slab_cache_lock);

        return frames;
}


/* Print the list of slab caches */
void slab_print_list(void)
{
        slab_cache_t *cache;
        link_t *cur;
        ipl_t ipl;

        ipl = interrupts_disable();
        spinlock_lock(&slab_cache_lock);
        printf("slab name\t Osize\t Pages\t Obj/pg\t Slabs\t Cached\tAllocobjs\tCtl\n");
        for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
                cache = list_get_instance(cur, slab_cache_t, link);
                printf("%s\t%7zd\t%7zd\t%7zd\t%7zd\t%7zd\t%7zd\t\t%s\n", cache->name, cache->size,
                       (1 << cache->order), cache->objects,
                       atomic_get(&cache->allocated_slabs),
                       atomic_get(&cache->cached_objs),
                       atomic_get(&cache->allocated_objs),
                       cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
        }
        spinlock_unlock(&slab_cache_lock);
        interrupts_restore(ipl);
}

void slab_cache_init(void)
{
        int i, size;

        /* Initialize magazine cache */
        _slab_cache_create(&mag_cache,
                           "slab_magazine",
                           sizeof(slab_magazine_t) + SLAB_MAG_SIZE*sizeof(void*),
                           sizeof(__address),
                           NULL, NULL,
                           SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
        /* Initialize slab_cache cache */
        _slab_cache_create(&slab_cache_cache,
                           "slab_cache",
                           sizeof(slab_cache_cache),
                           sizeof(__address),
                           NULL, NULL,
                           SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
        /* Initialize external slab cache */
        slab_extern_cache = slab_cache_create("slab_extern",
                                              sizeof(slab_t),
                                              0, NULL, NULL,
                                              SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

        /* Initialize structures for malloc */
        for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
             i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
             i++, size <<= 1) {
                malloc_caches[i] = slab_cache_create(malloc_names[i],
                                                     size, 0,
                                                     NULL, NULL, SLAB_CACHE_MAGDEFERRED);
        }
#ifdef CONFIG_DEBUG
        _slab_initialized = 1;
#endif
}

/** Enable the CPU cache
 *
 * The kernel calls this function when it knows the real number of
 * processors. It allocates the per-CPU cache structures and enables
 * them on all existing caches that are SLAB_CACHE_MAGDEFERRED.
 */
void slab_enable_cpucache(void)
{
        link_t *cur;
        slab_cache_t *s;

#ifdef CONFIG_DEBUG
        _slab_initialized = 2;
#endif

        spinlock_lock(&slab_cache_lock);

        for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
                s = list_get_instance(cur, slab_cache_t, link);
                if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
                        continue;
                make_magcache(s);
                s->flags &= ~SLAB_CACHE_MAGDEFERRED;
        }

        spinlock_unlock(&slab_cache_lock);
}

/**************************************/
/* kalloc/kfree functions */
void * malloc(unsigned int size, int flags)
{
        int idx;

        ASSERT(_slab_initialized);
        ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));

        if (size < (1 << SLAB_MIN_MALLOC_W))
                size = (1 << SLAB_MIN_MALLOC_W);

        idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
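        /*
         * Worked example, assuming SLAB_MIN_MALLOC_W is 4 (so the
         * smallest cache is malloc-16): a request for 100 bytes yields
         * idx = fnzb(99) - 4 + 1 = 6 - 4 + 1 = 3, selecting the
         * malloc-128 cache - the smallest power-of-two size >= 100.
         */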

        return slab_alloc(malloc_caches[idx], flags);
}

void free(void *obj)
{
        slab_t *slab;

        if (!obj)
                return;

        slab = obj2slab(obj);
        _slab_free(slab->cache, obj, slab);
}

/** @}
 */