source: mainline/generic/src/mm/slab.c@ 10e16a7

Last change on this file since 10e16a7 was 10e16a7, checked in by Ondrej Palkovsky <ondrap@…>, 19 years ago

Added scheduler queues output. The scheduler is buggy - on SMP
the cpus never get to cpu_sleep; in the slab2 test on 4 cpus everything
is on the first cpu.
The slab allocator passes tests in this configuration, but in a slightly
different (more efficient) locking order it panics. TODO: Find out why
it panics.

/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The SLAB allocator is closely modelled after the OpenSolaris SLAB allocator
 * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 * - empty slabs are deallocated immediately
 *   (in Linux they are kept in a linked list, in Solaris ???)
 * - empty magazines are deallocated when not needed
 *   (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy to do:
 * - cache coloring
 * - dynamic magazine growing (different magazine sizes are already
 *   supported, but we would need to adjust the allocation strategy)
 *
 * The SLAB allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, it is first looked up in the
 * CPU-bound magazine. If it is not found there, it is allocated from
 * a CPU-shared slab - if a partially full one is found, it is used,
 * otherwise a new one is allocated.
 *
 * When an object is being deallocated, it is put into the CPU-bound magazine.
 * If there is no such magazine, a new one is allocated (if the allocation
 * fails, the object is deallocated into the slab). If the magazine is full,
 * it is put onto the CPU-shared list of magazines and a new one is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines to avoid
 * thrashing when somebody is allocating/deallocating 1 item at the magazine
 * size boundary. LIFO order is enforced, which should avoid fragmentation
 * as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty slabs are freed immediately (thrashing is avoided because
 * of the magazines).
 *
 * The slab information structure is kept inside the data area, if possible.
 * A cache can be marked as not using magazines. This is used
 * only for SLAB-related caches to avoid deadlocks and infinite recursion
 * (the SLAB allocator uses itself for allocating all its control structures).
 *
 * The SLAB allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the CPU-shared magazine list until at least 1 slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from CPU-bound
 * magazines.
 *
 * TODO: For better CPU scaling the magazine allocation strategy should
 * be extended. Currently, if the cache does not have a magazine, it asks
 * the non-CPU-cached magazine cache to provide one. It might be feasible
 * to add a CPU-cached magazine cache (which would allocate its magazines
 * from the non-CPU-cached magazine cache). This would provide a nice per-CPU
 * buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which decreases contention for the single
 * per-system magazine cache.
 *
 */
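
/*
 * Illustrative usage sketch (not part of the allocator itself): how a client
 * of this interface might create a cache, allocate and free objects through
 * the magazine layer, and destroy the cache again. The foo_t type, the "foo"
 * cache name and the use of 0 as the flags argument are assumptions made only
 * for this example.
 *
 *	typedef struct {
 *		int value;
 *	} foo_t;
 *
 *	static slab_cache_t *foo_cache;
 *
 *	void foo_init(void)
 *	{
 *		foo_cache = slab_cache_create("foo", sizeof(foo_t), 0,
 *					      NULL, NULL, 0);
 *	}
 *
 *	void foo_use(void)
 *	{
 *		foo_t *f = slab_alloc(foo_cache, 0);
 *		if (f) {
 *			f->value = 42;
 *			slab_free(foo_cache, f);
 *		}
 *	}
 *
 *	void foo_done(void)
 *	{
 *		slab_cache_destroy(foo_cache);
 *	}
 */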


#include <synch/spinlock.h>
#include <mm/slab.h>
#include <list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;

/** Cache for external slab descriptors
 * This time we want a per-CPU cache, so do not make it static
 * - using SLAB for internal SLAB structures will not deadlock,
 *   as all slab structures are 'small' - the control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
char *malloc_names[] = {
	"malloc-8", "malloc-16", "malloc-32", "malloc-64", "malloc-128",
	"malloc-256", "malloc-512", "malloc-1K", "malloc-2K",
	"malloc-4K", "malloc-8K", "malloc-16K", "malloc-32K",
	"malloc-64K", "malloc-128K"
};

/** Slab descriptor */
typedef struct {
	slab_cache_t *cache;	/**< Pointer to parent cache */
	link_t link;		/**< List of full/partial slabs */
	void *start;		/**< Start address of first available item */
	count_t available;	/**< Count of available items in this slab */
	index_t nextavail;	/**< The index of next available item */
} slab_t;

/**************************************/
/* SLAB allocation functions */

/**
 * Allocate frames for slab space and initialize the slab structures
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
	void *data;
	slab_t *slab;
	size_t fsize;
	int i;
	zone_t *zone = NULL;
	int status;
	frame_t *frame;

	data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
	if (status != FRAME_OK) {
		return NULL;
	}
	if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
		slab = slab_alloc(slab_extern_cache, flags);
		if (!slab) {
			frame_free((__address)data);
			return NULL;
		}
	} else {
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}

	/* Fill in slab structures */
	/* TODO: some better way of accessing the frame */
	for (i = 0; i < (1 << cache->order); i++) {
		frame = ADDR2FRAME(zone, KA2PA((__address)(data + i*PAGE_SIZE)));
		frame->parent = slab;
	}

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;
	slab->cache = cache;

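	/* Build the in-slab free list: the first word of every free object
	 * stores the index of the next free object. */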
	for (i = 0; i < cache->objects; i++)
		*((int *) (slab->start + i*cache->size)) = i + 1;

	atomic_inc(&cache->allocated_slabs);
	return slab;
}

/**
 * Deallocate space associated with SLAB
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free((__address)slab->start);
	if (! (cache->flags & SLAB_CACHE_SLINSIDE))
		slab_free(slab_extern_cache, slab);

	atomic_dec(&cache->allocated_slabs);

	return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
	frame_t *frame;

	frame = frame_addr2frame((__address)obj);
	return (slab_t *)frame->parent;
}

/**************************************/
/* SLAB functions */


/**
 * Return object to slab and call a destructor
 *
 * Assume the cache->lock is held.
 *
 * @param slab The object's slab if the caller knows it directly, otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
				slab_t *slab)
{
	count_t frames = 0;

	if (!slab)
		slab = obj2slab(obj);

	ASSERT(slab->cache == cache);

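	/* Return the object to the slab's in-band free list */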
	*((int *)obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start) / cache->size;
	slab->available++;

	/* Move it to correct list */
	if (slab->available == 1) {
		/* It was in full, move to partial */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		/* Avoid deadlock */
		spinlock_unlock(&cache->lock);
		frames = slab_space_free(cache, slab);
		spinlock_lock(&cache->lock);
	}

	return frames;
}

/**
 * Take a new object from a slab or create a new slab if needed
 *
 * Assume cache->lock is held.
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
	slab_t *slab;
	void *obj;

	if (list_empty(&cache->partial_slabs)) {
		/* Allow recursion and reclaiming
		 * - this should work, as the SLAB control structures
		 *   are small and do not need to allocate with anything
		 *   other than frame_alloc when they are allocating;
		 *   that is why recursion is at most one level deep
		 */
		spinlock_unlock(&cache->lock);
		slab = slab_space_alloc(cache, flags);
		spinlock_lock(&cache->lock);
		if (!slab) {
			return NULL;
		}
	} else {
		slab = list_get_instance(cache->partial_slabs.next,
					 slab_t,
					 link);
		list_remove(&slab->link);
	}
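	/* Pop the next free object off the slab's in-band free list */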
	obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((int *)obj);
	slab->available--;
	if (! slab->available)
		list_prepend(&slab->link, &cache->full_slabs);
	else
		list_prepend(&slab->link, &cache->partial_slabs);
	return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Free all objects in magazine and free memory associated with magazine
 *
 * Assume cache->lock is held
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
				slab_magazine_t *mag)
{
	int i;
	count_t frames = 0;

	for (i = 0; i < mag->busy; i++) {
		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
		atomic_dec(&cache->cached_objs);
	}

	slab_free(&mag_cache, mag);

	return frames;
}

/**
 * Find a non-empty magazine, make it the current magazine and return it
 *
 * Assume cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;
	if (cmag) { /* First try local CPU magazines */
		if (cmag->busy)
			return cmag;

		if (lastmag && lastmag->busy) {
			cache->mag_cache[CPU->id].current = lastmag;
			cache->mag_cache[CPU->id].last = cmag;
			return lastmag;
		}
	}
	/* Local magazines are empty, import one from the magazine list */
	spinlock_lock(&cache->lock);
	if (list_empty(&cache->magazines)) {
		spinlock_unlock(&cache->lock);
		return NULL;
	}
	newmag = list_get_instance(cache->magazines.next,
				   slab_magazine_t,
				   link);
	list_remove(&newmag->link);
	spinlock_unlock(&cache->lock);

	if (lastmag)
		slab_free(&mag_cache, lastmag);
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;
	return newmag;
}

/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
	slab_magazine_t *mag;
	void *obj;

	if (!CPU)
		return NULL;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = get_full_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return NULL;
	}
	obj = mag->objs[--mag->busy];
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_dec(&cache->cached_objs);

	return obj;
}

/**
 * Make sure the current magazine has room for another object; return a pointer
 * to it, or NULL if no such magazine is available and a new one cannot be
 * allocated
 *
 * Assume mag_cache[CPU->id].lock is held
 *
 * We have 2 magazines bound to each processor:
 * first try the current one;
 * if it is full, try the last one;
 * if that is full too, put the last one on the magazine list,
 * allocate a new one and exchange last & current.
 *
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;

	if (cmag) {
		if (cmag->busy < cmag->size)
			return cmag;
		if (lastmag && lastmag->busy < lastmag->size) {
			cache->mag_cache[CPU->id].last = cmag;
			cache->mag_cache[CPU->id].current = lastmag;
			return lastmag;
		}
	}
	/* current | last are full | nonexistent, allocate new */
	/* We do not want to sleep just because of caching */
	/* Especially we do not want reclaiming to start, as
	 * this would deadlock */
	newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
	if (!newmag)
		return NULL;
	newmag->size = SLAB_MAG_SIZE;
	newmag->busy = 0;

	/* Flush the last magazine to the magazine list */
	if (lastmag) {
		spinlock_lock(&cache->lock);
		list_prepend(&lastmag->link, &cache->magazines);
		spinlock_unlock(&cache->lock);
	}
	/* Move current to last, save the new one as current */
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;

	return newmag;
}

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	slab_magazine_t *mag;

	if (!CPU)
		return -1;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = make_empty_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return -1;
	}

	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_inc(&cache->cached_objs);
	return 0;
}


/**************************************/
/* SLAB CACHE functions */

/** Return the number of objects that fit into a slab of the given cache */
static int comp_objects(slab_cache_t *cache)
{
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
	else
		return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static int badness(slab_cache_t *cache)
{
	int objects;
	int ssize;

	objects = comp_objects(cache);
	ssize = PAGE_SIZE << cache->order;
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		ssize -= sizeof(slab_t);
	return ssize - objects * cache->size;
}

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
		   char *name,
		   size_t size,
		   size_t align,
		   int (*constructor)(void *obj, int kmflag),
		   void (*destructor)(void *obj),
		   int flags)
{
	int i;
	int pages;

	memsetb((__address)cache, sizeof(*cache), 0);
	cache->name = name;

	if (align < sizeof(__native))
		align = sizeof(__native);
	size = ALIGN_UP(size, align);

	cache->size = size;

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->lock, "cachelock");
	if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) {
		for (i = 0; i < config.cpu_count; i++) {
			memsetb((__address)&cache->mag_cache[i],
				sizeof(cache->mag_cache[i]), 0);
			spinlock_initialize(&cache->mag_cache[i].lock,
					    "cpucachelock");
		}
	}

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order */
	pages = ((cache->size - 1) >> PAGE_WIDTH) + 1;
	cache->order = fnzb(pages);

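	/* Grow the slab order until the wasted space stays within the
	 * allowed badness for this cache */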
	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
		cache->order += 1;
	}
	cache->objects = comp_objects(cache);
	/* If the slab info fits into the wasted space, put it inside */
	if (badness(cache) > sizeof(slab_t))
		cache->flags |= SLAB_CACHE_SLINSIDE;

	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
				 size_t size,
				 size_t align,
				 int (*constructor)(void *obj, int kmflag),
				 void (*destructor)(void *obj),
				 int flags)
{
	slab_cache_t *cache;

	cache = slab_alloc(&slab_cache_cache, 0);
	_slab_cache_create(cache, name, size, align, constructor, destructor,
			   flags);
	return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
	int i;
	slab_magazine_t *mag;
	link_t *cur;
	count_t frames = 0;

	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/* First lock all CPU caches, then the complete cache lock */
	if (flags & SLAB_RECLAIM_ALL) {
		for (i = 0; i < config.cpu_count; i++)
			spinlock_lock(&cache->mag_cache[i].lock);
	}
	spinlock_lock(&cache->lock);

	if (flags & SLAB_RECLAIM_ALL) {
		/* Aggressive memfree */
		/* Destroy CPU magazines */
		for (i = 0; i < config.cpu_count; i++) {
			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;

			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;
		}
	}
	/* Destroy magazines on the cache's shared magazine list */
	cur = cache->magazines.prev;

	while (cur != &cache->magazines) {
		mag = list_get_instance(cur, slab_magazine_t, link);

		cur = cur->prev;
		list_remove(&mag->link);
		frames += magazine_destroy(cache, mag);
		/* If we do not do a full reclaim, break
		 * as soon as something is freed */
		if (!(flags & SLAB_RECLAIM_ALL) && frames)
			break;
	}

	spinlock_unlock(&cache->lock);
	/* We can release the cache locks now */
	if (flags & SLAB_RECLAIM_ALL) {
		for (i = 0; i < config.cpu_count; i++)
			spinlock_unlock(&cache->mag_cache[i].lock);
	}

	return frames;
}

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
	/* Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it */

	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if (!list_empty(&cache->full_slabs)
	    || !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	spinlock_lock(&slab_cache_lock);
	list_remove(&cache->link);
	spinlock_unlock(&slab_cache_lock);

	slab_free(&slab_cache_cache, cache);
}

/** Allocate a new object from the cache - if no flags are given, it always
 * returns memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
	ipl_t ipl;
	void *result = NULL;

	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl = interrupts_disable();

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		result = magazine_obj_get(cache);

	if (!result) {
		spinlock_lock(&cache->lock);
		result = slab_obj_create(cache, flags);
		spinlock_unlock(&cache->lock);
	}

	interrupts_restore(ipl);

	if (result)
		atomic_inc(&cache->allocated_objs);

	return result;
}

/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
	ipl_t ipl;

	ipl = interrupts_disable();

	if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
	    || magazine_obj_put(cache, obj)) {
		spinlock_lock(&cache->lock);
		slab_obj_destroy(cache, obj, slab);
		spinlock_unlock(&cache->lock);
	}
	interrupts_restore(ipl);
	atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
	slab_cache_t *cache;
	link_t *cur;
	count_t frames = 0;

	spinlock_lock(&slab_cache_lock);

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	spinlock_unlock(&slab_cache_lock);

	return frames;
}


/* Print the list of slab caches */
void slab_print_list(void)
{
	slab_cache_t *cache;
	link_t *cur;

	spinlock_lock(&slab_cache_lock);
	printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
		       (1 << cache->order), cache->objects,
		       atomic_get(&cache->allocated_slabs),
		       atomic_get(&cache->cached_objs),
		       atomic_get(&cache->allocated_objs),
		       cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
	}
	spinlock_unlock(&slab_cache_lock);
}

void slab_cache_init(void)
{
	int i, size;

	/* Initialize magazine cache */
	_slab_cache_create(&mag_cache,
			   "slab_magazine",
			   sizeof(slab_magazine_t) + SLAB_MAG_SIZE*sizeof(void *),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize slab_cache cache */
	_slab_cache_create(&slab_cache_cache,
			   "slab_cache",
			   sizeof(slab_cache_cache) + config.cpu_count*sizeof(slab_cache_cache.mag_cache[0]),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize external slab cache */
	slab_extern_cache = slab_cache_create("slab_extern",
					      sizeof(slab_t),
					      0, NULL, NULL,
					      SLAB_CACHE_SLINSIDE);

	/* Initialize structures for malloc */
	for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
	     i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
	     i++, size <<= 1) {
		malloc_caches[i] = slab_cache_create(malloc_names[i],
						     size, 0,
						     NULL, NULL, 0);
	}
}

/**************************************/
/* kalloc/kfree functions */
void * kalloc(unsigned int size, int flags)
{
	int idx;

	ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));

	if (size < (1 << SLAB_MIN_MALLOC_W))
		size = (1 << SLAB_MIN_MALLOC_W);

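	/* Pick the smallest malloc cache whose (power-of-two) object size
	 * can hold the request */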
	idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

	return slab_alloc(malloc_caches[idx], flags);
}


void kfree(void *obj)
{
	slab_t *slab = obj2slab(obj);

	_slab_free(slab->cache, obj, slab);
}