source: mainline/generic/src/mm/slab.c@ 8e1ea655

Last change on this file since 8e1ea655 was 8e1ea655, checked in by Ondrej Palkovsky <ondrap@…>, 19 years ago

Early SLAB initialization; the cpu-cache is initialized later.
If you want to use slab_cache_create before slab_cpu_enable, add
the flag SLAB_CACHE_MAGDEFERRED.

  • Property mode set to 100644
File size: 23.9 KB
/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The SLAB allocator is closely modelled after the OpenSolaris SLAB allocator
 * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 * - empty slabs are deallocated immediately
 *   (in Linux they are kept on a linked list, in Solaris ???)
 * - empty magazines are deallocated when not needed
 *   (in Solaris they are kept on a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy to add:
 * - cache coloring
 * - dynamic magazine growing (different magazine sizes are already
 *   supported, but we would need to adjust the allocation strategy)
 *
 * The SLAB allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, we first check whether it is
 * available in the CPU-bound magazine. If it is not found there, it is
 * allocated from a CPU-shared slab - if a partially full one is found,
 * it is used, otherwise a new one is allocated.
 *
 * When an object is being deallocated, it is put into the CPU-bound
 * magazine. If there is no such magazine, a new one is allocated (and if
 * that fails, the object is deallocated into the slab). If the magazine
 * is full, it is put onto the CPU-shared list of magazines and a new one
 * is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines, to avoid
 * thrashing when somebody is allocating/deallocating one item at the
 * magazine size boundary. LIFO order is enforced, which should avoid
 * fragmentation as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty slabs are freed immediately (thrashing is avoided because
 * of the magazines).
 *
 * The slab information structure is kept inside the data area, if possible.
 * A cache can be marked such that it does not use magazines. This is used
 * only for slab-related caches, to avoid deadlocks and infinite recursion
 * (the SLAB allocator uses itself for allocating all of its control
 * structures).
 *
 * The SLAB allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries a 'light reclaim' first, then a brutal reclaim. The light
 * reclaim releases slabs from the CPU-shared magazine list, until at
 * least one slab is deallocated in each cache (this algorithm should
 * probably change). The brutal reclaim removes all cached objects, even
 * from CPU-bound magazines.
 *
 * TODO: For better CPU scaling the magazine allocation strategy should be
 * extended. Currently, if a cache does not have a magazine, it asks the
 * non-CPU-cached magazine cache to provide one. It might be feasible to
 * add a CPU-cached magazine cache (which would allocate its magazines
 * from the non-CPU-cached magazine cache). This would provide a nice
 * per-CPU buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which decreases contention on the single
 * system-wide magazine cache.
 *
 * TODO: It might be good to add lock granularity even at the slab level;
 * we could then try_spinlock over all partial slabs and thus improve
 * scalability even at the slab level.
 */
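
/*
 * A minimal usage sketch of the public interface implemented below (the
 * "foo" names are hypothetical, error handling is omitted):
 *
 *     static slab_cache_t *foo_cache;
 *
 *     foo_cache = slab_cache_create("foo_cache", sizeof(foo_t), 0,
 *                                   NULL, NULL, 0);
 *     foo_t *foo = slab_alloc(foo_cache, 0);
 *     ... use *foo ...
 *     slab_free(foo_cache, foo);
 *     slab_cache_destroy(foo_cache);
 */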

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <adt/list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;
/** Cache for the per-CPU mag_cache structures referenced from slab_cache_t */
static slab_cache_t *cpu_cache = NULL;
/** Cache for external slab descriptors
 * This time we want a per-CPU cache, so do not make it static.
 * - Using the SLAB for internal SLAB structures will not deadlock,
 *   as all slab structures are 'small' - the control structures of
 *   their caches do not require further allocation.
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
char *malloc_names[] = {
    "malloc-8", "malloc-16", "malloc-32", "malloc-64", "malloc-128",
    "malloc-256", "malloc-512", "malloc-1K", "malloc-2K",
    "malloc-4K", "malloc-8K", "malloc-16K", "malloc-32K",
    "malloc-64K", "malloc-128K"
};

/** Slab descriptor */
typedef struct {
    slab_cache_t *cache;    /**< Pointer to parent cache */
    link_t link;            /**< Link in the list of full/partial slabs */
    void *start;            /**< Start address of first available item */
    count_t available;      /**< Count of available items in this slab */
    index_t nextavail;      /**< Index of the next available item */
} slab_t;

/**************************************/
/* SLAB allocation functions          */

/**
 * Allocate frames for slab space and initialize the slab descriptor
 * and the intra-slab free list
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    size_t fsize;
    int i;
    zone_t *zone = NULL;
    int status;
    frame_t *frame;

    data = (void *)frame_alloc_rc_zone(cache->order, FRAME_KA | flags, &status, &zone);
    if (status != FRAME_OK) {
        return NULL;
    }
    if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
        slab = slab_alloc(slab_extern_cache, flags);
        if (!slab) {
            frame_free((__address)data);
            return NULL;
        }
    } else {
        fsize = (PAGE_SIZE << cache->order);
        slab = data + fsize - sizeof(*slab);
    }

    /* Fill in slab structures */
    /* TODO: some better way of accessing the frame */
    for (i = 0; i < (1 << cache->order); i++) {
        frame = ADDR2FRAME(zone, KA2PA((__address)(data + i * PAGE_SIZE)));
        frame->parent = slab;
    }

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = 0;
    slab->cache = cache;

    /* Chain the free objects: the first word of each free object
     * holds the index of the next free object */
    for (i = 0; i < cache->objects; i++)
        *((int *) (slab->start + i * cache->size)) = i + 1;

    atomic_inc(&cache->allocated_slabs);
    return slab;
}
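
/*
 * Layout sketch of the free list built by slab_space_alloc() (illustrative):
 *
 *     slab->start:  [ 1 ][ 2 ][ 3 ] ... [ n ]
 *
 * Each free object's first word holds the index of the next free object,
 * and slab->nextavail starts at 0, so allocation walks the objects in
 * order 0, 1, 2, ...
 */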

/**
 * Deallocate space associated with slab
 *
 * @return Number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
    frame_free((__address)slab->start);
    if (! (cache->flags & SLAB_CACHE_SLINSIDE))
        slab_free(slab_extern_cache, slab);

    atomic_dec(&cache->allocated_slabs);

    return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
    frame_t *frame;

    frame = frame_addr2frame((__address)obj);
    return (slab_t *)frame->parent;
}

/**************************************/
/* SLAB functions                     */


/**
 * Return object to slab and call a destructor
 *
 * @param slab The slab the object belongs to, if known by the caller;
 *             NULL otherwise
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                                slab_t *slab)
{
    int freed = 0;

    if (!slab)
        slab = obj2slab(obj);

    ASSERT(slab->cache == cache);

    if (cache->destructor)
        freed = cache->destructor(obj);

    spinlock_lock(&cache->slablock);
    ASSERT(slab->available < cache->objects);

    /* Push the object onto the slab's free list */
    *((int *)obj) = slab->nextavail;
    slab->nextavail = (obj - slab->start) / cache->size;
    slab->available++;

    /* Move it to the correct list */
    if (slab->available == cache->objects) {
        /* Free associated memory */
        list_remove(&slab->link);
        spinlock_unlock(&cache->slablock);

        return freed + slab_space_free(cache, slab);

    } else if (slab->available == 1) {
        /* It was in full, move to partial */
        list_remove(&slab->link);
        list_prepend(&slab->link, &cache->partial_slabs);
    }
    spinlock_unlock(&cache->slablock);
    return freed;
}
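
/*
 * Free-list trace for the push in slab_obj_destroy() (illustrative;
 * assumes cache->size == 96): freeing the object at slab->start + 2 * 96
 * while slab->nextavail == 7 stores 7 into the object's first word and
 * sets slab->nextavail to 2, making index 2 the new head of the free list.
 */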

/**
 * Take a new object from a slab, allocating a new slab if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
    slab_t *slab;
    void *obj;

    spinlock_lock(&cache->slablock);

    if (list_empty(&cache->partial_slabs)) {
        /* Allow recursion and reclaiming
         * - this should work, as the SLAB control structures
         *   are small and need nothing other than frame_alloc
         *   when they allocate; that is why recursion is at
         *   most one level deep
         */
        spinlock_unlock(&cache->slablock);
        slab = slab_space_alloc(cache, flags);
        if (!slab)
            return NULL;
        spinlock_lock(&cache->slablock);
    } else {
        slab = list_get_instance(cache->partial_slabs.next,
                                 slab_t,
                                 link);
        list_remove(&slab->link);
    }
    obj = slab->start + slab->nextavail * cache->size;
    slab->nextavail = *((int *)obj);
    slab->available--;

    if (! slab->available)
        list_prepend(&slab->link, &cache->full_slabs);
    else
        list_prepend(&slab->link, &cache->partial_slabs);

    spinlock_unlock(&cache->slablock);

    if (cache->constructor && cache->constructor(obj, flags)) {
        /* Bad, bad, construction failed */
        slab_obj_destroy(cache, obj, slab);
        return NULL;
    }
    return obj;
}

/**************************************/
/* CPU-cache slab functions           */

/**
 * Find a full magazine in the cache, remove it from the magazine list
 * and return it
 *
 * @param first If true, return the first magazine on the list, else the last
 */
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
                                            int first)
{
    slab_magazine_t *mag = NULL;
    link_t *cur;

    spinlock_lock(&cache->maglock);
    if (!list_empty(&cache->magazines)) {
        if (first)
            cur = cache->magazines.next;
        else
            cur = cache->magazines.prev;
        mag = list_get_instance(cur, slab_magazine_t, link);
        list_remove(&mag->link);
        atomic_dec(&cache->magazine_counter);
    }
    spinlock_unlock(&cache->maglock);
    return mag;
}

/** Prepend magazine to the magazine list in cache */
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
{
    spinlock_lock(&cache->maglock);

    list_prepend(&mag->link, &cache->magazines);
    atomic_inc(&cache->magazine_counter);

    spinlock_unlock(&cache->maglock);
}

/**
 * Free all objects in the magazine and free the memory associated with it
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
                                slab_magazine_t *mag)
{
    int i;
    count_t frames = 0;

    for (i = 0; i < mag->busy; i++) {
        frames += slab_obj_destroy(cache, mag->objs[i], NULL);
        atomic_dec(&cache->cached_objs);
    }

    slab_free(&mag_cache, mag);

    return frames;
}

/**
 * Find a full magazine, set it as current and return it
 *
 * Assumes the cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;
    if (cmag) { /* First try local CPU magazines */
        if (cmag->busy)
            return cmag;

        if (lastmag && lastmag->busy) {
            cache->mag_cache[CPU->id].current = lastmag;
            cache->mag_cache[CPU->id].last = cmag;
            return lastmag;
        }
    }
    /* Local magazines are empty, import one from the magazine list */
    newmag = get_mag_from_cache(cache, 1);
    if (!newmag)
        return NULL;

    if (lastmag)
        magazine_destroy(cache, lastmag);

    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;
    return newmag;
}

/**
 * Try to find an object in the CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
    slab_magazine_t *mag;
    void *obj;

    if (!CPU)
        return NULL;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = get_full_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return NULL;
    }
    obj = mag->objs[--mag->busy];
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_dec(&cache->cached_objs);

    return obj;
}

/**
 * Ensure that the current magazine has room for another object; return a
 * pointer to it, or NULL if no empty magazine is available and one cannot
 * be allocated
 *
 * Assumes mag_cache[CPU->id].lock is held
 *
 * We have two magazines bound to each processor:
 * - first try the current one;
 * - if it is full, try the last one;
 * - if that is full too, put it onto the magazine list,
 *   allocate a new one and exchange last & current.
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;

    if (cmag) {
        if (cmag->busy < cmag->size)
            return cmag;
        if (lastmag && lastmag->busy < lastmag->size) {
            cache->mag_cache[CPU->id].last = cmag;
            cache->mag_cache[CPU->id].current = lastmag;
            return lastmag;
        }
    }
    /* current and last are full or nonexistent, allocate new */
    /* We do not want to sleep just because of caching,
     * especially we do not want reclaiming to start, as
     * this would deadlock */
    newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
    if (!newmag)
        return NULL;
    newmag->size = SLAB_MAG_SIZE;
    newmag->busy = 0;

    /* Flush last to the magazine list */
    if (lastmag)
        put_mag_to_cache(cache, lastmag);

    /* Move current to last, save new as current */
    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;

    return newmag;
}
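
/*
 * State sketch of the two-magazine exchange in make_empty_current_mag()
 * (illustrative; F = full, P = partially filled):
 *
 *     current = F, last = P   ->  swap current/last, store into P
 *     current = F, last = F   ->  flush last to cache->magazines,
 *                                 allocate a fresh empty magazine
 *     current = NULL          ->  allocate a fresh empty magazine
 */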

/**
 * Put an object into the CPU-cache magazine
 *
 * @return 0 on success, -1 if memory could not be obtained
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
    slab_magazine_t *mag;

    if (!CPU)
        return -1;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = make_empty_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return -1;
    }

    mag->objs[mag->busy++] = obj;

    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_inc(&cache->cached_objs);
    return 0;
}


/**************************************/
/* SLAB cache functions               */

/** Return the number of objects that fit into a slab of the given cache */
static int comp_objects(slab_cache_t *cache)
{
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
    else
        return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return the space wasted in a slab */
static int badness(slab_cache_t *cache)
{
    int objects;
    int ssize;

    objects = comp_objects(cache);
    ssize = PAGE_SIZE << cache->order;
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);
    return ssize - objects * cache->size;
}
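
/*
 * Worked example (illustrative; assumes PAGE_SIZE == 4096 and
 * sizeof(slab_t) == 40): for an order-0 SLAB_CACHE_SLINSIDE cache with
 * cache->size == 96, comp_objects() == (4096 - 40) / 96 == 42 and
 * badness() == 4056 - 42 * 96 == 24 wasted bytes per slab.
 */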

/**
 * Initialize the mag_cache structure in a slab cache
 */
static void make_magcache(slab_cache_t *cache)
{
    int i;

    ASSERT(cpu_cache);
    cache->mag_cache = slab_alloc(cpu_cache, 0);
    for (i = 0; i < config.cpu_count; i++) {
        memsetb((__address)&cache->mag_cache[i],
                sizeof(cache->mag_cache[i]), 0);
        spinlock_initialize(&cache->mag_cache[i].lock,
                            "slab_maglock_cpu");
    }
}

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
                   char *name,
                   size_t size,
                   size_t align,
                   int (*constructor)(void *obj, int kmflag),
                   int (*destructor)(void *obj),
                   int flags)
{
    int pages;
    ipl_t ipl;

    memsetb((__address)cache, sizeof(*cache), 0);
    cache->name = name;

    if (align < sizeof(__native))
        align = sizeof(__native);
    size = ALIGN_UP(size, align);

    cache->size = size;

    cache->constructor = constructor;
    cache->destructor = destructor;
    cache->flags = flags;

    list_initialize(&cache->full_slabs);
    list_initialize(&cache->partial_slabs);
    list_initialize(&cache->magazines);
    spinlock_initialize(&cache->slablock, "slab_lock");
    spinlock_initialize(&cache->maglock, "slab_maglock");
    if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
        make_magcache(cache);

    /* Compute slab sizes, object counts in slabs etc. */
    if (cache->size < SLAB_INSIDE_SIZE)
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Minimum slab order */
    pages = ((cache->size - 1) >> PAGE_WIDTH) + 1;
    cache->order = fnzb(pages);

    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
        cache->order += 1;
    }
    cache->objects = comp_objects(cache);
    /* If the slab descriptor fits into the wasted space, keep it inside */
    if (badness(cache) > sizeof(slab_t))
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Add cache to the cache list */
    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);

    list_append(&cache->link, &slab_cache_list);

    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);
}
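
/*
 * Example of the order computation above (illustrative; assumes
 * PAGE_WIDTH == 12, i.e. 4 KiB pages): for cache->size == 5000,
 * pages == ((4999 >> 12) + 1) == 2 and cache->order == fnzb(2) == 1,
 * so the slab starts at two pages and grows further only while
 * badness() exceeds SLAB_MAX_BADNESS(cache).
 */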

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
                                 size_t size,
                                 size_t align,
                                 int (*constructor)(void *obj, int kmflag),
                                 int (*destructor)(void *obj),
                                 int flags)
{
    slab_cache_t *cache;

    cache = slab_alloc(&slab_cache_cache, 0);
    _slab_cache_create(cache, name, size, align, constructor, destructor,
                       flags);
    return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
    int i;
    slab_magazine_t *mag;
    count_t frames = 0;
    int magcount;

    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
        return 0; /* Nothing to do */

    /* We count up to the original magazine count to avoid
     * an endless loop
     */
    magcount = atomic_get(&cache->magazine_counter);
    while (magcount-- && (mag = get_mag_from_cache(cache, 0))) {
        frames += magazine_destroy(cache, mag);
        if (!(flags & SLAB_RECLAIM_ALL) && frames)
            break;
    }

    if (flags & SLAB_RECLAIM_ALL) {
        /* Destroy CPU-bound magazines */
        for (i = 0; i < config.cpu_count; i++) {
            spinlock_lock(&cache->mag_cache[i].lock);

            mag = cache->mag_cache[i].current;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].current = NULL;

            mag = cache->mag_cache[i].last;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].last = NULL;

            spinlock_unlock(&cache->mag_cache[i].lock);
        }
    }

    return frames;
}

/** Check that there are no slabs left and remove the cache from the system */
void slab_cache_destroy(slab_cache_t *cache)
{
    ipl_t ipl;

    /* First remove the cache from the cache list, so that we do not
     * need to disable interrupts later
     */

    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);

    list_remove(&cache->link);

    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);

    /* Do not lock anything; we assume the software is correct and
     * does not touch the cache when it decides to destroy it */

    /* Destroy all magazines */
    _slab_reclaim(cache, SLAB_RECLAIM_ALL);

    /* All slabs must be empty */
    if (!list_empty(&cache->full_slabs)
        || !list_empty(&cache->partial_slabs))
        panic("Destroying cache that is not empty.");

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        slab_free(cpu_cache, cache->mag_cache);
    slab_free(&slab_cache_cache, cache);
}

/** Allocate a new object from cache - if no flags are given, memory is
    always returned */
void * slab_alloc(slab_cache_t *cache, int flags)
{
    ipl_t ipl;
    void *result = NULL;

    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    ipl = interrupts_disable();

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        result = magazine_obj_get(cache);
    if (!result)
        result = slab_obj_create(cache, flags);

    interrupts_restore(ipl);

    if (result)
        atomic_inc(&cache->allocated_objs);

    return result;
}

/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
    ipl_t ipl;

    ipl = interrupts_disable();

    if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
        || magazine_obj_put(cache, obj)) {

        slab_obj_destroy(cache, obj, slab);

    }
    interrupts_restore(ipl);
    atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
    _slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
    slab_cache_t *cache;
    link_t *cur;
    count_t frames = 0;

    spinlock_lock(&slab_cache_lock);

    /* TODO: Add an assert that interrupts are disabled, otherwise
     * memory allocation from interrupts can deadlock.
     */

    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        frames += _slab_reclaim(cache, flags);
    }

    spinlock_unlock(&slab_cache_lock);

    return frames;
}


/* Print the list of slab caches */
void slab_print_list(void)
{
    slab_cache_t *cache;
    link_t *cur;
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);
    printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
               (1 << cache->order), cache->objects,
               atomic_get(&cache->allocated_slabs),
               atomic_get(&cache->cached_objs),
               atomic_get(&cache->allocated_objs),
               cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
    }
    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);
}

#ifdef CONFIG_DEBUG
static int _slab_initialized = 0;
#endif

void slab_cache_init(void)
{
    int i, size;

    /* Initialize magazine cache */
    _slab_cache_create(&mag_cache,
                       "slab_magazine",
                       sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void *),
                       sizeof(__address),
                       NULL, NULL,
                       SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize slab_cache cache */
    _slab_cache_create(&slab_cache_cache,
                       "slab_cache",
                       sizeof(slab_cache_cache),
                       sizeof(__address),
                       NULL, NULL,
                       SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize external slab cache */
    slab_extern_cache = slab_cache_create("slab_extern",
                                          sizeof(slab_t),
                                          0, NULL, NULL,
                                          SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

    /* Initialize structures for malloc */
    for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
         i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
         i++, size <<= 1) {
        malloc_caches[i] = slab_cache_create(malloc_names[i],
                                             size, 0,
                                             NULL, NULL, SLAB_CACHE_MAGDEFERRED);
    }
#ifdef CONFIG_DEBUG
    _slab_initialized = 1;
#endif
}

/** Enable the cpu_cache
 *
 * The kernel calls this function when it knows the real number of
 * processors. It allocates the slab for the cpu cache and enables
 * magazines on all existing slab caches that are SLAB_CACHE_MAGDEFERRED.
 */
void slab_enable_cpucache(void)
{
    link_t *cur;
    slab_cache_t *s;

    cpu_cache = slab_cache_create("magcpucache",
                                  sizeof(slab_mag_cache_t) * config.cpu_count,
                                  0, NULL, NULL,
                                  SLAB_CACHE_NOMAGAZINE);
    spinlock_lock(&slab_cache_lock);

    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        s = list_get_instance(cur, slab_cache_t, link);
        if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
            continue;
        make_magcache(s);
        s->flags &= ~SLAB_CACHE_MAGDEFERRED;
    }

    spinlock_unlock(&slab_cache_lock);
}
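
/*
 * A minimal sketch (illustrative, not part of this file) of the rule from
 * the commit message: caches created before slab_enable_cpucache() runs
 * must pass SLAB_CACHE_MAGDEFERRED; the "early_cache" name is hypothetical.
 *
 *     slab_cache_t *early;
 *
 *     early = slab_cache_create("early_cache", 64, 0, NULL, NULL,
 *                               SLAB_CACHE_MAGDEFERRED);
 *     ...
 *     slab_enable_cpucache();  (retrofits per-CPU magazines onto the cache)
 */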

/**************************************/
/* kalloc/kfree functions             */
void * kalloc(unsigned int size, int flags)
{
    int idx;

    ASSERT(_slab_initialized);
    ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));

    if (size < (1 << SLAB_MIN_MALLOC_W))
        size = (1 << SLAB_MIN_MALLOC_W);

    /* Index of the smallest power-of-two cache that fits the request */
    idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

    return slab_alloc(malloc_caches[idx], flags);
}
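
/*
 * Worked example of the index computation in kalloc() (illustrative;
 * assumes SLAB_MIN_MALLOC_W == 3, matching the smallest "malloc-8"
 * cache): kalloc(100, 0) gives fnzb(99) == 6 (highest set bit of 0x63),
 * so idx == 6 - 3 + 1 == 4, selecting malloc_caches[4], i.e.
 * "malloc-128", the smallest cache that can hold 100 bytes.
 */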


void kfree(void *obj)
{
    slab_t *slab;

    if (!obj)
        return;

    slab = obj2slab(obj);
    _slab_free(slab->cache, obj, slab);
}