source: mainline/generic/src/mm/slab.c@ 280a27e

Last change on this file since 280a27e was 280a27e, checked in by Josef Cejka <malyzelenyhnus@…>, 19 years ago

Printf ported back from uspace to kernel.
Printf calls changed to match new conventions.

/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The SLAB allocator is closely modelled after the OpenSolaris SLAB allocator
 * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 * - empty SLABs are deallocated immediately
 *   (in Linux they are kept in a linked list, in Solaris ???)
 * - empty magazines are deallocated when not needed
 *   (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy to add:
 * - cache coloring
 * - dynamic magazine growing (different magazine sizes are already
 *   supported, but we would need to adjust the allocation strategy)
 *
 * The SLAB allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, it is first looked up in the
 * CPU-bound magazine. If it is not found there, it is allocated from a
 * CPU-shared SLAB - if a partially full slab is found, it is used,
 * otherwise a new one is allocated.
 *
 * When an object is being deallocated, it is put into the CPU-bound magazine.
 * If there is no such magazine, a new one is allocated (and if that fails,
 * the object is deallocated into the SLAB). If the magazine is full, it is
 * put onto the CPU-shared list of magazines and a new one is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines, which avoids
 * thrashing when somebody allocates/deallocates one item right at the
 * magazine size boundary. LIFO order is enforced, which should avoid
 * fragmentation as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty SLABs are freed immediately (thrashing is avoided because
 * of the magazines).
 *
 * The SLAB information structure is kept inside the data area, if possible.
 * A cache can be marked not to use magazines. This is used only for
 * SLAB-related caches, to avoid deadlocks and infinite recursion
 * (the SLAB allocator uses itself for allocating all its control structures).
 *
 * The SLAB allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the CPU-shared magazine list until at least one slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from CPU-bound
 * magazines.
 *
 * TODO: For better CPU scaling the magazine allocation strategy should
 * be extended. Currently, if the cache does not have a magazine, it asks
 * the non-CPU-cached magazine cache to provide one. It might be feasible
 * to add a CPU-cached magazine cache (which would allocate its magazines
 * from the non-CPU-cached magazine cache). This would provide a nice per-CPU
 * buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which decreases contention on the single per-system
 * magazine cache.
 *
 * - it might be good to add lock granularity even at the slab level;
 *   we could then try_spinlock over all partial slabs and thus improve
 *   scalability even at the slab level
 */
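
/*
 * Illustrative usage sketch of the interface implemented below. The
 * 'demo_*' and 'myobj_t' names are hypothetical and not part of this file;
 * the block is disabled so it is never compiled.
 */
#if 0
typedef struct {
	int field;
} myobj_t;

static slab_cache_t *myobj_cache;

static void demo_init(void)
{
	/* No constructor/destructor, default magazine behaviour */
	myobj_cache = slab_cache_create("myobj_cache", sizeof(myobj_t),
					0, NULL, NULL, 0);
}

static void demo_use(void)
{
	myobj_t *obj = slab_alloc(myobj_cache, 0);

	if (obj) {
		obj->field = 42;
		slab_free(myobj_cache, obj);
	}
}
#endif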


#include <synch/spinlock.h>
#include <mm/slab.h>
#include <adt/list.h>
#include <memstr.h>
#include <align.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;
/** Cache for external slab descriptors
 * This time we want per-cpu cache, so do not make it static
 * - using SLAB for internal SLAB structures will not deadlock,
 *   as all slab structures are 'small' - control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
char *malloc_names[] = {
	"malloc-16", "malloc-32", "malloc-64", "malloc-128",
	"malloc-256", "malloc-512", "malloc-1K", "malloc-2K",
	"malloc-4K", "malloc-8K", "malloc-16K", "malloc-32K",
	"malloc-64K", "malloc-128K"
};

/** Slab descriptor */
typedef struct {
	slab_cache_t *cache;	/**< Pointer to parent cache */
	link_t link;		/**< List of full/partial slabs */
	void *start;		/**< Start address of first available item */
	count_t available;	/**< Count of available items in this slab */
	index_t nextavail;	/**< The index of next available item */
} slab_t;

#ifdef CONFIG_DEBUG
static int _slab_initialized = 0;
#endif

/**************************************/
/* SLAB allocation functions */

/**
 * Allocate frames for slab space and initialize
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
	void *data;
	slab_t *slab;
	size_t fsize;
	int i;
	int status;
	pfn_t pfn;
	int zone = 0;

	pfn = frame_alloc_rc_zone(cache->order, FRAME_KA | flags, &status, &zone);
	data = (void *) PA2KA(PFN2ADDR(pfn));
	if (status != FRAME_OK) {
		return NULL;
	}
	if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
		slab = slab_alloc(slab_extern_cache, flags);
		if (!slab) {
			frame_free(ADDR2PFN(KA2PA(data)));
			return NULL;
		}
	} else {
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}

	/* Fill in slab structures */
	for (i = 0; i < (1 << cache->order); i++)
		frame_set_parent(pfn + i, slab, zone);

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;
	slab->cache = cache;

	for (i = 0; i < cache->objects; i++)
		*((int *) (slab->start + i * cache->size)) = i + 1;

	atomic_inc(&cache->allocated_slabs);
	return slab;
}
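
/*
 * A stand-alone model of the index-encoded free list built by the loop
 * above: the first word of every free object stores the index of the next
 * free object, and slab->nextavail points at the head of the chain. The
 * 'toy_*' name below is only for illustration and is not part of the
 * kernel; the block is disabled so it is never compiled.
 */
#if 0
static void toy_freelist_model(void *start, size_t obj_size, int objects)
{
	int i;
	int nextavail = 0;	/* head of the chain, as in slab->nextavail */
	void *obj;

	/* Chain every object to its successor, as slab_space_alloc() does */
	for (i = 0; i < objects; i++)
		*((int *) (start + i * obj_size)) = i + 1;

	/* Popping the head (see slab_obj_create()): */
	obj = start + nextavail * obj_size;
	nextavail = *((int *) obj);	/* head now points at object 1 */
}
#endif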

/**
 * Deallocate space associated with SLAB
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free(ADDR2PFN(KA2PA(slab->start)));
	if (!(cache->flags & SLAB_CACHE_SLINSIDE))
		slab_free(slab_extern_cache, slab);

	atomic_dec(&cache->allocated_slabs);

	return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
	return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
}

/**************************************/
/* SLAB functions */


/**
 * Return object to slab and call a destructor
 *
 * @param slab The slab of the object, if known to the caller; NULL otherwise
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
				slab_t *slab)
{
	int freed = 0;

	if (!slab)
		slab = obj2slab(obj);

	ASSERT(slab->cache == cache);

	if (cache->destructor)
		freed = cache->destructor(obj);

	spinlock_lock(&cache->slablock);
	ASSERT(slab->available < cache->objects);

	*((int *) obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start) / cache->size;
	slab->available++;

	/* Move it to the correct list */
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		spinlock_unlock(&cache->slablock);

		return freed + slab_space_free(cache, slab);

	} else if (slab->available == 1) {
		/* It was in full, move to partial */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}
	spinlock_unlock(&cache->slablock);
	return freed;
}

/**
 * Take a new object from a slab, creating a new slab if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
	slab_t *slab;
	void *obj;

	spinlock_lock(&cache->slablock);

	if (list_empty(&cache->partial_slabs)) {
		/* Allow recursion and reclaiming
		 * - this should work, as the SLAB control structures
		 *   are small and do not need to allocate with anything
		 *   other than frame_alloc when they are allocating,
		 *   so recursion is at most one level deep
		 */
		spinlock_unlock(&cache->slablock);
		slab = slab_space_alloc(cache, flags);
		if (!slab)
			return NULL;
		spinlock_lock(&cache->slablock);
	} else {
		slab = list_get_instance(cache->partial_slabs.next,
					 slab_t,
					 link);
		list_remove(&slab->link);
	}
	obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((int *) obj);
	slab->available--;

	if (!slab->available)
		list_prepend(&slab->link, &cache->full_slabs);
	else
		list_prepend(&slab->link, &cache->partial_slabs);

	spinlock_unlock(&cache->slablock);

	if (cache->constructor && cache->constructor(obj, flags)) {
		/* Bad, bad, construction failed */
		slab_obj_destroy(cache, obj, slab);
		return NULL;
	}
	return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Find a full magazine in the cache, take it from the list
 * and return it
 *
 * @param first If true, return the first magazine, else the last one
 */
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
					    int first)
{
	slab_magazine_t *mag = NULL;
	link_t *cur;

	spinlock_lock(&cache->maglock);
	if (!list_empty(&cache->magazines)) {
		if (first)
			cur = cache->magazines.next;
		else
			cur = cache->magazines.prev;
		mag = list_get_instance(cur, slab_magazine_t, link);
		list_remove(&mag->link);
		atomic_dec(&cache->magazine_counter);
	}
	spinlock_unlock(&cache->maglock);
	return mag;
}

/** Prepend magazine to magazine list in cache */
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
{
	spinlock_lock(&cache->maglock);

	list_prepend(&mag->link, &cache->magazines);
	atomic_inc(&cache->magazine_counter);

	spinlock_unlock(&cache->maglock);
}

/**
 * Free all objects in magazine and free memory associated with magazine
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
				slab_magazine_t *mag)
{
	int i;
	count_t frames = 0;

	for (i = 0; i < mag->busy; i++) {
		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
		atomic_dec(&cache->cached_objs);
	}

	slab_free(&mag_cache, mag);

	return frames;
}

/**
 * Find full magazine, set it as current and return it
 *
 * Assume cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;
	if (cmag) { /* First try local CPU magazines */
		if (cmag->busy)
			return cmag;

		if (lastmag && lastmag->busy) {
			cache->mag_cache[CPU->id].current = lastmag;
			cache->mag_cache[CPU->id].last = cmag;
			return lastmag;
		}
	}
	/* Local magazines are empty, import one from magazine list */
	newmag = get_mag_from_cache(cache, 1);
	if (!newmag)
		return NULL;

	if (lastmag)
		magazine_destroy(cache, lastmag);

	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;
	return newmag;
}

/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
	slab_magazine_t *mag;
	void *obj;

	if (!CPU)
		return NULL;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = get_full_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return NULL;
	}
	obj = mag->objs[--mag->busy];
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_dec(&cache->cached_objs);

	return obj;
}

/**
 * Ensure that the current magazine has free space and return a pointer to
 * it, or return NULL if no such magazine is available and a new one cannot
 * be allocated
 *
 * Assumes mag_cache[CPU->id].lock is held
 *
 * We have two magazines bound to each processor:
 * - first try the current one;
 * - if it is full, try the last one;
 * - if that is full too, put the last one onto the magazine list,
 *   allocate a new magazine and exchange last & current.
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;

	if (cmag) {
		if (cmag->busy < cmag->size)
			return cmag;
		if (lastmag && lastmag->busy < lastmag->size) {
			cache->mag_cache[CPU->id].last = cmag;
			cache->mag_cache[CPU->id].current = lastmag;
			return lastmag;
		}
	}
	/* Both current and last are full or nonexistent, allocate a new one. */
	/* We do not want to sleep just because of caching, and especially
	 * we do not want reclaiming to start, as that would deadlock. */
	newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
	if (!newmag)
		return NULL;
	newmag->size = SLAB_MAG_SIZE;
	newmag->busy = 0;

	/* Flush last to the magazine list */
	if (lastmag)
		put_mag_to_cache(cache, lastmag);

	/* Move current to last, save new as current */
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;

	return newmag;
}

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	slab_magazine_t *mag;

	if (!CPU)
		return -1;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = make_empty_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return -1;
	}

	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_inc(&cache->cached_objs);
	return 0;
}


/**************************************/
/* SLAB CACHE functions */

/** Return the number of objects that fit into one slab of the cache */
static int comp_objects(slab_cache_t *cache)
{
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
	else
		return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return the wasted space in a slab */
static int badness(slab_cache_t *cache)
{
	int objects;
	int ssize;

	objects = comp_objects(cache);
	ssize = PAGE_SIZE << cache->order;
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		ssize -= sizeof(slab_t);
	return ssize - objects * cache->size;
}
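
/*
 * Worked example of the two helpers above, with illustrative numbers
 * (assuming PAGE_SIZE == 4096, sizeof(slab_t) == 32 and an object size
 * already aligned to 104 bytes, with the slab_t kept inside the slab):
 *   comp_objects = (4096 - 32) / 104 = 39 objects
 *   badness      = (4096 - 32) - 39 * 104 = 8 wasted bytes
 * If badness exceeded SLAB_MAX_BADNESS(cache), _slab_cache_create() below
 * would increase cache->order and recompute.
 */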

/**
 * Initialize mag_cache structure in slab cache
 */
static void make_magcache(slab_cache_t *cache)
{
	int i;

	ASSERT(_slab_initialized >= 2);

	cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count, 0);
	for (i = 0; i < config.cpu_count; i++) {
		memsetb((__address) &cache->mag_cache[i],
			sizeof(cache->mag_cache[i]), 0);
		spinlock_initialize(&cache->mag_cache[i].lock,
				    "slab_maglock_cpu");
	}
}

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
		   char *name,
		   size_t size,
		   size_t align,
		   int (*constructor)(void *obj, int kmflag),
		   int (*destructor)(void *obj),
		   int flags)
{
	int pages;
	ipl_t ipl;

	memsetb((__address) cache, sizeof(*cache), 0);
	cache->name = name;

	if (align < sizeof(__native))
		align = sizeof(__native);
	size = ALIGN_UP(size, align);

	cache->size = size;

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->slablock, "slab_lock");
	spinlock_initialize(&cache->maglock, "slab_maglock");
	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		make_magcache(cache);

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order */
	pages = ((cache->size - 1) >> PAGE_WIDTH) + 1;
	cache->order = fnzb(pages);

	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
		cache->order += 1;
	}
	cache->objects = comp_objects(cache);
	/* If info fits in, put it inside */
	if (badness(cache) > sizeof(slab_t))
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Add cache to cache list */
	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
				 size_t size,
				 size_t align,
				 int (*constructor)(void *obj, int kmflag),
				 int (*destructor)(void *obj),
				 int flags)
{
	slab_cache_t *cache;

	cache = slab_alloc(&slab_cache_cache, 0);
	_slab_cache_create(cache, name, size, align, constructor, destructor,
			   flags);
	return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
	int i;
	slab_magazine_t *mag;
	count_t frames = 0;
	int magcount;

	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/* We count up to the original magazine count to avoid
	 * an endless loop
	 */
	magcount = atomic_get(&cache->magazine_counter);
	while (magcount-- && (mag = get_mag_from_cache(cache, 0))) {
		frames += magazine_destroy(cache, mag);
		if (!(flags & SLAB_RECLAIM_ALL) && frames)
			break;
	}

	if (flags & SLAB_RECLAIM_ALL) {
		/* Destroy CPU-bound magazines */
		for (i = 0; i < config.cpu_count; i++) {
			spinlock_lock(&cache->mag_cache[i].lock);

			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;

			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;

			spinlock_unlock(&cache->mag_cache[i].lock);
		}
	}

	return frames;
}

/** Check that there are no slabs and remove cache from the system */
void slab_cache_destroy(slab_cache_t *cache)
{
	ipl_t ipl;

	/* First remove the cache from the cache list, so that we don't need
	 * to disable interrupts later
	 */

	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_remove(&cache->link);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);

	/* Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it */

	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if (!list_empty(&cache->full_slabs) ||
	    !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		free(cache->mag_cache);
	slab_free(&slab_cache_cache, cache);
}

/** Allocate a new object from the cache; if no flags are given, the call
 * always returns memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
	ipl_t ipl;
	void *result = NULL;

	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl = interrupts_disable();

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
		result = magazine_obj_get(cache);
	}
	if (!result)
		result = slab_obj_create(cache, flags);

	interrupts_restore(ipl);

	if (result)
		atomic_inc(&cache->allocated_objs);

	return result;
}

/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
	ipl_t ipl;

	ipl = interrupts_disable();

	if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
	    magazine_obj_put(cache, obj)) {
		slab_obj_destroy(cache, obj, slab);
	}
	interrupts_restore(ipl);
	atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
	slab_cache_t *cache;
	link_t *cur;
	count_t frames = 0;

	spinlock_lock(&slab_cache_lock);

	/* TODO: Add an assert that interrupts are disabled, otherwise
	 * memory allocation from interrupts can deadlock.
	 */

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	spinlock_unlock(&slab_cache_lock);

	return frames;
}
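
/*
 * Caller-side sketch (hypothetical, for illustration only; the real retry
 * logic lives in the frame allocator): on allocation failure, try the light
 * reclaim first and fall back to the brutal one, as described in the header
 * comment of this file. Disabled so it is never compiled.
 */
#if 0
static void toy_reclaim_on_failure(void)
{
	if (slab_reclaim(0) == 0)		/* light reclaim freed nothing */
		slab_reclaim(SLAB_RECLAIM_ALL);	/* brutal reclaim */
}
#endif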


/* Print the list of slab caches */
void slab_print_list(void)
{
	slab_cache_t *cache;
	link_t *cur;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);
	printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		printf("%s\t%zd\t%zd\t%zd\t%zd\t%zd\t%zd\t\t%s\n", cache->name, cache->size,
		       (1 << cache->order), cache->objects,
		       atomic_get(&cache->allocated_slabs),
		       atomic_get(&cache->cached_objs),
		       atomic_get(&cache->allocated_objs),
		       cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
	}
	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}

void slab_cache_init(void)
{
	int i, size;

	/* Initialize magazine cache */
	_slab_cache_create(&mag_cache,
			   "slab_magazine",
			   sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void *),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize slab_cache cache */
	_slab_cache_create(&slab_cache_cache,
			   "slab_cache",
			   sizeof(slab_cache_cache),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize external slab cache */
	slab_extern_cache = slab_cache_create("slab_extern",
					      sizeof(slab_t),
					      0, NULL, NULL,
					      SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

	/* Initialize structures for malloc */
	for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
	     i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
	     i++, size <<= 1) {
		malloc_caches[i] = slab_cache_create(malloc_names[i],
						     size, 0,
						     NULL, NULL, SLAB_CACHE_MAGDEFERRED);
	}
#ifdef CONFIG_DEBUG
	_slab_initialized = 1;
#endif
}

/** Enable the CPU cache
 *
 * The kernel calls this function when it knows the real number of
 * processors. It allocates the per-CPU magazine caches and enables them
 * on all existing caches that are marked SLAB_CACHE_MAGDEFERRED.
 */
void slab_enable_cpucache(void)
{
	link_t *cur;
	slab_cache_t *s;

#ifdef CONFIG_DEBUG
	_slab_initialized = 2;
#endif

	spinlock_lock(&slab_cache_lock);

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		s = list_get_instance(cur, slab_cache_t, link);
		if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
			continue;
		make_magcache(s);
		s->flags &= ~SLAB_CACHE_MAGDEFERRED;
	}

	spinlock_unlock(&slab_cache_lock);
}

/**************************************/
/* kalloc/kfree functions */
void * malloc(unsigned int size, int flags)
{
	int idx;

	ASSERT(_slab_initialized);
	ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));

	if (size < (1 << SLAB_MIN_MALLOC_W))
		size = (1 << SLAB_MIN_MALLOC_W);

	idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

	return slab_alloc(malloc_caches[idx], flags);
}
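
/*
 * Worked example of the index computation above, assuming
 * SLAB_MIN_MALLOC_W == 4 (i.e. the smallest cache is "malloc-16") and
 * fnzb() returning the index of the highest set bit:
 *   size = 100: fnzb(99) = 6, idx = 6 - 4 + 1 = 3  ->  "malloc-128"
 *   size = 64:  fnzb(63) = 5, idx = 5 - 4 + 1 = 2  ->  "malloc-64"
 */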


void free(void *obj)
{
	slab_t *slab;

	if (!obj)
		return;

	slab = obj2slab(obj);
	_slab_free(slab->cache, obj, slab);
}