source: mainline/kernel/generic/src/mm/slab.c@ 2d3ddad

Last change on this file since 2d3ddad was 2d3ddad, checked in by Jakub Jermar <jakub@…>, 15 years ago: "Add more *_locked() assertions."

/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Slab allocator.
 *
 * The slab allocator is closely modelled after the OpenSolaris slab
 * allocator.
 * @see http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 * @li empty slabs are deallocated immediately
 *     (in Linux they are kept in a linked list, in Solaris ???)
 * @li empty magazines are deallocated when not needed
 *     (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy
 * to add:
 * @li cache coloring
 * @li dynamic magazine growing (different magazine sizes are already
 *     supported, but we would need to adjust the allocation strategy)
 *
 * The slab allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is allocated, the allocator first checks whether it is
 * available in a CPU-bound magazine. If it is not found there, it is
 * allocated from a CPU-shared slab: if a partially full slab is found,
 * it is used, otherwise a new one is allocated.
 *
 * When an object is deallocated, it is put into a CPU-bound magazine.
 * If there is no such magazine, a new one is allocated (if the allocation
 * fails, the object is deallocated directly into its slab). If the magazine
 * is full, it is put onto the CPU-shared list of magazines and a new one
 * is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines in order to avoid
 * thrashing when somebody is allocating/deallocating one item right at the
 * magazine size boundary. LIFO order is enforced, which should avoid
 * fragmentation as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty slabs are freed immediately (thrashing is avoided thanks
 * to the magazines).
 *
 * The slab information structure is kept inside the data area, if possible.
 * The cache can be marked so that it does not use magazines. This is used
 * only for slab-related caches, to avoid deadlocks and infinite recursion
 * (the slab allocator uses itself to allocate all of its control
 * structures).
 *
 * The slab allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries a 'light reclaim' first, then a brutal reclaim. The light
 * reclaim releases slabs from the CPU-shared magazine list until at least
 * one slab is deallocated in each cache (this algorithm should probably
 * change). The brutal reclaim removes all cached objects, even those in
 * CPU-bound magazines.
 *
 * @todo
 * For better CPU scaling, the magazine allocation strategy should be
 * extended. Currently, if the cache does not have a magazine, it asks
 * the non-CPU-cached magazine cache to provide one. It might be feasible
 * to add a CPU-cached magazine cache (which would allocate its magazines
 * from the non-CPU-cached magazine cache). This would provide a nice
 * per-CPU buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which decreases contention for the single
 * per-system magazine cache.
 *
 * @todo
 * It might be good to add lock granularity even at the slab level; we
 * could then try_spinlock over all partial slabs and thus improve
 * scalability even at the slab level.
 *
 */
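
/*
 * As a quick orientation, a minimal usage sketch of the public API defined
 * in this file (the cache name and the point_t type are hypothetical, not
 * part of this file):
 *
 *	typedef struct {
 *		int x;
 *		int y;
 *	} point_t;
 *
 *	slab_cache_t *point_cache = slab_cache_create("point_t",
 *	    sizeof(point_t), 0, NULL, NULL, 0);
 *
 *	point_t *p = slab_alloc(point_cache, 0);
 *	...
 *	slab_free(point_cache, p);
 *	slab_cache_destroy(point_cache);
 */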

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <adt/list.h>
#include <memstr.h>
#include <align.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>
#include <macros.h>

IRQ_SPINLOCK_STATIC_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;

/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;

/** Cache for external slab descriptors
 *
 * This time we want a per-CPU cache, so do not make it static.
 * Using the slab allocator for internal slab structures will not
 * deadlock, as all slab structures are 'small' - the control
 * structures of their caches do not require further allocation.
 */
static slab_cache_t *slab_extern_cache;

/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];

static const char *malloc_names[] = {
	"malloc-16",
	"malloc-32",
	"malloc-64",
	"malloc-128",
	"malloc-256",
	"malloc-512",
	"malloc-1K",
	"malloc-2K",
	"malloc-4K",
	"malloc-8K",
	"malloc-16K",
	"malloc-32K",
	"malloc-64K",
	"malloc-128K",
	"malloc-256K",
	"malloc-512K",
	"malloc-1M",
	"malloc-2M",
	"malloc-4M"
};

/** Slab descriptor */
typedef struct {
	slab_cache_t *cache;  /**< Pointer to parent cache. */
	link_t link;          /**< List of full/partial slabs. */
	void *start;          /**< Start address of first available item. */
	size_t available;     /**< Count of available items in this slab. */
	size_t nextavail;     /**< The index of the next available item. */
} slab_t;

#ifdef CONFIG_DEBUG
static unsigned int _slab_initialized = 0;
#endif

/**************************************/
/* Slab allocation functions          */
/**************************************/

/** Allocate frames for slab space and initialize it
 *
 */
static slab_t *slab_space_alloc(slab_cache_t *cache, unsigned int flags)
{
	size_t zone = 0;

	void *data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
	if (!data)
		return NULL;

	slab_t *slab;
	size_t fsize;

	if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
		slab = slab_alloc(slab_extern_cache, flags);
		if (!slab) {
			frame_free(KA2PA(data));
			return NULL;
		}
	} else {
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}

	/* Fill in slab structures */
	size_t i;
	for (i = 0; i < ((size_t) 1 << cache->order); i++)
		frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone);

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;
	slab->cache = cache;

	for (i = 0; i < cache->objects; i++)
		*((size_t *) (slab->start + i * cache->size)) = i + 1;

	atomic_inc(&cache->allocated_slabs);
	return slab;
}
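
/*
 * The last loop in slab_space_alloc() builds a simple in-slab free
 * list: the first word of every free object stores the index of the
 * next free object, and slab->nextavail holds the index of the first
 * one. A sketch for a hypothetical cache with cache->size == 64 and
 * cache->objects == 3:
 *
 *	index:   0      1      2
 *	offset:  0      64     128
 *	        [1]    [2]    [3]    <- first word of each free object
 *	nextavail == 0, available == 3
 *
 * slab_obj_create() pops the head (index 0) and loads nextavail from
 * its first word; slab_obj_destroy() pushes an object back by storing
 * the old nextavail into it.
 */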

/** Deallocate space associated with slab
 *
 * @return Number of freed frames
 *
 */
static size_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free(KA2PA(slab->start));
	if (!(cache->flags & SLAB_CACHE_SLINSIDE))
		slab_free(slab_extern_cache, slab);

	atomic_dec(&cache->allocated_slabs);

	return (1 << cache->order);
}

/** Map object to slab structure */
static slab_t *obj2slab(void *obj)
{
	return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
}
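
/*
 * obj2slab() is the inverse of the frame_set_parent() calls made in
 * slab_space_alloc(): every physical frame backing a slab points back
 * at its slab_t, so any object address can be resolved to its slab
 * (and through slab->cache to its owning cache) in constant time,
 * without storing a header next to each object.
 */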

/******************/
/* Slab functions */
/******************/

/** Return object to slab and call a destructor
 *
 * @param slab If the caller knows the slab of the object, otherwise NULL
 *
 * @return Number of freed pages
 *
 */
static size_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab)
{
	if (!slab)
		slab = obj2slab(obj);

	ASSERT(slab->cache == cache);

	size_t freed = 0;

	if (cache->destructor)
		freed = cache->destructor(obj);

	spinlock_lock(&cache->slablock);
	ASSERT(slab->available < cache->objects);

	*((size_t *) obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start) / cache->size;
	slab->available++;

	/* Move it to correct list */
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		spinlock_unlock(&cache->slablock);

		return freed + slab_space_free(cache, slab);
	} else if (slab->available == 1) {
		/* It was in full, move to partial */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}

	spinlock_unlock(&cache->slablock);
	return freed;
}

/** Take new object from slab or create a new slab if needed
 *
 * @return Object address or NULL
 *
 */
static void *slab_obj_create(slab_cache_t *cache, int flags)
{
	spinlock_lock(&cache->slablock);

	slab_t *slab;

	if (list_empty(&cache->partial_slabs)) {
		/*
		 * Allow recursion and reclaiming
		 * - this should work, as the slab control structures
		 *   are small and do not need to allocate with anything
		 *   other than frame_alloc when they are allocating,
		 *   that's why we should get recursion at most 1 level deep
		 */
		spinlock_unlock(&cache->slablock);
		slab = slab_space_alloc(cache, flags);
		if (!slab)
			return NULL;

		spinlock_lock(&cache->slablock);
	} else {
		slab = list_get_instance(cache->partial_slabs.next, slab_t,
		    link);
		list_remove(&slab->link);
	}

	void *obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((size_t *) obj);
	slab->available--;

	if (!slab->available)
		list_prepend(&slab->link, &cache->full_slabs);
	else
		list_prepend(&slab->link, &cache->partial_slabs);

	spinlock_unlock(&cache->slablock);

	if ((cache->constructor) && (cache->constructor(obj, flags))) {
		/* Bad, bad, construction failed */
		slab_obj_destroy(cache, obj, slab);
		return NULL;
	}

	return obj;
}

/****************************/
/* CPU-Cache slab functions */
/****************************/

/** Find a full magazine in cache, take it from the list and return it
 *
 * @param first If true, return the first magazine, else the last.
 *
 */
static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, bool first)
{
	slab_magazine_t *mag = NULL;
	link_t *cur;

	spinlock_lock(&cache->maglock);
	if (!list_empty(&cache->magazines)) {
		if (first)
			cur = cache->magazines.next;
		else
			cur = cache->magazines.prev;

		mag = list_get_instance(cur, slab_magazine_t, link);
		list_remove(&mag->link);
		atomic_dec(&cache->magazine_counter);
	}

	spinlock_unlock(&cache->maglock);
	return mag;
}

/** Prepend magazine to the magazine list in cache
 *
 */
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
{
	spinlock_lock(&cache->maglock);

	list_prepend(&mag->link, &cache->magazines);
	atomic_inc(&cache->magazine_counter);

	spinlock_unlock(&cache->maglock);
}

/** Free all objects in magazine and free memory associated with magazine
 *
 * @return Number of freed pages
 *
 */
static size_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag)
{
	size_t i;
	size_t frames = 0;

	for (i = 0; i < mag->busy; i++) {
		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
		atomic_dec(&cache->cached_objs);
	}

	slab_free(&mag_cache, mag);

	return frames;
}

/** Find full magazine, set it as current and return it
 *
 */
static slab_magazine_t *get_full_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
	slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;

	ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock));

	if (cmag) { /* First try local CPU magazines */
		if (cmag->busy)
			return cmag;

		if ((lastmag) && (lastmag->busy)) {
			cache->mag_cache[CPU->id].current = lastmag;
			cache->mag_cache[CPU->id].last = cmag;
			return lastmag;
		}
	}

	/* Local magazines are empty, import one from the magazine list */
	slab_magazine_t *newmag = get_mag_from_cache(cache, 1);
	if (!newmag)
		return NULL;

	if (lastmag)
		magazine_destroy(cache, lastmag);

	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;

	return newmag;
}

/** Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 *
 */
static void *magazine_obj_get(slab_cache_t *cache)
{
	if (!CPU)
		return NULL;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	slab_magazine_t *mag = get_full_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return NULL;
	}

	void *obj = mag->objs[--mag->busy];
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);

	atomic_dec(&cache->cached_objs);

	return obj;
}

/** Ensure that the current magazine is not full, return pointer to it,
 * or NULL if no magazine with free space is available and a new one
 * cannot be allocated
 *
 * We have two magazines bound to each processor.
 * First try the current one.
 * If it is full, try the last one.
 * If that is full too, put it onto the magazines list.
 *
 */
static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
	slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;

	ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock));

	if (cmag) {
		if (cmag->busy < cmag->size)
			return cmag;

		if ((lastmag) && (lastmag->busy < lastmag->size)) {
			cache->mag_cache[CPU->id].last = cmag;
			cache->mag_cache[CPU->id].current = lastmag;
			return lastmag;
		}
	}

	/* Both current and last are full or nonexistent, allocate a new one */

	/*
	 * We do not want to sleep just because of caching,
	 * especially we do not want reclaiming to start, as
	 * this would deadlock.
	 */
	slab_magazine_t *newmag = slab_alloc(&mag_cache,
	    FRAME_ATOMIC | FRAME_NO_RECLAIM);
	if (!newmag)
		return NULL;

	newmag->size = SLAB_MAG_SIZE;
	newmag->busy = 0;

	/* Flush last to magazine list */
	if (lastmag)
		put_mag_to_cache(cache, lastmag);

	/* Move current as last, save new as current */
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;

	return newmag;
}

/** Put object into CPU-cache magazine
 *
 * @return 0 on success, -1 on no memory
 *
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	if (!CPU)
		return -1;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	slab_magazine_t *mag = make_empty_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return -1;
	}

	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);

	atomic_inc(&cache->cached_objs);

	return 0;
}
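
/*
 * Why a pair of magazines per CPU? A sketch of the pathological pattern
 * the pair is designed for, assuming a magazine size of 4 and alloc/free
 * calls alternating exactly at the magazine boundary:
 *
 *	With a single magazine, slab_free() on a full magazine would push
 *	it to the CPU-shared list and slab_alloc() right after would pull
 *	one back - two shared-list operations per call pair.
 *
 *	With the pair: current = [4/4 full], last = [3/4]
 *	  slab_free()  -> make_empty_current_mag() swaps current and last,
 *	                  stores into [3/4] -> [4/4]
 *	  slab_alloc() -> get_full_current_mag() takes from current,
 *	                  [4/4] -> [3/4]
 *
 *	All subsequent pairs are satisfied by swapping the two CPU-local
 *	magazines, never touching the shared list.
 */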

/************************/
/* Slab cache functions */
/************************/

/** Return number of objects that fit into a slab of the given cache
 *
 */
static size_t comp_objects(slab_cache_t *cache)
{
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		return ((PAGE_SIZE << cache->order)
		    - sizeof(slab_t)) / cache->size;
	else
		return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab
 *
 */
static size_t badness(slab_cache_t *cache)
{
	size_t objects = comp_objects(cache);
	size_t ssize = PAGE_SIZE << cache->order;

	if (cache->flags & SLAB_CACHE_SLINSIDE)
		ssize -= sizeof(slab_t);

	return ssize - objects * cache->size;
}
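
/*
 * A worked example of the two helpers above, assuming PAGE_SIZE == 4096,
 * sizeof(slab_t) == 48 and a cache with size == 96, order == 0 and the
 * slab descriptor kept inside the slab (SLAB_CACHE_SLINSIDE):
 *
 *	comp_objects: (4096 - 48) / 96 = 42 objects per slab
 *	badness:      (4096 - 48) - 42 * 96 = 16 bytes wasted per slab
 *
 * _slab_cache_create() below keeps increasing cache->order while
 * badness(cache) > SLAB_MAX_BADNESS(cache).
 */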

/** Initialize mag_cache structure in slab cache
 *
 */
static bool make_magcache(slab_cache_t *cache)
{
	ASSERT(_slab_initialized >= 2);

	cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,
	    FRAME_ATOMIC);
	if (!cache->mag_cache)
		return false;

	size_t i;
	for (i = 0; i < config.cpu_count; i++) {
		memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
		spinlock_initialize(&cache->mag_cache[i].lock,
		    "slab.cache.mag_cache[].lock");
	}

	return true;
}

/** Initialize allocated memory as a slab cache
 *
 */
static void _slab_cache_create(slab_cache_t *cache, const char *name,
    size_t size, size_t align, int (*constructor)(void *obj,
    unsigned int kmflag), size_t (*destructor)(void *obj), unsigned int flags)
{
	memsetb(cache, sizeof(*cache), 0);
	cache->name = name;

	if (align < sizeof(unative_t))
		align = sizeof(unative_t);

	size = ALIGN_UP(size, align);

	cache->size = size;
	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);

	spinlock_initialize(&cache->slablock, "slab.cache.slablock");
	spinlock_initialize(&cache->maglock, "slab.cache.maglock");

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		(void) make_magcache(cache);

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order */
	size_t pages = SIZE2FRAMES(cache->size);

	/* We need 2^order >= pages */
	if (pages == 1)
		cache->order = 0;
	else
		cache->order = fnzb(pages - 1) + 1;

	while (badness(cache) > SLAB_MAX_BADNESS(cache))
		cache->order += 1;

	cache->objects = comp_objects(cache);

	/* If info fits in, put it inside */
	if (badness(cache) > sizeof(slab_t))
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Add cache to cache list */
	irq_spinlock_lock(&slab_cache_lock, true);
	list_append(&cache->link, &slab_cache_list);
	irq_spinlock_unlock(&slab_cache_lock, true);
}
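
/*
 * A sketch of the order computation above, assuming PAGE_SIZE == 4096.
 * fnzb() yields the position of the most significant set bit, so
 * fnzb(pages - 1) + 1 is the ceiling of log2(pages):
 *
 *	size ==  2000 -> pages == 1 -> order 0 (one 4K frame)
 *	size ==  5000 -> pages == 2 -> fnzb(1) + 1 == 1 -> order 1 (8K)
 *	size == 20000 -> pages == 5 -> fnzb(4) + 1 == 3 -> order 3 (32K)
 *
 * The badness() loop may then raise the order further to keep the
 * per-slab waste below SLAB_MAX_BADNESS(cache).
 */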

/** Create slab cache
 *
 */
slab_cache_t *slab_cache_create(const char *name, size_t size, size_t align,
    int (*constructor)(void *obj, unsigned int kmflag),
    size_t (*destructor)(void *obj), unsigned int flags)
{
	slab_cache_t *cache = slab_alloc(&slab_cache_cache, 0);
	_slab_cache_create(cache, name, size, align, constructor, destructor,
	    flags);

	return cache;
}

/** Reclaim space occupied by objects that are already free
 *
 * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
 *
 * @return Number of freed pages
 *
 */
static size_t _slab_reclaim(slab_cache_t *cache, unsigned int flags)
{
	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/*
	 * We count up to the original magazine count to avoid
	 * an endless loop
	 */
	atomic_count_t magcount = atomic_get(&cache->magazine_counter);

	slab_magazine_t *mag;
	size_t frames = 0;

	while ((magcount--) && (mag = get_mag_from_cache(cache, 0))) {
		frames += magazine_destroy(cache, mag);
		if ((!(flags & SLAB_RECLAIM_ALL)) && (frames))
			break;
	}

	if (flags & SLAB_RECLAIM_ALL) {
		/* Destroy CPU-bound magazines */
		size_t i;
		for (i = 0; i < config.cpu_count; i++) {
			spinlock_lock(&cache->mag_cache[i].lock);

			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;

			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;

			spinlock_unlock(&cache->mag_cache[i].lock);
		}
	}

	return frames;
}

/** Check that there are no slabs and remove cache from system
 *
 */
void slab_cache_destroy(slab_cache_t *cache)
{
	/*
	 * First remove the cache from the cache list, so that we do not
	 * need to disable interrupts later.
	 */
	irq_spinlock_lock(&slab_cache_lock, true);
	list_remove(&cache->link);
	irq_spinlock_unlock(&slab_cache_lock, true);

	/*
	 * Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it.
	 */

	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if ((!list_empty(&cache->full_slabs)) ||
	    (!list_empty(&cache->partial_slabs)))
		panic("Destroying cache that is not empty.");

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		free(cache->mag_cache);

	slab_free(&slab_cache_cache, cache);
}

/** Allocate new object from cache - if no flags given, always returns memory
 *
 */
void *slab_alloc(slab_cache_t *cache, unsigned int flags)
{
	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl_t ipl = interrupts_disable();

	void *result = NULL;

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		result = magazine_obj_get(cache);

	if (!result)
		result = slab_obj_create(cache, flags);

	interrupts_restore(ipl);

	if (result)
		atomic_inc(&cache->allocated_objs);

	return result;
}

/** Return object to cache, use slab if known
 *
 */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
	ipl_t ipl = interrupts_disable();

	if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
	    (magazine_obj_put(cache, obj)))
		slab_obj_destroy(cache, obj, slab);

	interrupts_restore(ipl);
	atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache
 *
 */
void slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache, obj, NULL);
}

/** Go through all caches and reclaim what is possible
 *
 * Interrupts must be disabled before calling this function,
 * otherwise memory allocation from interrupts can deadlock.
 *
 */
size_t slab_reclaim(unsigned int flags)
{
	irq_spinlock_lock(&slab_cache_lock, false);

	size_t frames = 0;
	link_t *cur;
	for (cur = slab_cache_list.next; cur != &slab_cache_list;
	    cur = cur->next) {
		slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	irq_spinlock_unlock(&slab_cache_lock, false);

	return frames;
}

/** Print list of slabs
 *
 */
void slab_print_list(void)
{
	printf("slab name            size  pages   obj/pg  slabs cached allocated"
	    " ctl\n");
	printf("---------------- -------- ------ -------- ------ ------ ---------"
	    " ---\n");

	size_t skip = 0;
	while (true) {
		/*
		 * We must not hold the slab_cache_lock spinlock when printing
		 * the statistics. Otherwise we can easily deadlock if the
		 * print needs to allocate memory.
		 *
		 * Therefore, we walk through the slab cache list, skipping
		 * some amount of already processed caches during each
		 * iteration and gathering statistics about the first
		 * unprocessed cache. For the sake of printing the statistics,
		 * we release the slab_cache_lock and reacquire it afterwards.
		 * Then the walk starts again.
		 *
		 * This limits both the efficiency and also the accuracy of
		 * the obtained statistics. The efficiency is decreased
		 * because the time complexity of the algorithm is quadratic
		 * instead of linear. The accuracy is impacted because we drop
		 * the lock after processing one cache. If there is someone
		 * else manipulating the cache list, we might omit an
		 * arbitrary number of caches or process one cache multiple
		 * times. This is acceptable, however, because the output is
		 * only statistics.
		 */

		irq_spinlock_lock(&slab_cache_lock, true);

		link_t *cur;
		size_t i;
		for (i = 0, cur = slab_cache_list.next;
		    (i < skip) && (cur != &slab_cache_list);
		    i++, cur = cur->next);

		if (cur == &slab_cache_list) {
			irq_spinlock_unlock(&slab_cache_lock, true);
			break;
		}

		skip++;

		slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);

		const char *name = cache->name;
		uint8_t order = cache->order;
		size_t size = cache->size;
		size_t objects = cache->objects;
		long allocated_slabs = atomic_get(&cache->allocated_slabs);
		long cached_objs = atomic_get(&cache->cached_objs);
		long allocated_objs = atomic_get(&cache->allocated_objs);
		unsigned int flags = cache->flags;

		irq_spinlock_unlock(&slab_cache_lock, true);

		printf("%-16s %8" PRIs " %6u %8" PRIs " %6ld %6ld %9ld %-3s\n",
		    name, size, (1 << order), objects, allocated_slabs,
		    cached_objs, allocated_objs,
		    flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
	}
}

void slab_cache_init(void)
{
	/* Initialize magazine cache */
	_slab_cache_create(&mag_cache, "slab_magazine",
	    sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void *),
	    sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE |
	    SLAB_CACHE_SLINSIDE);

	/* Initialize slab_cache cache */
	_slab_cache_create(&slab_cache_cache, "slab_cache",
	    sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL,
	    SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);

	/* Initialize external slab cache */
	slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0,
	    NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

	/* Initialize structures for malloc */
	size_t i;
	size_t size;

	for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
	    i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
	    i++, size <<= 1) {
		malloc_caches[i] = slab_cache_create(malloc_names[i], size, 0,
		    NULL, NULL, SLAB_CACHE_MAGDEFERRED);
	}

#ifdef CONFIG_DEBUG
	_slab_initialized = 1;
#endif
}

/** Enable cpu_cache
 *
 * The kernel calls this function when it knows the real number of
 * processors. It allocates the per-CPU magazine caches and enables
 * them on all existing caches that are SLAB_CACHE_MAGDEFERRED.
 *
 */
void slab_enable_cpucache(void)
{
#ifdef CONFIG_DEBUG
	_slab_initialized = 2;
#endif

	irq_spinlock_lock(&slab_cache_lock, false);

	link_t *cur;
	for (cur = slab_cache_list.next; cur != &slab_cache_list;
	    cur = cur->next) {
		slab_cache_t *slab = list_get_instance(cur, slab_cache_t, link);
		if ((slab->flags & SLAB_CACHE_MAGDEFERRED) !=
		    SLAB_CACHE_MAGDEFERRED)
			continue;

		(void) make_magcache(slab);
		slab->flags &= ~SLAB_CACHE_MAGDEFERRED;
	}

	irq_spinlock_unlock(&slab_cache_lock, false);
}

void *malloc(size_t size, unsigned int flags)
{
	ASSERT(_slab_initialized);
	ASSERT(size <= (1 << SLAB_MAX_MALLOC_W));

	if (size < (1 << SLAB_MIN_MALLOC_W))
		size = (1 << SLAB_MIN_MALLOC_W);

	uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

	return slab_alloc(malloc_caches[idx], flags);
}
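
/*
 * A worked example of the index computation above, assuming
 * SLAB_MIN_MALLOC_W == 4 (i.e. the smallest cache is "malloc-16"):
 *
 *	size == 100: fnzb(99) == 6, idx == 6 - 4 + 1 == 3 -> malloc-128
 *	size == 128: fnzb(127) == 6, idx == 3             -> malloc-128
 *	size == 129: fnzb(128) == 7, idx == 4             -> malloc-256
 *
 * In other words, fnzb(size - 1) + 1 rounds the request up to the next
 * power of two and selects the matching malloc cache.
 */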

void *realloc(void *ptr, size_t size, unsigned int flags)
{
	ASSERT(_slab_initialized);
	ASSERT(size <= (1 << SLAB_MAX_MALLOC_W));

	void *new_ptr;

	if (size > 0) {
		if (size < (1 << SLAB_MIN_MALLOC_W))
			size = (1 << SLAB_MIN_MALLOC_W);
		uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

		new_ptr = slab_alloc(malloc_caches[idx], flags);
	} else
		new_ptr = NULL;

	if ((new_ptr != NULL) && (ptr != NULL)) {
		slab_t *slab = obj2slab(ptr);
		memcpy(new_ptr, ptr, min(size, slab->cache->size));
	}

	if (ptr != NULL)
		free(ptr);

	return new_ptr;
}
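
/*
 * Note that the copy length in realloc() is bounded by the source
 * object's cache size, not by the requested size alone. A short usage
 * sketch (the cache sizes assume SLAB_MIN_MALLOC_W == 4 as above):
 *
 *	char *buf = malloc(100, 0);   // served from malloc-128
 *	buf = realloc(buf, 200, 0);   // new object from malloc-256,
 *	                              //   min(200, 128) == 128 bytes copied
 *	realloc(buf, 0, 0);           // size == 0 frees the object
 */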

void free(void *ptr)
{
	if (!ptr)
		return;

	slab_t *slab = obj2slab(ptr);
	_slab_free(slab->cache, ptr, slab);
}

/** @}
 */