source: mainline/kernel/generic/src/mm/slab.c@ cd3b380

Last change on this file since cd3b380 was cd3b380, checked in by Martin Decky <martin@…>, 12 years ago

due to the removal of FRAME_KA, the return value of frame_alloc*() needs to be checked before converting the physical address to kernel address

[4e147a6]1/*
[df4ed85]2 * Copyright (c) 2006 Ondrej Palkovsky
[4e147a6]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[cc73a8a1]29/** @addtogroup genericmm
[b45c443]30 * @{
31 */
32
[9179d0a]33/**
[b45c443]34 * @file
[da1bafb]35 * @brief Slab allocator.
[9179d0a]36 *
 37 * The slab allocator is closely modelled after the OpenSolaris slab allocator.
38 * @see http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
[fb10289b]39 *
40 * with the following exceptions:
[9179d0a]41 * @li empty slabs are deallocated immediately
[fb10289b]42 * (in Linux they are kept in linked list, in Solaris ???)
[9179d0a]43 * @li empty magazines are deallocated when not needed
[fb10289b]44 * (in Solaris they are held in linked list in slab cache)
45 *
[9179d0a]46 * The following features are not currently supported but would be easy to add:
47 * @li cache coloring
48 * @li dynamic magazine growing (different magazine sizes are already
[5b04fc7]49 * supported, but we would need to adjust allocation strategy)
[fb10289b]50 *
[9179d0a]51 * The slab allocator supports per-CPU caches ('magazines') to facilitate
[da1bafb]52 * good SMP scaling.
[fb10289b]53 *
 54 * When a new object is being allocated, it is first checked whether it is
[7669bcf]55 * available in a CPU-bound magazine. If it is not found there, it is
56 * allocated from a CPU-shared slab - if a partially full one is found,
57 * it is used, otherwise a new one is allocated.
[fb10289b]58 *
[7669bcf]59 * When an object is being deallocated, it is put into a CPU-bound magazine.
 60 * If there is no such magazine, a new one is allocated (if this fails,
[9179d0a]61 * the object is deallocated into its slab). If the magazine is full, it is
[7669bcf]62 * put into the CPU-shared list of magazines and a new one is allocated.
[fb10289b]63 *
[7669bcf]64 * The CPU-bound magazine is actually a pair of magazines in order to avoid
[fb10289b]65 * thrashing when somebody is allocating/deallocating 1 item at the magazine
66 * size boundary. LIFO order is enforced, which should avoid fragmentation
[da1bafb]67 * as much as possible.
68 *
[7669bcf]69 * Every cache contains a list of full slabs and a list of partially full slabs.
[9179d0a]70 * Empty slabs are immediately freed (thrashing will be avoided because
[da1bafb]71 * of magazines).
[fb10289b]72 *
[9179d0a]73 * The slab information structure is kept inside the data area, if possible.
[fb10289b]74 * The cache can be marked not to use magazines. This is used
[9179d0a]75 * only for slab-related caches to avoid deadlocks and infinite recursion
 76 * (the slab allocator uses itself for allocating all its control structures).
[fb10289b]77 *
[7669bcf]78 * The slab allocator allocates a lot of space and does not free it. When
79 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
[fb10289b]80 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
81 * releases slabs from cpu-shared magazine-list, until at least 1 slab
82 * is deallocated in each cache (this algorithm should probably change).
83 * The brutal reclaim removes all cached objects, even from CPU-bound
84 * magazines.
85 *
[cc73a8a1]86 * @todo
[9179d0a]87 * For better CPU-scaling the magazine allocation strategy should
[10e16a7]88 * be extended. Currently, if the cache does not have a magazine, it asks
 89 * the non-CPU-cached magazine cache to provide one. It might be feasible
 90 * to add a CPU-cached magazine cache (which would allocate its magazines
91 * from non-cpu-cached mag. cache). This would provide a nice per-cpu
92 * buffer. The other possibility is to use the per-cache
93 * 'empty-magazine-list', which decreases competing for 1 per-system
94 * magazine cache.
95 *
[cc73a8a1]96 * @todo
[da1bafb]97 * It might be good to add lock granularity even at the slab level;
[cc73a8a1]98 * we could then try_spinlock over all partial slabs and thus improve
[da1bafb]99 * scalability even at the slab level.
100 *
[fb10289b]101 */
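
/*
 * A minimal usage sketch of the public interface defined below (illustrative
 * only; the cache name, object layout and flag choices are arbitrary example
 * assumptions, not part of this file):
 *
 * @code
 * typedef struct {
 *	link_t link;
 *	uint32_t id;
 * } example_obj_t;
 *
 * static slab_cache_t *example_cache;
 *
 * static void example_init(void)
 * {
 *	// Default alignment, no constructor/destructor, default flags
 *	example_cache = slab_cache_create("example_obj_t",
 *	    sizeof(example_obj_t), 0, NULL, NULL, 0);
 * }
 *
 * static void example_use(void)
 * {
 *	// Allocation first tries the CPU-bound magazines and only then
 *	// falls back to a partial (or newly allocated) slab.
 *	example_obj_t *obj = slab_alloc(example_cache, FRAME_ATOMIC);
 *	if (obj) {
 *		obj->id = 42;
 *		// Freeing puts the object into a CPU-bound magazine, or
 *		// back into its slab if no magazine can be used.
 *		slab_free(example_cache, obj);
 *	}
 * }
 * @endcode
 */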
102
[4e147a6]103#include <synch/spinlock.h>
104#include <mm/slab.h>
[5c9a08b]105#include <adt/list.h>
[4e147a6]106#include <memstr.h>
107#include <align.h>
[a294ad0]108#include <mm/frame.h>
[4e147a6]109#include <config.h>
110#include <print.h>
111#include <arch.h>
112#include <panic.h>
[a294ad0]113#include <debug.h>
[c352c2e]114#include <bitops.h>
[ce8aed1]115#include <macros.h>
[4e147a6]116
[da1bafb]117IRQ_SPINLOCK_STATIC_INITIALIZE(slab_cache_lock);
[fb10289b]118static LIST_INITIALIZE(slab_cache_list);
119
120/** Magazine cache */
121static slab_cache_t mag_cache;
[da1bafb]122
[fb10289b]123/** Cache for cache descriptors */
124static slab_cache_t slab_cache_cache;
[da1bafb]125
[fb10289b]126/** Cache for external slab descriptors
127 * This time we want per-cpu cache, so do not make it static
[9179d0a]128 * - using slab for internal slab structures will not deadlock,
[fb10289b]129 * as all slab structures are 'small' - control structures of
130 * their caches do not require further allocation
131 */
132static slab_cache_t *slab_extern_cache;
[da1bafb]133
[c352c2e]134/** Caches for malloc */
[ce8aed1]135static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
[da1bafb]136
[a000878c]137static const char *malloc_names[] = {
[ce8aed1]138 "malloc-16",
139 "malloc-32",
140 "malloc-64",
141 "malloc-128",
142 "malloc-256",
143 "malloc-512",
144 "malloc-1K",
145 "malloc-2K",
146 "malloc-4K",
147 "malloc-8K",
148 "malloc-16K",
149 "malloc-32K",
150 "malloc-64K",
151 "malloc-128K",
[c3ebc47]152 "malloc-256K",
153 "malloc-512K",
154 "malloc-1M",
155 "malloc-2M",
156 "malloc-4M"
[c352c2e]157};
[a294ad0]158
[fb10289b]159/** Slab descriptor */
[a294ad0]160typedef struct {
[da1bafb]161 slab_cache_t *cache; /**< Pointer to parent cache. */
162 link_t link; /**< List of full/partial slabs. */
163 void *start; /**< Start address of first available item. */
164 size_t available; /**< Count of available items in this slab. */
165 size_t nextavail; /**< The index of next available item. */
[ce8aed1]166} slab_t;
[a294ad0]167
[214f5bb]168#ifdef CONFIG_DEBUG
[da1bafb]169static unsigned int _slab_initialized = 0;
[214f5bb]170#endif
171
[a294ad0]172/**************************************/
[9179d0a]173/* Slab allocation functions */
[da1bafb]174/**************************************/
[a294ad0]175
[da1bafb]176/** Allocate frames for slab space and initialize
[a294ad0]177 *
178 */
[7a0359b]179NO_TRACE static slab_t *slab_space_alloc(slab_cache_t *cache,
180 unsigned int flags)
[a294ad0]181{
[98000fb]182 size_t zone = 0;
[085d973]183
[cd3b380]184 uintptr_t data_phys =
185 frame_alloc_generic(cache->frames, flags, 0, &zone);
186 if (!data_phys)
[a294ad0]187 return NULL;
[da1bafb]188
[cd3b380]189 void *data = (void *) PA2KA(data_phys);
190
[da1bafb]191 slab_t *slab;
192 size_t fsize;
193
[46c1234]194 if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
[fb10289b]195 slab = slab_alloc(slab_extern_cache, flags);
[a294ad0]196 if (!slab) {
[5df1963]197 frame_free(KA2PA(data), cache->frames);
[a294ad0]198 return NULL;
199 }
200 } else {
[b0c2075]201 fsize = FRAMES2SIZE(cache->frames);
[a294ad0]202 slab = data + fsize - sizeof(*slab);
203 }
[e3c762cd]204
[a294ad0]205 /* Fill in slab structures */
[da1bafb]206 size_t i;
[b0c2075]207 for (i = 0; i < cache->frames; i++)
[6c441cf8]208 frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone);
[da1bafb]209
[a294ad0]210 slab->start = data;
211 slab->available = cache->objects;
212 slab->nextavail = 0;
[4a5b2b0e]213 slab->cache = cache;
[da1bafb]214
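	/*
	 * Build the in-slab free list: each free object slot stores the index
	 * of the next free object, so slab->nextavail together with these
	 * indices forms a singly-linked list threaded through the free slots.
	 */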
[6c441cf8]215 for (i = 0; i < cache->objects; i++)
[da1bafb]216 *((size_t *) (slab->start + i * cache->size)) = i + 1;
217
[bc504ef2]218 atomic_inc(&cache->allocated_slabs);
[a294ad0]219 return slab;
220}
221
[da1bafb]222/** Deallocate space associated with slab
[a294ad0]223 *
224 * @return number of freed frames
[da1bafb]225 *
[a294ad0]226 */
[7a0359b]227NO_TRACE static size_t slab_space_free(slab_cache_t *cache, slab_t *slab)
[a294ad0]228{
[5df1963]229 frame_free(KA2PA(slab->start), slab->cache->frames);
[da1bafb]230 if (!(cache->flags & SLAB_CACHE_SLINSIDE))
[fb10289b]231 slab_free(slab_extern_cache, slab);
[da1bafb]232
[bc504ef2]233 atomic_dec(&cache->allocated_slabs);
234
[b0c2075]235 return cache->frames;
[a294ad0]236}
237
238/** Map object to slab structure */
[7a0359b]239NO_TRACE static slab_t *obj2slab(void *obj)
[a294ad0]240{
[ce8aed1]241 return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
[a294ad0]242}
243
[da1bafb]244/******************/
[9179d0a]245/* Slab functions */
[da1bafb]246/******************/
[4e147a6]247
[da1bafb]248/** Return object to slab and call a destructor
[4e147a6]249 *
[a294ad0]250 * @param slab The slab of the object, if known directly by the caller, otherwise NULL
251 *
[4e147a6]252 * @return Number of freed pages
[da1bafb]253 *
[4e147a6]254 */
[7a0359b]255NO_TRACE static size_t slab_obj_destroy(slab_cache_t *cache, void *obj,
256 slab_t *slab)
[4e147a6]257{
[a294ad0]258 if (!slab)
259 slab = obj2slab(obj);
[da1bafb]260
[4a5b2b0e]261 ASSERT(slab->cache == cache);
[da1bafb]262
263 size_t freed = 0;
264
[266294a9]265 if (cache->destructor)
266 freed = cache->destructor(obj);
267
[ddb56be]268 irq_spinlock_lock(&cache->slablock, true);
[8e1ea655]269 ASSERT(slab->available < cache->objects);
[da1bafb]270
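	/*
	 * Push the object back onto the in-slab free list: store the old head
	 * index in the freed slot and make this slot the new head.
	 */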
271 *((size_t *) obj) = slab->nextavail;
[46c1234]272 slab->nextavail = (obj - slab->start) / cache->size;
[a294ad0]273 slab->available++;
[da1bafb]274
[a294ad0]275 /* Move it to correct list */
276 if (slab->available == cache->objects) {
277 /* Free associated memory */
278 list_remove(&slab->link);
[ddb56be]279 irq_spinlock_unlock(&cache->slablock, true);
[da1bafb]280
[266294a9]281 return freed + slab_space_free(cache, slab);
[e72b0a3]282 } else if (slab->available == 1) {
283 /* It was in full, move to partial */
284 list_remove(&slab->link);
285 list_prepend(&slab->link, &cache->partial_slabs);
[a294ad0]286 }
[da1bafb]287
[ddb56be]288 irq_spinlock_unlock(&cache->slablock, true);
[266294a9]289 return freed;
[a294ad0]290}
[4e147a6]291
[da1bafb]292/** Take a new object from a slab or create a new slab if needed
[4e147a6]293 *
 294 * @return Object address or NULL
[da1bafb]295 *
[4e147a6]296 */
[7a0359b]297NO_TRACE static void *slab_obj_create(slab_cache_t *cache, unsigned int flags)
[4e147a6]298{
[ddb56be]299 irq_spinlock_lock(&cache->slablock, true);
[da1bafb]300
301 slab_t *slab;
302
[a294ad0]303 if (list_empty(&cache->partial_slabs)) {
[da1bafb]304 /*
 305 * Allow recursion and reclaiming
[9179d0a]306 * - this should work, as the slab control structures
[e3c762cd]307 * are small and do not need anything other than
 308 * frame_alloc when they are being allocated;
[a294ad0]309 * that is why recursion is at most one level deep.
[da1bafb]310 *
[a294ad0]311 */
[ddb56be]312 irq_spinlock_unlock(&cache->slablock, true);
[a294ad0]313 slab = slab_space_alloc(cache, flags);
[428aabf]314 if (!slab)
[e72b0a3]315 return NULL;
[da1bafb]316
[ddb56be]317 irq_spinlock_lock(&cache->slablock, true);
[a294ad0]318 } else {
[55b77d9]319 slab = list_get_instance(list_first(&cache->partial_slabs),
320 slab_t, link);
[a294ad0]321 list_remove(&slab->link);
322 }
[da1bafb]323
324 void *obj = slab->start + slab->nextavail * cache->size;
325 slab->nextavail = *((size_t *) obj);
[a294ad0]326 slab->available--;
[da1bafb]327
[f3272e98]328 if (!slab->available)
[bc504ef2]329 list_prepend(&slab->link, &cache->full_slabs);
[a294ad0]330 else
[bc504ef2]331 list_prepend(&slab->link, &cache->partial_slabs);
[da1bafb]332
[ddb56be]333 irq_spinlock_unlock(&cache->slablock, true);
[da1bafb]334
335 if ((cache->constructor) && (cache->constructor(obj, flags))) {
[266294a9]336 /* Bad, bad, construction failed */
337 slab_obj_destroy(cache, obj, slab);
338 return NULL;
339 }
[da1bafb]340
[a294ad0]341 return obj;
[4e147a6]342}
343
[da1bafb]344/****************************/
[4e147a6]345/* CPU-Cache slab functions */
[da1bafb]346/****************************/
[4e147a6]347
[da1bafb]348/** Find a full magazine in the cache, remove it from the list and return it
 349 *
 350 * @param first If true, return the first magazine, otherwise the last one.
[5158549]351 *
352 */
[7a0359b]353NO_TRACE static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache,
354 bool first)
[5158549]355{
356 slab_magazine_t *mag = NULL;
357 link_t *cur;
[da1bafb]358
[4d194be]359 irq_spinlock_lock(&cache->maglock, true);
[5158549]360 if (!list_empty(&cache->magazines)) {
361 if (first)
[55b77d9]362 cur = list_first(&cache->magazines);
[5158549]363 else
[55b77d9]364 cur = list_last(&cache->magazines);
[da1bafb]365
[5158549]366 mag = list_get_instance(cur, slab_magazine_t, link);
367 list_remove(&mag->link);
368 atomic_dec(&cache->magazine_counter);
369 }
[4d194be]370 irq_spinlock_unlock(&cache->maglock, true);
[25ebfbd]371
[5158549]372 return mag;
373}
374
[da1bafb]375/** Prepend magazine to magazine list in cache
376 *
377 */
[7a0359b]378NO_TRACE static void put_mag_to_cache(slab_cache_t *cache,
379 slab_magazine_t *mag)
[5158549]380{
[4d194be]381 irq_spinlock_lock(&cache->maglock, true);
[da1bafb]382
[5158549]383 list_prepend(&mag->link, &cache->magazines);
384 atomic_inc(&cache->magazine_counter);
385
[4d194be]386 irq_spinlock_unlock(&cache->maglock, true);
[5158549]387}
388
[da1bafb]389/** Free all objects in magazine and free memory associated with magazine
[4e147a6]390 *
391 * @return Number of freed pages
[da1bafb]392 *
[4e147a6]393 */
[7a0359b]394NO_TRACE static size_t magazine_destroy(slab_cache_t *cache,
395 slab_magazine_t *mag)
[4e147a6]396{
[da1bafb]397 size_t i;
[98000fb]398 size_t frames = 0;
[da1bafb]399
[6c441cf8]400 for (i = 0; i < mag->busy; i++) {
[a294ad0]401 frames += slab_obj_destroy(cache, mag->objs[i], NULL);
[4a5b2b0e]402 atomic_dec(&cache->cached_objs);
403 }
[4e147a6]404
405 slab_free(&mag_cache, mag);
[da1bafb]406
[4e147a6]407 return frames;
408}
409
[da1bafb]410/** Find full magazine, set it as current and return it
411 *
[fb10289b]412 */
[7a0359b]413NO_TRACE static slab_magazine_t *get_full_current_mag(slab_cache_t *cache)
[fb10289b]414{
[da1bafb]415 slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
416 slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
[7a0359b]417
[25ebfbd]418 ASSERT(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock));
[da1bafb]419
[fb10289b]420 if (cmag) { /* First try local CPU magazines */
421 if (cmag->busy)
422 return cmag;
[da1bafb]423
424 if ((lastmag) && (lastmag->busy)) {
[fb10289b]425 cache->mag_cache[CPU->id].current = lastmag;
426 cache->mag_cache[CPU->id].last = cmag;
427 return lastmag;
428 }
429 }
[da1bafb]430
[fb10289b]431 /* Local magazines are empty, import one from magazine list */
[da1bafb]432 slab_magazine_t *newmag = get_mag_from_cache(cache, 1);
[5158549]433 if (!newmag)
[fb10289b]434 return NULL;
[da1bafb]435
[fb10289b]436 if (lastmag)
[5158549]437 magazine_destroy(cache, lastmag);
[da1bafb]438
[fb10289b]439 cache->mag_cache[CPU->id].last = cmag;
440 cache->mag_cache[CPU->id].current = newmag;
[da1bafb]441
[fb10289b]442 return newmag;
443}
444
[da1bafb]445/** Try to find object in CPU-cache magazines
[4e147a6]446 *
447 * @return Pointer to object or NULL if not available
[da1bafb]448 *
[4e147a6]449 */
[7a0359b]450NO_TRACE static void *magazine_obj_get(slab_cache_t *cache)
[4e147a6]451{
[81e52f2a]452 if (!CPU)
453 return NULL;
[da1bafb]454
[25ebfbd]455 irq_spinlock_lock(&cache->mag_cache[CPU->id].lock, true);
[da1bafb]456
457 slab_magazine_t *mag = get_full_current_mag(cache);
[fb10289b]458 if (!mag) {
[25ebfbd]459 irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
[fb10289b]460 return NULL;
[4e147a6]461 }
[da1bafb]462
463 void *obj = mag->objs[--mag->busy];
[25ebfbd]464 irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
[da1bafb]465
[4a5b2b0e]466 atomic_dec(&cache->cached_objs);
467
468 return obj;
[4e147a6]469}
470
[da1bafb]471/** Assure that the current magazine is not full, return pointer to it,
 472 * or NULL if no such magazine is available and a new one cannot be allocated
[4e147a6]473 *
[da1bafb]474 * We have 2 magazines bound to each processor.
 475 * First try the current one.
 476 * If it is full, try the last one.
 477 * If that is full too, put the last one to the magazine list and allocate a new one.
[4e147a6]478 *
[086a600]479 */
[7a0359b]480NO_TRACE static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache)
[086a600]481{
[da1bafb]482 slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
483 slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
484
[25ebfbd]485 ASSERT(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock));
[7a0359b]486
[086a600]487 if (cmag) {
488 if (cmag->busy < cmag->size)
489 return cmag;
[da1bafb]490
491 if ((lastmag) && (lastmag->busy < lastmag->size)) {
[086a600]492 cache->mag_cache[CPU->id].last = cmag;
493 cache->mag_cache[CPU->id].current = lastmag;
494 return lastmag;
495 }
496 }
[da1bafb]497
[086a600]498 /* current | last are full | nonexistent, allocate new */
[da1bafb]499
500 /*
501 * We do not want to sleep just because of caching,
502 * especially we do not want reclaiming to start, as
503 * this would deadlock.
504 *
505 */
506 slab_magazine_t *newmag = slab_alloc(&mag_cache,
507 FRAME_ATOMIC | FRAME_NO_RECLAIM);
[086a600]508 if (!newmag)
509 return NULL;
[da1bafb]510
[086a600]511 newmag->size = SLAB_MAG_SIZE;
512 newmag->busy = 0;
[da1bafb]513
[086a600]514 /* Flush last to magazine list */
[5158549]515 if (lastmag)
516 put_mag_to_cache(cache, lastmag);
[da1bafb]517
[086a600]518 /* Move current as last, save new as current */
[da1bafb]519 cache->mag_cache[CPU->id].last = cmag;
520 cache->mag_cache[CPU->id].current = newmag;
521
[086a600]522 return newmag;
523}
524
[da1bafb]525/** Put object into CPU-cache magazine
526 *
527 * @return 0 on success, -1 on no memory
[086a600]528 *
[4e147a6]529 */
[7a0359b]530NO_TRACE static int magazine_obj_put(slab_cache_t *cache, void *obj)
[4e147a6]531{
[81e52f2a]532 if (!CPU)
533 return -1;
[da1bafb]534
[25ebfbd]535 irq_spinlock_lock(&cache->mag_cache[CPU->id].lock, true);
[da1bafb]536
537 slab_magazine_t *mag = make_empty_current_mag(cache);
[fb10289b]538 if (!mag) {
[25ebfbd]539 irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
[fb10289b]540 return -1;
541 }
[4e147a6]542
543 mag->objs[mag->busy++] = obj;
[da1bafb]544
[25ebfbd]545 irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
[da1bafb]546
[4a5b2b0e]547 atomic_inc(&cache->cached_objs);
[da1bafb]548
[4e147a6]549 return 0;
550}
551
[da1bafb]552/************************/
[9179d0a]553/* Slab cache functions */
[da1bafb]554/************************/
[a294ad0]555
[da1bafb]556/** Return the number of objects that fit into a single slab of the cache
557 *
558 */
[7a0359b]559NO_TRACE static size_t comp_objects(slab_cache_t *cache)
[a294ad0]560{
561 if (cache->flags & SLAB_CACHE_SLINSIDE)
[b0c2075]562 return (FRAMES2SIZE(cache->frames) - sizeof(slab_t)) /
563 cache->size;
[da1bafb]564 else
[b0c2075]565 return FRAMES2SIZE(cache->frames) / cache->size;
[a294ad0]566}
567
[da1bafb]568/** Return wasted space in slab
569 *
570 */
[7a0359b]571NO_TRACE static size_t badness(slab_cache_t *cache)
[a294ad0]572{
[da1bafb]573 size_t objects = comp_objects(cache);
[b0c2075]574 size_t ssize = FRAMES2SIZE(cache->frames);
[da1bafb]575
[a294ad0]576 if (cache->flags & SLAB_CACHE_SLINSIDE)
577 ssize -= sizeof(slab_t);
[da1bafb]578
[6c441cf8]579 return ssize - objects * cache->size;
[a294ad0]580}
[4e147a6]581
[da1bafb]582/** Initialize mag_cache structure in slab cache
583 *
[8e1ea655]584 */
[7a0359b]585NO_TRACE static bool make_magcache(slab_cache_t *cache)
[8e1ea655]586{
[214f5bb]587 ASSERT(_slab_initialized >= 2);
[da1bafb]588
[46c1234]589 cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,
[55821eea]590 FRAME_ATOMIC);
591 if (!cache->mag_cache)
592 return false;
[da1bafb]593
594 size_t i;
[6c441cf8]595 for (i = 0; i < config.cpu_count; i++) {
[e32e092]596 memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
[25ebfbd]597 irq_spinlock_initialize(&cache->mag_cache[i].lock,
[da1bafb]598 "slab.cache.mag_cache[].lock");
[8e1ea655]599 }
[da1bafb]600
[55821eea]601 return true;
[8e1ea655]602}
603
[da1bafb]604/** Initialize allocated memory as a slab cache
605 *
606 */
[7a0359b]607NO_TRACE static void _slab_cache_create(slab_cache_t *cache, const char *name,
[da1bafb]608 size_t size, size_t align, int (*constructor)(void *obj,
609 unsigned int kmflag), size_t (*destructor)(void *obj), unsigned int flags)
[4e147a6]610{
[e32e092]611 memsetb(cache, sizeof(*cache), 0);
[4e147a6]612 cache->name = name;
[da1bafb]613
[96b02eb9]614 if (align < sizeof(sysarg_t))
615 align = sizeof(sysarg_t);
[da1bafb]616
[14e5d88]617 size = ALIGN_UP(size, align);
[da1bafb]618
[a294ad0]619 cache->size = size;
[4e147a6]620 cache->constructor = constructor;
621 cache->destructor = destructor;
622 cache->flags = flags;
[da1bafb]623
[4e147a6]624 list_initialize(&cache->full_slabs);
625 list_initialize(&cache->partial_slabs);
626 list_initialize(&cache->magazines);
[da1bafb]627
[ddb56be]628 irq_spinlock_initialize(&cache->slablock, "slab.cache.slablock");
[4d194be]629 irq_spinlock_initialize(&cache->maglock, "slab.cache.maglock");
[da1bafb]630
[46c1234]631 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
[55821eea]632 (void) make_magcache(cache);
[da1bafb]633
[4e147a6]634 /* Compute slab sizes, object counts in slabs etc. */
635 if (cache->size < SLAB_INSIDE_SIZE)
636 cache->flags |= SLAB_CACHE_SLINSIDE;
[da1bafb]637
[b0c2075]638 /* Minimum slab frames */
639 cache->frames = SIZE2FRAMES(cache->size);
[da1bafb]640
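	/*
	 * Grow the slab in whole power-of-two frame counts until the space
	 * wasted at the end of the slab drops below the badness limit.
	 */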
641 while (badness(cache) > SLAB_MAX_BADNESS(cache))
[b0c2075]642 cache->frames <<= 1;
[da1bafb]643
[a294ad0]644 cache->objects = comp_objects(cache);
[da1bafb]645
[14e5d88]646 /* If info fits in, put it inside */
647 if (badness(cache) > sizeof(slab_t))
648 cache->flags |= SLAB_CACHE_SLINSIDE;
[da1bafb]649
[248fc1a]650 /* Add cache to cache list */
[da1bafb]651 irq_spinlock_lock(&slab_cache_lock, true);
[4e147a6]652 list_append(&cache->link, &slab_cache_list);
[da1bafb]653 irq_spinlock_unlock(&slab_cache_lock, true);
[4e147a6]654}
655
[da1bafb]656/** Create slab cache
657 *
658 */
[a000878c]659slab_cache_t *slab_cache_create(const char *name, size_t size, size_t align,
[da1bafb]660 int (*constructor)(void *obj, unsigned int kmflag),
661 size_t (*destructor)(void *obj), unsigned int flags)
[4e147a6]662{
[da1bafb]663 slab_cache_t *cache = slab_alloc(&slab_cache_cache, 0);
[4e147a6]664 _slab_cache_create(cache, name, size, align, constructor, destructor,
[46c1234]665 flags);
[da1bafb]666
[4e147a6]667 return cache;
668}
669
[da1bafb]670/** Reclaim space occupied by objects that are already free
[4e147a6]671 *
672 * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
[da1bafb]673 *
[4e147a6]674 * @return Number of freed pages
[da1bafb]675 *
[4e147a6]676 */
[7a0359b]677NO_TRACE static size_t _slab_reclaim(slab_cache_t *cache, unsigned int flags)
[4e147a6]678{
679 if (cache->flags & SLAB_CACHE_NOMAGAZINE)
680 return 0; /* Nothing to do */
[da1bafb]681
682 /*
 683 * We count up to the original magazine count to avoid
 684 * an endless loop
[5158549]685 */
[da1bafb]686 atomic_count_t magcount = atomic_get(&cache->magazine_counter);
687
688 slab_magazine_t *mag;
689 size_t frames = 0;
690
691 while ((magcount--) && (mag = get_mag_from_cache(cache, 0))) {
692 frames += magazine_destroy(cache, mag);
693 if ((!(flags & SLAB_RECLAIM_ALL)) && (frames))
[5158549]694 break;
[fb10289b]695 }
[4e147a6]696
697 if (flags & SLAB_RECLAIM_ALL) {
[5158549]698 /* Free cpu-bound magazines */
[4e147a6]699 /* Destroy CPU magazines */
[da1bafb]700 size_t i;
[6c441cf8]701 for (i = 0; i < config.cpu_count; i++) {
[25ebfbd]702 irq_spinlock_lock(&cache->mag_cache[i].lock, true);
[da1bafb]703
[4e147a6]704 mag = cache->mag_cache[i].current;
705 if (mag)
706 frames += magazine_destroy(cache, mag);
707 cache->mag_cache[i].current = NULL;
708
709 mag = cache->mag_cache[i].last;
710 if (mag)
711 frames += magazine_destroy(cache, mag);
712 cache->mag_cache[i].last = NULL;
[da1bafb]713
[25ebfbd]714 irq_spinlock_unlock(&cache->mag_cache[i].lock, true);
[5158549]715 }
[428aabf]716 }
[da1bafb]717
[4e147a6]718 return frames;
719}
720
[da1bafb]721/** Check that there are no slabs and remove cache from system
722 *
723 */
[4e147a6]724void slab_cache_destroy(slab_cache_t *cache)
725{
[da1bafb]726 /*
727 * First remove cache from link, so that we don't need
[5158549]728 * to disable interrupts later
[da1bafb]729 *
[5158549]730 */
[da1bafb]731 irq_spinlock_lock(&slab_cache_lock, true);
[5158549]732 list_remove(&cache->link);
[da1bafb]733 irq_spinlock_unlock(&slab_cache_lock, true);
734
735 /*
 736 * Do not lock anything; we assume that the rest of the code is correct and
 737 * does not touch the cache once it decides to destroy it
738 *
739 */
[4e147a6]740
741 /* Destroy all magazines */
742 _slab_reclaim(cache, SLAB_RECLAIM_ALL);
[da1bafb]743
[4e147a6]744 /* All slabs must be empty */
[da1bafb]745 if ((!list_empty(&cache->full_slabs)) ||
746 (!list_empty(&cache->partial_slabs)))
[4e147a6]747 panic("Destroying cache that is not empty.");
[da1bafb]748
[8e1ea655]749 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
[bb68433]750 free(cache->mag_cache);
[da1bafb]751
[fb10289b]752 slab_free(&slab_cache_cache, cache);
[4e147a6]753}
754
[da1bafb]755/** Allocate a new object from the cache - if no flags are given, always returns memory
756 *
757 */
758void *slab_alloc(slab_cache_t *cache, unsigned int flags)
[4e147a6]759{
[da1bafb]760 /* Disable interrupts to avoid deadlocks with interrupt handlers */
761 ipl_t ipl = interrupts_disable();
762
[4e147a6]763 void *result = NULL;
[c5613b72]764
[da1bafb]765 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
[4e147a6]766 result = magazine_obj_get(cache);
[da1bafb]767
[428aabf]768 if (!result)
[4e147a6]769 result = slab_obj_create(cache, flags);
[da1bafb]770
[4e147a6]771 interrupts_restore(ipl);
[da1bafb]772
[fb10289b]773 if (result)
774 atomic_inc(&cache->allocated_objs);
[da1bafb]775
[4e147a6]776 return result;
777}
778
[da1bafb]779/** Return object to cache, use slab if known
780 *
781 */
[7a0359b]782NO_TRACE static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
[4e147a6]783{
[da1bafb]784 ipl_t ipl = interrupts_disable();
785
[46c1234]786 if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
[da1bafb]787 (magazine_obj_put(cache, obj)))
[c352c2e]788 slab_obj_destroy(cache, obj, slab);
[da1bafb]789
[4e147a6]790 interrupts_restore(ipl);
[fb10289b]791 atomic_dec(&cache->allocated_objs);
[4e147a6]792}
793
[da1bafb]794/** Return slab object to cache
795 *
796 */
[c352c2e]797void slab_free(slab_cache_t *cache, void *obj)
798{
[ce8aed1]799 _slab_free(cache, obj, NULL);
[c352c2e]800}
801
[ab6f2507]802/** Go through all caches and reclaim what is possible */
[da1bafb]803size_t slab_reclaim(unsigned int flags)
[4e147a6]804{
[ab6f2507]805 irq_spinlock_lock(&slab_cache_lock, true);
[da1bafb]806
[98000fb]807 size_t frames = 0;
[feeac0d]808 list_foreach(slab_cache_list, link, slab_cache_t, cache) {
[4e147a6]809 frames += _slab_reclaim(cache, flags);
810 }
[da1bafb]811
[ab6f2507]812 irq_spinlock_unlock(&slab_cache_lock, true);
[da1bafb]813
[4e147a6]814 return frames;
815}
816
[da1bafb]817/** Print the list of slab caches
818 *
819 */
[4e147a6]820void slab_print_list(void)
821{
[ccb426c]822 printf("[slab name ] [size ] [pages ] [obj/pg] [slabs ]"
823 " [cached] [alloc ] [ctl]\n");
[da1bafb]824
825 size_t skip = 0;
[599d6f5]826 while (true) {
827 /*
828 * We must not hold the slab_cache_lock spinlock when printing
829 * the statistics. Otherwise we can easily deadlock if the print
830 * needs to allocate memory.
831 *
832 * Therefore, we walk through the slab cache list, skipping some
833 * amount of already processed caches during each iteration and
834 * gathering statistics about the first unprocessed cache. For
 835 * the sake of printing the statistics, we release the
836 * slab_cache_lock and reacquire it afterwards. Then the walk
837 * starts again.
838 *
839 * This limits both the efficiency and also accuracy of the
840 * obtained statistics. The efficiency is decreased because the
841 * time complexity of the algorithm is quadratic instead of
842 * linear. The accuracy is impacted because we drop the lock
843 * after processing one cache. If there is someone else
844 * manipulating the cache list, we might omit an arbitrary
845 * number of caches or process one cache multiple times.
 846 * However, we don't lose much with this algorithm, as it is only
 847 * used for statistics.
848 */
[da1bafb]849
850 irq_spinlock_lock(&slab_cache_lock, true);
851
852 link_t *cur;
853 size_t i;
[55b77d9]854 for (i = 0, cur = slab_cache_list.head.next;
855 (i < skip) && (cur != &slab_cache_list.head);
[da1bafb]856 i++, cur = cur->next);
857
[55b77d9]858 if (cur == &slab_cache_list.head) {
[da1bafb]859 irq_spinlock_unlock(&slab_cache_lock, true);
[599d6f5]860 break;
861 }
[da1bafb]862
[599d6f5]863 skip++;
[da1bafb]864
865 slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
866
[a000878c]867 const char *name = cache->name;
[b0c2075]868 size_t frames = cache->frames;
[599d6f5]869 size_t size = cache->size;
[da1bafb]870 size_t objects = cache->objects;
[599d6f5]871 long allocated_slabs = atomic_get(&cache->allocated_slabs);
872 long cached_objs = atomic_get(&cache->cached_objs);
873 long allocated_objs = atomic_get(&cache->allocated_objs);
[da1bafb]874 unsigned int flags = cache->flags;
[599d6f5]875
[da1bafb]876 irq_spinlock_unlock(&slab_cache_lock, true);
[6536a4a9]877
[b0c2075]878 printf("%-18s %8zu %8zu %8zu %8ld %8ld %8ld %-5s\n",
879 name, size, frames, objects, allocated_slabs,
[599d6f5]880 cached_objs, allocated_objs,
881 flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
[4e147a6]882 }
883}
884
885void slab_cache_init(void)
886{
887 /* Initialize magazine cache */
[f97f1e51]888 _slab_cache_create(&mag_cache, "slab_magazine_t",
[46c1234]889 sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void*),
890 sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE |
891 SLAB_CACHE_SLINSIDE);
[da1bafb]892
[fb10289b]893 /* Initialize slab_cache cache */
[f97f1e51]894 _slab_cache_create(&slab_cache_cache, "slab_cache_cache",
[46c1234]895 sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL,
896 SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
[da1bafb]897
[fb10289b]898 /* Initialize external slab cache */
[f97f1e51]899 slab_extern_cache = slab_cache_create("slab_t", sizeof(slab_t), 0,
[46c1234]900 NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
[da1bafb]901
[4e147a6]902 /* Initialize structures for malloc */
[da1bafb]903 size_t i;
904 size_t size;
905
[46c1234]906 for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
907 i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
908 i++, size <<= 1) {
909 malloc_caches[i] = slab_cache_create(malloc_names[i], size, 0,
910 NULL, NULL, SLAB_CACHE_MAGDEFERRED);
[c352c2e]911 }
[da1bafb]912
[a000878c]913#ifdef CONFIG_DEBUG
[04225a7]914 _slab_initialized = 1;
915#endif
[c352c2e]916}
917
[8e1ea655]918/** Enable cpu_cache
919 *
 920 * The kernel calls this function when it knows the real number of
[da1bafb]921 * processors. Allocate the per-CPU magazine structures and enable them
 922 * on all existing caches that are marked SLAB_CACHE_MAGDEFERRED
923 *
[8e1ea655]924 */
925void slab_enable_cpucache(void)
926{
[214f5bb]927#ifdef CONFIG_DEBUG
928 _slab_initialized = 2;
929#endif
[8e1ea655]930
[da1bafb]931 irq_spinlock_lock(&slab_cache_lock, false);
932
[feeac0d]933 list_foreach(slab_cache_list, link, slab_cache_t, slab) {
[da1bafb]934 if ((slab->flags & SLAB_CACHE_MAGDEFERRED) !=
[46c1234]935 SLAB_CACHE_MAGDEFERRED)
[8e1ea655]936 continue;
[da1bafb]937
938 (void) make_magcache(slab);
939 slab->flags &= ~SLAB_CACHE_MAGDEFERRED;
[8e1ea655]940 }
[da1bafb]941
942 irq_spinlock_unlock(&slab_cache_lock, false);
[8e1ea655]943}
944
[da1bafb]945void *malloc(size_t size, unsigned int flags)
[c352c2e]946{
[04225a7]947 ASSERT(_slab_initialized);
[c259b9b]948 ASSERT(size <= (1 << SLAB_MAX_MALLOC_W));
[c352c2e]949
950 if (size < (1 << SLAB_MIN_MALLOC_W))
951 size = (1 << SLAB_MIN_MALLOC_W);
[da1bafb]952
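	/*
	 * Map the request to the smallest power-of-two cache that fits it.
	 * For example, assuming SLAB_MIN_MALLOC_W is 4 (the "malloc-16"
	 * cache), a 100-byte request yields idx 3, i.e. "malloc-128".
	 */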
953 uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
954
[c352c2e]955 return slab_alloc(malloc_caches[idx], flags);
956}
957
[da1bafb]958void *realloc(void *ptr, size_t size, unsigned int flags)
[c352c2e]959{
[ce8aed1]960 ASSERT(_slab_initialized);
961 ASSERT(size <= (1 << SLAB_MAX_MALLOC_W));
962
963 void *new_ptr;
964
965 if (size > 0) {
966 if (size < (1 << SLAB_MIN_MALLOC_W))
967 size = (1 << SLAB_MIN_MALLOC_W);
[da1bafb]968 uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
[ce8aed1]969
970 new_ptr = slab_alloc(malloc_caches[idx], flags);
971 } else
972 new_ptr = NULL;
973
974 if ((new_ptr != NULL) && (ptr != NULL)) {
975 slab_t *slab = obj2slab(ptr);
976 memcpy(new_ptr, ptr, min(size, slab->cache->size));
977 }
978
979 if (ptr != NULL)
980 free(ptr);
981
982 return new_ptr;
983}
[5158549]984
[ce8aed1]985void free(void *ptr)
986{
987 if (!ptr)
[f3272e98]988 return;
[da1bafb]989
[ce8aed1]990 slab_t *slab = obj2slab(ptr);
991 _slab_free(slab->cache, ptr, slab);
[4e147a6]992}
[b45c443]993
[cc73a8a1]994/** @}
[b45c443]995 */