source: mainline/kernel/generic/src/mm/slab.c @ 96b02eb9
/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Slab allocator.
 *
 * The slab allocator is closely modelled after the OpenSolaris slab allocator.
 * @see http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 * @li empty slabs are deallocated immediately
 *     (in Linux they are kept in a linked list, in Solaris ???)
 * @li empty magazines are deallocated when not needed
 *     (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy to add:
 * @li cache coloring
 * @li dynamic magazine growing (different magazine sizes are already
 *     supported, but we would need to adjust the allocation strategy)
 *
 * The slab allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, it is first checked whether it is
 * available in a CPU-bound magazine. If it is not found there, it is
 * allocated from a CPU-shared slab - if a partially full one is found,
 * it is used, otherwise a new one is allocated.
 *
 * When an object is being deallocated, it is put to a CPU-bound magazine.
 * If there is no such magazine, a new one is allocated (if this fails,
 * the object is deallocated into the slab). If the magazine is full, it is
 * put into the CPU-shared list of magazines and a new one is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines in order to avoid
 * thrashing when somebody is allocating/deallocating 1 item at the magazine
 * size boundary. LIFO order is enforced, which should avoid fragmentation
 * as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty slabs are immediately freed (thrashing will be avoided
 * because of magazines).
 *
 * The slab information structure is kept inside the data area, if possible.
 * The cache can be marked that it should not use magazines. This is used
 * only for slab-related caches to avoid deadlocks and infinite recursion
 * (the slab allocator uses itself for allocating all its control structures).
 *
 * The slab allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the CPU-shared magazine list, until at least 1 slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from CPU-bound
 * magazines.
 *
 * @todo
 * For better CPU-scaling the magazine allocation strategy should
 * be extended. Currently, if the cache does not have a magazine, it asks
 * the non-CPU-cached magazine cache to provide one. It might be feasible
 * to add a CPU-cached magazine cache (which would allocate its magazines
 * from the non-CPU-cached magazine cache). This would provide a nice per-CPU
 * buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which decreases competing for the one per-system
 * magazine cache.
 *
 * @todo
 * It might be good to add granularity of locks even to slab level,
 * we could then try_spinlock over all partial slabs and thus improve
 * scalability even on slab level.
 *
 */

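/*
 * Example (illustrative only): a component might create a cache for its own
 * fixed-size objects and use it along these lines. The foo_t type and the
 * foo_* names are placeholders invented for this sketch; the
 * slab_cache_create(), slab_alloc() and slab_free() calls match the
 * interfaces defined below.
 *
 *   static slab_cache_t *foo_cache;
 *
 *   void foo_subsystem_init(void)
 *   {
 *       // no constructor/destructor, default alignment, default flags
 *       foo_cache = slab_cache_create("foo_t", sizeof(foo_t), 0,
 *           NULL, NULL, 0);
 *   }
 *
 *   void foo_do_work(void)
 *   {
 *       foo_t *foo = slab_alloc(foo_cache, 0);
 *       // ... use the object ...
 *       slab_free(foo_cache, foo);
 *   }
 */
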
#include <synch/spinlock.h>
#include <mm/slab.h>
#include <adt/list.h>
#include <memstr.h>
#include <align.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>
#include <macros.h>

IRQ_SPINLOCK_STATIC_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;

/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;

/** Cache for external slab descriptors
 * This time we want per-cpu cache, so do not make it static
 * - using slab for internal slab structures will not deadlock,
 *   as all slab structures are 'small' - control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;

/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];

static const char *malloc_names[] = {
	"malloc-16",
	"malloc-32",
	"malloc-64",
	"malloc-128",
	"malloc-256",
	"malloc-512",
	"malloc-1K",
	"malloc-2K",
	"malloc-4K",
	"malloc-8K",
	"malloc-16K",
	"malloc-32K",
	"malloc-64K",
	"malloc-128K",
	"malloc-256K",
	"malloc-512K",
	"malloc-1M",
	"malloc-2M",
	"malloc-4M"
};

/** Slab descriptor */
typedef struct {
	slab_cache_t *cache;  /**< Pointer to parent cache. */
	link_t link;          /**< List of full/partial slabs. */
	void *start;          /**< Start address of first available item. */
	size_t available;     /**< Count of available items in this slab. */
	size_t nextavail;     /**< The index of next available item. */
} slab_t;

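/*
 * Note on the free-object bookkeeping: available objects inside a slab form
 * an implicit index chain. The first machine word of every free object holds
 * the index of the next free object, and slab->nextavail is the head of that
 * chain (see slab_space_alloc(), slab_obj_create() and slab_obj_destroy()
 * below).
 */
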
#ifdef CONFIG_DEBUG
static unsigned int _slab_initialized = 0;
#endif

/**************************************/
/* Slab allocation functions */
/**************************************/

/** Allocate frames for slab space and initialize
 *
 */
NO_TRACE static slab_t *slab_space_alloc(slab_cache_t *cache,
    unsigned int flags)
{
	size_t zone = 0;
	
	void *data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
	if (!data) {
		return NULL;
	}
	
	slab_t *slab;
	size_t fsize;
	
	if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
		slab = slab_alloc(slab_extern_cache, flags);
		if (!slab) {
			frame_free(KA2PA(data));
			return NULL;
		}
	} else {
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}
	
	/* Fill in slab structures */
	size_t i;
	for (i = 0; i < ((size_t) 1 << cache->order); i++)
		frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone);
	
	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;
	slab->cache = cache;
	
	for (i = 0; i < cache->objects; i++)
		*((size_t *) (slab->start + i * cache->size)) = i + 1;
	
	atomic_inc(&cache->allocated_slabs);
	return slab;
}

/** Deallocate space associated with slab
 *
 * @return number of freed frames
 *
 */
NO_TRACE static size_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free(KA2PA(slab->start));
	if (!(cache->flags & SLAB_CACHE_SLINSIDE))
		slab_free(slab_extern_cache, slab);
	
	atomic_dec(&cache->allocated_slabs);
	
	return (1 << cache->order);
}

/** Map object to slab structure */
NO_TRACE static slab_t *obj2slab(void *obj)
{
	return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
}

/******************/
/* Slab functions */
/******************/

/** Return object to slab and call a destructor
 *
 * @param slab The slab of the object, if known to the caller; otherwise NULL
 *
 * @return Number of freed pages
 *
 */
NO_TRACE static size_t slab_obj_destroy(slab_cache_t *cache, void *obj,
    slab_t *slab)
{
	if (!slab)
		slab = obj2slab(obj);
	
	ASSERT(slab->cache == cache);
	
	size_t freed = 0;
	
	if (cache->destructor)
		freed = cache->destructor(obj);
	
	spinlock_lock(&cache->slablock);
	ASSERT(slab->available < cache->objects);
	
	*((size_t *) obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start) / cache->size;
	slab->available++;
	
	/* Move it to correct list */
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		spinlock_unlock(&cache->slablock);
		
		return freed + slab_space_free(cache, slab);
	} else if (slab->available == 1) {
		/* It was in full, move to partial */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}
	
	spinlock_unlock(&cache->slablock);
	return freed;
}

/** Take a new object from slab or create a new slab if needed
 *
 * @return Object address or NULL
 *
 */
NO_TRACE static void *slab_obj_create(slab_cache_t *cache, unsigned int flags)
{
	spinlock_lock(&cache->slablock);
	
	slab_t *slab;
	
	if (list_empty(&cache->partial_slabs)) {
		/*
		 * Allow recursion and reclaiming
		 * - this should work, as the slab control structures
		 *   are small and do not need to allocate with anything
		 *   other than frame_alloc when they are allocating,
		 *   that's why we should get recursion at most 1-level deep
		 *
		 */
		spinlock_unlock(&cache->slablock);
		slab = slab_space_alloc(cache, flags);
		if (!slab)
			return NULL;
		
		spinlock_lock(&cache->slablock);
	} else {
		slab = list_get_instance(cache->partial_slabs.next, slab_t,
		    link);
		list_remove(&slab->link);
	}
	
	void *obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((size_t *) obj);
	slab->available--;
	
	if (!slab->available)
		list_prepend(&slab->link, &cache->full_slabs);
	else
		list_prepend(&slab->link, &cache->partial_slabs);
	
	spinlock_unlock(&cache->slablock);
	
	if ((cache->constructor) && (cache->constructor(obj, flags))) {
		/* Bad, bad, construction failed */
		slab_obj_destroy(cache, obj, slab);
		return NULL;
	}
	
	return obj;
}

/****************************/
/* CPU-Cache slab functions */
/****************************/

/** Find a full magazine in cache, take it from list and return it
 *
 * @param first If true, return first, else last mag.
 *
 */
NO_TRACE static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache,
    bool first)
{
	slab_magazine_t *mag = NULL;
	link_t *cur;
	
	spinlock_lock(&cache->maglock);
	if (!list_empty(&cache->magazines)) {
		if (first)
			cur = cache->magazines.next;
		else
			cur = cache->magazines.prev;
		
		mag = list_get_instance(cur, slab_magazine_t, link);
		list_remove(&mag->link);
		atomic_dec(&cache->magazine_counter);
	}
	
	spinlock_unlock(&cache->maglock);
	return mag;
}

/** Prepend magazine to magazine list in cache
 *
 */
NO_TRACE static void put_mag_to_cache(slab_cache_t *cache,
    slab_magazine_t *mag)
{
	spinlock_lock(&cache->maglock);
	
	list_prepend(&mag->link, &cache->magazines);
	atomic_inc(&cache->magazine_counter);
	
	spinlock_unlock(&cache->maglock);
}

/** Free all objects in magazine and free memory associated with magazine
 *
 * @return Number of freed pages
 *
 */
NO_TRACE static size_t magazine_destroy(slab_cache_t *cache,
    slab_magazine_t *mag)
{
	size_t i;
	size_t frames = 0;
	
	for (i = 0; i < mag->busy; i++) {
		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
		atomic_dec(&cache->cached_objs);
	}
	
	slab_free(&mag_cache, mag);
	
	return frames;
}

/** Find full magazine, set it as current and return it
 *
 */
NO_TRACE static slab_magazine_t *get_full_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
	slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
	
	ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock));
	
	if (cmag) { /* First try local CPU magazines */
		if (cmag->busy)
			return cmag;
		
		if ((lastmag) && (lastmag->busy)) {
			cache->mag_cache[CPU->id].current = lastmag;
			cache->mag_cache[CPU->id].last = cmag;
			return lastmag;
		}
	}
	
	/* Local magazines are empty, import one from magazine list */
	slab_magazine_t *newmag = get_mag_from_cache(cache, 1);
	if (!newmag)
		return NULL;
	
	if (lastmag)
		magazine_destroy(cache, lastmag);
	
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;
	
	return newmag;
}

/** Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 *
 */
NO_TRACE static void *magazine_obj_get(slab_cache_t *cache)
{
	if (!CPU)
		return NULL;
	
	spinlock_lock(&cache->mag_cache[CPU->id].lock);
	
	slab_magazine_t *mag = get_full_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return NULL;
	}
	
	void *obj = mag->objs[--mag->busy];
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	
	atomic_dec(&cache->cached_objs);
	
	return obj;
}

/** Ensure that the current magazine has room for another object and return
 * a pointer to it, or NULL if no such magazine is available and a new one
 * cannot be allocated
 *
 * We have 2 magazines bound to processor.
 * First try the current.
 * If full, try the last.
 * If full, put to magazines list.
 *
 */
NO_TRACE static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
	slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
	
	ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock));
	
	if (cmag) {
		if (cmag->busy < cmag->size)
			return cmag;
		
		if ((lastmag) && (lastmag->busy < lastmag->size)) {
			cache->mag_cache[CPU->id].last = cmag;
			cache->mag_cache[CPU->id].current = lastmag;
			return lastmag;
		}
	}
	
	/* current | last are full | nonexistent, allocate new */
	
	/*
	 * We do not want to sleep just because of caching,
	 * especially we do not want reclaiming to start, as
	 * this would deadlock.
	 *
	 */
	slab_magazine_t *newmag = slab_alloc(&mag_cache,
	    FRAME_ATOMIC | FRAME_NO_RECLAIM);
	if (!newmag)
		return NULL;
	
	newmag->size = SLAB_MAG_SIZE;
	newmag->busy = 0;
	
	/* Flush last to magazine list */
	if (lastmag)
		put_mag_to_cache(cache, lastmag);
	
	/* Move current as last, save new as current */
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;
	
	return newmag;
}

/** Put object into CPU-cache magazine
 *
 * @return 0 on success, -1 on no memory
 *
 */
NO_TRACE static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	if (!CPU)
		return -1;
	
	spinlock_lock(&cache->mag_cache[CPU->id].lock);
	
	slab_magazine_t *mag = make_empty_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return -1;
	}
	
	mag->objs[mag->busy++] = obj;
	
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	
	atomic_inc(&cache->cached_objs);
	
	return 0;
}

/************************/
/* Slab cache functions */
/************************/

/** Return number of objects that fit in certain cache size
 *
 */
NO_TRACE static size_t comp_objects(slab_cache_t *cache)
{
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		return ((PAGE_SIZE << cache->order)
		    - sizeof(slab_t)) / cache->size;
	else
		return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab
 *
 */
NO_TRACE static size_t badness(slab_cache_t *cache)
{
	size_t objects = comp_objects(cache);
	size_t ssize = PAGE_SIZE << cache->order;
	
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		ssize -= sizeof(slab_t);
	
	return ssize - objects * cache->size;
}

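/*
 * Worked example (illustrative, assuming a 4 KiB PAGE_SIZE and, say, an
 * 80-byte slab_t): for a cache with cache->size == 192 and cache->order == 0
 * using SLAB_CACHE_SLINSIDE, comp_objects() yields (4096 - 80) / 192 = 20
 * objects per slab, and badness() yields (4096 - 80) - 20 * 192 = 176 wasted
 * bytes. _slab_cache_create() below keeps increasing the order while this
 * badness exceeds SLAB_MAX_BADNESS(cache).
 */
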
/** Initialize mag_cache structure in slab cache
 *
 */
NO_TRACE static bool make_magcache(slab_cache_t *cache)
{
	ASSERT(_slab_initialized >= 2);
	
	cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,
	    FRAME_ATOMIC);
	if (!cache->mag_cache)
		return false;
	
	size_t i;
	for (i = 0; i < config.cpu_count; i++) {
		memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
		spinlock_initialize(&cache->mag_cache[i].lock,
		    "slab.cache.mag_cache[].lock");
	}
	
	return true;
}

/** Initialize allocated memory as a slab cache
 *
 */
NO_TRACE static void _slab_cache_create(slab_cache_t *cache, const char *name,
    size_t size, size_t align, int (*constructor)(void *obj,
    unsigned int kmflag), size_t (*destructor)(void *obj), unsigned int flags)
{
	memsetb(cache, sizeof(*cache), 0);
	cache->name = name;
	
	if (align < sizeof(sysarg_t))
		align = sizeof(sysarg_t);
	
	size = ALIGN_UP(size, align);
	
	cache->size = size;
	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;
	
	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	
	spinlock_initialize(&cache->slablock, "slab.cache.slablock");
	spinlock_initialize(&cache->maglock, "slab.cache.maglock");
	
	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		(void) make_magcache(cache);
	
	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;
	
	/* Minimum slab order */
	size_t pages = SIZE2FRAMES(cache->size);
	
	/* We need the 2^order >= pages */
	if (pages == 1)
		cache->order = 0;
	else
		cache->order = fnzb(pages - 1) + 1;
	
	while (badness(cache) > SLAB_MAX_BADNESS(cache))
		cache->order += 1;
	
	cache->objects = comp_objects(cache);
	
	/* If info fits in, put it inside */
	if (badness(cache) > sizeof(slab_t))
		cache->flags |= SLAB_CACHE_SLINSIDE;
	
	/* Add cache to cache list */
	irq_spinlock_lock(&slab_cache_lock, true);
	list_append(&cache->link, &slab_cache_list);
	irq_spinlock_unlock(&slab_cache_lock, true);
}

/** Create slab cache
 *
 */
slab_cache_t *slab_cache_create(const char *name, size_t size, size_t align,
    int (*constructor)(void *obj, unsigned int kmflag),
    size_t (*destructor)(void *obj), unsigned int flags)
{
	slab_cache_t *cache = slab_alloc(&slab_cache_cache, 0);
	_slab_cache_create(cache, name, size, align, constructor, destructor,
	    flags);
	
	return cache;
}

/** Reclaim space occupied by objects that are already free
 *
 * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
 *
 * @return Number of freed pages
 *
 */
NO_TRACE static size_t _slab_reclaim(slab_cache_t *cache, unsigned int flags)
{
	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */
	
	/*
	 * We count up to original magazine count to avoid
	 * endless loop
	 */
	atomic_count_t magcount = atomic_get(&cache->magazine_counter);
	
	slab_magazine_t *mag;
	size_t frames = 0;
	
	while ((magcount--) && (mag = get_mag_from_cache(cache, 0))) {
		frames += magazine_destroy(cache, mag);
		if ((!(flags & SLAB_RECLAIM_ALL)) && (frames))
			break;
	}
	
	if (flags & SLAB_RECLAIM_ALL) {
		/* Free cpu-bound magazines */
		/* Destroy CPU magazines */
		size_t i;
		for (i = 0; i < config.cpu_count; i++) {
			spinlock_lock(&cache->mag_cache[i].lock);
			
			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;
			
			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;
			
			spinlock_unlock(&cache->mag_cache[i].lock);
		}
	}
	
	return frames;
}

/** Check that there are no slabs and remove cache from system
 *
 */
void slab_cache_destroy(slab_cache_t *cache)
{
	/*
	 * First remove cache from link, so that we don't need
	 * to disable interrupts later
	 *
	 */
	irq_spinlock_lock(&slab_cache_lock, true);
	list_remove(&cache->link);
	irq_spinlock_unlock(&slab_cache_lock, true);
	
	/*
	 * Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it
	 *
	 */
	
	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);
	
	/* All slabs must be empty */
	if ((!list_empty(&cache->full_slabs)) ||
	    (!list_empty(&cache->partial_slabs)))
		panic("Destroying cache that is not empty.");
	
	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		free(cache->mag_cache);
	
	slab_free(&slab_cache_cache, cache);
}

/** Allocate new object from cache - if no flags given, always returns memory
 *
 */
void *slab_alloc(slab_cache_t *cache, unsigned int flags)
{
	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl_t ipl = interrupts_disable();
	
	void *result = NULL;
	
	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		result = magazine_obj_get(cache);
	
	if (!result)
		result = slab_obj_create(cache, flags);
	
	interrupts_restore(ipl);
	
	if (result)
		atomic_inc(&cache->allocated_objs);
	
	return result;
}

/** Return object to cache, use slab if known
 *
 */
NO_TRACE static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
	ipl_t ipl = interrupts_disable();
	
	if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
	    (magazine_obj_put(cache, obj)))
		slab_obj_destroy(cache, obj, slab);
	
	interrupts_restore(ipl);
	atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache
 *
 */
void slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache, obj, NULL);
}

/** Go through all caches and reclaim what is possible
 *
 * Interrupts must be disabled before calling this function,
 * otherwise memory allocation from interrupts can deadlock.
 *
 */
size_t slab_reclaim(unsigned int flags)
{
	irq_spinlock_lock(&slab_cache_lock, false);
	
	size_t frames = 0;
	link_t *cur;
	for (cur = slab_cache_list.next; cur != &slab_cache_list;
	    cur = cur->next) {
		slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}
	
	irq_spinlock_unlock(&slab_cache_lock, false);
	
	return frames;
}

/* Print list of slabs
 *
 */
void slab_print_list(void)
{
	printf("[slab name ] [size ] [pages ] [obj/pg] [slabs ]"
	    " [cached] [alloc ] [ctl]\n");
	
	size_t skip = 0;
	while (true) {
		/*
		 * We must not hold the slab_cache_lock spinlock when printing
		 * the statistics. Otherwise we can easily deadlock if the print
		 * needs to allocate memory.
		 *
		 * Therefore, we walk through the slab cache list, skipping some
		 * amount of already processed caches during each iteration and
		 * gathering statistics about the first unprocessed cache. For
		 * the sake of printing the statistics, we release the
		 * slab_cache_lock and reacquire it afterwards. Then the walk
		 * starts again.
		 *
		 * This limits both the efficiency and also accuracy of the
		 * obtained statistics. The efficiency is decreased because the
		 * time complexity of the algorithm is quadratic instead of
		 * linear. The accuracy is impacted because we drop the lock
		 * after processing one cache. If there is someone else
		 * manipulating the cache list, we might omit an arbitrary
		 * number of caches or process one cache multiple times.
		 * However, we don't bleed for this algorithm for it is only
		 * statistics.
		 */
		
		irq_spinlock_lock(&slab_cache_lock, true);
		
		link_t *cur;
		size_t i;
		for (i = 0, cur = slab_cache_list.next;
		    (i < skip) && (cur != &slab_cache_list);
		    i++, cur = cur->next);
		
		if (cur == &slab_cache_list) {
			irq_spinlock_unlock(&slab_cache_lock, true);
			break;
		}
		
		skip++;
		
		slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
		
		const char *name = cache->name;
		uint8_t order = cache->order;
		size_t size = cache->size;
		size_t objects = cache->objects;
		long allocated_slabs = atomic_get(&cache->allocated_slabs);
		long cached_objs = atomic_get(&cache->cached_objs);
		long allocated_objs = atomic_get(&cache->allocated_objs);
		unsigned int flags = cache->flags;
		
		irq_spinlock_unlock(&slab_cache_lock, true);
		
		printf("%-18s %8zu %8u %8zu %8ld %8ld %8ld %-5s\n",
		    name, size, (1 << order), objects, allocated_slabs,
		    cached_objs, allocated_objs,
		    flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
	}
}

void slab_cache_init(void)
{
	/* Initialize magazine cache */
	_slab_cache_create(&mag_cache, "slab_magazine",
	    sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void*),
	    sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE |
	    SLAB_CACHE_SLINSIDE);
	
	/* Initialize slab_cache cache */
	_slab_cache_create(&slab_cache_cache, "slab_cache",
	    sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL,
	    SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	
	/* Initialize external slab cache */
	slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0,
	    NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
	
	/* Initialize structures for malloc */
	size_t i;
	size_t size;
	
	for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
	    i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
	    i++, size <<= 1) {
		malloc_caches[i] = slab_cache_create(malloc_names[i], size, 0,
		    NULL, NULL, SLAB_CACHE_MAGDEFERRED);
	}
	
#ifdef CONFIG_DEBUG
	_slab_initialized = 1;
#endif
}

/** Enable cpu_cache
 *
 * The kernel calls this function when it knows the real number of
 * processors. It allocates the per-CPU magazine structures and enables
 * them on all existing caches that are SLAB_CACHE_MAGDEFERRED.
 *
 */
void slab_enable_cpucache(void)
{
#ifdef CONFIG_DEBUG
	_slab_initialized = 2;
#endif
	
	irq_spinlock_lock(&slab_cache_lock, false);
	
	link_t *cur;
	for (cur = slab_cache_list.next; cur != &slab_cache_list;
	    cur = cur->next) {
		slab_cache_t *slab = list_get_instance(cur, slab_cache_t, link);
		if ((slab->flags & SLAB_CACHE_MAGDEFERRED) !=
		    SLAB_CACHE_MAGDEFERRED)
			continue;
		
		(void) make_magcache(slab);
		slab->flags &= ~SLAB_CACHE_MAGDEFERRED;
	}
	
	irq_spinlock_unlock(&slab_cache_lock, false);
}

void *malloc(size_t size, unsigned int flags)
{
	ASSERT(_slab_initialized);
	ASSERT(size <= (1 << SLAB_MAX_MALLOC_W));
	
	if (size < (1 << SLAB_MIN_MALLOC_W))
		size = (1 << SLAB_MIN_MALLOC_W);
	
	uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
	
	return slab_alloc(malloc_caches[idx], flags);
}

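/*
 * Example of the index computation above (illustrative; it assumes
 * SLAB_MIN_MALLOC_W == 4, i.e. that "malloc-16" is the smallest cache):
 * for size == 100, fnzb(99) == 6, so idx == 6 - 4 + 1 == 3 and the request
 * is served from the "malloc-128" cache, the smallest power-of-two cache
 * that fits it.
 */
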
void *realloc(void *ptr, size_t size, unsigned int flags)
{
	ASSERT(_slab_initialized);
	ASSERT(size <= (1 << SLAB_MAX_MALLOC_W));
	
	void *new_ptr;
	
	if (size > 0) {
		if (size < (1 << SLAB_MIN_MALLOC_W))
			size = (1 << SLAB_MIN_MALLOC_W);
		uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
		
		new_ptr = slab_alloc(malloc_caches[idx], flags);
	} else
		new_ptr = NULL;
	
	if ((new_ptr != NULL) && (ptr != NULL)) {
		slab_t *slab = obj2slab(ptr);
		memcpy(new_ptr, ptr, min(size, slab->cache->size));
	}
	
	if (ptr != NULL)
		free(ptr);
	
	return new_ptr;
}

void free(void *ptr)
{
	if (!ptr)
		return;
	
	slab_t *slab = obj2slab(ptr);
	_slab_free(slab->cache, ptr, slab);
}

/** @}
 */