source: mainline/kernel/generic/src/mm/slab.c@ c8d0f9e5

Last change on this file since c8d0f9e5 was c8d0f9e5, checked in by Jakub Jermar <jakub@…>, 13 years ago

Add assertions to stress the fact that the slab allocator spinlocks are
always taken with interrupts disabled.

  • Property mode set to 100644
File size: 26.3 KB
[4e147a6]1/*
[df4ed85]2 * Copyright (c) 2006 Ondrej Palkovsky
[4e147a6]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[cc73a8a1]29/** @addtogroup genericmm
[b45c443]30 * @{
31 */
32
[9179d0a]33/**
[b45c443]34 * @file
[da1bafb]35 * @brief Slab allocator.
[9179d0a]36 *
37 * The slab allocator is closely modelled after the OpenSolaris slab allocator.
38 * @see http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
[fb10289b]39 *
40 * with the following exceptions:
[9179d0a]41 * @li empty slabs are deallocated immediately
[fb10289b]42 * (in Linux they are kept in a linked list, in Solaris ???)
[9179d0a]43 * @li empty magazines are deallocated when not needed
[fb10289b]44 * (in Solaris they are held in a linked list in the slab cache)
 45 *
[9179d0a]46 * The following features are not currently supported but would be easy to do:
47 * @li cache coloring
48 * @li dynamic magazine growing (different magazine sizes are already
[5b04fc7]49 * supported, but we would need to adjust allocation strategy)
[fb10289b]50 *
[9179d0a]51 * The slab allocator supports per-CPU caches ('magazines') to facilitate
[da1bafb]52 * good SMP scaling.
[fb10289b]53 *
 54 * When a new object is being allocated, it is first checked whether it is
[7669bcf]55 * available in a CPU-bound magazine. If it is not found there, it is
56 * allocated from a CPU-shared slab - if a partially full one is found,
57 * it is used, otherwise a new one is allocated.
[fb10289b]58 *
[7669bcf]59 * When an object is being deallocated, it is put into a CPU-bound magazine.
 60 * If there is no such magazine, a new one is allocated (if this fails,
[9179d0a]61 * the object is deallocated directly into its slab). If the magazine is full,
[7669bcf]62 * it is put onto the cpu-shared list of magazines and a new one is allocated.
[fb10289b]63 *
[7669bcf]64 * The CPU-bound magazine is actually a pair of magazines in order to avoid
[fb10289b]65 * thrashing when somebody is allocating/deallocating 1 item at the magazine
66 * size boundary. LIFO order is enforced, which should avoid fragmentation
[da1bafb]67 * as much as possible.
68 *
[7669bcf]69 * Every cache contains a list of full slabs and a list of partially full slabs.
[9179d0a]70 * Empty slabs are immediately freed (thrashing will be avoided because
[da1bafb]71 * of magazines).
[fb10289b]72 *
[9179d0a]73 * The slab information structure is kept inside the data area, if possible.
[fb10289b]74 * A cache can be marked so that it does not use magazines. This is used
[9179d0a]75 * only for slab-related caches to avoid deadlocks and infinite recursion
 76 * (the slab allocator uses itself for allocating all its control structures).
[fb10289b]77 *
[7669bcf]78 * The slab allocator allocates a lot of space and does not free it. When
79 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
[fb10289b]80 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
81 * releases slabs from cpu-shared magazine-list, until at least 1 slab
82 * is deallocated in each cache (this algorithm should probably change).
83 * The brutal reclaim removes all cached objects, even from CPU-bound
84 * magazines.
85 *
[cc73a8a1]86 * @todo
[9179d0a]87 * For better CPU-scaling the magazine allocation strategy should
[10e16a7]88 * be extended. Currently, if the cache does not have a magazine, it asks
 89 * the non-cpu-cached magazine cache to provide one. It might be feasible
 90 * to add a cpu-cached magazine cache (which would allocate its magazines
 91 * from the non-cpu-cached mag. cache). This would provide a nice per-cpu
 92 * buffer. The other possibility is to use the per-cache
 93 * 'empty-magazine-list', which decreases contention on the single
 94 * per-system magazine cache.
95 *
[cc73a8a1]96 * @todo
[da1bafb]97 * It might be good to add lock granularity even at the slab level;
[cc73a8a1]98 * we could then try_spinlock over all partial slabs and thus improve
[da1bafb]99 * scalability even at the slab level.
100 *
[fb10289b]101 */
102
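/*
 * A minimal usage sketch of the interface implemented below: a client
 * subsystem creates a cache once and then allocates and frees objects
 * through it. The type foo_t and the foo_* helpers are hypothetical;
 * only slab_cache_create(), slab_alloc() and slab_free() are the real
 * interface.
 *
 *	static slab_cache_t *foo_cache;
 *
 *	void foo_init(void)
 *	{
 *		foo_cache = slab_cache_create("foo_t", sizeof(foo_t), 0,
 *		    NULL, NULL, 0);
 *	}
 *
 *	foo_t *foo_new(unsigned int flags)
 *	{
 *		return (foo_t *) slab_alloc(foo_cache, flags);
 *	}
 *
 *	void foo_delete(foo_t *foo)
 *	{
 *		slab_free(foo_cache, foo);
 *	}
 */
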
[4e147a6]103#include <synch/spinlock.h>
104#include <mm/slab.h>
[5c9a08b]105#include <adt/list.h>
[4e147a6]106#include <memstr.h>
107#include <align.h>
[a294ad0]108#include <mm/frame.h>
[4e147a6]109#include <config.h>
110#include <print.h>
111#include <arch.h>
112#include <panic.h>
[a294ad0]113#include <debug.h>
[c352c2e]114#include <bitops.h>
[ce8aed1]115#include <macros.h>
[4e147a6]116
[da1bafb]117IRQ_SPINLOCK_STATIC_INITIALIZE(slab_cache_lock);
[fb10289b]118static LIST_INITIALIZE(slab_cache_list);
119
120/** Magazine cache */
121static slab_cache_t mag_cache;
[da1bafb]122
[fb10289b]123/** Cache for cache descriptors */
124static slab_cache_t slab_cache_cache;
[da1bafb]125
[fb10289b]126/** Cache for external slab descriptors
127 * This time we want per-cpu cache, so do not make it static
[9179d0a]128 * - using slab for internal slab structures will not deadlock,
[fb10289b]129 * as all slab structures are 'small' - control structures of
130 * their caches do not require further allocation
131 */
132static slab_cache_t *slab_extern_cache;
[da1bafb]133
[c352c2e]134/** Caches for malloc */
[ce8aed1]135static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
[da1bafb]136
[a000878c]137static const char *malloc_names[] = {
[ce8aed1]138 "malloc-16",
139 "malloc-32",
140 "malloc-64",
141 "malloc-128",
142 "malloc-256",
143 "malloc-512",
144 "malloc-1K",
145 "malloc-2K",
146 "malloc-4K",
147 "malloc-8K",
148 "malloc-16K",
149 "malloc-32K",
150 "malloc-64K",
151 "malloc-128K",
[c3ebc47]152 "malloc-256K",
153 "malloc-512K",
154 "malloc-1M",
155 "malloc-2M",
156 "malloc-4M"
[c352c2e]157};
[a294ad0]158
[fb10289b]159/** Slab descriptor */
[a294ad0]160typedef struct {
[da1bafb]161 slab_cache_t *cache; /**< Pointer to parent cache. */
162 link_t link; /**< List of full/partial slabs. */
163 void *start; /**< Start address of first available item. */
164 size_t available; /**< Count of available items in this slab. */
165 size_t nextavail; /**< The index of next available item. */
[ce8aed1]166} slab_t;
[a294ad0]167
[214f5bb]168#ifdef CONFIG_DEBUG
[da1bafb]169static unsigned int _slab_initialized = 0;
[214f5bb]170#endif
171
[a294ad0]172/**************************************/
[9179d0a]173/* Slab allocation functions */
[da1bafb]174/**************************************/
[a294ad0]175
[da1bafb]176/** Allocate frames for slab space and initialize
[a294ad0]177 *
178 */
[7a0359b]179NO_TRACE static slab_t *slab_space_alloc(slab_cache_t *cache,
180 unsigned int flags)
[a294ad0]181{
[98000fb]182 size_t zone = 0;
[085d973]183
[da1bafb]184 void *data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
[e45f81a]185 if (!data) {
[a294ad0]186 return NULL;
[bc504ef2]187 }
[da1bafb]188
189 slab_t *slab;
190 size_t fsize;
191
[46c1234]192 if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
[fb10289b]193 slab = slab_alloc(slab_extern_cache, flags);
[a294ad0]194 if (!slab) {
[2e9eae2]195 frame_free(KA2PA(data));
[a294ad0]196 return NULL;
197 }
198 } else {
199 fsize = (PAGE_SIZE << cache->order);
200 slab = data + fsize - sizeof(*slab);
201 }
[e3c762cd]202
[a294ad0]203 /* Fill in slab structures */
[da1bafb]204 size_t i;
205 for (i = 0; i < ((size_t) 1 << cache->order); i++)
[6c441cf8]206 frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone);
[da1bafb]207
[a294ad0]208 slab->start = data;
209 slab->available = cache->objects;
210 slab->nextavail = 0;
[4a5b2b0e]211 slab->cache = cache;
[da1bafb]212
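/*
 * Build the implicit free list: the first word of every free object stores
 * the index of the next free object, so object i initially points to object
 * i + 1 and allocation starts from nextavail == 0.
 */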
[6c441cf8]213 for (i = 0; i < cache->objects; i++)
[da1bafb]214 *((size_t *) (slab->start + i * cache->size)) = i + 1;
215
[bc504ef2]216 atomic_inc(&cache->allocated_slabs);
[a294ad0]217 return slab;
218}
219
[da1bafb]220/** Deallocate space associated with slab
[a294ad0]221 *
222 * @return number of freed frames
[da1bafb]223 *
[a294ad0]224 */
[7a0359b]225NO_TRACE static size_t slab_space_free(slab_cache_t *cache, slab_t *slab)
[a294ad0]226{
[2e9eae2]227 frame_free(KA2PA(slab->start));
[da1bafb]228 if (!(cache->flags & SLAB_CACHE_SLINSIDE))
[fb10289b]229 slab_free(slab_extern_cache, slab);
[da1bafb]230
[bc504ef2]231 atomic_dec(&cache->allocated_slabs);
232
[da1bafb]233 return (1 << cache->order);
[a294ad0]234}
235
236/** Map object to slab structure */
[7a0359b]237NO_TRACE static slab_t *obj2slab(void *obj)
[a294ad0]238{
[ce8aed1]239 return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
[a294ad0]240}
241
[da1bafb]242/******************/
[9179d0a]243/* Slab functions */
[da1bafb]244/******************/
[4e147a6]245
[da1bafb]246/** Return object to slab and call a destructor
[4e147a6]247 *
[a294ad0]248 * @param slab The slab of the object, if known directly by the caller; otherwise NULL
249 *
[4e147a6]250 * @return Number of freed pages
[da1bafb]251 *
[4e147a6]252 */
[7a0359b]253NO_TRACE static size_t slab_obj_destroy(slab_cache_t *cache, void *obj,
254 slab_t *slab)
[4e147a6]255{
[c8d0f9e5]256 ASSERT(interrupts_disabled());
257
[a294ad0]258 if (!slab)
259 slab = obj2slab(obj);
[da1bafb]260
[4a5b2b0e]261 ASSERT(slab->cache == cache);
[da1bafb]262
263 size_t freed = 0;
264
[266294a9]265 if (cache->destructor)
266 freed = cache->destructor(obj);
267
[428aabf]268 spinlock_lock(&cache->slablock);
[8e1ea655]269 ASSERT(slab->available < cache->objects);
[da1bafb]270
271 *((size_t *) obj) = slab->nextavail;
[46c1234]272 slab->nextavail = (obj - slab->start) / cache->size;
[a294ad0]273 slab->available++;
[da1bafb]274
[a294ad0]275 /* Move it to correct list */
276 if (slab->available == cache->objects) {
277 /* Free associated memory */
278 list_remove(&slab->link);
[e22f561]279 spinlock_unlock(&cache->slablock);
[da1bafb]280
[266294a9]281 return freed + slab_space_free(cache, slab);
[e72b0a3]282 } else if (slab->available == 1) {
283 /* It was in full, move to partial */
284 list_remove(&slab->link);
285 list_prepend(&slab->link, &cache->partial_slabs);
[a294ad0]286 }
[da1bafb]287
[248fc1a]288 spinlock_unlock(&cache->slablock);
[266294a9]289 return freed;
[a294ad0]290}
[4e147a6]291
[da1bafb]292/** Take new object from slab or create new if needed
[4e147a6]293 *
294 * @return Object address or null
[da1bafb]295 *
[4e147a6]296 */
[7a0359b]297NO_TRACE static void *slab_obj_create(slab_cache_t *cache, unsigned int flags)
[4e147a6]298{
[c8d0f9e5]299 ASSERT(interrupts_disabled());
300
[428aabf]301 spinlock_lock(&cache->slablock);
[da1bafb]302
303 slab_t *slab;
304
[a294ad0]305 if (list_empty(&cache->partial_slabs)) {
[da1bafb]306 /*
307 * Allow recursion and reclaiming
[9179d0a]308 * - this should work, as the slab control structures
[e3c762cd]309 * are small and do not need to allocate with anything
 310 * other than frame_alloc when they are allocating;
[a294ad0]311 * that is why we should get recursion at most one level deep
[da1bafb]312 *
[a294ad0]313 */
[428aabf]314 spinlock_unlock(&cache->slablock);
[a294ad0]315 slab = slab_space_alloc(cache, flags);
[428aabf]316 if (!slab)
[e72b0a3]317 return NULL;
[da1bafb]318
[e72b0a3]319 spinlock_lock(&cache->slablock);
[a294ad0]320 } else {
[55b77d9]321 slab = list_get_instance(list_first(&cache->partial_slabs),
322 slab_t, link);
[a294ad0]323 list_remove(&slab->link);
324 }
[da1bafb]325
326 void *obj = slab->start + slab->nextavail * cache->size;
327 slab->nextavail = *((size_t *) obj);
[a294ad0]328 slab->available--;
[da1bafb]329
[f3272e98]330 if (!slab->available)
[bc504ef2]331 list_prepend(&slab->link, &cache->full_slabs);
[a294ad0]332 else
[bc504ef2]333 list_prepend(&slab->link, &cache->partial_slabs);
[da1bafb]334
[428aabf]335 spinlock_unlock(&cache->slablock);
[da1bafb]336
337 if ((cache->constructor) && (cache->constructor(obj, flags))) {
[266294a9]338 /* Bad, bad, construction failed */
339 slab_obj_destroy(cache, obj, slab);
340 return NULL;
341 }
[da1bafb]342
[a294ad0]343 return obj;
[4e147a6]344}
345
[da1bafb]346/****************************/
[4e147a6]347/* CPU-Cache slab functions */
[da1bafb]348/****************************/
[4e147a6]349
[da1bafb]350/** Find a full magazine in cache, take it from list and return it
351 *
352 * @param first If true, return first, else last mag.
[5158549]353 *
354 */
[7a0359b]355NO_TRACE static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache,
356 bool first)
[5158549]357{
358 slab_magazine_t *mag = NULL;
359 link_t *cur;
[da1bafb]360
[c8d0f9e5]361 ASSERT(interrupts_disabled());
362
[5158549]363 spinlock_lock(&cache->maglock);
364 if (!list_empty(&cache->magazines)) {
365 if (first)
[55b77d9]366 cur = list_first(&cache->magazines);
[5158549]367 else
[55b77d9]368 cur = list_last(&cache->magazines);
[da1bafb]369
[5158549]370 mag = list_get_instance(cur, slab_magazine_t, link);
371 list_remove(&mag->link);
372 atomic_dec(&cache->magazine_counter);
373 }
374 spinlock_unlock(&cache->maglock);
[25ebfbd]375
[5158549]376 return mag;
377}
378
[da1bafb]379/** Prepend magazine to magazine list in cache
380 *
381 */
[7a0359b]382NO_TRACE static void put_mag_to_cache(slab_cache_t *cache,
383 slab_magazine_t *mag)
[5158549]384{
[c8d0f9e5]385 ASSERT(interrupts_disabled());
386
[5158549]387 spinlock_lock(&cache->maglock);
[da1bafb]388
[5158549]389 list_prepend(&mag->link, &cache->magazines);
390 atomic_inc(&cache->magazine_counter);
391
392 spinlock_unlock(&cache->maglock);
393}
394
[da1bafb]395/** Free all objects in magazine and free memory associated with magazine
[4e147a6]396 *
397 * @return Number of freed pages
[da1bafb]398 *
[4e147a6]399 */
[7a0359b]400NO_TRACE static size_t magazine_destroy(slab_cache_t *cache,
401 slab_magazine_t *mag)
[4e147a6]402{
[da1bafb]403 size_t i;
[98000fb]404 size_t frames = 0;
[da1bafb]405
[6c441cf8]406 for (i = 0; i < mag->busy; i++) {
[a294ad0]407 frames += slab_obj_destroy(cache, mag->objs[i], NULL);
[4a5b2b0e]408 atomic_dec(&cache->cached_objs);
409 }
[4e147a6]410
411 slab_free(&mag_cache, mag);
[da1bafb]412
[4e147a6]413 return frames;
414}
415
[da1bafb]416/** Find full magazine, set it as current and return it
417 *
[fb10289b]418 */
[7a0359b]419NO_TRACE static slab_magazine_t *get_full_current_mag(slab_cache_t *cache)
[fb10289b]420{
[da1bafb]421 slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
422 slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
[7a0359b]423
[25ebfbd]424 ASSERT(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock));
[da1bafb]425
[fb10289b]426 if (cmag) { /* First try local CPU magazines */
427 if (cmag->busy)
428 return cmag;
[da1bafb]429
430 if ((lastmag) && (lastmag->busy)) {
[fb10289b]431 cache->mag_cache[CPU->id].current = lastmag;
432 cache->mag_cache[CPU->id].last = cmag;
433 return lastmag;
434 }
435 }
[da1bafb]436
[fb10289b]437 /* Local magazines are empty, import one from magazine list */
[da1bafb]438 slab_magazine_t *newmag = get_mag_from_cache(cache, 1);
[5158549]439 if (!newmag)
[fb10289b]440 return NULL;
[da1bafb]441
[fb10289b]442 if (lastmag)
[5158549]443 magazine_destroy(cache, lastmag);
[da1bafb]444
[fb10289b]445 cache->mag_cache[CPU->id].last = cmag;
446 cache->mag_cache[CPU->id].current = newmag;
[da1bafb]447
[fb10289b]448 return newmag;
449}
450
[da1bafb]451/** Try to find object in CPU-cache magazines
[4e147a6]452 *
453 * @return Pointer to object or NULL if not available
[da1bafb]454 *
[4e147a6]455 */
[7a0359b]456NO_TRACE static void *magazine_obj_get(slab_cache_t *cache)
[4e147a6]457{
[81e52f2a]458 if (!CPU)
459 return NULL;
[da1bafb]460
[25ebfbd]461 irq_spinlock_lock(&cache->mag_cache[CPU->id].lock, true);
[da1bafb]462
463 slab_magazine_t *mag = get_full_current_mag(cache);
[fb10289b]464 if (!mag) {
[25ebfbd]465 irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
[fb10289b]466 return NULL;
[4e147a6]467 }
[da1bafb]468
469 void *obj = mag->objs[--mag->busy];
[25ebfbd]470 irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
[da1bafb]471
[4a5b2b0e]472 atomic_dec(&cache->cached_objs);
473
474 return obj;
[4e147a6]475}
476
[da1bafb]477/** Ensure that the current magazine is not full, return a pointer to it,
 478 * or NULL if no such magazine is available and a new one cannot be allocated
[4e147a6]479 *
[da1bafb]480 * We have 2 magazines bound to processor.
481 * First try the current.
482 * If full, try the last.
483 * If full, put to magazines list.
[4e147a6]484 *
[086a600]485 */
[7a0359b]486NO_TRACE static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache)
[086a600]487{
[da1bafb]488 slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
489 slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
490
[25ebfbd]491 ASSERT(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock));
[7a0359b]492
[086a600]493 if (cmag) {
494 if (cmag->busy < cmag->size)
495 return cmag;
[da1bafb]496
497 if ((lastmag) && (lastmag->busy < lastmag->size)) {
[086a600]498 cache->mag_cache[CPU->id].last = cmag;
499 cache->mag_cache[CPU->id].current = lastmag;
500 return lastmag;
501 }
502 }
[da1bafb]503
[086a600]504 /* current | last are full | nonexistent, allocate new */
[da1bafb]505
506 /*
507 * We do not want to sleep just because of caching,
508 * especially we do not want reclaiming to start, as
509 * this would deadlock.
510 *
511 */
512 slab_magazine_t *newmag = slab_alloc(&mag_cache,
513 FRAME_ATOMIC | FRAME_NO_RECLAIM);
[086a600]514 if (!newmag)
515 return NULL;
[da1bafb]516
[086a600]517 newmag->size = SLAB_MAG_SIZE;
518 newmag->busy = 0;
[da1bafb]519
[086a600]520 /* Flush last to magazine list */
[5158549]521 if (lastmag)
522 put_mag_to_cache(cache, lastmag);
[da1bafb]523
[086a600]524 /* Move current as last, save new as current */
[da1bafb]525 cache->mag_cache[CPU->id].last = cmag;
526 cache->mag_cache[CPU->id].current = newmag;
527
[086a600]528 return newmag;
529}
530
[da1bafb]531/** Put object into CPU-cache magazine
532 *
533 * @return 0 on success, -1 on no memory
[086a600]534 *
[4e147a6]535 */
[7a0359b]536NO_TRACE static int magazine_obj_put(slab_cache_t *cache, void *obj)
[4e147a6]537{
[81e52f2a]538 if (!CPU)
539 return -1;
[da1bafb]540
[25ebfbd]541 irq_spinlock_lock(&cache->mag_cache[CPU->id].lock, true);
[da1bafb]542
543 slab_magazine_t *mag = make_empty_current_mag(cache);
[fb10289b]544 if (!mag) {
[25ebfbd]545 irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
[fb10289b]546 return -1;
547 }
[4e147a6]548
549 mag->objs[mag->busy++] = obj;
[da1bafb]550
[25ebfbd]551 irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
[da1bafb]552
[4a5b2b0e]553 atomic_inc(&cache->cached_objs);
[da1bafb]554
[4e147a6]555 return 0;
556}
557
[da1bafb]558/************************/
[9179d0a]559/* Slab cache functions */
[da1bafb]560/************************/
[a294ad0]561
[da1bafb]562/** Return the number of objects that fit into one slab of the given cache
563 *
564 */
[7a0359b]565NO_TRACE static size_t comp_objects(slab_cache_t *cache)
[a294ad0]566{
567 if (cache->flags & SLAB_CACHE_SLINSIDE)
[da1bafb]568 return ((PAGE_SIZE << cache->order)
569 - sizeof(slab_t)) / cache->size;
570 else
[a294ad0]571 return (PAGE_SIZE << cache->order) / cache->size;
572}
573
[da1bafb]574/** Return wasted space in slab
575 *
576 */
[7a0359b]577NO_TRACE static size_t badness(slab_cache_t *cache)
[a294ad0]578{
[da1bafb]579 size_t objects = comp_objects(cache);
580 size_t ssize = PAGE_SIZE << cache->order;
581
[a294ad0]582 if (cache->flags & SLAB_CACHE_SLINSIDE)
583 ssize -= sizeof(slab_t);
[da1bafb]584
[6c441cf8]585 return ssize - objects * cache->size;
[a294ad0]586}
[4e147a6]587
[da1bafb]588/** Initialize mag_cache structure in slab cache
589 *
[8e1ea655]590 */
[7a0359b]591NO_TRACE static bool make_magcache(slab_cache_t *cache)
[8e1ea655]592{
[214f5bb]593 ASSERT(_slab_initialized >= 2);
[da1bafb]594
[46c1234]595 cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,
[55821eea]596 FRAME_ATOMIC);
597 if (!cache->mag_cache)
598 return false;
[da1bafb]599
600 size_t i;
[6c441cf8]601 for (i = 0; i < config.cpu_count; i++) {
[e32e092]602 memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
[25ebfbd]603 irq_spinlock_initialize(&cache->mag_cache[i].lock,
[da1bafb]604 "slab.cache.mag_cache[].lock");
[8e1ea655]605 }
[da1bafb]606
[55821eea]607 return true;
[8e1ea655]608}
609
[da1bafb]610/** Initialize allocated memory as a slab cache
611 *
612 */
[7a0359b]613NO_TRACE static void _slab_cache_create(slab_cache_t *cache, const char *name,
[da1bafb]614 size_t size, size_t align, int (*constructor)(void *obj,
615 unsigned int kmflag), size_t (*destructor)(void *obj), unsigned int flags)
[4e147a6]616{
[e32e092]617 memsetb(cache, sizeof(*cache), 0);
[4e147a6]618 cache->name = name;
[da1bafb]619
[96b02eb9]620 if (align < sizeof(sysarg_t))
621 align = sizeof(sysarg_t);
[da1bafb]622
[14e5d88]623 size = ALIGN_UP(size, align);
[da1bafb]624
[a294ad0]625 cache->size = size;
[4e147a6]626 cache->constructor = constructor;
627 cache->destructor = destructor;
628 cache->flags = flags;
[da1bafb]629
[4e147a6]630 list_initialize(&cache->full_slabs);
631 list_initialize(&cache->partial_slabs);
632 list_initialize(&cache->magazines);
[da1bafb]633
634 spinlock_initialize(&cache->slablock, "slab.cache.slablock");
635 spinlock_initialize(&cache->maglock, "slab.cache.maglock");
636
[46c1234]637 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
[55821eea]638 (void) make_magcache(cache);
[da1bafb]639
[4e147a6]640 /* Compute slab sizes, object counts in slabs etc. */
641 if (cache->size < SLAB_INSIDE_SIZE)
642 cache->flags |= SLAB_CACHE_SLINSIDE;
[da1bafb]643
[a294ad0]644 /* Minimum slab order */
[da1bafb]645 size_t pages = SIZE2FRAMES(cache->size);
646
[99993b9]647 /* We need the 2^order >= pages */
648 if (pages == 1)
649 cache->order = 0;
650 else
[46c1234]651 cache->order = fnzb(pages - 1) + 1;
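/*
 * For pages > 1, fnzb(pages - 1) + 1 is ceil(log2(pages)); e.g. a cache
 * needing 3 frames gets order 2, i.e. 4 frames per slab.
 */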
[da1bafb]652
653 while (badness(cache) > SLAB_MAX_BADNESS(cache))
[a294ad0]654 cache->order += 1;
[da1bafb]655
[a294ad0]656 cache->objects = comp_objects(cache);
[da1bafb]657
[14e5d88]658 /* If info fits in, put it inside */
659 if (badness(cache) > sizeof(slab_t))
660 cache->flags |= SLAB_CACHE_SLINSIDE;
[da1bafb]661
[248fc1a]662 /* Add cache to cache list */
[da1bafb]663 irq_spinlock_lock(&slab_cache_lock, true);
[4e147a6]664 list_append(&cache->link, &slab_cache_list);
[da1bafb]665 irq_spinlock_unlock(&slab_cache_lock, true);
[4e147a6]666}
667
[da1bafb]668/** Create slab cache
669 *
670 */
[a000878c]671slab_cache_t *slab_cache_create(const char *name, size_t size, size_t align,
[da1bafb]672 int (*constructor)(void *obj, unsigned int kmflag),
673 size_t (*destructor)(void *obj), unsigned int flags)
[4e147a6]674{
[da1bafb]675 slab_cache_t *cache = slab_alloc(&slab_cache_cache, 0);
[4e147a6]676 _slab_cache_create(cache, name, size, align, constructor, destructor,
[46c1234]677 flags);
[da1bafb]678
[4e147a6]679 return cache;
680}
681
[da1bafb]682/** Reclaim space occupied by objects that are already free
[4e147a6]683 *
684 * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
[da1bafb]685 *
[4e147a6]686 * @return Number of freed pages
[da1bafb]687 *
[4e147a6]688 */
[7a0359b]689NO_TRACE static size_t _slab_reclaim(slab_cache_t *cache, unsigned int flags)
[4e147a6]690{
691 if (cache->flags & SLAB_CACHE_NOMAGAZINE)
692 return 0; /* Nothing to do */
[da1bafb]693
694 /*
695 * We count up to original magazine count to avoid
696 * endless loop
[5158549]697 */
[da1bafb]698 atomic_count_t magcount = atomic_get(&cache->magazine_counter);
699
700 slab_magazine_t *mag;
701 size_t frames = 0;
702
703 while ((magcount--) && (mag = get_mag_from_cache(cache, 0))) {
704 frames += magazine_destroy(cache, mag);
705 if ((!(flags & SLAB_RECLAIM_ALL)) && (frames))
[5158549]706 break;
[fb10289b]707 }
[4e147a6]708
709 if (flags & SLAB_RECLAIM_ALL) {
[5158549]710 /* Free cpu-bound magazines */
[4e147a6]711 /* Destroy CPU magazines */
[da1bafb]712 size_t i;
[6c441cf8]713 for (i = 0; i < config.cpu_count; i++) {
[25ebfbd]714 irq_spinlock_lock(&cache->mag_cache[i].lock, true);
[da1bafb]715
[4e147a6]716 mag = cache->mag_cache[i].current;
717 if (mag)
718 frames += magazine_destroy(cache, mag);
719 cache->mag_cache[i].current = NULL;
720
721 mag = cache->mag_cache[i].last;
722 if (mag)
723 frames += magazine_destroy(cache, mag);
724 cache->mag_cache[i].last = NULL;
[da1bafb]725
[25ebfbd]726 irq_spinlock_unlock(&cache->mag_cache[i].lock, true);
[5158549]727 }
[428aabf]728 }
[da1bafb]729
[4e147a6]730 return frames;
731}
732
[da1bafb]733/** Check that there are no slabs and remove cache from system
734 *
735 */
[4e147a6]736void slab_cache_destroy(slab_cache_t *cache)
737{
[da1bafb]738 /*
 739 * First remove the cache from the cache list, so that we don't need
[5158549]740 * to disable interrupts later
[da1bafb]741 *
[5158549]742 */
[da1bafb]743 irq_spinlock_lock(&slab_cache_lock, true);
[5158549]744 list_remove(&cache->link);
[da1bafb]745 irq_spinlock_unlock(&slab_cache_lock, true);
746
747 /*
748 * Do not lock anything, we assume the software is correct and
749 * does not touch the cache when it decides to destroy it
750 *
751 */
[4e147a6]752
753 /* Destroy all magazines */
754 _slab_reclaim(cache, SLAB_RECLAIM_ALL);
[da1bafb]755
[4e147a6]756 /* All slabs must be empty */
[da1bafb]757 if ((!list_empty(&cache->full_slabs)) ||
758 (!list_empty(&cache->partial_slabs)))
[4e147a6]759 panic("Destroying cache that is not empty.");
[da1bafb]760
[8e1ea655]761 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
[bb68433]762 free(cache->mag_cache);
[da1bafb]763
[fb10289b]764 slab_free(&slab_cache_cache, cache);
[4e147a6]765}
766
[da1bafb]767/** Allocate new object from cache - if no flags given, always returns memory
768 *
769 */
770void *slab_alloc(slab_cache_t *cache, unsigned int flags)
[4e147a6]771{
[da1bafb]772 /* Disable interrupts to avoid deadlocks with interrupt handlers */
773 ipl_t ipl = interrupts_disable();
774
[4e147a6]775 void *result = NULL;
[c5613b72]776
[da1bafb]777 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
[4e147a6]778 result = magazine_obj_get(cache);
[da1bafb]779
[428aabf]780 if (!result)
[4e147a6]781 result = slab_obj_create(cache, flags);
[da1bafb]782
[4e147a6]783 interrupts_restore(ipl);
[da1bafb]784
[fb10289b]785 if (result)
786 atomic_inc(&cache->allocated_objs);
[da1bafb]787
[4e147a6]788 return result;
789}
790
[da1bafb]791/** Return object to cache, use slab if known
792 *
793 */
[7a0359b]794NO_TRACE static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
[4e147a6]795{
[da1bafb]796 ipl_t ipl = interrupts_disable();
797
[46c1234]798 if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
[da1bafb]799 (magazine_obj_put(cache, obj)))
[c352c2e]800 slab_obj_destroy(cache, obj, slab);
[da1bafb]801
[4e147a6]802 interrupts_restore(ipl);
[fb10289b]803 atomic_dec(&cache->allocated_objs);
[4e147a6]804}
805
[da1bafb]806/** Return slab object to cache
807 *
808 */
[c352c2e]809void slab_free(slab_cache_t *cache, void *obj)
810{
[ce8aed1]811 _slab_free(cache, obj, NULL);
[c352c2e]812}
813
[ab6f2507]814/** Go through all caches and reclaim what is possible */
[da1bafb]815size_t slab_reclaim(unsigned int flags)
[4e147a6]816{
[ab6f2507]817 irq_spinlock_lock(&slab_cache_lock, true);
[da1bafb]818
[98000fb]819 size_t frames = 0;
[55b77d9]820 list_foreach(slab_cache_list, cur) {
[da1bafb]821 slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
[4e147a6]822 frames += _slab_reclaim(cache, flags);
823 }
[da1bafb]824
[ab6f2507]825 irq_spinlock_unlock(&slab_cache_lock, true);
[da1bafb]826
[4e147a6]827 return frames;
828}
829
[da1bafb]830/** Print list of slab caches
831 *
832 */
[4e147a6]833void slab_print_list(void)
834{
[ccb426c]835 printf("[slab name ] [size ] [pages ] [obj/pg] [slabs ]"
836 " [cached] [alloc ] [ctl]\n");
[da1bafb]837
838 size_t skip = 0;
[599d6f5]839 while (true) {
840 /*
841 * We must not hold the slab_cache_lock spinlock when printing
842 * the statistics. Otherwise we can easily deadlock if the print
843 * needs to allocate memory.
844 *
845 * Therefore, we walk through the slab cache list, skipping some
846 * amount of already processed caches during each iteration and
847 * gathering statistics about the first unprocessed cache. For
848 * the sake of printing the statistics, we realese the
849 * slab_cache_lock and reacquire it afterwards. Then the walk
850 * starts again.
851 *
 852 * This limits both the efficiency and the accuracy of the
853 * obtained statistics. The efficiency is decreased because the
854 * time complexity of the algorithm is quadratic instead of
855 * linear. The accuracy is impacted because we drop the lock
856 * after processing one cache. If there is someone else
857 * manipulating the cache list, we might omit an arbitrary
858 * number of caches or process one cache multiple times.
859 * However, we don't bleed for this algorithm for it is only
860 * statistics.
861 */
[da1bafb]862
863 irq_spinlock_lock(&slab_cache_lock, true);
864
865 link_t *cur;
866 size_t i;
[55b77d9]867 for (i = 0, cur = slab_cache_list.head.next;
868 (i < skip) && (cur != &slab_cache_list.head);
[da1bafb]869 i++, cur = cur->next);
870
[55b77d9]871 if (cur == &slab_cache_list.head) {
[da1bafb]872 irq_spinlock_unlock(&slab_cache_lock, true);
[599d6f5]873 break;
874 }
[da1bafb]875
[599d6f5]876 skip++;
[da1bafb]877
878 slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
879
[a000878c]880 const char *name = cache->name;
[599d6f5]881 uint8_t order = cache->order;
882 size_t size = cache->size;
[da1bafb]883 size_t objects = cache->objects;
[599d6f5]884 long allocated_slabs = atomic_get(&cache->allocated_slabs);
885 long cached_objs = atomic_get(&cache->cached_objs);
886 long allocated_objs = atomic_get(&cache->allocated_objs);
[da1bafb]887 unsigned int flags = cache->flags;
[599d6f5]888
[da1bafb]889 irq_spinlock_unlock(&slab_cache_lock, true);
[6536a4a9]890
[7e752b2]891 printf("%-18s %8zu %8u %8zu %8ld %8ld %8ld %-5s\n",
[599d6f5]892 name, size, (1 << order), objects, allocated_slabs,
893 cached_objs, allocated_objs,
894 flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
[4e147a6]895 }
896}
897
898void slab_cache_init(void)
899{
900 /* Initialize magazine cache */
[46c1234]901 _slab_cache_create(&mag_cache, "slab_magazine",
902 sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void*),
903 sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE |
904 SLAB_CACHE_SLINSIDE);
[da1bafb]905
[fb10289b]906 /* Initialize slab_cache cache */
[46c1234]907 _slab_cache_create(&slab_cache_cache, "slab_cache",
908 sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL,
909 SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
[da1bafb]910
[fb10289b]911 /* Initialize external slab cache */
[46c1234]912 slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0,
913 NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
[da1bafb]914
[4e147a6]915 /* Initialize structures for malloc */
[da1bafb]916 size_t i;
917 size_t size;
918
[46c1234]919 for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
920 i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
921 i++, size <<= 1) {
922 malloc_caches[i] = slab_cache_create(malloc_names[i], size, 0,
923 NULL, NULL, SLAB_CACHE_MAGDEFERRED);
[c352c2e]924 }
[da1bafb]925
[a000878c]926#ifdef CONFIG_DEBUG
[04225a7]927 _slab_initialized = 1;
928#endif
[c352c2e]929}
930
[8e1ea655]931/** Enable cpu_cache
932 *
 933 * The kernel calls this function when it knows the real number of
[da1bafb]934 * processors. It allocates the per-CPU magazine caches and enables them
 935 * on all existing caches that are marked SLAB_CACHE_MAGDEFERRED.
936 *
[8e1ea655]937 */
938void slab_enable_cpucache(void)
939{
[214f5bb]940#ifdef CONFIG_DEBUG
941 _slab_initialized = 2;
942#endif
[8e1ea655]943
[da1bafb]944 irq_spinlock_lock(&slab_cache_lock, false);
945
[55b77d9]946 list_foreach(slab_cache_list, cur) {
[da1bafb]947 slab_cache_t *slab = list_get_instance(cur, slab_cache_t, link);
948 if ((slab->flags & SLAB_CACHE_MAGDEFERRED) !=
[46c1234]949 SLAB_CACHE_MAGDEFERRED)
[8e1ea655]950 continue;
[da1bafb]951
952 (void) make_magcache(slab);
953 slab->flags &= ~SLAB_CACHE_MAGDEFERRED;
[8e1ea655]954 }
[da1bafb]955
956 irq_spinlock_unlock(&slab_cache_lock, false);
[8e1ea655]957}
958
[da1bafb]959void *malloc(size_t size, unsigned int flags)
[c352c2e]960{
[04225a7]961 ASSERT(_slab_initialized);
[c259b9b]962 ASSERT(size <= (1 << SLAB_MAX_MALLOC_W));
[c352c2e]963
964 if (size < (1 << SLAB_MIN_MALLOC_W))
965 size = (1 << SLAB_MIN_MALLOC_W);
[da1bafb]966
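/*
 * Pick the malloc cache for the next power of two; e.g. assuming
 * SLAB_MIN_MALLOC_W is 4 (the smallest cache being malloc-16), a request
 * for 100 bytes gives fnzb(99) == 6, idx == 3 and thus the malloc-128
 * cache.
 */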
967 uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
968
[c352c2e]969 return slab_alloc(malloc_caches[idx], flags);
970}
971
[da1bafb]972void *realloc(void *ptr, size_t size, unsigned int flags)
[c352c2e]973{
[ce8aed1]974 ASSERT(_slab_initialized);
975 ASSERT(size <= (1 << SLAB_MAX_MALLOC_W));
976
977 void *new_ptr;
978
979 if (size > 0) {
980 if (size < (1 << SLAB_MIN_MALLOC_W))
981 size = (1 << SLAB_MIN_MALLOC_W);
[da1bafb]982 uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
[ce8aed1]983
984 new_ptr = slab_alloc(malloc_caches[idx], flags);
985 } else
986 new_ptr = NULL;
987
988 if ((new_ptr != NULL) && (ptr != NULL)) {
989 slab_t *slab = obj2slab(ptr);
990 memcpy(new_ptr, ptr, min(size, slab->cache->size));
991 }
992
993 if (ptr != NULL)
994 free(ptr);
995
996 return new_ptr;
997}
[5158549]998
[ce8aed1]999void free(void *ptr)
1000{
1001 if (!ptr)
[f3272e98]1002 return;
[da1bafb]1003
[ce8aed1]1004 slab_t *slab = obj2slab(ptr);
1005 _slab_free(slab->cache, ptr, slab);
[4e147a6]1006}
[b45c443]1007
[cc73a8a1]1008/** @}
[b45c443]1009 */