/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Slab allocator.
 *
 * The slab allocator is closely modelled after the OpenSolaris slab
 * allocator.
 * @see http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 * @li empty slabs are deallocated immediately
 *     (in Linux they are kept in a linked list, in Solaris ???)
 * @li empty magazines are deallocated when not needed
 *     (in Solaris they are kept in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy
 * to add:
 * @li cache coloring
 * @li dynamic magazine growing (different magazine sizes are already
 *     supported, but the allocation strategy would need to be adjusted)
 *
 * The slab allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is allocated, the allocator first checks whether it is
 * available in a CPU-bound magazine. If it is not found there, the object
 * is allocated from a CPU-shared slab - if a partially full one is found,
 * it is used, otherwise a new one is allocated.
 *
 * When an object is deallocated, it is put into a CPU-bound magazine.
 * If there is no such magazine, a new one is allocated (if that fails,
 * the object is deallocated into its slab). If the magazine is full, it is
 * put onto the CPU-shared list of magazines and a new one is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines in order to avoid
 * thrashing when somebody is allocating/deallocating 1 item at the magazine
 * size boundary. LIFO order is enforced, which should avoid fragmentation
 * as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty slabs are freed immediately (thrashing is avoided because
 * of the magazines).
 *
 * The slab information structure is kept inside the data area, if possible.
 * A cache can be marked so that it does not use magazines. This is used
 * only for slab-related caches to avoid deadlocks and infinite recursion
 * (the slab allocator uses itself for allocating all its control
 * structures).
 *
 * The slab allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the CPU-shared magazine list until at least 1 slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from CPU-bound
 * magazines.
 *
 * @todo
 * For better CPU scaling the magazine allocation strategy should be
 * extended. Currently, if the cache does not have a magazine, it asks
 * the non-CPU-cached magazine cache to provide one. It might be feasible
 * to add a CPU-cached magazine cache (which would allocate its magazines
 * from the non-CPU-cached magazine cache). This would provide a nice
 * per-CPU buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which reduces contention on the single
 * per-system magazine cache.
 *
 * @todo
 * It might be good to add lock granularity even at the slab level;
 * we could then try_spinlock over all partial slabs and thus improve
 * scalability even at the slab level.
 */
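
/*
 * Illustrative sketch of how the public interface defined below is
 * typically used. The object type, cache name and variable names are
 * made up for this example:
 *
 *	typedef struct {
 *		int a;
 *		int b;
 *	} foo_t;
 *
 *	slab_cache_t *foo_cache = slab_cache_create("foo_cache",
 *	    sizeof(foo_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
 *
 *	foo_t *foo = slab_alloc(foo_cache, 0);
 *	...
 *	slab_free(foo_cache, foo);
 *	slab_cache_destroy(foo_cache);
 */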

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <adt/list.h>
#include <memstr.h>
#include <align.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>
#include <macros.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;
/** Cache for external slab descriptors
 * This time we want a per-CPU cache, so do not make it static
 * - using slab for internal slab structures will not deadlock,
 *   as all slab structures are 'small' - control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
char *malloc_names[] = {
	"malloc-16",
	"malloc-32",
	"malloc-64",
	"malloc-128",
	"malloc-256",
	"malloc-512",
	"malloc-1K",
	"malloc-2K",
	"malloc-4K",
	"malloc-8K",
	"malloc-16K",
	"malloc-32K",
	"malloc-64K",
	"malloc-128K",
	"malloc-256K"
};

/** Slab descriptor */
typedef struct {
	slab_cache_t *cache;	/**< Pointer to parent cache. */
	link_t link;		/**< List of full/partial slabs. */
	void *start;		/**< Start address of first available item. */
	count_t available;	/**< Count of available items in this slab. */
	index_t nextavail;	/**< The index of the next available item. */
} slab_t;
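
/*
 * Illustrative layout of one slab when the descriptor is kept inside the
 * data area (SLAB_CACHE_SLINSIDE); with external descriptors the slab_t
 * is allocated from slab_extern_cache instead (see slab_space_alloc()):
 *
 *	|<----------- PAGE_SIZE << cache->order ----------->|
 *	+-------+-------+- ... -+---------+---------+--------+
 *	| obj 0 | obj 1 |       | obj N-1 | padding | slab_t |
 *	+-------+-------+- ... -+---------+---------+--------+
 *	^ slab->start
 */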

#ifdef CONFIG_DEBUG
static int _slab_initialized = 0;
#endif

/**************************************/
/* Slab allocation functions */

/**
 * Allocate frames for slab space and initialize the resulting slab
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
	void *data;
	slab_t *slab;
	size_t fsize;
	unsigned int i;
	unsigned int zone = 0;

	data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
	if (!data) {
		return NULL;
	}
	if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
		slab = slab_alloc(slab_extern_cache, flags);
		if (!slab) {
			frame_free(KA2PA(data));
			return NULL;
		}
	} else {
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}

	/* Fill in slab structures */
	for (i = 0; i < ((unsigned int) 1 << cache->order); i++)
		frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone);

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;
	slab->cache = cache;

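	/*
	 * Build the freelist: the first word of free object i holds the
	 * index of the next free object (i + 1), so the index
	 * cache->objects terminates the chain. For example, with 4
	 * objects the stored indices are 1, 2, 3, 4.
	 */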
	for (i = 0; i < cache->objects; i++)
		*((int *) (slab->start + i * cache->size)) = i + 1;

	atomic_inc(&cache->allocated_slabs);
	return slab;
}

/**
 * Deallocate space associated with slab
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free(KA2PA(slab->start));
	if (!(cache->flags & SLAB_CACHE_SLINSIDE))
		slab_free(slab_extern_cache, slab);

	atomic_dec(&cache->allocated_slabs);

	return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
	return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
}

/**************************************/
/* Slab functions */


/**
 * Return object to slab and call a destructor
 *
 * @param slab The slab of the object, if known directly by the caller,
 *             otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
    slab_t *slab)
{
	int freed = 0;

	if (!slab)
		slab = obj2slab(obj);

	ASSERT(slab->cache == cache);

	if (cache->destructor)
		freed = cache->destructor(obj);

	spinlock_lock(&cache->slablock);
	ASSERT(slab->available < cache->objects);

	*((int *) obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start) / cache->size;
	slab->available++;

	/* Move it to the correct list */
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		spinlock_unlock(&cache->slablock);

		return freed + slab_space_free(cache, slab);

	} else if (slab->available == 1) {
		/* It was in full, move to partial */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}
	spinlock_unlock(&cache->slablock);
	return freed;
}

/**
 * Take a new object from slab or create a new slab if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
	slab_t *slab;
	void *obj;

	spinlock_lock(&cache->slablock);

	if (list_empty(&cache->partial_slabs)) {
		/* Allow recursion and reclaiming
		 * - this should work, as the slab control structures
		 *   are small and do not need to allocate with anything
		 *   other than frame_alloc when they are allocating,
		 *   that's why we should get recursion at most 1 level deep
		 */
		spinlock_unlock(&cache->slablock);
		slab = slab_space_alloc(cache, flags);
		if (!slab)
			return NULL;
		spinlock_lock(&cache->slablock);
	} else {
		slab = list_get_instance(cache->partial_slabs.next, slab_t, link);
		list_remove(&slab->link);
	}
	obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((int *) obj);
	slab->available--;

	if (!slab->available)
		list_prepend(&slab->link, &cache->full_slabs);
	else
		list_prepend(&slab->link, &cache->partial_slabs);

	spinlock_unlock(&cache->slablock);

	if (cache->constructor && cache->constructor(obj, flags)) {
		/* Bad, bad, construction failed */
		slab_obj_destroy(cache, obj, slab);
		return NULL;
	}
	return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Find a full magazine in the cache, take it from the list
 * and return it
 *
 * @param first If true, return the first magazine, else the last
 */
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
    int first)
{
	slab_magazine_t *mag = NULL;
	link_t *cur;

	spinlock_lock(&cache->maglock);
	if (!list_empty(&cache->magazines)) {
		if (first)
			cur = cache->magazines.next;
		else
			cur = cache->magazines.prev;
		mag = list_get_instance(cur, slab_magazine_t, link);
		list_remove(&mag->link);
		atomic_dec(&cache->magazine_counter);
	}
	spinlock_unlock(&cache->maglock);
	return mag;
}

/** Prepend magazine to magazine list in cache */
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
{
	spinlock_lock(&cache->maglock);

	list_prepend(&mag->link, &cache->magazines);
	atomic_inc(&cache->magazine_counter);

	spinlock_unlock(&cache->maglock);
}

/**
 * Free all objects in the magazine and free the memory associated with it
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
    slab_magazine_t *mag)
{
	unsigned int i;
	count_t frames = 0;

	for (i = 0; i < mag->busy; i++) {
		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
		atomic_dec(&cache->cached_objs);
	}

	slab_free(&mag_cache, mag);

	return frames;
}

/**
 * Find a full magazine, set it as current and return it
 *
 * Assumes the cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;
	if (cmag) { /* First try local CPU magazines */
		if (cmag->busy)
			return cmag;

		if (lastmag && lastmag->busy) {
			cache->mag_cache[CPU->id].current = lastmag;
			cache->mag_cache[CPU->id].last = cmag;
			return lastmag;
		}
	}
	/* Local magazines are empty, import one from the magazine list */
	newmag = get_mag_from_cache(cache, 1);
	if (!newmag)
		return NULL;

	if (lastmag)
		magazine_destroy(cache, lastmag);

	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;
	return newmag;
}

/**
 * Try to find an object in the CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
	slab_magazine_t *mag;
	void *obj;

	if (!CPU)
		return NULL;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = get_full_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return NULL;
	}
	obj = mag->objs[--mag->busy];
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_dec(&cache->cached_objs);

	return obj;
}

/**
 * Make sure the current magazine has room for at least one more object
 * and return a pointer to it; return NULL if no such magazine is
 * available and a new one cannot be allocated
 *
 * Assumes mag_cache[CPU->id].lock is held
 *
 * We have 2 magazines bound to each processor.
 * First try the current.
 * If full, try the last.
 * If full, put it on the magazines list,
 * allocate a new one and exchange last & current.
 *
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;

	if (cmag) {
		if (cmag->busy < cmag->size)
			return cmag;
		if (lastmag && lastmag->busy < lastmag->size) {
			cache->mag_cache[CPU->id].last = cmag;
			cache->mag_cache[CPU->id].current = lastmag;
			return lastmag;
		}
	}
	/* current | last are full | nonexistent, allocate new */
	/* We do not want to sleep just because of caching */
	/* Especially we do not want reclaiming to start, as
	 * this would deadlock */
	newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
	if (!newmag)
		return NULL;
	newmag->size = SLAB_MAG_SIZE;
	newmag->busy = 0;

	/* Flush last to the magazine list */
	if (lastmag)
		put_mag_to_cache(cache, lastmag);

	/* Move current to last, save new as current */
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;

	return newmag;
}

/**
 * Put an object into the CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	slab_magazine_t *mag;

	if (!CPU)
		return -1;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = make_empty_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return -1;
	}

	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_inc(&cache->cached_objs);
	return 0;
}

/**************************************/
/* Slab cache functions */

/** Return the number of objects that fit in a slab of the given cache */
static unsigned int comp_objects(slab_cache_t *cache)
{
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
	else
		return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static unsigned int badness(slab_cache_t *cache)
{
	unsigned int objects;
	unsigned int ssize;

	objects = comp_objects(cache);
	ssize = PAGE_SIZE << cache->order;
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		ssize -= sizeof(slab_t);
	return ssize - objects * cache->size;
}
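
/*
 * Worked example (illustrative; assuming 4 KiB pages and, say, a 48-byte
 * slab_t): an in-slab cache with order 0 and object size 256 fits
 * (4096 - 48) / 256 = 15 objects and wastes 4048 - 15 * 256 = 208 bytes.
 */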

/**
 * Initialize the mag_cache structure in a slab cache
 */
static void make_magcache(slab_cache_t *cache)
{
	unsigned int i;

	ASSERT(_slab_initialized >= 2);

	cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count, 0);
	for (i = 0; i < config.cpu_count; i++) {
		memsetb((uintptr_t) &cache->mag_cache[i],
		    sizeof(cache->mag_cache[i]), 0);
		spinlock_initialize(&cache->mag_cache[i].lock, "slab_maglock_cpu");
	}
}

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
    char *name,
    size_t size,
    size_t align,
    int (*constructor)(void *obj, int kmflag),
    int (*destructor)(void *obj),
    int flags)
{
	int pages;
	ipl_t ipl;

	memsetb((uintptr_t) cache, sizeof(*cache), 0);
	cache->name = name;

	if (align < sizeof(unative_t))
		align = sizeof(unative_t);
	size = ALIGN_UP(size, align);

	cache->size = size;

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->slablock, "slab_lock");
	spinlock_initialize(&cache->maglock, "slab_maglock");
	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		make_magcache(cache);

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order */
	pages = SIZE2FRAMES(cache->size);
	/* We need 2^order >= pages */
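	/* For example, pages == 5 gives fnzb(4) + 1 == 3, i.e. 8 frames. */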
	if (pages == 1)
		cache->order = 0;
	else
		cache->order = fnzb(pages - 1) + 1;

	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
		cache->order += 1;
	}
	cache->objects = comp_objects(cache);
	/* If the slab descriptor fits into the wasted space, put it inside */
	if (badness(cache) > sizeof(slab_t))
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Add cache to cache list */
	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
    size_t size,
    size_t align,
    int (*constructor)(void *obj, int kmflag),
    int (*destructor)(void *obj),
    int flags)
{
	slab_cache_t *cache;

	cache = slab_alloc(&slab_cache_cache, 0);
	_slab_cache_create(cache, name, size, align, constructor, destructor,
	    flags);
	return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
	unsigned int i;
	slab_magazine_t *mag;
	count_t frames = 0;
	int magcount;

	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/* We count up to the original magazine count to avoid
	 * an endless loop
	 */
	magcount = atomic_get(&cache->magazine_counter);
	while (magcount-- && (mag = get_mag_from_cache(cache, 0))) {
		frames += magazine_destroy(cache, mag);
		if (!(flags & SLAB_RECLAIM_ALL) && frames)
			break;
	}

	if (flags & SLAB_RECLAIM_ALL) {
		/* Destroy CPU-bound magazines */
		for (i = 0; i < config.cpu_count; i++) {
			spinlock_lock(&cache->mag_cache[i].lock);

			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;

			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;

			spinlock_unlock(&cache->mag_cache[i].lock);
		}
	}

	return frames;
}

/** Check that there are no slabs left and remove the cache from the system */
void slab_cache_destroy(slab_cache_t *cache)
{
	ipl_t ipl;

	/* First remove the cache from the cache list, so that we don't
	 * need to disable interrupts later
	 */

	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_remove(&cache->link);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);

	/* Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it */

	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if (!list_empty(&cache->full_slabs) ||
	    !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		free(cache->mag_cache);
	slab_free(&slab_cache_cache, cache);
}

/** Allocate a new object from cache - if no flags are given, always returns
    memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
	ipl_t ipl;
	void *result = NULL;

	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl = interrupts_disable();

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
		result = magazine_obj_get(cache);
	}
	if (!result)
		result = slab_obj_create(cache, flags);

	interrupts_restore(ipl);

	if (result)
		atomic_inc(&cache->allocated_objs);

	return result;
}

/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
	ipl_t ipl;

	ipl = interrupts_disable();

	if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
	    magazine_obj_put(cache, obj)) {
		slab_obj_destroy(cache, obj, slab);
	}
	interrupts_restore(ipl);
	atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
	slab_cache_t *cache;
	link_t *cur;
	count_t frames = 0;

	spinlock_lock(&slab_cache_lock);

	/* TODO: Add an assert that interrupts are disabled, otherwise
	 * memory allocation from interrupts can deadlock.
	 */

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	spinlock_unlock(&slab_cache_lock);

	return frames;
}


/* Print the list of slab caches */
void slab_print_list(void)
{
	slab_cache_t *cache;
	link_t *cur;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);
	printf("slab name        size     pages  obj/pg slabs  cached allocated ctl\n");
	printf("---------------- -------- ------ ------ ------ ------ --------- ---\n");

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);

		printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n",
		    cache->name, cache->size, (1 << cache->order), cache->objects,
		    atomic_get(&cache->allocated_slabs), atomic_get(&cache->cached_objs),
		    atomic_get(&cache->allocated_objs), cache->flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
	}
	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}

void slab_cache_init(void)
{
	int i, size;

	/* Initialize magazine cache */
	_slab_cache_create(&mag_cache,
	    "slab_magazine",
	    sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void *),
	    sizeof(uintptr_t),
	    NULL, NULL,
	    SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize slab_cache cache */
	_slab_cache_create(&slab_cache_cache,
	    "slab_cache",
	    sizeof(slab_cache_cache),
	    sizeof(uintptr_t),
	    NULL, NULL,
	    SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize external slab cache */
	slab_extern_cache = slab_cache_create("slab_extern",
	    sizeof(slab_t),
	    0, NULL, NULL,
	    SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

	/* Initialize structures for malloc */
	for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
	    i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
	    i++, size <<= 1) {
		malloc_caches[i] = slab_cache_create(malloc_names[i],
		    size, 0,
		    NULL, NULL, SLAB_CACHE_MAGDEFERRED);
	}
#ifdef CONFIG_DEBUG
	_slab_initialized = 1;
#endif
}

/** Enable cpu_cache
 *
 * The kernel calls this function when it knows the real number of
 * processors. It allocates the slab for the CPU cache and enables it on
 * all existing caches that are marked SLAB_CACHE_MAGDEFERRED.
 */
void slab_enable_cpucache(void)
{
	link_t *cur;
	slab_cache_t *s;

#ifdef CONFIG_DEBUG
	_slab_initialized = 2;
#endif

	spinlock_lock(&slab_cache_lock);

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		s = list_get_instance(cur, slab_cache_t, link);
		if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
			continue;
		make_magcache(s);
		s->flags &= ~SLAB_CACHE_MAGDEFERRED;
	}

	spinlock_unlock(&slab_cache_lock);
}

/**************************************/
/* malloc/free functions */
void * malloc(unsigned int size, int flags)
{
	ASSERT(_slab_initialized);
	ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));

	if (size < (1 << SLAB_MIN_MALLOC_W))
		size = (1 << SLAB_MIN_MALLOC_W);

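	/*
	 * Pick the nearest power-of-two size class. For example (with
	 * SLAB_MIN_MALLOC_W == 4, i.e. malloc-16 being the smallest
	 * cache), size 100 gives fnzb(99) == 6 and idx == 3, which
	 * selects the malloc-128 cache.
	 */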
	int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

	return slab_alloc(malloc_caches[idx], flags);
}

void * realloc(void *ptr, unsigned int size, int flags)
{
	ASSERT(_slab_initialized);
	ASSERT(size <= (1 << SLAB_MAX_MALLOC_W));

	void *new_ptr;

	if (size > 0) {
		if (size < (1 << SLAB_MIN_MALLOC_W))
			size = (1 << SLAB_MIN_MALLOC_W);
		int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

		new_ptr = slab_alloc(malloc_caches[idx], flags);
	} else
		new_ptr = NULL;

	if ((new_ptr != NULL) && (ptr != NULL)) {
		slab_t *slab = obj2slab(ptr);
		memcpy(new_ptr, ptr, min(size, slab->cache->size));
	}

	if (ptr != NULL)
		free(ptr);

	return new_ptr;
}

void free(void *ptr)
{
	if (!ptr)
		return;

	slab_t *slab = obj2slab(ptr);
	_slab_free(slab->cache, ptr, slab);
}

/** @}
 */