/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The SLAB allocator is closely modelled after the OpenSolaris SLAB allocator
 * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 * - empty SLABS are deallocated immediately
 *   (in Linux they are kept in a linked list, in Solaris ???)
 * - empty magazines are deallocated when not needed
 *   (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy to do:
 * - cache coloring
 * - dynamic magazine growing (different magazine sizes are already
 *   supported, but we would need to adjust the allocation strategy)
 *
 * The SLAB allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is allocated, the CPU-bound magazine is checked first.
 * If the object is not found there, it is allocated from the CPU-shared
 * SLAB - if a partially full slab is found, it is used, otherwise a new
 * one is allocated.
 *
 * When an object is deallocated, it is put into the CPU-bound magazine.
 * If there is no such magazine, a new one is allocated (if that fails,
 * the object is deallocated into the SLAB). If the magazine is full, it is
 * put into the cpu-shared list of magazines and a new one is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines to avoid
 * thrashing when somebody is allocating/deallocating 1 item at the magazine
 * size boundary. LIFO order is enforced, which should avoid fragmentation
 * as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty SLABS are freed immediately (thrashing is avoided because
 * of the magazines).
 *
 * The SLAB information structure is kept inside the data area, if possible.
 * A cache can be marked that it should not use magazines. This is used
 * only for SLAB-related caches, to avoid deadlocks and infinite recursion
 * (the SLAB allocator uses itself for allocating all its control structures).
 *
 * The SLAB allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the cpu-shared magazine list, until at least 1 slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from CPU-bound
 * magazines.
 */

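/*
 * Illustrative usage sketch (not part of the allocator; foo_t is a
 * hypothetical object type and default flags are assumed):
 *
 *	slab_cache_t *foo_cache;
 *
 *	foo_cache = slab_cache_create("foo_cache", sizeof(foo_t), 0,
 *				      NULL, NULL, 0);
 *	foo_t *foo = slab_alloc(foo_cache, 0);
 *	...
 *	slab_free(foo_cache, foo);
 *	slab_cache_destroy(foo_cache);
 */
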
#include <synch/spinlock.h>
#include <mm/slab.h>
#include <list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;

/** Cache for external slab descriptors
 * This time we want a per-CPU cache, so do not make it static
 * - using SLAB for internal SLAB structures will not deadlock,
 *   as all slab structures are 'small' - the control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1];
char *malloc_names[] = {
	"malloc-8","malloc-16","malloc-32","malloc-64","malloc-128",
	"malloc-256","malloc-512","malloc-1K","malloc-2K",
	"malloc-4K","malloc-8K","malloc-16K","malloc-32K",
	"malloc-64K","malloc-128K"
};

/** Slab descriptor */
typedef struct {
	slab_cache_t *cache;	/**< Pointer to parent cache */
	link_t link;		/**< Link to full/partial slab lists */
	void *start;		/**< Start address of first available item */
	count_t available;	/**< Count of available items in this slab */
	index_t nextavail;	/**< The index of the next available item */
} slab_t;

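/*
 * Layout sketch of one slab with SLAB_CACHE_SLINSIDE (illustrative only;
 * the object count and sizes depend on the cache parameters):
 *
 *	+----------+----------+-- ... --+--------+
 *	| object 0 | object 1 |         | slab_t |
 *	+----------+----------+-- ... --+--------+
 *	^ slab->start                   ^ data + fsize - sizeof(slab_t)
 *
 * Without SLAB_CACHE_SLINSIDE the slab_t is allocated from
 * slab_extern_cache instead and the whole frame block holds objects.
 */
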
/**************************************/
/* SLAB allocation functions */

/**
 * Allocate frames for slab space and initialize the resulting slab
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
	void *data;
	slab_t *slab;
	size_t fsize;
	int i;
	zone_t *zone = NULL;
	int status;
	frame_t *frame;

	data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
	if (status != FRAME_OK) {
		return NULL;
	}
	if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
		slab = slab_alloc(slab_extern_cache, flags);
		if (!slab) {
			frame_free((__address)data);
			return NULL;
		}
	} else {
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}

	/* Fill in slab structures */
	/* TODO: some better way of accessing the frame */
	for (i=0; i < (1 << cache->order); i++) {
		frame = ADDR2FRAME(zone, KA2PA((__address)(data+i*PAGE_SIZE)));
		frame->parent = slab;
	}

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;
	slab->cache = cache;

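	/*
	 * Chain the objects into an embedded free list: the first word
	 * of each free object holds the index of the next free one.
	 */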
	for (i=0; i<cache->objects;i++)
		*((int *) (slab->start + i*cache->size)) = i+1;

	atomic_inc(&cache->allocated_slabs);
	return slab;
}

/**
 * Deallocate space associated with SLAB
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free((__address)slab->start);
	if (! (cache->flags & SLAB_CACHE_SLINSIDE))
		slab_free(slab_extern_cache, slab);

	atomic_dec(&cache->allocated_slabs);

	return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
	frame_t *frame;

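	/* slab_space_alloc() stored the owning slab in frame->parent */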
	frame = frame_addr2frame((__address)obj);
	return (slab_t *)frame->parent;
}

/**************************************/
/* SLAB functions */


/**
 * Return object to slab and call a destructor
 *
 * Assume cache->lock is held.
 *
 * @param slab The object's slab, if the caller knows it directly; otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
				slab_t *slab)
{
	count_t frames = 0;

	if (!slab)
		slab = obj2slab(obj);

	ASSERT(slab->cache == cache);

	*((int *)obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start)/cache->size;
	slab->available++;

	/* Move it to the correct list */
	if (slab->available == 1) {
		/* It was in full, move to partial */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		/* Avoid deadlock */
		spinlock_unlock(&cache->lock);
		frames = slab_space_free(cache, slab);
		spinlock_lock(&cache->lock);
	}

	return frames;
}

/**
 * Take a new object from a slab, creating a new slab if needed
 *
 * Assume cache->lock is held.
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
	slab_t *slab;
	void *obj;

	if (list_empty(&cache->partial_slabs)) {
		/* Allow recursion and reclaiming
		 * - this should work, as the SLAB control structures
		 *   are small and do not need to allocate with anything
		 *   other than frame_alloc when they are allocating,
		 *   that's why we should get recursion at most 1-level deep
		 */
		spinlock_unlock(&cache->lock);
		slab = slab_space_alloc(cache, flags);
		spinlock_lock(&cache->lock);
		if (!slab) {
			return NULL;
		}
	} else {
		slab = list_get_instance(cache->partial_slabs.next,
					 slab_t,
					 link);
		list_remove(&slab->link);
	}
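	/* Pop the first free object off the slab's embedded free list */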
	obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((int *)obj);
	slab->available--;
	if (! slab->available)
		list_prepend(&slab->link, &cache->full_slabs);
	else
		list_prepend(&slab->link, &cache->partial_slabs);
	return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Free all objects in a magazine and free the memory associated with it
 *
 * Assume mag_cache[cpu].lock is locked
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
				slab_magazine_t *mag)
{
	int i;
	count_t frames = 0;

	for (i=0;i < mag->busy; i++) {
		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
		atomic_dec(&cache->cached_objs);
	}

	slab_free(&mag_cache, mag);

	return frames;
}

/**
 * Find a full magazine, set it as current and return it
 *
 * Assume the cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;
	if (cmag) { /* First try local CPU magazines */
		if (cmag->busy)
			return cmag;

		if (lastmag && lastmag->busy) {
			cache->mag_cache[CPU->id].current = lastmag;
			cache->mag_cache[CPU->id].last = cmag;
			return lastmag;
		}
	}
	/* Local magazines are empty, import one from the magazine list */
	spinlock_lock(&cache->lock);
	if (list_empty(&cache->magazines)) {
		spinlock_unlock(&cache->lock);
		return NULL;
	}
	newmag = list_get_instance(cache->magazines.next,
				   slab_magazine_t,
				   link);
	list_remove(&newmag->link);
	spinlock_unlock(&cache->lock);

	if (lastmag)
		slab_free(&mag_cache, lastmag);
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;
	return newmag;
}

/**
 * Try to find an object in the CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
	slab_magazine_t *mag;
	void *obj;

	if (!CPU)
		return NULL;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = get_full_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return NULL;
	}
	obj = mag->objs[--mag->busy];
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_dec(&cache->cached_objs);

	return obj;
}

/**
 * Ensure the current magazine has room for at least one object; return a
 * pointer to it, or NULL if no such magazine is available and a new one
 * cannot be allocated
 *
 * Assume mag_cache[CPU->id].lock is held
 *
 * We have 2 magazines bound to each processor.
 * First try the current one.
 * If full, try the last one.
 * If full, put it on the magazines list,
 * allocate a new one and exchange last & current.
 *
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag,*lastmag,*newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;

	if (cmag) {
		if (cmag->busy < cmag->size)
			return cmag;
		if (lastmag && lastmag->busy < lastmag->size) {
			cache->mag_cache[CPU->id].last = cmag;
			cache->mag_cache[CPU->id].current = lastmag;
			return lastmag;
		}
	}
	/* current and last are full or nonexistent, allocate a new one */
	/* We do not want to sleep just because of caching */
	/* Especially we do not want reclaiming to start, as
	 * this would deadlock */
	newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
	if (!newmag)
		return NULL;
	newmag->size = SLAB_MAG_SIZE;
	newmag->busy = 0;

	/* Flush the last magazine to the magazine list */
	if (lastmag) {
		spinlock_lock(&cache->lock);
		list_prepend(&lastmag->link, &cache->magazines);
		spinlock_unlock(&cache->lock);
	}
	/* Move current to last, save the new one as current */
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;

	return newmag;
}

/**
 * Put an object into the CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	slab_magazine_t *mag;

	if (!CPU)
		return -1;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = make_empty_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return -1;
	}

	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_inc(&cache->cached_objs);
	return 0;
}


/**************************************/
/* SLAB CACHE functions */

/** Return the number of objects that fit into a slab of the cache's order */
static int comp_objects(slab_cache_t *cache)
{
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
	else
		return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return the wasted space in a slab */
static int badness(slab_cache_t *cache)
{
	int objects;
	int ssize;

	objects = comp_objects(cache);
	ssize = PAGE_SIZE << cache->order;
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		ssize -= sizeof(slab_t);
	return ssize - objects*cache->size;
}
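
/*
 * Worked example (a sketch, assuming 4 KiB pages and an external slab_t):
 * a cache of 96-byte objects at order 0 fits 4096/96 = 42 objects, so
 * badness() returns 4096 - 42*96 = 64 wasted bytes.
 */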

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
		   char *name,
		   size_t size,
		   size_t align,
		   int (*constructor)(void *obj, int kmflag),
		   void (*destructor)(void *obj),
		   int flags)
{
	int i;
	int pages;

	memsetb((__address)cache, sizeof(*cache), 0);
	cache->name = name;

	if (align < sizeof(__native))
		align = sizeof(__native);
	size = ALIGN_UP(size, align);

	cache->size = size;

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->lock, "cachelock");
	if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) {
		for (i=0; i < config.cpu_count; i++) {
			memsetb((__address)&cache->mag_cache[i],
				sizeof(cache->mag_cache[i]), 0);
			spinlock_initialize(&cache->mag_cache[i].lock,
					    "cpucachelock");
		}
	}

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order */
	pages = ((cache->size-1) >> PAGE_WIDTH) + 1;
	cache->order = fnzb(pages);

	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
		cache->order += 1;
	}
	cache->objects = comp_objects(cache);
	/* If info fits in, put it inside */
	if (badness(cache) > sizeof(slab_t))
		cache->flags |= SLAB_CACHE_SLINSIDE;

	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
				 size_t size,
				 size_t align,
				 int (*constructor)(void *obj, int kmflag),
				 void (*destructor)(void *obj),
				 int flags)
{
	slab_cache_t *cache;

	cache = slab_alloc(&slab_cache_cache, 0);
	_slab_cache_create(cache, name, size, align, constructor, destructor,
			   flags);
	return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
	int i;
	slab_magazine_t *mag;
	link_t *cur;
	count_t frames = 0;

	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/* First lock all cpu caches, then the complete cache lock */
	if (flags & SLAB_RECLAIM_ALL) {
		for (i=0; i < config.cpu_count; i++)
			spinlock_lock(&cache->mag_cache[i].lock);
	}
	spinlock_lock(&cache->lock);

	if (flags & SLAB_RECLAIM_ALL) {
		/* Aggressive memfree */
		/* Destroy CPU magazines */
		for (i=0; i<config.cpu_count; i++) {
			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;

			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;
		}
	}
	/* Destroy full magazines */
	cur = cache->magazines.prev;

	while (cur != &cache->magazines) {
		mag = list_get_instance(cur, slab_magazine_t, link);

		cur = cur->prev;
		list_remove(&mag->link);
		frames += magazine_destroy(cache,mag);
		/* If we do not do a full reclaim, break
		 * as soon as something is freed */
		if (!(flags & SLAB_RECLAIM_ALL) && frames)
			break;
	}

	spinlock_unlock(&cache->lock);
	if (flags & SLAB_RECLAIM_ALL) {
		for (i=0; i < config.cpu_count; i++)
			spinlock_unlock(&cache->mag_cache[i].lock);
	}

	return frames;
}

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
	/* Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it */

	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if (!list_empty(&cache->full_slabs)
	    || !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	spinlock_lock(&slab_cache_lock);
	list_remove(&cache->link);
	spinlock_unlock(&slab_cache_lock);

	slab_free(&slab_cache_cache, cache);
}

/** Allocate a new object from the cache - if no flags are given,
 * always returns memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
	ipl_t ipl;
	void *result = NULL;

	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl = interrupts_disable();

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		result = magazine_obj_get(cache);

	if (!result) {
		spinlock_lock(&cache->lock);
		result = slab_obj_create(cache, flags);
		spinlock_unlock(&cache->lock);
	}

	interrupts_restore(ipl);

	if (result)
		atomic_inc(&cache->allocated_objs);

	return result;
}

/** Return an object to the cache, using the slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
	ipl_t ipl;

	ipl = interrupts_disable();

	if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
	    || magazine_obj_put(cache, obj)) {
		spinlock_lock(&cache->lock);
		slab_obj_destroy(cache, obj, slab);
		spinlock_unlock(&cache->lock);
	}
	interrupts_restore(ipl);
	atomic_dec(&cache->allocated_objs);
}

/** Return a slab object to the cache */
void slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache,obj,NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
	slab_cache_t *cache;
	link_t *cur;
	count_t frames = 0;

	spinlock_lock(&slab_cache_lock);

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	spinlock_unlock(&slab_cache_lock);

	return frames;
}


/* Print the list of slab caches */
void slab_print_list(void)
{
	slab_cache_t *cache;
	link_t *cur;

	spinlock_lock(&slab_cache_lock);
	printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
		       (1 << cache->order), cache->objects,
		       atomic_get(&cache->allocated_slabs),
		       atomic_get(&cache->cached_objs),
		       atomic_get(&cache->allocated_objs),
		       cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
	}
	spinlock_unlock(&slab_cache_lock);
}

void slab_cache_init(void)
{
	int i, size;

	/* Initialize magazine cache */
	_slab_cache_create(&mag_cache,
			   "slab_magazine",
			   sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize slab_cache cache */
	_slab_cache_create(&slab_cache_cache,
			   "slab_cache",
			   sizeof(slab_cache_cache) + config.cpu_count*sizeof(slab_cache_cache.mag_cache[0]),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize external slab cache */
	slab_extern_cache = slab_cache_create("slab_extern",
					      sizeof(slab_t),
					      0, NULL, NULL,
					      SLAB_CACHE_SLINSIDE);

	/* Initialize structures for malloc */
	for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
	     i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1);
	     i++, size <<= 1) {
		malloc_caches[i] = slab_cache_create(malloc_names[i],
						     size, 0,
						     NULL,NULL,0);
	}
}

/**************************************/
/* kalloc/kfree functions */
void * kalloc(unsigned int size, int flags)
{
	int idx;

	ASSERT( size && size <= (1 << SLAB_MAX_MALLOC_W));

	if (size < (1 << SLAB_MIN_MALLOC_W))
		size = (1 << SLAB_MIN_MALLOC_W);

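	/*
	 * Pick the smallest power-of-2 cache that fits the request, e.g.
	 * (assuming SLAB_MIN_MALLOC_W is 3, as the malloc-8 cache name
	 * suggests) a 100-byte request gives fnzb(99) = 6, idx = 4,
	 * i.e. the malloc-128 cache.
	 */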
	idx = fnzb(size-1) - SLAB_MIN_MALLOC_W + 1;

	return slab_alloc(malloc_caches[idx], flags);
}


void kfree(void *obj)
{
	slab_t *slab = obj2slab(obj);

	_slab_free(slab->cache, obj, slab);
}