source: mainline/generic/src/mm/slab.c@ 086a600

Last change on this file was 086a600, checked in by Ondrej Palkovsky <ondrap@…>, 20 years ago

Debugged slab allocator. It currently supports per-CPU cache on 1 cpu.

/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
LIST_INITIALIZE(slab_cache_list);

slab_cache_t mag_cache;

typedef struct {
    slab_cache_t *cache;    /**< Pointer to parent cache */
    link_t link;            /**< Link to the cache's full/partial slab list */
    void *start;            /**< Start address of first available item */
    count_t available;      /**< Count of available items in this slab */
    index_t nextavail;      /**< Index of the next available item */
} slab_t;

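/*
 * Editor's note: illustrative memory layout, assuming one page per slab.
 * With SLAB_CACHE_SLINSIDE the control structure lives at the end of the
 * slab's own frames; otherwise it is malloc()ed separately:
 *
 *   SLINSIDE: | obj 0 | obj 1 | ... | obj N-1 | waste | slab_t |
 *   outside:  | obj 0 | obj 1 | ... | obj N-1 | waste |   (+ slab_t elsewhere)
 *
 * Either way, every frame backing the slab has frame->parent pointing
 * back at the slab_t (see slab_space_alloc() below).
 */
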
/**************************************/
/* SLAB allocation functions */

/**
 * Allocate frames for slab space and initialize the slab control structure
 *
 * TODO: Change slab_t allocation to slab_alloc(????), malloc with flags!!
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    size_t fsize;
    int i;
    zone_t *zone = NULL;
    int status;
    frame_t *frame;

    data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
    if (status != FRAME_OK) {
        return NULL;
    }
    if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
        slab = malloc(sizeof(*slab)); /* TODO: pass flags */
        if (!slab) {
            frame_free((__address)data);
            return NULL;
        }
    } else {
        fsize = (PAGE_SIZE << cache->order);
        slab = data + fsize - sizeof(*slab);
    }

    /* Fill in slab structures */
    /* TODO: some better way of accessing the frame */
    for (i = 0; i < (1 << cache->order); i++) {
        frame = ADDR2FRAME(zone, KA2PA((__address)(data + i*PAGE_SIZE)));
        frame->parent = slab;
    }

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = 0;
    slab->cache = cache;

    /* Build the free list: each free item holds the index of the next one */
    for (i = 0; i < cache->objects; i++)
        *((int *) (slab->start + i*cache->size)) = i + 1;

    atomic_inc(&cache->allocated_slabs);
    return slab;
}
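/*
 * Editor's note: the free list is encoded inside the free objects
 * themselves; the first int of free object i holds the index of the next
 * free object.  Freshly created slab, assuming 4 objects:
 *
 *   nextavail = 0,  obj[0] = 1, obj[1] = 2, obj[2] = 3, obj[3] = 4
 *
 * The trailing out-of-range index (4) is never followed, because
 * allocations from the slab stop once slab->available drops to 0.
 */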

/**
 * Deallocate space associated with slab
 *
 * @return Number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
    frame_free((__address)slab->start);
    if (!(cache->flags & SLAB_CACHE_SLINSIDE))
        free(slab);

    atomic_dec(&cache->allocated_slabs);

    return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
    frame_t *frame;

    frame = frame_addr2frame((__address)obj);
    return (slab_t *)frame->parent;
}
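/*
 * Editor's note: this lookup relies on slab_space_alloc() having stored the
 * slab pointer in frame->parent for every frame backing the slab, so any
 * address inside the slab resolves as:
 *
 *   obj -> frame_addr2frame(obj) -> frame->parent == its slab_t *
 */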

/**************************************/
/* SLAB functions */

/**
 * Return object to slab and call a destructor
 *
 * Assumes cache->lock is held.
 *
 * @param slab The slab the object belongs to, if known by the caller; NULL otherwise
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                                slab_t *slab)
{
    count_t frames = 0;

    if (!slab)
        slab = obj2slab(obj);

    ASSERT(slab->cache == cache);

    /* Push the object onto the slab's embedded free list */
    *((int *)obj) = slab->nextavail;
    slab->nextavail = (obj - slab->start) / cache->size;
    slab->available++;

    /* Move it to correct list */
    if (slab->available == 1) {
        /* It was in full, move to partial */
        list_remove(&slab->link);
        list_prepend(&slab->link, &cache->partial_slabs);
    }
    if (slab->available == cache->objects) {
        /* Free associated memory */
        list_remove(&slab->link);
        /* Avoid deadlock */
        spinlock_unlock(&cache->lock);
        frames = slab_space_free(cache, slab);
        spinlock_lock(&cache->lock);
    }

    return frames;
}
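/*
 * Editor's note: illustrative trace, assuming 4 objects per slab.  Freeing
 * object #2 into a slab whose free list was 0 -> 1 -> 3 gives:
 *
 *   before: nextavail = 0,  obj[0] = 1, obj[1] = 3
 *   after:  nextavail = 2,  obj[2] = 0, obj[0] = 1, obj[1] = 3
 *
 * i.e. the freed object becomes the new list head and stores the old head
 * index in its first int.
 */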

/**
 * Take a new object from a slab, allocating a new slab if needed
 *
 * Assumes cache->lock is held.
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
    slab_t *slab;
    void *obj;

    if (list_empty(&cache->partial_slabs)) {
        /* Allow recursion and reclaiming
         * - this should work, as the SLAB control structures
         *   are small and do not need to allocate with anything
         *   other than frame_alloc when they are allocating,
         *   so recursion is at most one level deep
         */
        spinlock_unlock(&cache->lock);
        slab = slab_space_alloc(cache, flags);
        spinlock_lock(&cache->lock);
        if (!slab) {
            return NULL;
        }
    } else {
        slab = list_get_instance(cache->partial_slabs.next,
                                 slab_t,
                                 link);
        list_remove(&slab->link);
    }
    /* Pop the head of the slab's free list */
    obj = slab->start + slab->nextavail * cache->size;
    slab->nextavail = *((int *)obj);
    slab->available--;
    if (!slab->available)
        list_prepend(&slab->link, &cache->full_slabs);
    else
        list_prepend(&slab->link, &cache->partial_slabs);
    return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Free all objects in a magazine and free the memory associated with it
 *
 * Assumes the corresponding cache->mag_cache[cpu].lock is held
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
                                slab_magazine_t *mag)
{
    int i;
    count_t frames = 0;

    for (i = 0; i < mag->busy; i++) {
        frames += slab_obj_destroy(cache, mag->objs[i], NULL);
        atomic_dec(&cache->cached_objs);
    }

    slab_free(&mag_cache, mag);

    return frames;
}

/**
 * Try to find an object in the CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
    slab_magazine_t *mag;
    void *obj;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = cache->mag_cache[CPU->id].current;
    if (!mag)
        goto out;

    if (!mag->busy) {
        /* If current is empty && last exists && not empty, exchange */
        if (cache->mag_cache[CPU->id].last
            && cache->mag_cache[CPU->id].last->busy) {
            cache->mag_cache[CPU->id].current = cache->mag_cache[CPU->id].last;
            cache->mag_cache[CPU->id].last = mag;
            mag = cache->mag_cache[CPU->id].current;
            goto gotit;
        }
        /* If still empty, exchange current with one of the cache's
         * full magazines */
        spinlock_lock(&cache->lock);
        if (list_empty(&cache->magazines)) {
            spinlock_unlock(&cache->lock);
            goto out;
        }
        /* Free current magazine and take one from list */
        slab_free(&mag_cache, mag);

        mag = list_get_instance(cache->magazines.next,
                                slab_magazine_t,
                                link);
        list_remove(&mag->link);
        /* Make this magazine the new current one; the old current
         * was freed above and must not stay referenced */
        cache->mag_cache[CPU->id].current = mag;

        spinlock_unlock(&cache->lock);
    }
gotit:
    obj = mag->objs[--mag->busy];
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_dec(&cache->cached_objs);

    return obj;
out:
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    return NULL;
}
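/*
 * Editor's note: summary of the allocation fast path, as implemented above.
 * Each CPU keeps two magazines, "current" and "last"; allocation tries them
 * in order and falls back to the cache-wide list of full magazines:
 *
 *   current has objects?          -> pop from current
 *   current empty, last has some? -> swap current/last, pop
 *   both empty, cache->magazines  -> replace current with a full one, pop
 *   nothing anywhere              -> return NULL (caller goes to the slabs)
 */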

/**
 * Ensure that the current magazine has room for at least one more object;
 * return a pointer to it, or NULL when no such magazine is available and a
 * new one cannot be allocated
 *
 * We have 2 magazines bound to each processor:
 * - first try the current one; if it has free space, use it,
 * - otherwise try the last one; if it has free space, exchange last & current,
 * - if both are full (or missing), flush last to the magazine list,
 *   allocate a new magazine and make it current.
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;

    if (cmag) {
        if (cmag->busy < cmag->size)
            return cmag;
        if (lastmag && lastmag->busy < lastmag->size) {
            cache->mag_cache[CPU->id].last = cmag;
            cache->mag_cache[CPU->id].current = lastmag;
            return lastmag;
        }
    }
    /* Both current and last are full or nonexistent, allocate new */
    /* We do not want to sleep just because of caching,
     * especially we do not want reclaiming to start, as
     * this would deadlock */
    newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
    if (!newmag)
        return NULL;
    newmag->size = SLAB_MAG_SIZE;
    newmag->busy = 0;

    /* Flush last to magazine list */
    if (lastmag)
        list_prepend(&lastmag->link, &cache->magazines);
    /* Move current to last, save new as current */
    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;

    return newmag;
}
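/*
 * Editor's note: illustrative trace, with SLAB_MAG_SIZE assumed to be 4.
 * When both per-CPU magazines are full, a slab_free() ends up here and the
 * function flushes last to the cache list, demotes current and installs a
 * fresh magazine:
 *
 *   before:  current [oooo]  last [oooo]  cache->magazines: {}
 *   after:   current [----]  last [oooo]  cache->magazines: {[oooo]}
 *
 * (the new "last" is the old, still-full "current").
 */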

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 on success, -1 if memory could not be obtained
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
    slab_magazine_t *mag;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = make_empty_current_mag(cache);
    if (!mag)
        goto errout;

    mag->objs[mag->busy++] = obj;

    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_inc(&cache->cached_objs);
    return 0;
errout:
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    return -1;
}

/**************************************/
/* SLAB CACHE functions */

/** Return number of objects that fit into one slab of the given cache */
static int comp_objects(slab_cache_t *cache)
{
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
    else
        return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static int badness(slab_cache_t *cache)
{
    int objects;
    int ssize;

    objects = comp_objects(cache);
    ssize = PAGE_SIZE << cache->order;
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);
    return ssize - objects * cache->size;
}
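/*
 * Editor's note: a worked example under assumed values (PAGE_SIZE = 4096,
 * sizeof(slab_t) = 32, cache->size = 256, order = 0, SLINSIDE set):
 *
 *   comp_objects = (4096 - 32) / 256 = 15 objects
 *   badness      = (4096 - 32) - 15 * 256 = 4064 - 3840 = 224 bytes wasted
 *
 * _slab_cache_create() below keeps growing cache->order while this waste
 * exceeds SLAB_MAX_BADNESS(cache).
 */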

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
                   char *name,
                   size_t size,
                   size_t align,
                   int (*constructor)(void *obj, int kmflag),
                   void (*destructor)(void *obj),
                   int flags)
{
    int i;

    memsetb((__address)cache, sizeof(*cache), 0);
    cache->name = name;

    if (align < sizeof(__native))
        align = sizeof(__native);
    size = ALIGN_UP(size, align);

    cache->size = size;

    cache->constructor = constructor;
    cache->destructor = destructor;
    cache->flags = flags;

    list_initialize(&cache->full_slabs);
    list_initialize(&cache->partial_slabs);
    list_initialize(&cache->magazines);
    spinlock_initialize(&cache->lock, "cachelock");
    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
        for (i = 0; i < config.cpu_count; i++)
            spinlock_initialize(&cache->mag_cache[i].lock,
                                "cpucachelock");
    }

    /* Compute slab sizes, object counts in slabs etc. */
    if (cache->size < SLAB_INSIDE_SIZE)
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Minimum slab order */
    cache->order = (cache->size - 1) >> PAGE_WIDTH;

    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
        cache->order += 1;
    }
    cache->objects = comp_objects(cache);
    /* If the wasted space is large enough to hold the slab control
     * structure, keep it inside the slab */
    if (badness(cache) > sizeof(slab_t))
        cache->flags |= SLAB_CACHE_SLINSIDE;

    spinlock_lock(&slab_cache_lock);

    list_append(&cache->link, &slab_cache_list);

    spinlock_unlock(&slab_cache_lock);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
                                 size_t size,
                                 size_t align,
                                 int (*constructor)(void *obj, int kmflag),
                                 void (*destructor)(void *obj),
                                 int flags)
{
    slab_cache_t *cache;

    cache = malloc(sizeof(*cache) + config.cpu_count * sizeof(cache->mag_cache[0]));
    _slab_cache_create(cache, name, size, align, constructor, destructor,
                       flags);
    return cache;
}
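/*
 * Editor's note: illustrative usage sketch (hypothetical caller, not part
 * of this file).  A subsystem would typically create one cache per object
 * type at init time and then allocate/free from it:
 *
 *   typedef struct { int id; char payload[60]; } foo_t;   // made-up type
 *
 *   slab_cache_t *foo_cache;
 *
 *   void foo_init(void)
 *   {
 *       foo_cache = slab_cache_create("foo_cache", sizeof(foo_t), 0,
 *                                     NULL, NULL, 0);
 *   }
 *
 *   foo_t *foo_new(void)
 *   {
 *       return slab_alloc(foo_cache, 0);   // may block; see slab_alloc()
 *   }
 *
 *   void foo_delete(foo_t *foo)
 *   {
 *       slab_free(foo_cache, foo);
 *   }
 */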

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
    int i;
    slab_magazine_t *mag;
    link_t *cur;
    count_t frames = 0;

    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
        return 0; /* Nothing to do */

    /* First lock all cpu caches, then the complete cache lock */
    for (i = 0; i < config.cpu_count; i++)
        spinlock_lock(&cache->mag_cache[i].lock);
    spinlock_lock(&cache->lock);

    if (flags & SLAB_RECLAIM_ALL) {
        /* Aggressive mode: destroy the per-CPU magazines as well */
        for (i = 0; i < config.cpu_count; i++) {
            mag = cache->mag_cache[i].current;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].current = NULL;

            mag = cache->mag_cache[i].last;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].last = NULL;
        }
    }
    /* Destroy full magazines */
    cur = cache->magazines.prev;

    while (cur != &cache->magazines) {
        mag = list_get_instance(cur, slab_magazine_t, link);

        cur = cur->prev;
        list_remove(&mag->link);
        frames += magazine_destroy(cache, mag);
        /* If we do not do full reclaim, break
         * as soon as something is freed */
        if (!(flags & SLAB_RECLAIM_ALL) && frames)
            break;
    }

    spinlock_unlock(&cache->lock);
    for (i = 0; i < config.cpu_count; i++)
        spinlock_unlock(&cache->mag_cache[i].lock);

    return frames;
}

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
    /* Do not lock anything; we assume the software is correct and
     * does not touch the cache while destroying it */

    /* Destroy all magazines */
    _slab_reclaim(cache, SLAB_RECLAIM_ALL);

    /* All slabs must be empty */
    if (!list_empty(&cache->full_slabs)
        || !list_empty(&cache->partial_slabs))
        panic("Destroying cache that is not empty.");

    spinlock_lock(&slab_cache_lock);
    list_remove(&cache->link);
    spinlock_unlock(&slab_cache_lock);

    free(cache);
}

/** Allocate new object from cache; if no special flags are given, the call
 *  always returns memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
    ipl_t ipl;
    void *result = NULL;

    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    ipl = interrupts_disable();

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        result = magazine_obj_get(cache);

    if (!result) {
        spinlock_lock(&cache->lock);
        result = slab_obj_create(cache, flags);
        spinlock_unlock(&cache->lock);
    }

    if (result)
        atomic_inc(&cache->allocated_objs);

    interrupts_restore(ipl);

    return result;
}

/** Return object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
    ipl_t ipl;

    ipl = interrupts_disable();

    if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
        || magazine_obj_put(cache, obj)) {
        spinlock_lock(&cache->lock);
        slab_obj_destroy(cache, obj, NULL);
        spinlock_unlock(&cache->lock);
    }
    atomic_dec(&cache->allocated_objs);
    interrupts_restore(ipl);
}

/** Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
    slab_cache_t *cache;
    link_t *cur;
    count_t frames = 0;

    spinlock_lock(&slab_cache_lock);

    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        frames += _slab_reclaim(cache, flags);
    }

    spinlock_unlock(&slab_cache_lock);

    return frames;
}

/** Print list of slab caches */
void slab_print_list(void)
{
    slab_cache_t *cache;
    link_t *cur;

    spinlock_lock(&slab_cache_lock);
    printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
               (1 << cache->order), cache->objects,
               atomic_get(&cache->allocated_slabs),
               atomic_get(&cache->cached_objs),
               atomic_get(&cache->allocated_objs),
               cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
    }
    spinlock_unlock(&slab_cache_lock);
}

void slab_cache_init(void)
{
    /* Initialize magazine cache */
    _slab_cache_create(&mag_cache,
                       "slab_magazine",
                       sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void *),
                       sizeof(__address),
                       NULL, NULL,
                       SLAB_CACHE_NOMAGAZINE);

    /* Initialize structures for malloc */
}
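/*
 * Editor's note: mag_cache is created with SLAB_CACHE_NOMAGAZINE because
 * magazines themselves are allocated from it (see make_empty_current_mag());
 * letting it keep per-CPU magazines would make magazine allocation recurse
 * into itself.
 */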