source: mainline/kernel/generic/src/lib/ra.c@ 9b11a971

Last change on this file since 9b11a971 was f97f1e51, checked in by Martin Decky <martin@…>, 13 years ago

unify slab cache naming scheme (according to the type name)

/*
 * Copyright (c) 2011 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup generic
 * @{
 */

/**
 * @file
 * @brief Resource allocator.
 *
 * This is a generic resource allocator, loosely based on the ideas presented
 * in chapter 4 of the following paper and further simplified:
 *
 * Bonwick J., Adams J.: Magazines and Vmem: Extending the Slab Allocator to
 * Many CPUs and Arbitrary Resources, USENIX 2001
 *
 */
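/*
 * Illustrative usage sketch (not part of the allocator itself); the arena
 * variable and the base, size and alignment values are made up for this
 * example and ra_init() is assumed to have been called already. An arena is
 * created first, a span of the resource is donated to it, and blocks are
 * then allocated from and returned to the arena:
 *
 *	ra_arena_t *ids = ra_arena_create();
 *	if (ids && ra_span_add(ids, 1, 1023)) {
 *		uintptr_t id = ra_alloc(ids, 8, 8);
 *		if (id)
 *			ra_free(ids, id, 8);
 *	}
 */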

#include <lib/ra.h>
#include <typedefs.h>
#include <mm/slab.h>
#include <bitops.h>
#include <debug.h>
#include <panic.h>
#include <adt/list.h>
#include <adt/hash_table.h>
#include <align.h>
#include <macros.h>
#include <synch/spinlock.h>

static slab_cache_t *ra_segment_cache;

#define USED_BUCKETS 1024

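/*
 * Helper operations for the per-span hash table of used (allocated)
 * segments. Segments are keyed by their base address.
 */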
static size_t used_hash(sysarg_t *key)
{
	return ((*key >> 2) & (USED_BUCKETS - 1));
}

static bool used_compare(sysarg_t *key, size_t keys, link_t *item)
{
	ra_segment_t *seg;

	seg = hash_table_get_instance(item, ra_segment_t, fu_link);
	return seg->base == *key;
}

static hash_table_operations_t used_ops = {
	.hash = used_hash,
	.compare = used_compare,
	.remove_callback = NULL,
};

/** Calculate the segment size.
 *
 * The segment size is the distance from the segment's base to the base of
 * the next segment in the span's segment list. The span's sentinel segment
 * guarantees that a next segment always exists for every real segment.
 */
static size_t ra_segment_size_get(ra_segment_t *seg)
{
	ra_segment_t *nextseg;

	nextseg = list_get_instance(seg->segment_link.next, ra_segment_t,
	    segment_link);
	return nextseg->base - seg->base;
}

static ra_segment_t *ra_segment_create(uintptr_t base)
{
	ra_segment_t *seg;

	seg = slab_alloc(ra_segment_cache, FRAME_ATOMIC);
	if (!seg)
		return NULL;

	link_initialize(&seg->segment_link);
	link_initialize(&seg->fu_link);

	seg->base = base;
	seg->flags = 0;

	return seg;
}

static void ra_segment_destroy(ra_segment_t *seg)
{
	slab_free(ra_segment_cache, seg);
}

static ra_span_t *ra_span_create(uintptr_t base, size_t size)
{
	ra_span_t *span;
	ra_segment_t *seg, *lastseg;
	unsigned int i;

	span = (ra_span_t *) malloc(sizeof(ra_span_t), FRAME_ATOMIC);
	if (!span)
		return NULL;

	span->max_order = fnzb(size);
	span->base = base;
	span->size = size;

	span->free = (list_t *) malloc((span->max_order + 1) * sizeof(list_t),
	    FRAME_ATOMIC);
	if (!span->free) {
		free(span);
		return NULL;
	}

	/*
	 * Create a segment to represent the entire size of the span.
	 */
	seg = ra_segment_create(base);
	if (!seg) {
		free(span->free);
		free(span);
		return NULL;
	}
	seg->flags = RA_SEGMENT_FREE;

	/*
	 * The last segment will be used as a sentinel at the end of the
	 * segment list so that it is possible to calculate the size for
	 * all other segments. It will not be placed in any free list or
	 * in the used segment hash and adjacent segments will not be
	 * coalesced with it.
	 */
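	/*
	 * For example (illustrative values only): a span with base 0x1000 and
	 * size 0x800 starts out with a free segment at 0x1000 followed by the
	 * sentinel at 0x1800, so ra_segment_size_get() on the free segment
	 * yields 0x1800 - 0x1000 = 0x800.
	 */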
	lastseg = ra_segment_create(base + size);
	if (!lastseg) {
		ra_segment_destroy(seg);
		free(span->free);
		free(span);
		return NULL;
	}

	link_initialize(&span->span_link);
	list_initialize(&span->segments);

	hash_table_create(&span->used, USED_BUCKETS, 1, &used_ops);

	for (i = 0; i <= span->max_order; i++)
		list_initialize(&span->free[i]);

	/* Insert the first segment into the list of segments. */
	list_append(&seg->segment_link, &span->segments);
	/* Insert the last segment into the list of segments. */
	list_append(&lastseg->segment_link, &span->segments);

	/* Insert the first segment into the respective free list. */
	list_append(&seg->fu_link, &span->free[span->max_order]);

	return span;
}

/** Create an empty arena. */
ra_arena_t *ra_arena_create(void)
{
	ra_arena_t *arena;

	arena = (ra_arena_t *) malloc(sizeof(ra_arena_t), FRAME_ATOMIC);
	if (!arena)
		return NULL;

	irq_spinlock_initialize(&arena->lock, "arena_lock");
	list_initialize(&arena->spans);

	return arena;
}

/** Add a span to arena. */
bool ra_span_add(ra_arena_t *arena, uintptr_t base, size_t size)
{
	ra_span_t *span;

	/*
	 * At the moment, we can only create resources that don't include 0.
	 * If 0 needs to be considered as a valid resource, we would need to
	 * slightly change the API of the resource allocator.
	 */
	if (base == 0)
		return false;

	span = ra_span_create(base, size);
	if (!span)
		return false;

	/* TODO: check for overlaps */
	irq_spinlock_lock(&arena->lock, true);
	list_append(&span->span_link, &arena->spans);
	irq_spinlock_unlock(&arena->lock, true);
	return true;
}

static uintptr_t ra_span_alloc(ra_span_t *span, size_t size, size_t align)
{
	/*
	 * We need to add the maximum of align - 1 to be able to compensate for
	 * the worst case unaligned segment.
	 */
	size_t needed = size + align - 1;
	size_t order = ispwr2(needed) ? fnzb(needed) : fnzb(needed) + 1;
	ra_segment_t *pred = NULL;
	ra_segment_t *succ = NULL;

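	/*
	 * Illustrative example: for size = 24 and align = 16, needed is
	 * 24 + 15 = 39 and order becomes fnzb(39) + 1 = 6, so the search
	 * below starts with free segments of at least 64 units, enough to
	 * hold an aligned 24-unit block even in the worst case.
	 */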
	/*
	 * Find the free list of the smallest order which can satisfy this
	 * request.
	 */
	for (; order <= span->max_order; order++) {
		ra_segment_t *seg;
		uintptr_t newbase;

		if (list_empty(&span->free[order]))
			continue;

		/* Take the first segment from the free list. */
		seg = list_get_instance(list_first(&span->free[order]),
		    ra_segment_t, fu_link);

		ASSERT(seg->flags & RA_SEGMENT_FREE);

		/*
		 * See if we need to allocate new segments for the chopped-off
		 * parts of this segment.
		 */
		if (!IS_ALIGNED(seg->base, align)) {
			pred = ra_segment_create(seg->base);
			if (!pred) {
				/*
				 * Fail as we are unable to split the segment.
				 */
				break;
			}
			pred->flags |= RA_SEGMENT_FREE;
		}
		newbase = ALIGN_UP(seg->base, align);
		if (newbase + size != seg->base + ra_segment_size_get(seg)) {
			ASSERT(newbase + (size - 1) < seg->base +
			    (ra_segment_size_get(seg) - 1));
			succ = ra_segment_create(newbase + size);
			if (!succ) {
				if (pred)
					ra_segment_destroy(pred);
				/*
				 * Fail as we are unable to split the segment.
				 */
				break;
			}
			succ->flags |= RA_SEGMENT_FREE;
		}

		/* Put unneeded parts back. */
		if (pred) {
			size_t pred_order;

			list_insert_before(&pred->segment_link,
			    &seg->segment_link);
			pred_order = fnzb(ra_segment_size_get(pred));
			list_append(&pred->fu_link, &span->free[pred_order]);
		}
		if (succ) {
			size_t succ_order;

			list_insert_after(&succ->segment_link,
			    &seg->segment_link);
			succ_order = fnzb(ra_segment_size_get(succ));
			list_append(&succ->fu_link, &span->free[succ_order]);
		}

		/* Now remove the found segment from the free list. */
		list_remove(&seg->fu_link);
		seg->base = newbase;
		seg->flags &= ~RA_SEGMENT_FREE;

		/* Hash-in the segment into the used hash. */
		sysarg_t key = seg->base;
		hash_table_insert(&span->used, &key, &seg->fu_link);

		return newbase;
	}

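	/*
	 * A return value of 0 signals failure; this is why a span may not
	 * contain the resource 0 (see ra_span_add()).
	 */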
	return 0;
}

static void ra_span_free(ra_span_t *span, size_t base, size_t size)
{
	sysarg_t key = base;
	link_t *link;
	ra_segment_t *seg;
	ra_segment_t *pred;
	ra_segment_t *succ;
	size_t order;

	/*
	 * Locate the segment in the used hash table.
	 */
	link = hash_table_find(&span->used, &key);
	if (!link) {
		panic("Freeing segment which is not known to be used (base=%"
		    PRIxn ", size=%" PRIdn ").", base, size);
	}
	seg = hash_table_get_instance(link, ra_segment_t, fu_link);

	/*
	 * Hash out the segment.
	 */
	hash_table_remove(&span->used, &key, 1);

	ASSERT(!(seg->flags & RA_SEGMENT_FREE));
	ASSERT(seg->base == base);
	ASSERT(ra_segment_size_get(seg) == size);

	/*
	 * Check whether the segment can be coalesced with its left neighbor.
	 */
	if (list_first(&span->segments) != &seg->segment_link) {
		pred = hash_table_get_instance(seg->segment_link.prev,
		    ra_segment_t, segment_link);

		ASSERT(pred->base < seg->base);

		if (pred->flags & RA_SEGMENT_FREE) {
			/*
			 * The segment can be coalesced with its predecessor.
			 * Remove the predecessor from the free and segment
			 * lists, rebase the segment and throw the predecessor
			 * away.
			 */
			list_remove(&pred->fu_link);
			list_remove(&pred->segment_link);
			seg->base = pred->base;
			ra_segment_destroy(pred);
		}
	}

	/*
	 * Check whether the segment can be coalesced with its right neighbor.
	 */
	succ = hash_table_get_instance(seg->segment_link.next, ra_segment_t,
	    segment_link);
	ASSERT(succ->base > seg->base);
	if (succ->flags & RA_SEGMENT_FREE) {
		/*
		 * The segment can be coalesced with its successor.
		 * Remove the successor from the free and segment lists
		 * and throw it away.
		 */
		list_remove(&succ->fu_link);
		list_remove(&succ->segment_link);
		ra_segment_destroy(succ);
	}

	/* Put the segment on the appropriate free list. */
	seg->flags |= RA_SEGMENT_FREE;
	order = fnzb(ra_segment_size_get(seg));
	list_append(&seg->fu_link, &span->free[order]);
}

/** Allocate resources from arena. */
uintptr_t ra_alloc(ra_arena_t *arena, size_t size, size_t alignment)
{
	uintptr_t base = 0;

	ASSERT(size >= 1);
	ASSERT(alignment >= 1);
	ASSERT(ispwr2(alignment));

	irq_spinlock_lock(&arena->lock, true);
	list_foreach(arena->spans, cur) {
		ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);

		base = ra_span_alloc(span, size, alignment);
		if (base)
			break;
	}
	irq_spinlock_unlock(&arena->lock, true);

	return base;
}

/** Return resources to arena. */
void ra_free(ra_arena_t *arena, uintptr_t base, size_t size)
{
	irq_spinlock_lock(&arena->lock, true);
	list_foreach(arena->spans, cur) {
		ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);

		if (iswithin(span->base, span->size, base, size)) {
			ra_span_free(span, base, size);
			irq_spinlock_unlock(&arena->lock, true);
			return;
		}
	}
	irq_spinlock_unlock(&arena->lock, true);

	panic("Freeing to wrong arena (base=%" PRIxn ", size=%" PRIdn ").",
	    base, size);
}

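/** Initialize the resource allocator subsystem.
 *
 * Creates the slab cache backing segment allocation; this must run before
 * any arenas or spans are created.
 */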
void ra_init(void)
{
	ra_segment_cache = slab_cache_create("ra_segment_t",
	    sizeof(ra_segment_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
}

/** @}
 */