source: mainline/kernel/generic/src/lib/ra.c@ 3342f33

Last change on this file: 3342f33, checked in by Jakub Jermar <jakub@…>, 14 years ago

Basic locking for resource allocator.

/*
 * Copyright (c) 2011 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup generic
 * @{
 */

/**
 * @file
 * @brief Resource allocator.
 *
 * This is a generic resource allocator, loosely based on the ideas presented
 * in chapter 4 of the following paper and further simplified:
 *
 * Bonwick J., Adams J.: Magazines and Vmem: Extending the Slab Allocator to
 * Many CPUs and Arbitrary Resources, USENIX 2001
 *
 */
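
/*
 * A minimal usage sketch (illustrative only; the base, size and alignment
 * values are made up and error handling is elided):
 *
 * @code
 *	ra_arena_t *ids = ra_arena_create(1, 1024);
 *	(void) ra_span_add(ids, 2048, 1024);
 *	uintptr_t id = ra_alloc(ids, 16, 8);
 *	if (id)
 *		ra_free(ids, id, 16);
 * @endcode
 *
 * Note that ra_alloc() returns 0 on failure, which is why a span may not
 * contain the resource 0 (see ra_arena_create()).
 */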

#include <lib/ra.h>
#include <typedefs.h>
#include <mm/slab.h>
#include <bitops.h>
#include <debug.h>
#include <panic.h>
#include <adt/list.h>
#include <adt/hash_table.h>
#include <align.h>
#include <macros.h>
#include <synch/spinlock.h>

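/*
 * Number of buckets in the hash table of used segments. Kept a power of
 * two so that used_hash() can reduce a key to a bucket index with a simple
 * mask.
 */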
#define USED_BUCKETS	1024

/** Hash function for the hash table of used segments.
 *
 * The key is the segment base. Its two least significant bits are shifted
 * out before the bucket mask is applied; for aligned bases they carry
 * little information.
 */
static size_t used_hash(sysarg_t *key)
{
	return ((*key >> 2) & (USED_BUCKETS - 1));
}

/** Compare a used segment with a key (the segment base). */
static bool used_compare(sysarg_t *key, size_t keys, link_t *item)
{
	ra_segment_t *seg;

	seg = hash_table_get_instance(item, ra_segment_t, fu_link);
	return seg->base == *key;
}

static hash_table_operations_t used_ops = {
	.hash = used_hash,
	.compare = used_compare,
	.remove_callback = NULL,
};

/** Calculate the segment size.
 *
 * The size is not stored explicitly; it is the distance between the base
 * of the segment and the base of its successor in the segment list.
 */
static size_t ra_segment_size_get(ra_segment_t *seg)
{
	ra_segment_t *nextseg;

	nextseg = list_get_instance(seg->segment_link.next, ra_segment_t,
	    segment_link);
	return nextseg->base - seg->base;
}

static ra_segment_t *ra_segment_create(uintptr_t base)
{
	ra_segment_t *seg;

	seg = (ra_segment_t *) malloc(sizeof(ra_segment_t), FRAME_ATOMIC);
	if (!seg)
		return NULL;

	link_initialize(&seg->segment_link);
	link_initialize(&seg->fu_link);

	seg->base = base;
	seg->flags = 0;

	return seg;
}

static void ra_segment_destroy(ra_segment_t *seg)
{
	free(seg);
}

static ra_span_t *ra_span_create(uintptr_t base, size_t size)
{
	ra_span_t *span;
	ra_segment_t *seg, *lastseg;
	unsigned int i;

	span = (ra_span_t *) malloc(sizeof(ra_span_t), FRAME_ATOMIC);
	if (!span)
		return NULL;

	span->max_order = fnzb(size);
	span->base = base;
	span->size = size;

	span->free = (list_t *) malloc((span->max_order + 1) * sizeof(list_t),
	    FRAME_ATOMIC);
	if (!span->free) {
		free(span);
		return NULL;
	}

	/*
	 * Create a segment to represent the entire size of the span.
	 */
	seg = ra_segment_create(base);
	if (!seg) {
		free(span->free);
		free(span);
		return NULL;
	}
	seg->flags = RA_SEGMENT_FREE;

	/*
	 * The last segment will be used as a sentinel at the end of the
	 * segment list so that it is possible to calculate the size for
	 * all other segments. It will not be placed in any free list or
	 * in the used segment hash and adjacent segments will not be
	 * coalesced with it.
	 */
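	/*
	 * An illustrative layout (boundaries symbolic): for a span covering
	 * [base, base + size), the segment list reads
	 *
	 *	[seg: base] -> ... -> [lastseg: base + size]
	 *
	 * so the size of each real segment is the base of its successor
	 * minus its own base.
	 */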
	lastseg = ra_segment_create(base + size);
	if (!lastseg) {
		ra_segment_destroy(seg);
		free(span->free);
		free(span);
		return NULL;
	}

	link_initialize(&span->span_link);
	list_initialize(&span->segments);

	hash_table_create(&span->used, USED_BUCKETS, 1, &used_ops);

	/* Initialize all free lists, including the one of order max_order. */
	for (i = 0; i <= span->max_order; i++)
		list_initialize(&span->free[i]);

	/* Insert the first segment into the list of segments. */
	list_append(&seg->segment_link, &span->segments);
	/* Insert the last segment into the list of segments. */
	list_append(&lastseg->segment_link, &span->segments);

	/* Insert the first segment into the respective free list. */
	list_append(&seg->fu_link, &span->free[span->max_order]);

	return span;
}

/** Create arena with initial span. */
ra_arena_t *ra_arena_create(uintptr_t base, size_t size)
{
	ra_arena_t *arena;
	ra_span_t *span;

	/*
	 * At the moment, we can only create resources that don't include 0.
	 * If 0 needs to be considered a valid resource, the API of the
	 * resource allocator would need to change slightly, because
	 * ra_alloc() uses the return value 0 to signal failure.
	 */
	if (base == 0)
		return NULL;

	arena = (ra_arena_t *) malloc(sizeof(ra_arena_t), FRAME_ATOMIC);
	if (!arena)
		return NULL;

	span = ra_span_create(base, size);
	if (!span) {
		free(arena);
		return NULL;
	}

	spinlock_initialize(&arena->lock, "arena_lock");
	list_initialize(&arena->spans);
	list_append(&span->span_link, &arena->spans);

	return arena;
}

/** Add additional span to arena. */
bool ra_span_add(ra_arena_t *arena, uintptr_t base, size_t size)
{
	ra_span_t *span;

	if (base == 0)
		return false;

	span = ra_span_create(base, size);
	if (!span)
		return false;

	/* TODO: check for overlaps */
	spinlock_lock(&arena->lock);
	list_append(&span->span_link, &arena->spans);
	spinlock_unlock(&arena->lock);
	return true;
}

static uintptr_t ra_span_alloc(ra_span_t *span, size_t size, size_t align)
{
	/*
	 * We need to add the maximum of align - 1 to be able to compensate
	 * for the worst case unaligned segment.
	 */
	size_t needed = size + align - 1;
	size_t order = ispwr2(needed) ? fnzb(needed) : fnzb(needed) + 1;
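	/*
	 * A worked example (illustrative numbers): for size = 3 and
	 * align = 4, needed = 6, which is not a power of two, so
	 * order = fnzb(6) + 1 = 3. The search below thus starts at the free
	 * list of order 3, i.e. segments of at least 8 units, enough to
	 * carve out an aligned 3-unit block even in the worst case.
	 */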
	ra_segment_t *pred = NULL;
	ra_segment_t *succ = NULL;

	/*
	 * Find the free list of the smallest order which can satisfy this
	 * request.
	 */
	for (; order <= span->max_order; order++) {
		ra_segment_t *seg;
		uintptr_t newbase;

		if (list_empty(&span->free[order]))
			continue;

		/* Take the first segment from the free list. */
		seg = list_get_instance(list_first(&span->free[order]),
		    ra_segment_t, fu_link);

		ASSERT(seg->flags & RA_SEGMENT_FREE);

		/*
		 * See if we need to allocate new segments for the chopped-off
		 * parts of this segment.
		 */
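		/*
		 * An illustrative split (boundaries symbolic): at most three
		 * parts come out of the chosen free segment, and only the
		 * middle one is handed out:
		 *
		 *	seg->base      newbase      newbase + size
		 *	   |<-- pred -->|<- result ->|<--- succ --->|
		 */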
		if (!IS_ALIGNED(seg->base, align)) {
			pred = ra_segment_create(seg->base);
			if (!pred) {
				/*
				 * Fail as we are unable to split the segment.
				 */
				break;
			}
			pred->flags |= RA_SEGMENT_FREE;
		}
		newbase = ALIGN_UP(seg->base, align);
		if (newbase + size != seg->base + ra_segment_size_get(seg)) {
			ASSERT(newbase + size < seg->base +
			    ra_segment_size_get(seg));
			succ = ra_segment_create(newbase + size);
			if (!succ) {
				if (pred)
					ra_segment_destroy(pred);
				/*
				 * Fail as we are unable to split the segment.
				 */
				break;
			}
			succ->flags |= RA_SEGMENT_FREE;
		}

		/*
		 * Remove the found segment from the free list and rebase it
		 * first; the size (and thus the free list order) of pred is
		 * derived from the base of its list successor, which is seg.
		 */
		list_remove(&seg->fu_link);
		seg->base = newbase;
		seg->flags &= ~RA_SEGMENT_FREE;

		/* Put the unneeded parts back. */
		if (pred) {
			size_t pred_order;

			list_insert_before(&pred->segment_link,
			    &seg->segment_link);
			pred_order = fnzb(ra_segment_size_get(pred));
			list_append(&pred->fu_link, &span->free[pred_order]);
		}
		if (succ) {
			size_t succ_order;

			list_insert_after(&succ->segment_link,
			    &seg->segment_link);
			succ_order = fnzb(ra_segment_size_get(succ));
			list_append(&succ->fu_link, &span->free[succ_order]);
		}

		/* Hash-in the segment into the used hash. */
		sysarg_t key = seg->base;
		hash_table_insert(&span->used, &key, &seg->fu_link);

		return newbase;
	}

	/* No free segment of a sufficient order exists; signal failure. */
	return 0;
}

static void ra_span_free(ra_span_t *span, uintptr_t base, size_t size)
{
	sysarg_t key = base;
	link_t *link;
	ra_segment_t *seg;
	ra_segment_t *pred;
	ra_segment_t *succ;
	size_t order;

	/*
	 * Locate the segment in the used hash table.
	 */
	link = hash_table_find(&span->used, &key);
	if (!link) {
		panic("Freeing segment which is not known to be used (base=%"
		    PRIxn ", size=%" PRIdn ").", base, size);
	}
	seg = hash_table_get_instance(link, ra_segment_t, fu_link);

	/*
	 * Hash out the segment.
	 */
	hash_table_remove(&span->used, &key, 1);

	ASSERT(!(seg->flags & RA_SEGMENT_FREE));
	ASSERT(seg->base == base);
	ASSERT(ra_segment_size_get(seg) == size);

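	/*
	 * An illustrative sketch: when both neighbors of the freed segment
	 * happen to be free, the three segments collapse into one:
	 *
	 *	[pred free][seg freed][succ free]  ->  [single free segment]
	 */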
	/*
	 * Check whether the segment can be coalesced with its left neighbor.
	 */
	if (list_first(&span->segments) != &seg->segment_link) {
		pred = list_get_instance(seg->segment_link.prev,
		    ra_segment_t, segment_link);

		ASSERT(pred->base < seg->base);

		if (pred->flags & RA_SEGMENT_FREE) {
			/*
			 * The segment can be coalesced with its predecessor.
			 * Remove the predecessor from the free and segment
			 * lists, rebase the segment and throw the predecessor
			 * away.
			 */
			list_remove(&pred->fu_link);
			list_remove(&pred->segment_link);
			seg->base = pred->base;
			ra_segment_destroy(pred);
		}
	}

	/*
	 * Check whether the segment can be coalesced with its right neighbor.
	 */
	succ = list_get_instance(seg->segment_link.next, ra_segment_t,
	    segment_link);
	ASSERT(succ->base > seg->base);
	if (succ->flags & RA_SEGMENT_FREE) {
		/*
		 * The segment can be coalesced with its successor.
		 * Remove the successor from the free and segment lists
		 * and throw it away.
		 */
		list_remove(&succ->fu_link);
		list_remove(&succ->segment_link);
		ra_segment_destroy(succ);
	}

	/* Put the segment on the appropriate free list. */
	seg->flags |= RA_SEGMENT_FREE;
	order = fnzb(ra_segment_size_get(seg));
	list_append(&seg->fu_link, &span->free[order]);
}

/** Allocate resources from arena. */
uintptr_t ra_alloc(ra_arena_t *arena, size_t size, size_t alignment)
{
	uintptr_t base = 0;

	ASSERT(size >= 1);
	ASSERT(alignment >= 1);
	ASSERT(ispwr2(alignment));

	spinlock_lock(&arena->lock);
	list_foreach(arena->spans, cur) {
		ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);

		base = ra_span_alloc(span, size, alignment);
		if (base)
			break;
	}
	spinlock_unlock(&arena->lock);

	return base;
}

/** Return resources to arena. */
void ra_free(ra_arena_t *arena, uintptr_t base, size_t size)
{
	spinlock_lock(&arena->lock);
	list_foreach(arena->spans, cur) {
		ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);

		if (iswithin(span->base, span->size, base, size)) {
			ra_span_free(span, base, size);
			spinlock_unlock(&arena->lock);
			return;
		}
	}
	spinlock_unlock(&arena->lock);

	panic("Freeing to wrong arena (base=%" PRIxn ", size=%" PRIdn ").",
	    base, size);
}

/** @}
 */