source: mainline/kernel/generic/src/lib/ra.c@ b4e59b3

Branches containing this file: mainline, lfn, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Last change on this file since b4e59b3 was b4e59b3, checked in by Jakub Jermar <jakub@…>, 14 years ago

WIP: Implement ra_free().

  • Property mode set to 100644
File size: 10.2 KB
Line 
1/*
2 * Copyright (c) 2011 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup generic
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Resource allocator.
36 *
37 * This is a generic resource allocator, loosely based on the ideas presented
38 * in chapter 4 of the following paper and further simplified:
39 *
40 * Bonwick J., Adams J.: Magazines and Vmem: Extending the Slab Allocator to
41 * Many CPUs and Arbitrary Resources, USENIX 2001
42 *
43 */
44
45#include <lib/ra.h>
46#include <typedefs.h>
47#include <mm/slab.h>
48#include <bitops.h>
49#include <debug.h>
50#include <panic.h>
51#include <adt/list.h>
52#include <adt/hash_table.h>
53#include <align.h>
54#include <macros.h>
55
/*
 * Operations for the hash table of used (allocated) segments, keyed by
 * segment base address.
 *
 * NOTE(review): all three callbacks are NULL in this WIP revision; the
 * generic hash_table code presumably invokes .hash and .compare on every
 * insert/lookup, so lookups in ra_span_free() would fault until real
 * implementations are supplied — TODO confirm against adt/hash_table.c.
 */
static hash_table_operations_t used_ops = {
	.hash = NULL,
	.compare = NULL,
	.remove_callback = NULL,
};
61
62/** Calculate the segment size. */
63static size_t ra_segment_size_get(ra_segment_t *seg)
64{
65 ra_segment_t *nextseg;
66
67 nextseg = list_get_instance(seg->segment_link.next, ra_segment_t,
68 segment_link);
69 return nextseg->base - seg->base;
70}
71
72static ra_segment_t *ra_segment_create(uintptr_t base)
73{
74 ra_segment_t *seg;
75
76 seg = (ra_segment_t *) malloc(sizeof(ra_segment_t), FRAME_ATOMIC);
77 if (!seg)
78 return NULL;
79
80 link_initialize(&seg->segment_link);
81 link_initialize(&seg->fu_link);
82
83 seg->base = base;
84 seg->flags = 0;
85
86 return seg;
87}
88
/** Destroy a segment.
 *
 * The caller is responsible for unlinking the segment from all lists and
 * from the used hash table before destroying it.
 */
static void ra_segment_destroy(ra_segment_t *seg)
{
	free(seg);
}
93
94static ra_span_t *ra_span_create(uintptr_t base, size_t size)
95{
96 ra_span_t *span;
97 ra_segment_t *seg, *lastseg;
98 unsigned int i;
99
100 span = (ra_span_t *) malloc(sizeof(ra_span_t), FRAME_ATOMIC);
101 if (!span)
102 return NULL;
103
104 span->max_order = fnzb(size);
105 span->base = base;
106 span->size = size;
107
108 span->free = (list_t *) malloc((span->max_order + 1) * sizeof(list_t),
109 FRAME_ATOMIC);
110 if (!span->free) {
111 free(span);
112 return NULL;
113 }
114
115 /*
116 * Create a segment to represent the entire size of the span.
117 */
118 seg = ra_segment_create(base);
119 if (!seg) {
120 free(span->free);
121 free(span);
122 return NULL;
123 }
124 seg->flags = RA_SEGMENT_FREE;
125
126 /*
127 * The last segment will be used as a sentinel at the end of the
128 * segment list so that it is possible to calculate the size for
129 * all other segments. It will not be placed in any free list or
130 * in the used segment hash and adjacent segments will not be
131 * coalesced with it.
132 */
133 lastseg = ra_segment_create(base + size);
134 if (!lastseg) {
135 ra_segment_destroy(seg);
136 free(span->free);
137 free(span);
138 return NULL;
139 }
140
141 link_initialize(&span->span_link);
142 list_initialize(&span->segments);
143
144 hash_table_create(&span->used, span->max_order + 1, 1, &used_ops);
145
146 for (i = 0; i < span->max_order; i++)
147 list_initialize(&span->free[i]);
148
149 /* Insert the first segment into the list of segments. */
150 list_append(&seg->segment_link, &span->segments);
151 /* Insert the last segment into the list of segments. */
152 list_append(&lastseg->segment_link, &span->segments);
153
154 /* Insert the first segment into the respective free list. */
155 list_append(&seg->fu_link, &span->free[span->max_order]);
156
157 return span;
158}
159
160/** Create arena with initial span. */
161ra_arena_t *ra_arena_create(uintptr_t base, size_t size)
162{
163 ra_arena_t *arena;
164 ra_span_t *span;
165
166 /*
167 * At the moment, we can only create resources that don't include 0.
168 * If 0 needs to be considered as a valid resource, we would need to
169 * slightly change the API of the resource allocator.
170 */
171 if (base == 0)
172 return NULL;
173
174 arena = (ra_arena_t *) malloc(sizeof(ra_arena_t), FRAME_ATOMIC);
175 if (!arena)
176 return NULL;
177
178 span = ra_span_create(base, size);
179 if (!span) {
180 free(arena);
181 return NULL;
182 }
183
184 list_initialize(&arena->spans);
185 list_append(&span->span_link, &arena->spans);
186
187 return arena;
188}
189
190/** Add additional span to arena. */
191bool ra_span_add(ra_arena_t *arena, uintptr_t base, size_t size)
192{
193 ra_span_t *span;
194
195 if (base == 0)
196 return false;
197
198 span = ra_span_create(base, size);
199 if (!span)
200 return false;
201
202 /* TODO: check for overlaps */
203 list_append(&span->span_link, &arena->spans);
204 return true;
205}
206
/** Allocate an aligned segment of @a size bytes from @a span.
 *
 * Scans the free lists from the smallest order that can possibly satisfy
 * the request upwards. A found free segment may be split into up to three
 * parts: an unaligned prefix (pred), the allocation itself, and a suffix
 * (succ); pred/succ are returned to the appropriate free lists. The
 * allocated segment is hashed into the span's used table.
 *
 * @return Allocated base address, or 0 on failure.
 */
static uintptr_t ra_span_alloc(ra_span_t *span, size_t size, size_t align)
{
	/*
	 * We need to add the maximum of align - 1 to be able to compensate for
	 * the worst case unaligned segment.
	 */
	size_t needed = size + align - 1;
	size_t order = ispwr2(needed) ? fnzb(needed) : fnzb(needed) + 1;
	ra_segment_t *pred = NULL;
	ra_segment_t *succ = NULL;

	/*
	 * Find the free list of the smallest order which can satisfy this
	 * request.
	 */
	for (; order <= span->max_order; order++) {
		ra_segment_t *seg;
		uintptr_t newbase;

		if (list_empty(&span->free[order]))
			continue;

		/* Take the first segment from the free list. */
		seg = list_get_instance(list_first(&span->free[order]),
		    ra_segment_t, fu_link);

		ASSERT(seg->flags & RA_SEGMENT_FREE);

		/*
		 * See if we need to allocate new segments for the chopped-off
		 * parts of this segment.
		 */
		if (!IS_ALIGNED(seg->base, align)) {
			/* Unaligned prefix will become a separate free segment. */
			pred = ra_segment_create(seg->base);
			if (!pred) {
				/*
				 * Fail as we are unable to split the segment.
				 */
				break;
			}
			pred->flags |= RA_SEGMENT_FREE;
		}
		newbase = ALIGN_UP(seg->base, align);
		if (newbase + size != seg->base + ra_segment_size_get(seg)) {
			/* Leftover suffix will become a separate free segment. */
			ASSERT(newbase + size < seg->base +
			    ra_segment_size_get(seg));
			succ = ra_segment_create(newbase + size);
			if (!succ) {
				/* Roll back the prefix segment, if any. */
				if (pred)
					ra_segment_destroy(pred);
				/*
				 * Fail as we are unable to split the segment.
				 */
				break;
			}
			succ->flags |= RA_SEGMENT_FREE;
		}


		/* Put unneeded parts back. */
		if (pred) {
			size_t pred_order;

			/*
			 * Insert pred before seg on the segment list; its size
			 * (pred->base .. seg->base after rebasing below) then
			 * determines its free-list order.
			 */
			list_insert_before(&pred->segment_link,
			    &seg->segment_link);
			pred_order = fnzb(ra_segment_size_get(pred));
			list_append(&pred->fu_link, &span->free[pred_order]);
		}
		if (succ) {
			size_t succ_order;

			list_insert_after(&succ->segment_link,
			    &seg->segment_link);
			succ_order = fnzb(ra_segment_size_get(succ));
			list_append(&succ->fu_link, &span->free[succ_order]);
		}

		/* Now remove the found segment from the free list. */
		list_remove(&seg->fu_link);
		seg->base = newbase;
		seg->flags &= ~RA_SEGMENT_FREE;

		/* Hash-in the segment into the used hash. */
		sysarg_t key = seg->base;
		hash_table_insert(&span->used, &key, &seg->fu_link);

		return newbase;
	}

	/* No free list could satisfy the request, or a split failed. */
	return 0;
}
298
/** Return a previously allocated segment [base, base + size) to @a span.
 *
 * Looks the segment up in the span's used hash table, coalesces it with
 * free neighbors on either side, and places the result on the free list
 * of the appropriate order. Panics if the segment is not known to be used.
 */
static void ra_span_free(ra_span_t *span, size_t base, size_t size)
{
	sysarg_t key = base;
	link_t *link;
	ra_segment_t *seg;
	ra_segment_t *pred;
	ra_segment_t *succ;
	size_t order;

	/*
	 * Locate the segment in the used hash table.
	 */
	link = hash_table_find(&span->used, &key);
	if (!link) {
		panic("Freeing segment which is not known to be used (base=%"
		    PRIxn ", size=%" PRIdn ").", base, size);
	}
	seg = hash_table_get_instance(link, ra_segment_t, fu_link);

	/*
	 * Hash out the segment.
	 */
	hash_table_remove(&span->used, &key, 1);

	ASSERT(!(seg->flags & RA_SEGMENT_FREE));
	ASSERT(seg->base == base);
	ASSERT(ra_segment_size_get(seg) == size);

	/*
	 * Check whether the segment can be coalesced with its left neighbor.
	 */
	if (list_first(&span->segments) != &seg->segment_link) {
		/*
		 * NOTE(review): hash_table_get_instance is used here on a
		 * plain segment-list link; presumably it is the same macro
		 * as list_get_instance — confirm against adt/hash_table.h.
		 */
		pred = hash_table_get_instance(seg->segment_link.prev,
		    ra_segment_t, segment_link);

		ASSERT(pred->base < seg->base);

		if (pred->flags & RA_SEGMENT_FREE) {
			/*
			 * The segment can be coalesced with its predecessor.
			 * Remove the predecessor from the free and segment
			 * lists, rebase the segment and throw the predecessor
			 * away.
			 */
			list_remove(&pred->fu_link);
			list_remove(&pred->segment_link);
			seg->base = pred->base;
			ra_segment_destroy(pred);
		}
	}

	/*
	 * Check whether the segment can be coalesced with its right neighbor.
	 */
	succ = hash_table_get_instance(seg->segment_link.next, ra_segment_t,
	    segment_link);
	ASSERT(succ->base > seg->base);
	if (succ->flags & RA_SEGMENT_FREE) {
		/*
		 * The segment can be coalesced with its successor.
		 * Remove the successor from the free and segment lists
		 * and throw it away.
		 */
		list_remove(&succ->fu_link);
		list_remove(&succ->segment_link);
		ra_segment_destroy(succ);
	}

	/* Put the segment on the appropriate free list. */
	seg->flags |= RA_SEGMENT_FREE;
	order = fnzb(ra_segment_size_get(seg));
	list_append(&seg->fu_link, &span->free[order]);
}
372
373/** Allocate resources from arena. */
374uintptr_t ra_alloc(ra_arena_t *arena, size_t size, size_t alignment)
375{
376 uintptr_t base = 0;
377
378 ASSERT(size >= 1);
379 ASSERT(alignment >= 1);
380 ASSERT(ispwr2(alignment));
381
382 list_foreach(arena->spans, cur) {
383 ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);
384
385 base = ra_span_alloc(span, size, alignment);
386 if (base)
387 break;
388 }
389
390 return base;
391}
392
393/* Return resources to arena. */
394void ra_free(ra_arena_t *arena, uintptr_t base, size_t size)
395{
396 list_foreach(arena->spans, cur) {
397 ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);
398
399 if (iswithin(span->base, span->size, base, size)) {
400 ra_span_free(span, base, size);
401 return;
402 }
403 }
404
405 panic("Freeing to wrong arena (base=%" PRIxn ", size=%" PRIdn ").",
406 base, size);
407}
408
409
410/** @}
411 */
Note: See TracBrowser for help on using the repository browser.