source: mainline/kernel/generic/src/lib/ra.c@ 12cb03dd

Last change on this file was 12cb03dd, checked in by Jakub Jermar <jakub@…>, 14 years ago

WIP: implement the used segment hash table

/*
 * Copyright (c) 2011 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup generic
 * @{
 */

/**
 * @file
 * @brief Resource allocator.
 *
 * This is a generic resource allocator, loosely based on the ideas presented
 * in chapter 4 of the following paper and further simplified:
 *
 * Bonwick J., Adams J.: Magazines and Vmem: Extending the Slab Allocator to
 * Many CPUs and Arbitrary Resources, USENIX 2001
 *
 */

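/*
 * Illustrative usage sketch. The resource values below are made up for
 * the example; only the interface declared in lib/ra.h is real:
 *
 *	ra_arena_t *irqs = ra_arena_create(1, 63);	// manage IRQs 1..63
 *	uintptr_t irq = ra_alloc(irqs, 1, 1);		// allocate a single IRQ
 *	if (irq)
 *		ra_free(irqs, irq, 1);			// return it to the arena
 *
 * A return value of 0 from ra_alloc() signals failure, which is why an
 * arena may not cover the resource value 0 (see ra_arena_create()).
 */
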
#include <lib/ra.h>
#include <typedefs.h>
#include <mm/slab.h>
#include <bitops.h>
#include <debug.h>
#include <panic.h>
#include <adt/list.h>
#include <adt/hash_table.h>
#include <align.h>
#include <macros.h>

#define USED_BUCKETS	1024

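/*
 * Hash table operations for the used segment hash. The table is keyed by
 * segment base. The bucket mask in used_hash() relies on USED_BUCKETS
 * being a power of two; shifting the key right by two assumes that
 * segment bases tend to share their least significant bits due to
 * alignment, which would otherwise skew the bucket distribution.
 */
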
static size_t used_hash(sysarg_t *key)
{
	return ((*key >> 2) & (USED_BUCKETS - 1));
}

static bool used_compare(sysarg_t *key, size_t keys, link_t *item)
{
	ra_segment_t *seg;

	seg = hash_table_get_instance(item, ra_segment_t, fu_link);
	return seg->base == *key;
}

static hash_table_operations_t used_ops = {
	.hash = used_hash,
	.compare = used_compare,
	.remove_callback = NULL,
};

/** Calculate the segment size.
 *
 * A segment's size is not stored explicitly; it is the distance between
 * the segment's base and the base of its successor in the span's segment
 * list. The sentinel segment guarantees that every real segment has a
 * successor.
 */
static size_t ra_segment_size_get(ra_segment_t *seg)
{
	ra_segment_t *nextseg;

	nextseg = list_get_instance(seg->segment_link.next, ra_segment_t,
	    segment_link);
	return nextseg->base - seg->base;
}

static ra_segment_t *ra_segment_create(uintptr_t base)
{
	ra_segment_t *seg;

	seg = (ra_segment_t *) malloc(sizeof(ra_segment_t), FRAME_ATOMIC);
	if (!seg)
		return NULL;

	link_initialize(&seg->segment_link);
	link_initialize(&seg->fu_link);

	seg->base = base;
	seg->flags = 0;

	return seg;
}

static void ra_segment_destroy(ra_segment_t *seg)
{
	free(seg);
}

static ra_span_t *ra_span_create(uintptr_t base, size_t size)
{
	ra_span_t *span;
	ra_segment_t *seg, *lastseg;
	unsigned int i;

	span = (ra_span_t *) malloc(sizeof(ra_span_t), FRAME_ATOMIC);
	if (!span)
		return NULL;

	span->max_order = fnzb(size);
	span->base = base;
	span->size = size;

	span->free = (list_t *) malloc((span->max_order + 1) * sizeof(list_t),
	    FRAME_ATOMIC);
	if (!span->free) {
		free(span);
		return NULL;
	}

	/*
	 * Create a segment to represent the entire size of the span.
	 */
	seg = ra_segment_create(base);
	if (!seg) {
		free(span->free);
		free(span);
		return NULL;
	}
	seg->flags = RA_SEGMENT_FREE;

	/*
	 * The last segment will be used as a sentinel at the end of the
	 * segment list so that it is possible to calculate the size for
	 * all other segments. It will not be placed in any free list or
	 * in the used segment hash and adjacent segments will not be
	 * coalesced with it.
	 */
	lastseg = ra_segment_create(base + size);
	if (!lastseg) {
		ra_segment_destroy(seg);
		free(span->free);
		free(span);
		return NULL;
	}

	link_initialize(&span->span_link);
	list_initialize(&span->segments);

	hash_table_create(&span->used, USED_BUCKETS, 1, &used_ops);

	/* There are max_order + 1 free lists, including span->free[max_order]. */
	for (i = 0; i <= span->max_order; i++)
		list_initialize(&span->free[i]);

	/* Insert the first segment into the list of segments. */
	list_append(&seg->segment_link, &span->segments);
	/* Insert the last segment into the list of segments. */
	list_append(&lastseg->segment_link, &span->segments);

	/* Insert the first segment into the respective free list. */
	list_append(&seg->fu_link, &span->free[span->max_order]);

	return span;
}

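/*
 * After ra_span_create(base, size), the span's segment list holds exactly
 * two entries: one free segment covering the whole span, followed by the
 * sentinel, which carries no resources of its own:
 *
 *	[base, FREE] -> [base + size, sentinel]
 */
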
/** Create arena with initial span. */
ra_arena_t *ra_arena_create(uintptr_t base, size_t size)
{
	ra_arena_t *arena;
	ra_span_t *span;

	/*
	 * At the moment, we can only create resources that don't include 0.
	 * If 0 needs to be considered a valid resource, we would need to
	 * slightly change the API of the resource allocator.
	 */
	if (base == 0)
		return NULL;

	arena = (ra_arena_t *) malloc(sizeof(ra_arena_t), FRAME_ATOMIC);
	if (!arena)
		return NULL;

	span = ra_span_create(base, size);
	if (!span) {
		free(arena);
		return NULL;
	}

	list_initialize(&arena->spans);
	list_append(&span->span_link, &arena->spans);

	return arena;
}

/** Add additional span to arena. */
bool ra_span_add(ra_arena_t *arena, uintptr_t base, size_t size)
{
	ra_span_t *span;

	if (base == 0)
		return false;

	span = ra_span_create(base, size);
	if (!span)
		return false;

	/* TODO: check for overlaps */
	list_append(&span->span_link, &arena->spans);
	return true;
}

static uintptr_t ra_span_alloc(ra_span_t *span, size_t size, size_t align)
{
	/*
	 * We need to add at most align - 1 to the requested size in order
	 * to compensate for the worst case misaligned free segment: its
	 * base may have to be rounded up by as much as align - 1 before
	 * the allocation is carved out of it.
	 */
	size_t needed = size + align - 1;
	size_t order = ispwr2(needed) ? fnzb(needed) : fnzb(needed) + 1;
	ra_segment_t *pred = NULL;
	ra_segment_t *succ = NULL;

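	/*
	 * Worked example (illustrative): size = 3 with align = 4 gives
	 * needed = 6, which is not a power of two, so order = fnzb(6) + 1
	 * = 3 and the search below starts at the free list of order-3
	 * (8-unit) segments.
	 */
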
	/*
	 * Find the free list of the smallest order which can satisfy this
	 * request.
	 */
	for (; order <= span->max_order; order++) {
		ra_segment_t *seg;
		uintptr_t newbase;

		if (list_empty(&span->free[order]))
			continue;

		/* Take the first segment from the free list. */
		seg = list_get_instance(list_first(&span->free[order]),
		    ra_segment_t, fu_link);

		ASSERT(seg->flags & RA_SEGMENT_FREE);

		/*
		 * See if we need to allocate new segments for the chopped-off
		 * parts of this segment.
		 */
		if (!IS_ALIGNED(seg->base, align)) {
			pred = ra_segment_create(seg->base);
			if (!pred) {
				/*
				 * Fail as we are unable to split the segment.
				 */
				break;
			}
			pred->flags |= RA_SEGMENT_FREE;
		}
		newbase = ALIGN_UP(seg->base, align);
		if (newbase + size != seg->base + ra_segment_size_get(seg)) {
			ASSERT(newbase + size < seg->base +
			    ra_segment_size_get(seg));
			succ = ra_segment_create(newbase + size);
			if (!succ) {
				if (pred)
					ra_segment_destroy(pred);
				/*
				 * Fail as we are unable to split the segment.
				 */
				break;
			}
			succ->flags |= RA_SEGMENT_FREE;
		}
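		/*
		 * The split result (illustrative; pred and succ exist only
		 * when the respective chopped-off part is non-empty):
		 *
		 *	[pred: seg->base .. newbase)
		 *	[seg:  newbase .. newbase + size)	<- allocated
		 *	[succ: newbase + size .. old end)
		 */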
		/*
		 * Remove the found segment from the free list and rebase it
		 * to the aligned base first, so that the sizes of pred and
		 * succ computed below come out right.
		 */
		list_remove(&seg->fu_link);
		seg->base = newbase;
		seg->flags &= ~RA_SEGMENT_FREE;

		/* Put unneeded parts back. */
		if (pred) {
			size_t pred_order;

			list_insert_before(&pred->segment_link,
			    &seg->segment_link);
			pred_order = fnzb(ra_segment_size_get(pred));
			list_append(&pred->fu_link, &span->free[pred_order]);
		}
		if (succ) {
			size_t succ_order;

			list_insert_after(&succ->segment_link,
			    &seg->segment_link);
			succ_order = fnzb(ra_segment_size_get(succ));
			list_append(&succ->fu_link, &span->free[succ_order]);
		}

		/* Hash-in the segment into the used hash. */
		sysarg_t key = seg->base;
		hash_table_insert(&span->used, &key, &seg->fu_link);

		return newbase;
	}

	return 0;
}

static void ra_span_free(ra_span_t *span, size_t base, size_t size)
{
	sysarg_t key = base;
	link_t *link;
	ra_segment_t *seg;
	ra_segment_t *pred;
	ra_segment_t *succ;
	size_t order;

	/*
	 * Locate the segment in the used hash table.
	 */
	link = hash_table_find(&span->used, &key);
	if (!link) {
		panic("Freeing segment which is not known to be used (base=%"
		    PRIxn ", size=%" PRIdn ").", base, size);
	}
	seg = hash_table_get_instance(link, ra_segment_t, fu_link);

	/*
	 * Hash out the segment.
	 */
	hash_table_remove(&span->used, &key, 1);

	ASSERT(!(seg->flags & RA_SEGMENT_FREE));
	ASSERT(seg->base == base);
	ASSERT(ra_segment_size_get(seg) == size);

	/*
	 * Check whether the segment can be coalesced with its left neighbor.
	 */
	if (list_first(&span->segments) != &seg->segment_link) {
		pred = list_get_instance(seg->segment_link.prev,
		    ra_segment_t, segment_link);

		ASSERT(pred->base < seg->base);

		if (pred->flags & RA_SEGMENT_FREE) {
			/*
			 * The segment can be coalesced with its predecessor.
			 * Remove the predecessor from the free and segment
			 * lists, rebase the segment and throw the predecessor
			 * away.
			 */
			list_remove(&pred->fu_link);
			list_remove(&pred->segment_link);
			seg->base = pred->base;
			ra_segment_destroy(pred);
		}
	}

	/*
	 * Check whether the segment can be coalesced with its right neighbor.
	 */
	succ = list_get_instance(seg->segment_link.next, ra_segment_t,
	    segment_link);
	ASSERT(succ->base > seg->base);
	if (succ->flags & RA_SEGMENT_FREE) {
		/*
		 * The segment can be coalesced with its successor.
		 * Remove the successor from the free and segment lists
		 * and throw it away.
		 */
		list_remove(&succ->fu_link);
		list_remove(&succ->segment_link);
		ra_segment_destroy(succ);
	}

	/* Put the segment on the appropriate free list. */
	seg->flags |= RA_SEGMENT_FREE;
	order = fnzb(ra_segment_size_get(seg));
	list_append(&seg->fu_link, &span->free[order]);
}

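/*
 * Note that, as a simplification relative to the vmem allocator described
 * in the Bonwick & Adams paper, freed segments are eagerly coalesced with
 * their free neighbors right away; there is no quantum caching layer.
 */
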
/** Allocate resources from arena. */
uintptr_t ra_alloc(ra_arena_t *arena, size_t size, size_t alignment)
{
	uintptr_t base = 0;

	ASSERT(size >= 1);
	ASSERT(alignment >= 1);
	ASSERT(ispwr2(alignment));

	list_foreach(arena->spans, cur) {
		ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);

		base = ra_span_alloc(span, size, alignment);
		if (base)
			break;
	}

	return base;
}

/** Return resources to arena. */
void ra_free(ra_arena_t *arena, uintptr_t base, size_t size)
{
	list_foreach(arena->spans, cur) {
		ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);

		if (iswithin(span->base, span->size, base, size)) {
			ra_span_free(span, base, size);
			return;
		}
	}

	panic("Freeing to wrong arena (base=%" PRIxn ", size=%" PRIdn ").",
	    base, size);
}

/** @}
 */