source: mainline/kernel/generic/src/lib/ra.c@961c0484

Last change on this file since 961c0484 was 961c0484, checked in by Jakub Jermar <jakub@…>, 14 years ago

WIP: Implement ra_alloc().

/*
 * Copyright (c) 2011 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup generic
 * @{
 */

/**
 * @file
 * @brief Resource allocator.
 *
 * This is a generic resource allocator, loosely based on the ideas presented
 * in chapter 4 of the following paper and further simplified:
 *
 * Bonwick J., Adams J.: Magazines and Vmem: Extending the Slab Allocator to
 * Many CPUs and Arbitrary Resources, USENIX 2001
 */
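
/*
 * A minimal usage sketch, kept as a comment; the base, size and alignment
 * values below are made up for illustration, only the functions are from
 * this file:
 *
 *	ra_arena_t *arena = ra_arena_create(0x1000, 0x10000);
 *	if (arena) {
 *		(void) ra_span_add(arena, 0x20000, 0x10000);
 *		uintptr_t base = ra_alloc(arena, 0x380, 0x100);
 *		if (base)
 *			ra_free(arena, base, 0x380);
 *	}
 */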

#include <lib/ra.h>
#include <typedefs.h>
#include <mm/slab.h>
#include <bitops.h>
#include <debug.h>
#include <panic.h>
#include <adt/list.h>
#include <adt/hash_table.h>
#include <align.h>
#include <macros.h>

/*
 * The last segment on the segment list will be a special sentinel segment which
 * is neither in any free list nor in the used segment hash.
 */
#define IS_LAST_SEG(seg)	(!(seg)->fu_link.next)

static hash_table_operations_t used_ops = {
	.hash = NULL,
	.compare = NULL,
	.remove_callback = NULL,
};

/** Calculate the segment size. */
static size_t ra_segment_size_get(ra_segment_t *seg)
{
	ra_segment_t *nextseg;

	ASSERT(!IS_LAST_SEG(seg));

	nextseg = list_get_instance(seg->segment_link.next, ra_segment_t,
	    segment_link);
	return nextseg->base - seg->base;
}
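
/*
 * Illustration of the sentinel-based size computation above, with made-up
 * numbers: for the segment list
 *
 *	[base 0x1000] -> [base 0x1400] -> [base 0x2000, sentinel]
 *
 * the first segment is 0x400 bytes and the second one 0xC00 bytes, each size
 * being the next base minus the segment's own base. The sentinel only
 * terminates the list; the ASSERT above guards against asking for its size.
 */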

static ra_segment_t *ra_segment_create(uintptr_t base)
{
	ra_segment_t *seg;

	seg = (ra_segment_t *) malloc(sizeof(ra_segment_t), FRAME_ATOMIC);
	if (!seg)
		return NULL;

	link_initialize(&seg->segment_link);
	link_initialize(&seg->fu_link);

	seg->base = base;

	return seg;
}

static void ra_segment_destroy(ra_segment_t *seg)
{
	free(seg);
}

static ra_span_t *ra_span_create(uintptr_t base, size_t size)
{
	ra_span_t *span;
	ra_segment_t *seg, *lastseg;
	unsigned int i;

	span = (ra_span_t *) malloc(sizeof(ra_span_t), FRAME_ATOMIC);
	if (!span)
		return NULL;

	span->max_order = fnzb(size);
	span->base = base;
	span->size = size;

	span->free = (list_t *) malloc((span->max_order + 1) * sizeof(list_t),
	    FRAME_ATOMIC);
	if (!span->free) {
		free(span);
		return NULL;
	}

	/*
	 * Create a segment to represent the entire size of the span.
	 */
	seg = ra_segment_create(base);
	if (!seg) {
		free(span->free);
		free(span);
		return NULL;
	}

	/*
	 * The last segment will be used as a sentinel at the end of the
	 * segment list so that it is possible to calculate the size for
	 * all other segments. It will not be placed in any free list or
	 * in the used segment hash and adjacent segments will not be
	 * coalesced with it.
	 */
	lastseg = ra_segment_create(base + size);
	if (!lastseg) {
		ra_segment_destroy(seg);
		free(span->free);
		free(span);
		return NULL;
	}
	/* Make sure we have NULL here so that we can recognize the sentinel. */
	lastseg->fu_link.next = NULL;

	link_initialize(&span->span_link);
	list_initialize(&span->segments);

	hash_table_create(&span->used, span->max_order + 1, 1, &used_ops);

	/*
	 * There are max_order + 1 free lists; initialize all of them,
	 * including free[max_order], which is used right below.
	 */
	for (i = 0; i <= span->max_order; i++)
		list_initialize(&span->free[i]);

	/* Insert the first segment into the list of segments. */
	list_append(&seg->segment_link, &span->segments);
	/* Insert the last segment into the list of segments. */
	list_append(&lastseg->segment_link, &span->segments);

	/* Insert the first segment into the respective free list. */
	list_append(&seg->fu_link, &span->free[span->max_order]);

	return span;
}
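
/*
 * For reference, right after ra_span_create(base, size) returns, the span
 * contains exactly two segments:
 *
 *	segments:        [base] -> [base + size, sentinel]
 *	free[max_order]: the first segment
 *	used:            empty
 *
 * All other free lists are empty; allocations carve up the first segment.
 */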

/** Create arena with initial span. */
ra_arena_t *ra_arena_create(uintptr_t base, size_t size)
{
	ra_arena_t *arena;
	ra_span_t *span;

	/*
	 * At the moment, we can only create resources that don't include 0.
	 * If 0 needs to be considered as a valid resource, we would need to
	 * slightly change the API of the resource allocator.
	 */
	if (base == 0)
		return NULL;

	arena = (ra_arena_t *) malloc(sizeof(ra_arena_t), FRAME_ATOMIC);
	if (!arena)
		return NULL;

	span = ra_span_create(base, size);
	if (!span) {
		free(arena);
		return NULL;
	}

	list_initialize(&arena->spans);
	list_append(&span->span_link, &arena->spans);

	return arena;
}

/** Add additional span to arena. */
bool ra_span_add(ra_arena_t *arena, uintptr_t base, size_t size)
{
	ra_span_t *span;

	if (base == 0)
		return false;

	span = ra_span_create(base, size);
	if (!span)
		return false;

	/* TODO: check for overlaps */
	list_append(&span->span_link, &arena->spans);
	return true;
}

static uintptr_t ra_span_alloc(ra_span_t *span, size_t size, size_t align)
{
	/*
	 * We need to add the maximum of align - 1 to be able to compensate for
	 * the worst case unaligned segment.
	 */
	size_t needed = size + align - 1;
	size_t order = ispwr2(needed) ? fnzb(needed) : fnzb(needed) + 1;
	ra_segment_t *pred = NULL;
	ra_segment_t *succ = NULL;
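
	/*
	 * Worked example with made-up numbers: for size = 0x380 and
	 * align = 0x100, needed = 0x47f, which is not a power of two, so
	 * order = fnzb(0x47f) + 1 = 11. Any segment on a free list of
	 * order 11 or higher is at least 0x800 bytes long and can
	 * therefore fit 0x380 bytes at 0x100 alignment no matter how its
	 * base is aligned.
	 */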

	/*
	 * Find the free list of the smallest order which can satisfy this
	 * request.
	 */
	for (; order <= span->max_order; order++) {
		ra_segment_t *seg;
		uintptr_t newbase;

		if (list_empty(&span->free[order]))
			continue;

		/* Take the first segment from the free list. */
		seg = list_get_instance(list_first(&span->free[order]),
		    ra_segment_t, fu_link);

		/*
		 * See if we need to allocate new segments for the chopped-off
		 * parts of this segment.
		 */
		if (!IS_ALIGNED(seg->base, align)) {
			pred = ra_segment_create(seg->base);
			if (!pred) {
				/*
				 * Fail as we are unable to split the segment.
				 */
				break;
			}
		}
		newbase = ALIGN_UP(seg->base, align);
		if (newbase + size != seg->base + ra_segment_size_get(seg)) {
			ASSERT(newbase + size < seg->base +
			    ra_segment_size_get(seg));
			succ = ra_segment_create(newbase + size);
			if (!succ) {
				if (pred)
					ra_segment_destroy(pred);
				/*
				 * Fail as we are unable to split the segment.
				 */
				break;
			}
		}

		/* Put unneeded parts back. */
		if (pred) {
			size_t pred_order;

			list_insert_before(&pred->segment_link,
			    &seg->segment_link);
			pred_order = fnzb(ra_segment_size_get(pred));
			list_append(&pred->fu_link, &span->free[pred_order]);
		}
		if (succ) {
			size_t succ_order;

			list_insert_after(&succ->segment_link,
			    &seg->segment_link);
			succ_order = fnzb(ra_segment_size_get(succ));
			list_append(&succ->fu_link, &span->free[succ_order]);
		}

		/* Now remove the found segment from the free list. */
		list_remove(&seg->fu_link);
		seg->base = newbase;

		/* Hash-in the segment into the used hash. */
		sysarg_t key = seg->base;
		hash_table_insert(&span->used, &key, &seg->fu_link);

		return newbase;
	}

	/* Return 0 on failure; spans never contain 0 (cf. ra_arena_create()). */
	return 0;
}

static void ra_span_free(ra_span_t *span, size_t base, size_t size)
{
	/*
	 * TODO: Not implemented yet in this WIP revision. Freeing will need
	 * to look the segment up in the used hash, coalesce it with any free
	 * neighbors (but never with the sentinel) and put the result back on
	 * the free list of the matching order.
	 */
}

/** Allocate resources from arena. */
uintptr_t ra_alloc(ra_arena_t *arena, size_t size, size_t alignment)
{
	uintptr_t base = 0;

	ASSERT(size >= 1);
	ASSERT(alignment >= 1);
	ASSERT(ispwr2(alignment));

	list_foreach(arena->spans, cur) {
		ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);

		base = ra_span_alloc(span, size, alignment);
		if (base)
			break;
	}

	return base;
}

/** Return resources to arena. */
void ra_free(ra_arena_t *arena, uintptr_t base, size_t size)
{
	list_foreach(arena->spans, cur) {
		ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);

		if (iswithin(span->base, span->size, base, size)) {
			ra_span_free(span, base, size);
			return;
		}
	}

	panic("Freeing to wrong arena (base=%" PRIxn ", size=%" PRIdn ").",
	    base, size);
}

/** @}
 */