source: mainline/kernel/generic/src/lib/ra.c@ 2175178

/*
 * Copyright (c) 2011 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_generic
 * @{
 */

/**
 * @file
 * @brief Resource allocator.
 *
 * This is a generic resource allocator, loosely based on the ideas presented
 * in chapter 4 of the following paper and further simplified:
 *
 * Bonwick J., Adams J.: Magazines and Vmem: Extending the Slab Allocator to
 * Many CPUs and Arbitrary Resources, USENIX 2001
 *
 */
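/*
 * Minimal usage sketch (illustrative only; it assumes ra_init() has already
 * run during kernel initialization, and the span base and size values below
 * are hypothetical):
 *
 *	ra_arena_t *arena = ra_arena_create();
 *	if (arena && ra_span_add(arena, 0x3000, 0x1000)) {
 *		uintptr_t base;
 *		if (ra_alloc(arena, 8, 8, &base)) {
 *			...	use resources [base, base + 8), then:
 *			ra_free(arena, base, 8);
 *		}
 *	}
 */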

#include <assert.h>
#include <lib/ra.h>
#include <typedefs.h>
#include <mm/slab.h>
#include <bitops.h>
#include <panic.h>
#include <adt/list.h>
#include <adt/hash.h>
#include <adt/hash_table.h>
#include <align.h>
#include <macros.h>
#include <synch/spinlock.h>

static slab_cache_t *ra_segment_cache;

/** Return the hash of the key stored in the item */
static size_t used_hash(const ht_link_t *item)
{
	ra_segment_t *seg = hash_table_get_inst(item, ra_segment_t, uh_link);
	return hash_mix(seg->base);
}

/** Return the hash of the key */
static size_t used_key_hash(void *key)
{
	uintptr_t *base = (uintptr_t *) key;
	return hash_mix(*base);
}

/** Return true if the key is equal to the item's lookup key */
static bool used_key_equal(void *key, const ht_link_t *item)
{
	uintptr_t *base = (uintptr_t *) key;
	ra_segment_t *seg = hash_table_get_inst(item, ra_segment_t, uh_link);
	return seg->base == *base;
}

static hash_table_ops_t used_ops = {
	.hash = used_hash,
	.key_hash = used_key_hash,
	.key_equal = used_key_equal
};

/** Calculate the segment size. */
static size_t ra_segment_size_get(ra_segment_t *seg)
{
	ra_segment_t *nextseg;

	nextseg = list_get_instance(seg->segment_link.next, ra_segment_t,
	    segment_link);
	return nextseg->base - seg->base;
}

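/** Allocate and initialize a new segment structure anchored at @a base. */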
static ra_segment_t *ra_segment_create(uintptr_t base)
{
	ra_segment_t *seg;

	seg = slab_alloc(ra_segment_cache, FRAME_ATOMIC);
	if (!seg)
		return NULL;

	link_initialize(&seg->segment_link);
	link_initialize(&seg->fl_link);

	seg->base = base;
	seg->flags = 0;

	return seg;
}

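/** Return a segment structure to the segment slab cache. */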
static void ra_segment_destroy(ra_segment_t *seg)
{
	slab_free(ra_segment_cache, seg);
}

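/** Create a span covering the resources [base, base + size). */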
static ra_span_t *ra_span_create(uintptr_t base, size_t size)
{
	ra_span_t *span;
	ra_segment_t *seg, *lastseg;
	unsigned int i;

	span = (ra_span_t *) malloc(sizeof(ra_span_t));
	if (!span)
		return NULL;

	span->max_order = fnzb(size);
	span->base = base;
	span->size = size;

	span->free = (list_t *) malloc((span->max_order + 1) * sizeof(list_t));
	if (!span->free) {
		free(span);
		return NULL;
	}

	/*
	 * Create a segment to represent the entire size of the span.
	 */
	seg = ra_segment_create(base);
	if (!seg) {
		free(span->free);
		free(span);
		return NULL;
	}
	seg->flags = RA_SEGMENT_FREE;

	/*
	 * The last segment will be used as a sentinel at the end of the
	 * segment list so that it is possible to calculate the size for
	 * all other segments. It will not be placed in any free list or
	 * in the used segment hash and adjacent segments will not be
	 * coalesced with it.
	 */
	lastseg = ra_segment_create(base + size);
	if (!lastseg) {
		ra_segment_destroy(seg);
		free(span->free);
		free(span);
		return NULL;
	}

	link_initialize(&span->span_link);
	list_initialize(&span->segments);

	hash_table_create(&span->used, 0, 0, &used_ops);

	for (i = 0; i <= span->max_order; i++)
		list_initialize(&span->free[i]);

	/* Insert the first segment into the list of segments. */
	list_append(&seg->segment_link, &span->segments);
	/* Insert the last segment into the list of segments. */
	list_append(&lastseg->segment_link, &span->segments);

	/* Insert the first segment into the respective free list. */
	list_append(&seg->fl_link, &span->free[span->max_order]);

	return span;
}

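/** Destroy the span along with all its segments. */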
static void ra_span_destroy(ra_span_t *span)
{
	hash_table_destroy(&span->used);

	list_foreach_safe(span->segments, cur, next) {
		ra_segment_t *seg = list_get_instance(cur, ra_segment_t,
		    segment_link);
		list_remove(&seg->segment_link);
		if (seg->flags & RA_SEGMENT_FREE)
			list_remove(&seg->fl_link);
		ra_segment_destroy(seg);
	}

	free(span);
}

/** Create an empty arena. */
ra_arena_t *ra_arena_create(void)
{
	ra_arena_t *arena;

	arena = (ra_arena_t *) malloc(sizeof(ra_arena_t));
	if (!arena)
		return NULL;

	irq_spinlock_initialize(&arena->lock, "arena_lock");
	list_initialize(&arena->spans);

	return arena;
}

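/** Destroy the arena and all spans added to it. */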
void ra_arena_destroy(ra_arena_t *arena)
{
	/*
	 * No locking necessary as this is the cleanup and all users should have
	 * stopped using the arena already.
	 */
	list_foreach_safe(arena->spans, cur, next) {
		ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);
		list_remove(&span->span_link);
		ra_span_destroy(span);
	}

	free(arena);
}

/** Add a span to arena. */
bool ra_span_add(ra_arena_t *arena, uintptr_t base, size_t size)
{
	ra_span_t *span;

	span = ra_span_create(base, size);
	if (!span)
		return false;

	/* TODO: check for overlaps */
	irq_spinlock_lock(&arena->lock, true);
	list_append(&span->span_link, &arena->spans);
	irq_spinlock_unlock(&arena->lock, true);
	return true;
}

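/**
 * Try to allocate @a size resources aligned to @a align from @a span.
 * On success, store the base of the allocated block in @a base.
 */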
static bool
ra_span_alloc(ra_span_t *span, size_t size, size_t align, uintptr_t *base)
{
	/*
	 * Add align - 1 to the requested size so that even the worst-case
	 * unaligned free segment still has room for an aligned block of
	 * the requested size.
	 */
	size_t needed = size + align - 1;
	size_t order = ispwr2(needed) ? fnzb(needed) : fnzb(needed) + 1;
	ra_segment_t *pred = NULL;
	ra_segment_t *succ = NULL;

	/*
	 * Find the free list of the smallest order which can satisfy this
	 * request.
	 */
	for (; order <= span->max_order; order++) {
		ra_segment_t *seg;
		uintptr_t newbase;

		if (list_empty(&span->free[order]))
			continue;

		/* Take the first segment from the free list. */
		seg = list_get_instance(list_first(&span->free[order]),
		    ra_segment_t, fl_link);

		assert(seg->flags & RA_SEGMENT_FREE);

		/*
		 * See if we need to allocate new segments for the chopped-off
		 * parts of this segment.
		 */
		if (!IS_ALIGNED(seg->base, align)) {
			pred = ra_segment_create(seg->base);
			if (!pred) {
				/*
				 * Fail as we are unable to split the segment.
				 */
				break;
			}
			pred->flags |= RA_SEGMENT_FREE;
		}
		newbase = ALIGN_UP(seg->base, align);
		if (newbase + size != seg->base + ra_segment_size_get(seg)) {
			assert(newbase + (size - 1) < seg->base +
			    (ra_segment_size_get(seg) - 1));
			succ = ra_segment_create(newbase + size);
			if (!succ) {
				if (pred)
					ra_segment_destroy(pred);
				/*
				 * Fail as we are unable to split the segment.
				 */
				break;
			}
			succ->flags |= RA_SEGMENT_FREE;
		}

		/* Put unneeded parts back. */
		if (pred) {
			size_t pred_order;

			list_insert_before(&pred->segment_link,
			    &seg->segment_link);
			pred_order = fnzb(ra_segment_size_get(pred));
			list_append(&pred->fl_link, &span->free[pred_order]);
		}
		if (succ) {
			size_t succ_order;

			list_insert_after(&succ->segment_link,
			    &seg->segment_link);
			succ_order = fnzb(ra_segment_size_get(succ));
			list_append(&succ->fl_link, &span->free[succ_order]);
		}

		/* Now remove the found segment from the free list. */
		list_remove(&seg->fl_link);
		seg->base = newbase;
		seg->flags &= ~RA_SEGMENT_FREE;

		/* Hash-in the segment into the used hash. */
		hash_table_insert(&span->used, &seg->uh_link);

		*base = newbase;
		return true;
	}

	return false;
}

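/**
 * Return the block of @a size resources starting at @a base back to @a span,
 * coalescing it with adjacent free segments where possible.
 */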
static void ra_span_free(ra_span_t *span, size_t base, size_t size)
{
	sysarg_t key = base;
	ht_link_t *link;
	ra_segment_t *seg;
	ra_segment_t *pred;
	ra_segment_t *succ;
	size_t order;

	/*
	 * Locate the segment in the used hash table.
	 */
	link = hash_table_find(&span->used, &key);
	if (!link) {
		panic("Freeing segment which is not known to be used (base=%zx"
		    ", size=%zd).", base, size);
	}
	seg = hash_table_get_inst(link, ra_segment_t, uh_link);

	/*
	 * Hash out the segment.
	 */
	hash_table_remove_item(&span->used, link);

	assert(!(seg->flags & RA_SEGMENT_FREE));
	assert(seg->base == base);
	assert(ra_segment_size_get(seg) == size);

	/*
	 * Check whether the segment can be coalesced with its left neighbor.
	 */
	if (list_first(&span->segments) != &seg->segment_link) {
		pred = hash_table_get_inst(seg->segment_link.prev,
		    ra_segment_t, segment_link);

		assert(pred->base < seg->base);

		if (pred->flags & RA_SEGMENT_FREE) {
			/*
			 * The segment can be coalesced with its predecessor.
			 * Remove the predecessor from the free and segment
			 * lists, rebase the segment and throw the predecessor
			 * away.
			 */
			list_remove(&pred->fl_link);
			list_remove(&pred->segment_link);
			seg->base = pred->base;
			ra_segment_destroy(pred);
		}
	}

	/*
	 * Check whether the segment can be coalesced with its right neighbor.
	 */
	succ = hash_table_get_inst(seg->segment_link.next, ra_segment_t,
	    segment_link);
	assert(succ->base > seg->base);
	if (succ->flags & RA_SEGMENT_FREE) {
		/*
		 * The segment can be coalesced with its successor.
		 * Remove the successor from the free and segment lists
		 * and throw it away.
		 */
		list_remove(&succ->fl_link);
		list_remove(&succ->segment_link);
		ra_segment_destroy(succ);
	}

	/* Put the segment on the appropriate free list. */
	seg->flags |= RA_SEGMENT_FREE;
	order = fnzb(ra_segment_size_get(seg));
	list_append(&seg->fl_link, &span->free[order]);
}

/** Allocate resources from arena. */
bool
ra_alloc(ra_arena_t *arena, size_t size, size_t alignment, uintptr_t *base)
{
	bool success = false;

	assert(size >= 1);
	assert(alignment >= 1);
	assert(ispwr2(alignment));

	irq_spinlock_lock(&arena->lock, true);
	list_foreach(arena->spans, span_link, ra_span_t, span) {
		success = ra_span_alloc(span, size, alignment, base);
		if (success)
			break;
	}
	irq_spinlock_unlock(&arena->lock, true);

	return success;
}

/** Return resources to arena. */
void ra_free(ra_arena_t *arena, uintptr_t base, size_t size)
{
	irq_spinlock_lock(&arena->lock, true);
	list_foreach(arena->spans, span_link, ra_span_t, span) {
		if (iswithin(span->base, span->size, base, size)) {
			ra_span_free(span, base, size);
			irq_spinlock_unlock(&arena->lock, true);
			return;
		}
	}
	irq_spinlock_unlock(&arena->lock, true);

	panic("Freeing to wrong arena (base=%" PRIxPTR ", size=%zd).",
	    base, size);
}

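/** Initialize the resource allocator framework (create the segment slab cache). */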
void ra_init(void)
{
	ra_segment_cache = slab_cache_create("ra_segment_t",
	    sizeof(ra_segment_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
}

/** @}
 */