source: mainline/kernel/generic/src/lib/ra.c@314f4b59

Last change on this file since 314f4b59 was a35b458, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

style: Remove trailing whitespace on _all_ lines, including empty ones, for particular file types.

Command used: tools/srepl '\s\+$' '' -- *.c *.h *.py *.sh *.s *.S *.ag

Currently, whitespace on empty lines is very inconsistent.
There are two basic choices: Either remove the whitespace, or keep empty lines
indented to the level of surrounding code. The former is AFAICT more common,
and also much easier to do automatically.

Alternatively, we could write a script for automatic indentation and use that
instead. However, if such a script exists, it's possible to use the indented
style locally, by having the editor apply the relevant conversions on load/save,
without affecting the remote repository. IMO, it makes more sense to adopt
the simpler rule.

  • Property mode set to 100644
File size: 11.5 KB
/*
 * Copyright (c) 2011 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup generic
 * @{
 */

/**
 * @file
 * @brief Resource allocator.
 *
 * This is a generic resource allocator, loosely based on the ideas presented
 * in chapter 4 of the following paper and further simplified:
 *
 *   Bonwick J., Adams J.: Magazines and Vmem: Extending the Slab Allocator to
 *   Many CPUs and Arbitrary Resources, USENIX 2001
 *
 */
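
/*
 * Example usage (a minimal sketch; the span base, the sizes and the alignment
 * below are illustrative values only, and ra_init() is assumed to have been
 * called earlier during kernel initialization):
 *
 *	ra_arena_t *arena = ra_arena_create();
 *	if (arena != NULL) {
 *		uintptr_t addr;
 *
 *		if (ra_span_add(arena, 0x1000, 0x10000) &&
 *		    ra_alloc(arena, 0x200, 0x100, &addr)) {
 *			... use the resources [addr, addr + 0x200) ...
 *			ra_free(arena, addr, 0x200);
 *		}
 *		ra_arena_destroy(arena);
 *	}
 */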

#include <assert.h>
#include <lib/ra.h>
#include <typedefs.h>
#include <mm/slab.h>
#include <bitops.h>
#include <panic.h>
#include <adt/list.h>
#include <adt/hash.h>
#include <adt/hash_table.h>
#include <align.h>
#include <macros.h>
#include <synch/spinlock.h>

static slab_cache_t *ra_segment_cache;

/** Return the hash of the key stored in the item */
static size_t used_hash(const ht_link_t *item)
{
	ra_segment_t *seg = hash_table_get_inst(item, ra_segment_t, uh_link);
	return hash_mix(seg->base);
}

/** Return the hash of the key */
static size_t used_key_hash(void *key)
{
	uintptr_t *base = (uintptr_t *) key;
	return hash_mix(*base);
}

/** Return true if the key is equal to the item's lookup key */
static bool used_key_equal(void *key, const ht_link_t *item)
{
	uintptr_t *base = (uintptr_t *) key;
	ra_segment_t *seg = hash_table_get_inst(item, ra_segment_t, uh_link);
	return seg->base == *base;
}

static hash_table_ops_t used_ops = {
	.hash = used_hash,
	.key_hash = used_key_hash,
	.key_equal = used_key_equal
};

/** Calculate the segment size.
 *
 * The size of a segment is the distance from its base to the base of the
 * next segment in the span's segment list, which is why every span keeps a
 * sentinel segment at its very end.
 */
static size_t ra_segment_size_get(ra_segment_t *seg)
{
	ra_segment_t *nextseg;

	nextseg = list_get_instance(seg->segment_link.next, ra_segment_t,
	    segment_link);
	return nextseg->base - seg->base;
}

static ra_segment_t *ra_segment_create(uintptr_t base)
{
	ra_segment_t *seg;

	seg = slab_alloc(ra_segment_cache, FRAME_ATOMIC);
	if (!seg)
		return NULL;

	link_initialize(&seg->segment_link);
	link_initialize(&seg->fl_link);

	seg->base = base;
	seg->flags = 0;

	return seg;
}

static void ra_segment_destroy(ra_segment_t *seg)
{
	slab_free(ra_segment_cache, seg);
}

static ra_span_t *ra_span_create(uintptr_t base, size_t size)
{
	ra_span_t *span;
	ra_segment_t *seg, *lastseg;
	unsigned int i;

	span = (ra_span_t *) malloc(sizeof(ra_span_t), FRAME_ATOMIC);
	if (!span)
		return NULL;

	span->max_order = fnzb(size);
	span->base = base;
	span->size = size;

	span->free = (list_t *) malloc((span->max_order + 1) * sizeof(list_t),
	    FRAME_ATOMIC);
	if (!span->free) {
		free(span);
		return NULL;
	}

	/*
	 * Create a segment to represent the entire size of the span.
	 */
	seg = ra_segment_create(base);
	if (!seg) {
		free(span->free);
		free(span);
		return NULL;
	}
	seg->flags = RA_SEGMENT_FREE;

	/*
	 * The last segment will be used as a sentinel at the end of the
	 * segment list so that it is possible to calculate the size for
	 * all other segments. It will not be placed in any free list or
	 * in the used segment hash and adjacent segments will not be
	 * coalesced with it.
	 */
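	/*
	 * For instance (illustrative numbers), a span with base 0x1000 and
	 * size 0x1000 initially consists of a free segment based at 0x1000
	 * followed by the sentinel based at 0x2000, so ra_segment_size_get()
	 * on the first segment yields 0x2000 - 0x1000 = 0x1000.
	 */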
	lastseg = ra_segment_create(base + size);
	if (!lastseg) {
		ra_segment_destroy(seg);
		free(span->free);
		free(span);
		return NULL;
	}

	link_initialize(&span->span_link);
	list_initialize(&span->segments);

	hash_table_create(&span->used, 0, 0, &used_ops);

	for (i = 0; i <= span->max_order; i++)
		list_initialize(&span->free[i]);

	/* Insert the first segment into the list of segments. */
	list_append(&seg->segment_link, &span->segments);
	/* Insert the last segment into the list of segments. */
	list_append(&lastseg->segment_link, &span->segments);

	/* Insert the first segment into the respective free list. */
	list_append(&seg->fl_link, &span->free[span->max_order]);

	return span;
}

static void ra_span_destroy(ra_span_t *span)
{
	hash_table_destroy(&span->used);

	list_foreach_safe(span->segments, cur, next) {
		ra_segment_t *seg = list_get_instance(cur, ra_segment_t,
		    segment_link);
		list_remove(&seg->segment_link);
		if (seg->flags & RA_SEGMENT_FREE)
			list_remove(&seg->fl_link);
		ra_segment_destroy(seg);
	}

	free(span);
}

/** Create an empty arena. */
ra_arena_t *ra_arena_create(void)
{
	ra_arena_t *arena;

	arena = (ra_arena_t *) malloc(sizeof(ra_arena_t), FRAME_ATOMIC);
	if (!arena)
		return NULL;

	irq_spinlock_initialize(&arena->lock, "arena_lock");
	list_initialize(&arena->spans);

	return arena;
}

void ra_arena_destroy(ra_arena_t *arena)
{
	/*
	 * No locking necessary as this is the cleanup and all users should have
	 * stopped using the arena already.
	 */
	list_foreach_safe(arena->spans, cur, next) {
		ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);
		list_remove(&span->span_link);
		ra_span_destroy(span);
	}

	free(arena);
}

/** Add a span to arena. */
bool ra_span_add(ra_arena_t *arena, uintptr_t base, size_t size)
{
	ra_span_t *span;

	span = ra_span_create(base, size);
	if (!span)
		return false;

	/* TODO: check for overlaps */
	irq_spinlock_lock(&arena->lock, true);
	list_append(&span->span_link, &arena->spans);
	irq_spinlock_unlock(&arena->lock, true);
	return true;
}

static bool
ra_span_alloc(ra_span_t *span, size_t size, size_t align, uintptr_t *base)
{
	/*
	 * We need to add the maximum of align - 1 to be able to compensate for
	 * the worst case unaligned segment.
	 */
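	/*
	 * For example (illustrative numbers), a request for size 0x100 with
	 * align 0x80 gives needed = 0x17f, so the search below starts at
	 * order 9; any segment on that free list is at least 0x200 bytes
	 * long and can therefore fit an aligned 0x100-byte block even in
	 * the worst case of losing align - 1 = 0x7f bytes to alignment.
	 */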
	size_t needed = size + align - 1;
	size_t order = ispwr2(needed) ? fnzb(needed) : fnzb(needed) + 1;
	ra_segment_t *pred = NULL;
	ra_segment_t *succ = NULL;

	/*
	 * Find the free list of the smallest order which can satisfy this
	 * request.
	 */
	for (; order <= span->max_order; order++) {
		ra_segment_t *seg;
		uintptr_t newbase;

		if (list_empty(&span->free[order]))
			continue;

		/* Take the first segment from the free list. */
		seg = list_get_instance(list_first(&span->free[order]),
		    ra_segment_t, fl_link);

		assert(seg->flags & RA_SEGMENT_FREE);

		/*
		 * See if we need to allocate new segments for the chopped-off
		 * parts of this segment.
		 */
		if (!IS_ALIGNED(seg->base, align)) {
			pred = ra_segment_create(seg->base);
			if (!pred) {
				/*
				 * Fail as we are unable to split the segment.
				 */
				break;
			}
			pred->flags |= RA_SEGMENT_FREE;
		}
		newbase = ALIGN_UP(seg->base, align);
		if (newbase + size != seg->base + ra_segment_size_get(seg)) {
			assert(newbase + (size - 1) < seg->base +
			    (ra_segment_size_get(seg) - 1));
			succ = ra_segment_create(newbase + size);
			if (!succ) {
				if (pred)
					ra_segment_destroy(pred);
				/*
				 * Fail as we are unable to split the segment.
				 */
				break;
			}
			succ->flags |= RA_SEGMENT_FREE;
		}

		/* Put unneeded parts back. */
		if (pred) {
			size_t pred_order;

			list_insert_before(&pred->segment_link,
			    &seg->segment_link);
			pred_order = fnzb(ra_segment_size_get(pred));
			list_append(&pred->fl_link, &span->free[pred_order]);
		}
		if (succ) {
			size_t succ_order;

			list_insert_after(&succ->segment_link,
			    &seg->segment_link);
			succ_order = fnzb(ra_segment_size_get(succ));
			list_append(&succ->fl_link, &span->free[succ_order]);
		}

		/* Now remove the found segment from the free list. */
		list_remove(&seg->fl_link);
		seg->base = newbase;
		seg->flags &= ~RA_SEGMENT_FREE;

		/* Hash-in the segment into the used hash. */
		hash_table_insert(&span->used, &seg->uh_link);

		*base = newbase;
		return true;
	}

	return false;
}

static void ra_span_free(ra_span_t *span, size_t base, size_t size)
{
	sysarg_t key = base;
	ht_link_t *link;
	ra_segment_t *seg;
	ra_segment_t *pred;
	ra_segment_t *succ;
	size_t order;

	/*
	 * Locate the segment in the used hash table.
	 */
	link = hash_table_find(&span->used, &key);
	if (!link) {
		panic("Freeing segment which is not known to be used (base=%zx"
		    ", size=%zd).", base, size);
	}
	seg = hash_table_get_inst(link, ra_segment_t, uh_link);

	/*
	 * Hash out the segment.
	 */
	hash_table_remove_item(&span->used, link);

	assert(!(seg->flags & RA_SEGMENT_FREE));
	assert(seg->base == base);
	assert(ra_segment_size_get(seg) == size);

	/*
	 * Check whether the segment can be coalesced with its left neighbor.
	 */
	if (list_first(&span->segments) != &seg->segment_link) {
		pred = list_get_instance(seg->segment_link.prev,
		    ra_segment_t, segment_link);

		assert(pred->base < seg->base);

		if (pred->flags & RA_SEGMENT_FREE) {
			/*
			 * The segment can be coalesced with its predecessor.
			 * Remove the predecessor from the free and segment
			 * lists, rebase the segment and throw the predecessor
			 * away.
			 */
			list_remove(&pred->fl_link);
			list_remove(&pred->segment_link);
			seg->base = pred->base;
			ra_segment_destroy(pred);
		}
	}

	/*
	 * Check whether the segment can be coalesced with its right neighbor.
	 */
	succ = list_get_instance(seg->segment_link.next, ra_segment_t,
	    segment_link);
	assert(succ->base > seg->base);
	if (succ->flags & RA_SEGMENT_FREE) {
		/*
		 * The segment can be coalesced with its successor.
		 * Remove the successor from the free and segment lists
		 * and throw it away.
		 */
		list_remove(&succ->fl_link);
		list_remove(&succ->segment_link);
		ra_segment_destroy(succ);
	}

	/* Put the segment on the appropriate free list. */
	seg->flags |= RA_SEGMENT_FREE;
	order = fnzb(ra_segment_size_get(seg));
	list_append(&seg->fl_link, &span->free[order]);
}

/** Allocate resources from arena. */
bool
ra_alloc(ra_arena_t *arena, size_t size, size_t alignment, uintptr_t *base)
{
	bool success = false;

	assert(size >= 1);
	assert(alignment >= 1);
	assert(ispwr2(alignment));

	irq_spinlock_lock(&arena->lock, true);
	list_foreach(arena->spans, span_link, ra_span_t, span) {
		success = ra_span_alloc(span, size, alignment, base);
		if (success)
			break;
	}
	irq_spinlock_unlock(&arena->lock, true);

	return success;
}

/** Return resources to arena. */
void ra_free(ra_arena_t *arena, uintptr_t base, size_t size)
{
	irq_spinlock_lock(&arena->lock, true);
	list_foreach(arena->spans, span_link, ra_span_t, span) {
		if (iswithin(span->base, span->size, base, size)) {
			ra_span_free(span, base, size);
			irq_spinlock_unlock(&arena->lock, true);
			return;
		}
	}
	irq_spinlock_unlock(&arena->lock, true);

	panic("Freeing to wrong arena (base=%" PRIxPTR ", size=%zd).",
	    base, size);
}

void ra_init(void)
{
	ra_segment_cache = slab_cache_create("ra_segment_t",
	    sizeof(ra_segment_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
}

/** @}
 */