source: mainline/kernel/generic/src/lib/ra.c@ 28a5ebd

Last change on this file since 28a5ebd was 5e801dc, checked in by GitHub <noreply@…>, 6 years ago

Indicate and enforce constness of hash table key in certain functions (#158)

The assumption here is that modifying the key in the hash/equal functions is something completely unexpected, and not something you would ever want to do intentionally, so it makes sense to disallow it entirely to get that extra level of checking.

/*
 * Copyright (c) 2011 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_generic
 * @{
 */

/**
 * @file
 * @brief Resource allocator.
 *
 * This is a generic resource allocator, loosely based on the ideas presented
 * in chapter 4 of the following paper and further simplified:
 *
 * Bonwick J., Adams J.: Magazines and Vmem: Extending the Slab Allocator to
 * Many CPUs and Arbitrary Resources, USENIX 2001
 *
 */
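
/*
 * Minimal usage sketch (illustrative only; the span range and the
 * allocation parameters below are made-up values, and ra_init() is
 * assumed to have run already, as it does during kernel startup):
 *
 * @code
 * ra_arena_t *arena = ra_arena_create();
 * if (arena != NULL) {
 *         if (ra_span_add(arena, 0x1000, 0x10000)) {
 *                 uintptr_t addr;
 *                 if (ra_alloc(arena, 256, 16, &addr)) {
 *                         // 256 resource units at addr, aligned to 16
 *                         ra_free(arena, addr, 256);
 *                 }
 *         }
 *         ra_arena_destroy(arena);
 * }
 * @endcode
 */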

#include <assert.h>
#include <lib/ra.h>
#include <typedefs.h>
#include <mm/slab.h>
#include <bitops.h>
#include <panic.h>
#include <adt/list.h>
#include <adt/hash.h>
#include <adt/hash_table.h>
#include <align.h>
#include <macros.h>
#include <synch/spinlock.h>
#include <stdlib.h>

static slab_cache_t *ra_segment_cache;

/** Return the hash of the key stored in the item */
static size_t used_hash(const ht_link_t *item)
{
        ra_segment_t *seg = hash_table_get_inst(item, ra_segment_t, uh_link);
        return hash_mix(seg->base);
}

/** Return the hash of the key */
static size_t used_key_hash(const void *key)
{
        const uintptr_t *base = key;
        return hash_mix(*base);
}

/** Return true if the key is equal to the item's lookup key */
static bool used_key_equal(const void *key, const ht_link_t *item)
{
        const uintptr_t *base = key;
        ra_segment_t *seg = hash_table_get_inst(item, ra_segment_t, uh_link);
        return seg->base == *base;
}

static hash_table_ops_t used_ops = {
        .hash = used_hash,
        .key_hash = used_key_hash,
        .key_equal = used_key_equal
};
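
/*
 * The used hash table is keyed by segment base address: used_hash()
 * hashes the base stored in an inserted item, while used_key_hash() and
 * used_key_equal() operate on a caller-supplied pointer to a base, as
 * in the lookup performed by ra_span_free().
 */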

/** Calculate the segment size. */
static size_t ra_segment_size_get(ra_segment_t *seg)
{
        ra_segment_t *nextseg;

        nextseg = list_get_instance(seg->segment_link.next, ra_segment_t,
            segment_link);
        return nextseg->base - seg->base;
}
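
/*
 * Note that a segment records only its base; its size is implicit in the
 * base of its successor in the span's segment list. For example, two
 * consecutive segments based at 0x1000 and 0x1400 give the first segment
 * a size of 0x400. The sentinel appended by ra_span_create() guarantees
 * that every real segment has such a successor.
 */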

static ra_segment_t *ra_segment_create(uintptr_t base)
{
        ra_segment_t *seg;

        seg = slab_alloc(ra_segment_cache, FRAME_ATOMIC);
        if (!seg)
                return NULL;

        link_initialize(&seg->segment_link);
        link_initialize(&seg->fl_link);

        seg->base = base;
        seg->flags = 0;

        return seg;
}

static void ra_segment_destroy(ra_segment_t *seg)
{
        slab_free(ra_segment_cache, seg);
}

static ra_span_t *ra_span_create(uintptr_t base, size_t size)
{
        ra_span_t *span;
        ra_segment_t *seg, *lastseg;
        unsigned int i;

        span = (ra_span_t *) malloc(sizeof(ra_span_t));
        if (!span)
                return NULL;

        span->max_order = fnzb(size);
        span->base = base;
        span->size = size;

        span->free = (list_t *) malloc((span->max_order + 1) * sizeof(list_t));
        if (!span->free) {
                free(span);
                return NULL;
        }

        /*
         * Create a segment to represent the entire size of the span.
         */
        seg = ra_segment_create(base);
        if (!seg) {
                free(span->free);
                free(span);
                return NULL;
        }
        seg->flags = RA_SEGMENT_FREE;

        /*
         * The last segment will be used as a sentinel at the end of the
         * segment list so that it is possible to calculate the size for
         * all other segments. It will not be placed in any free list or
         * in the used segment hash and adjacent segments will not be
         * coalesced with it.
         */
        lastseg = ra_segment_create(base + size);
        if (!lastseg) {
                ra_segment_destroy(seg);
                free(span->free);
                free(span);
                return NULL;
        }

        link_initialize(&span->span_link);
        list_initialize(&span->segments);

        hash_table_create(&span->used, 0, 0, &used_ops);

        for (i = 0; i <= span->max_order; i++)
                list_initialize(&span->free[i]);

        /* Insert the first segment into the list of segments. */
        list_append(&seg->segment_link, &span->segments);
        /* Insert the last segment into the list of segments. */
        list_append(&lastseg->segment_link, &span->segments);

        /* Insert the first segment into the respective free list. */
        list_append(&seg->fl_link, &span->free[span->max_order]);

        return span;
}
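
/*
 * After a successful ra_span_create(base, size), the span looks like this
 * (sketch):
 *
 *     segments:           [seg: base, FREE] -> [sentinel: base + size]
 *     free[fnzb(size)]:   seg
 *     used hash:          empty
 */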

static void ra_span_destroy(ra_span_t *span)
{
        hash_table_destroy(&span->used);

        list_foreach_safe(span->segments, cur, next) {
                ra_segment_t *seg = list_get_instance(cur, ra_segment_t,
                    segment_link);
                list_remove(&seg->segment_link);
                if (seg->flags & RA_SEGMENT_FREE)
                        list_remove(&seg->fl_link);
                ra_segment_destroy(seg);
        }

        free(span);
}

/** Create an empty arena. */
ra_arena_t *ra_arena_create(void)
{
        ra_arena_t *arena;

        arena = (ra_arena_t *) malloc(sizeof(ra_arena_t));
        if (!arena)
                return NULL;

        irq_spinlock_initialize(&arena->lock, "arena_lock");
        list_initialize(&arena->spans);

        return arena;
}

void ra_arena_destroy(ra_arena_t *arena)
{
        /*
         * No locking necessary as this is the cleanup and all users should
         * have stopped using the arena already.
         */
        list_foreach_safe(arena->spans, cur, next) {
                ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);
                list_remove(&span->span_link);
                ra_span_destroy(span);
        }

        free(arena);
}

/** Add a span to arena. */
bool ra_span_add(ra_arena_t *arena, uintptr_t base, size_t size)
{
        ra_span_t *span;

        span = ra_span_create(base, size);
        if (!span)
                return false;

        /* TODO: check for overlaps */
        irq_spinlock_lock(&arena->lock, true);
        list_append(&span->span_link, &arena->spans);
        irq_spinlock_unlock(&arena->lock, true);
        return true;
}

static bool
ra_span_alloc(ra_span_t *span, size_t size, size_t align, uintptr_t *base)
{
        /*
         * We need to add the maximum of align - 1 to be able to compensate
         * for the worst case unaligned segment.
         */
        size_t needed = size + align - 1;
        size_t order = ispwr2(needed) ? fnzb(needed) : fnzb(needed) + 1;
        ra_segment_t *pred = NULL;
        ra_segment_t *succ = NULL;

        /*
         * Find the free list of the smallest order which can satisfy this
         * request.
         */
        for (; order <= span->max_order; order++) {
                ra_segment_t *seg;
                uintptr_t newbase;

                if (list_empty(&span->free[order]))
                        continue;

                /* Take the first segment from the free list. */
                seg = list_get_instance(list_first(&span->free[order]),
                    ra_segment_t, fl_link);

                assert(seg->flags & RA_SEGMENT_FREE);

                /*
                 * See if we need to allocate new segments for the chopped-off
                 * parts of this segment.
                 */
                if (!IS_ALIGNED(seg->base, align)) {
                        pred = ra_segment_create(seg->base);
                        if (!pred) {
                                /*
                                 * Fail as we are unable to split the segment.
                                 */
                                break;
                        }
                        pred->flags |= RA_SEGMENT_FREE;
                }
                newbase = ALIGN_UP(seg->base, align);
                if (newbase + size != seg->base + ra_segment_size_get(seg)) {
                        assert(newbase + (size - 1) < seg->base +
                            (ra_segment_size_get(seg) - 1));
                        succ = ra_segment_create(newbase + size);
                        if (!succ) {
                                if (pred)
                                        ra_segment_destroy(pred);
                                /*
                                 * Fail as we are unable to split the segment.
                                 */
                                break;
                        }
                        succ->flags |= RA_SEGMENT_FREE;
                }

                /* Put unneeded parts back. */
                if (pred) {
                        size_t pred_order;

                        list_insert_before(&pred->segment_link,
                            &seg->segment_link);
                        pred_order = fnzb(ra_segment_size_get(pred));
                        list_append(&pred->fl_link, &span->free[pred_order]);
                }
                if (succ) {
                        size_t succ_order;

                        list_insert_after(&succ->segment_link,
                            &seg->segment_link);
                        succ_order = fnzb(ra_segment_size_get(succ));
                        list_append(&succ->fl_link, &span->free[succ_order]);
                }

                /* Now remove the found segment from the free list. */
                list_remove(&seg->fl_link);
                seg->base = newbase;
                seg->flags &= ~RA_SEGMENT_FREE;

                /* Hash-in the segment into the used hash. */
                hash_table_insert(&span->used, &seg->uh_link);

                *base = newbase;
                return true;
        }

        return false;
}
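
/*
 * Worked example with made-up numbers: for size = 3 and align = 4,
 * needed = 3 + 4 - 1 = 6, which is not a power of two, so the search
 * starts at order fnzb(6) + 1 = 3, i.e. at free segments of at least
 * 8 units. If the segment found there is based at 5, then
 * ALIGN_UP(5, 4) = 8 and the segment is split into pred [5, 8), the
 * allocation [8, 11) and a free successor starting at 11.
 */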

static void ra_span_free(ra_span_t *span, size_t base, size_t size)
{
        sysarg_t key = base;
        ht_link_t *link;
        ra_segment_t *seg;
        ra_segment_t *pred;
        ra_segment_t *succ;
        size_t order;

        /*
         * Locate the segment in the used hash table.
         */
        link = hash_table_find(&span->used, &key);
        if (!link) {
                panic("Freeing segment which is not known to be used (base=%zx"
                    ", size=%zd).", base, size);
        }
        seg = hash_table_get_inst(link, ra_segment_t, uh_link);

        /*
         * Hash out the segment.
         */
        hash_table_remove_item(&span->used, link);

        assert(!(seg->flags & RA_SEGMENT_FREE));
        assert(seg->base == base);
        assert(ra_segment_size_get(seg) == size);

        /*
         * Check whether the segment can be coalesced with its left neighbor.
         */
        if (list_first(&span->segments) != &seg->segment_link) {
                pred = hash_table_get_inst(seg->segment_link.prev,
                    ra_segment_t, segment_link);

                assert(pred->base < seg->base);

                if (pred->flags & RA_SEGMENT_FREE) {
                        /*
                         * The segment can be coalesced with its predecessor.
                         * Remove the predecessor from the free and segment
                         * lists, rebase the segment and throw the predecessor
                         * away.
                         */
                        list_remove(&pred->fl_link);
                        list_remove(&pred->segment_link);
                        seg->base = pred->base;
                        ra_segment_destroy(pred);
                }
        }

        /*
         * Check whether the segment can be coalesced with its right neighbor.
         */
        succ = hash_table_get_inst(seg->segment_link.next, ra_segment_t,
            segment_link);
        assert(succ->base > seg->base);
        if (succ->flags & RA_SEGMENT_FREE) {
                /*
                 * The segment can be coalesced with its successor.
                 * Remove the successor from the free and segment lists
                 * and throw it away.
                 */
                list_remove(&succ->fl_link);
                list_remove(&succ->segment_link);
                ra_segment_destroy(succ);
        }

        /* Put the segment on the appropriate free list. */
        seg->flags |= RA_SEGMENT_FREE;
        order = fnzb(ra_segment_size_get(seg));
        list_append(&seg->fl_link, &span->free[order]);
}
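
/*
 * Coalescing sketch: freeing the used segment in
 *
 *     [FREE][USED][FREE][sentinel]
 *
 * first merges it with its left neighbor (rebasing the freed segment and
 * destroying the predecessor), then with its right neighbor, leaving a
 * single free segment on the free list of the combined size's order. The
 * sentinel never has RA_SEGMENT_FREE set, so it is never merged.
 */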

/** Allocate resources from arena. */
bool
ra_alloc(ra_arena_t *arena, size_t size, size_t alignment, uintptr_t *base)
{
        bool success = false;

        assert(size >= 1);
        assert(alignment >= 1);
        assert(ispwr2(alignment));

        irq_spinlock_lock(&arena->lock, true);
        list_foreach(arena->spans, span_link, ra_span_t, span) {
                success = ra_span_alloc(span, size, alignment, base);
                if (success)
                        break;
        }
        irq_spinlock_unlock(&arena->lock, true);

        return success;
}

/** Return resources to arena. */
void ra_free(ra_arena_t *arena, uintptr_t base, size_t size)
{
        irq_spinlock_lock(&arena->lock, true);
        list_foreach(arena->spans, span_link, ra_span_t, span) {
                if (iswithin(span->base, span->size, base, size)) {
                        ra_span_free(span, base, size);
                        irq_spinlock_unlock(&arena->lock, true);
                        return;
                }
        }
        irq_spinlock_unlock(&arena->lock, true);

        panic("Freeing to wrong arena (base=%" PRIxPTR ", size=%zd).",
            base, size);
}

void ra_init(void)
{
        ra_segment_cache = slab_cache_create("ra_segment_t",
            sizeof(ra_segment_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
}

/** @}
 */