source: mainline/kernel/generic/src/lib/ra.c@ 29e7cc7

Last change on this file since 29e7cc7 was 0db0df2, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 3 months ago

Hash table improvements

Implement hash_table_foreach macro, analogous to list_foreach.

Remove superfluous argument to hash_table_find_next().
(If the user needs to recheck the part of the list already
checked by hash_table_find(), they can just rerun that function.)

Add hash argument to hash_table_ops_t::key_equal.
The big change here is that users with big keys can store the hash
value alongside the key in their entries and, for the low cost of
sizeof(size_t) bytes per entry, eliminate a bunch of expensive key
comparisons (see the sketch below).

Also added a hash function for strings and arbitrary data.
Found this one by asking ChatGPT, because the latency of accesses
to my book collection is currently a couple of hours.

+ Some drive-by unused #include removal.
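
For illustration, a minimal sketch of how a user with an expensive-to-compare key could exploit the new hash argument. The entry layout and the helper names (example_*) are hypothetical; only the shape of the hash_table_ops_t callbacks follows the used_ops example in ra.c below.

typedef struct {
	ht_link_t link;
	size_t hash;       /* hash of name, computed once when the entry is built */
	const char *name;  /* the (potentially long) key itself */
} example_entry_t;

static size_t example_hash(const ht_link_t *item)
{
	/* Return the cached hash instead of rehashing the whole key. */
	return hash_table_get_inst(item, example_entry_t, link)->hash;
}

static size_t example_key_hash(const void *key)
{
	return example_string_hash(key);  /* hypothetical string hash helper */
}

static bool example_key_equal(const void *key, size_t hash, const ht_link_t *item)
{
	example_entry_t *e = hash_table_get_inst(item, example_entry_t, link);

	/*
	 * Compare the cheap cached hashes first; fall back to the expensive
	 * full key comparison only when the hashes match.
	 */
	return e->hash == hash && str_cmp(e->name, key) == 0;
}

static const hash_table_ops_t example_ops = {
	.hash = example_hash,
	.key_hash = example_key_hash,
	.key_equal = example_key_equal
};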

/*
 * Copyright (c) 2011 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_generic
 * @{
 */

/**
 * @file
 * @brief Resource allocator.
 *
 * This is a generic resource allocator, loosely based on the ideas presented
 * in chapter 4 of the following paper and further simplified:
 *
 * Bonwick J., Adams J.: Magazines and Vmem: Extending the Slab Allocator to
 * Many CPUs and Arbitrary Resources, USENIX 2001
 *
 */
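
/*
 * Typical usage, as an illustrative sketch only (the base, size and alignment
 * values below are made up; ra_init() is assumed to have already run during
 * kernel initialization):
 *
 *	ra_arena_t *arena = ra_arena_create();
 *	if (arena && ra_span_add(arena, 0x1000, 0x10000)) {
 *		uintptr_t addr;
 *		if (ra_alloc(arena, 0x380, 0x40, &addr)) {
 *			// use the resources [addr, addr + 0x380), then:
 *			ra_free(arena, addr, 0x380);
 *		}
 *	}
 */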

#include <assert.h>
#include <lib/ra.h>
#include <typedefs.h>
#include <mm/slab.h>
#include <bitops.h>
#include <panic.h>
#include <adt/list.h>
#include <adt/hash.h>
#include <adt/hash_table.h>
#include <align.h>
#include <macros.h>
#include <synch/spinlock.h>
#include <stdlib.h>

static slab_cache_t *ra_segment_cache;

/** Return the hash of the key stored in the item */
static size_t used_hash(const ht_link_t *item)
{
	ra_segment_t *seg = hash_table_get_inst(item, ra_segment_t, uh_link);
	return hash_mix(seg->base);
}

/** Return the hash of the key */
static size_t used_key_hash(const void *key)
{
	const uintptr_t *base = key;
	return hash_mix(*base);
}

/** Return true if the key is equal to the item's lookup key */
static bool used_key_equal(const void *key, size_t, const ht_link_t *item)
{
	const uintptr_t *base = key;
	ra_segment_t *seg = hash_table_get_inst(item, ra_segment_t, uh_link);
	return seg->base == *base;
}

static const hash_table_ops_t used_ops = {
	.hash = used_hash,
	.key_hash = used_key_hash,
	.key_equal = used_key_equal
};

/** Calculate the segment size.
 *
 * The size of a segment is the distance from its base to the base of the
 * next segment in the span's segment list. This is well-defined for every
 * real segment because each span's list is terminated by a sentinel segment
 * (see ra_span_create()).
 */
static size_t ra_segment_size_get(ra_segment_t *seg)
{
	ra_segment_t *nextseg;

	nextseg = list_get_instance(seg->segment_link.next, ra_segment_t,
	    segment_link);
	return nextseg->base - seg->base;
}

static ra_segment_t *ra_segment_create(uintptr_t base)
{
	ra_segment_t *seg;

	seg = slab_alloc(ra_segment_cache, FRAME_ATOMIC);
	if (!seg)
		return NULL;

	link_initialize(&seg->segment_link);
	link_initialize(&seg->fl_link);

	seg->base = base;
	seg->flags = 0;

	return seg;
}

static void ra_segment_destroy(ra_segment_t *seg)
{
	slab_free(ra_segment_cache, seg);
}

static ra_span_t *ra_span_create(uintptr_t base, size_t size)
{
	ra_span_t *span;
	ra_segment_t *seg, *lastseg;
	unsigned int i;

	span = (ra_span_t *) malloc(sizeof(ra_span_t));
	if (!span)
		return NULL;

	span->max_order = fnzb(size);
	span->base = base;
	span->size = size;

	span->free = (list_t *) malloc((span->max_order + 1) * sizeof(list_t));
	if (!span->free) {
		free(span);
		return NULL;
	}

	/*
	 * Create a segment to represent the entire size of the span.
	 */
	seg = ra_segment_create(base);
	if (!seg) {
		free(span->free);
		free(span);
		return NULL;
	}
	seg->flags = RA_SEGMENT_FREE;

	/*
	 * The last segment will be used as a sentinel at the end of the
	 * segment list so that it is possible to calculate the size for
	 * all other segments. It will not be placed in any free list or
	 * in the used segment hash and adjacent segments will not be
	 * coalesced with it.
	 */
	lastseg = ra_segment_create(base + size);
	if (!lastseg) {
		ra_segment_destroy(seg);
		free(span->free);
		free(span);
		return NULL;
	}

	link_initialize(&span->span_link);
	list_initialize(&span->segments);

	hash_table_create(&span->used, 0, 0, &used_ops);

	for (i = 0; i <= span->max_order; i++)
		list_initialize(&span->free[i]);

	/* Insert the first segment into the list of segments. */
	list_append(&seg->segment_link, &span->segments);
	/* Insert the last segment into the list of segments. */
	list_append(&lastseg->segment_link, &span->segments);
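	/*
	 * At this point the segment list looks like this (for an assumed span
	 * with base = 0x1000 and size = 0x1000):
	 *
	 *	[0x1000, FREE] -> [0x2000, sentinel]
	 *
	 * so ra_segment_size_get() on the first segment yields 0x1000.
	 */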

	/* Insert the first segment into the respective free list. */
	list_append(&seg->fl_link, &span->free[span->max_order]);

	return span;
}

static void ra_span_destroy(ra_span_t *span)
{
	hash_table_destroy(&span->used);

	list_foreach_safe(span->segments, cur, next) {
		ra_segment_t *seg = list_get_instance(cur, ra_segment_t,
		    segment_link);
		list_remove(&seg->segment_link);
		if (seg->flags & RA_SEGMENT_FREE)
			list_remove(&seg->fl_link);
		ra_segment_destroy(seg);
	}

	free(span);
}

/** Create an empty arena. */
ra_arena_t *ra_arena_create(void)
{
	ra_arena_t *arena;

	arena = (ra_arena_t *) malloc(sizeof(ra_arena_t));
	if (!arena)
		return NULL;

	irq_spinlock_initialize(&arena->lock, "arena_lock");
	list_initialize(&arena->spans);

	return arena;
}

void ra_arena_destroy(ra_arena_t *arena)
{
	/*
	 * No locking necessary as this is the cleanup and all users should have
	 * stopped using the arena already.
	 */
	list_foreach_safe(arena->spans, cur, next) {
		ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);
		list_remove(&span->span_link);
		ra_span_destroy(span);
	}

	free(arena);
}

/** Add a span to arena. */
bool ra_span_add(ra_arena_t *arena, uintptr_t base, size_t size)
{
	ra_span_t *span;

	span = ra_span_create(base, size);
	if (!span)
		return false;

	/* TODO: check for overlaps */
	irq_spinlock_lock(&arena->lock, true);
	list_append(&span->span_link, &arena->spans);
	irq_spinlock_unlock(&arena->lock, true);
	return true;
}

static bool
ra_span_alloc(ra_span_t *span, size_t size, size_t align, uintptr_t *base)
{
	/*
	 * We request align - 1 extra bytes so that we can compensate for the
	 * worst-case misalignment of a free segment's base.
	 */
	size_t needed = size + align - 1;
	size_t order = ispwr2(needed) ? fnzb(needed) : fnzb(needed) + 1;
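	/*
	 * Worked example with assumed values: for size = 0x3000 and
	 * align = 0x1000, needed = 0x3fff, which is not a power of two, so
	 * order = fnzb(0x3fff) + 1 = 14. The search below therefore starts
	 * with free lists holding segments of at least 2^14 bytes, which is
	 * enough to carve out an aligned 0x3000 block even in the worst case.
	 */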
	ra_segment_t *pred = NULL;
	ra_segment_t *succ = NULL;

	/*
	 * Find the free list of the smallest order which can satisfy this
	 * request.
	 */
	for (; order <= span->max_order; order++) {
		ra_segment_t *seg;
		uintptr_t newbase;

		if (list_empty(&span->free[order]))
			continue;

		/* Take the first segment from the free list. */
		seg = list_get_instance(list_first(&span->free[order]),
		    ra_segment_t, fl_link);

		assert(seg->flags & RA_SEGMENT_FREE);

		/*
		 * See if we need to allocate new segments for the chopped-off
		 * parts of this segment.
		 */
		if (!IS_ALIGNED(seg->base, align)) {
			pred = ra_segment_create(seg->base);
			if (!pred) {
				/*
				 * Fail as we are unable to split the segment.
				 */
				break;
			}
			pred->flags |= RA_SEGMENT_FREE;
		}
		newbase = ALIGN_UP(seg->base, align);
		if (newbase + size != seg->base + ra_segment_size_get(seg)) {
			assert(newbase + (size - 1) < seg->base +
			    (ra_segment_size_get(seg) - 1));
			succ = ra_segment_create(newbase + size);
			if (!succ) {
				if (pred)
					ra_segment_destroy(pred);
				/*
				 * Fail as we are unable to split the segment.
				 */
				break;
			}
			succ->flags |= RA_SEGMENT_FREE;
		}

		/* Put unneeded parts back. */
		if (pred) {
			size_t pred_order;

			list_insert_before(&pred->segment_link,
			    &seg->segment_link);
			pred_order = fnzb(ra_segment_size_get(pred));
			list_append(&pred->fl_link, &span->free[pred_order]);
		}
		if (succ) {
			size_t succ_order;

			list_insert_after(&succ->segment_link,
			    &seg->segment_link);
			succ_order = fnzb(ra_segment_size_get(succ));
			list_append(&succ->fl_link, &span->free[succ_order]);
		}

		/* Now remove the found segment from the free list. */
		list_remove(&seg->fl_link);
		seg->base = newbase;
		seg->flags &= ~RA_SEGMENT_FREE;

		/* Hash-in the segment into the used hash. */
		hash_table_insert(&span->used, &seg->uh_link);

		*base = newbase;
		return true;
	}

	return false;
}

static void ra_span_free(ra_span_t *span, size_t base, size_t size)
{
	sysarg_t key = base;
	ht_link_t *link;
	ra_segment_t *seg;
	ra_segment_t *pred;
	ra_segment_t *succ;
	size_t order;

	/*
	 * Locate the segment in the used hash table.
	 */
	link = hash_table_find(&span->used, &key);
	if (!link) {
		panic("Freeing segment which is not known to be used (base=%zx"
		    ", size=%zd).", base, size);
	}
	seg = hash_table_get_inst(link, ra_segment_t, uh_link);

	/*
	 * Hash out the segment.
	 */
	hash_table_remove_item(&span->used, link);

	assert(!(seg->flags & RA_SEGMENT_FREE));
	assert(seg->base == base);
	assert(ra_segment_size_get(seg) == size);

	/*
	 * Check whether the segment can be coalesced with its left neighbor.
	 */
	if (list_first(&span->segments) != &seg->segment_link) {
		pred = hash_table_get_inst(seg->segment_link.prev,
		    ra_segment_t, segment_link);

		assert(pred->base < seg->base);

		if (pred->flags & RA_SEGMENT_FREE) {
			/*
			 * The segment can be coalesced with its predecessor.
			 * Remove the predecessor from the free and segment
			 * lists, rebase the segment and throw the predecessor
			 * away.
			 */
			list_remove(&pred->fl_link);
			list_remove(&pred->segment_link);
			seg->base = pred->base;
			ra_segment_destroy(pred);
		}
	}

	/*
	 * Check whether the segment can be coalesced with its right neighbor.
	 */
	succ = hash_table_get_inst(seg->segment_link.next, ra_segment_t,
	    segment_link);
	assert(succ->base > seg->base);
	if (succ->flags & RA_SEGMENT_FREE) {
		/*
		 * The segment can be coalesced with its successor.
		 * Remove the successor from the free and segment lists
		 * and throw it away.
		 */
		list_remove(&succ->fl_link);
		list_remove(&succ->segment_link);
		ra_segment_destroy(succ);
	}

	/* Put the segment on the appropriate free list. */
	seg->flags |= RA_SEGMENT_FREE;
	order = fnzb(ra_segment_size_get(seg));
	list_append(&seg->fl_link, &span->free[order]);
}

/** Allocate resources from arena. */
bool
ra_alloc(ra_arena_t *arena, size_t size, size_t alignment, uintptr_t *base)
{
	bool success = false;

	assert(size >= 1);
	assert(alignment >= 1);
	assert(ispwr2(alignment));

	irq_spinlock_lock(&arena->lock, true);
	list_foreach(arena->spans, span_link, ra_span_t, span) {
		success = ra_span_alloc(span, size, alignment, base);
		if (success)
			break;
	}
	irq_spinlock_unlock(&arena->lock, true);

	return success;
}

/* Return resources to arena. */
void ra_free(ra_arena_t *arena, uintptr_t base, size_t size)
{
	irq_spinlock_lock(&arena->lock, true);
	list_foreach(arena->spans, span_link, ra_span_t, span) {
		if (iswithin(span->base, span->size, base, size)) {
			ra_span_free(span, base, size);
			irq_spinlock_unlock(&arena->lock, true);
			return;
		}
	}
	irq_spinlock_unlock(&arena->lock, true);

	panic("Freeing to wrong arena (base=%" PRIxPTR ", size=%zd).",
	    base, size);
}

void ra_init(void)
{
	ra_segment_cache = slab_cache_create("ra_segment_t",
	    sizeof(ra_segment_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
}

/** @}
 */