source: mainline/kernel/generic/src/lib/ra.c@ 82cbf8c6

Last change on this file was 82cbf8c6, checked in by Jakub Jermar <jakub@…>, 8 years ago

Replace the old hash table implementation in the kernel with the newer one

This replaces the original hash table implementation with the resizable one
already used in uspace. Along the way, the IRQ hash table code was streamlined
and cleaned up.

/*
 * Copyright (c) 2011 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup generic
 * @{
 */

/**
 * @file
 * @brief Resource allocator.
 *
 * This is a generic resource allocator, loosely based on the ideas presented
 * in chapter 4 of the following paper and further simplified:
 *
 * Bonwick J., Adams J.: Magazines and Vmem: Extending the Slab Allocator to
 * Many CPUs and Arbitrary Resources, USENIX 2001
 *
 */
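
/*
 * A minimal usage sketch of the public interface defined below (illustrative
 * only; the numbers are hypothetical and error handling is elided). It
 * assumes ra_init() has already been called during kernel initialization:
 *
 *	ra_arena_t *arena = ra_arena_create();
 *
 *	// Back the arena with one span of resources [0x1000, 0x11000).
 *	(void) ra_span_add(arena, 0x1000, 0x10000);
 *
 *	uintptr_t base;
 *	if (ra_alloc(arena, 0x100, 0x10, &base)) {
 *		// Use resources [base, base + 0x100) and return them later.
 *		ra_free(arena, base, 0x100);
 *	}
 */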

#include <assert.h>
#include <lib/ra.h>
#include <typedefs.h>
#include <mm/slab.h>
#include <bitops.h>
#include <panic.h>
#include <adt/list.h>
#include <adt/hash.h>
#include <adt/hash_table.h>
#include <align.h>
#include <macros.h>
#include <synch/spinlock.h>

static slab_cache_t *ra_segment_cache;

/** Return the hash of the key stored in the item */
static size_t used_hash(const ht_link_t *item)
{
	ra_segment_t *seg = hash_table_get_inst(item, ra_segment_t, uh_link);
	return hash_mix(seg->base);
}

/** Return the hash of the key */
static size_t used_key_hash(void *key)
{
	uintptr_t *base = (uintptr_t *) key;
	return hash_mix(*base);
}

/** Return true if the key is equal to the item's lookup key */
static bool used_key_equal(void *key, const ht_link_t *item)
{
	uintptr_t *base = (uintptr_t *) key;
	ra_segment_t *seg = hash_table_get_inst(item, ra_segment_t, uh_link);
	return seg->base == *base;
}

static hash_table_ops_t used_ops = {
	.hash = used_hash,
	.key_hash = used_key_hash,
	.key_equal = used_key_equal
};

/** Calculate the segment size. */
static size_t ra_segment_size_get(ra_segment_t *seg)
{
	ra_segment_t *nextseg;

	nextseg = list_get_instance(seg->segment_link.next, ra_segment_t,
	    segment_link);
	return nextseg->base - seg->base;
}

static ra_segment_t *ra_segment_create(uintptr_t base)
{
	ra_segment_t *seg;

	seg = slab_alloc(ra_segment_cache, FRAME_ATOMIC);
	if (!seg)
		return NULL;

	link_initialize(&seg->segment_link);
	link_initialize(&seg->fl_link);

	seg->base = base;
	seg->flags = 0;

	return seg;
}

static void ra_segment_destroy(ra_segment_t *seg)
{
	slab_free(ra_segment_cache, seg);
}

static ra_span_t *ra_span_create(uintptr_t base, size_t size)
{
	ra_span_t *span;
	ra_segment_t *seg, *lastseg;
	unsigned int i;

	span = (ra_span_t *) malloc(sizeof(ra_span_t), FRAME_ATOMIC);
	if (!span)
		return NULL;

	span->max_order = fnzb(size);
	span->base = base;
	span->size = size;

	span->free = (list_t *) malloc((span->max_order + 1) * sizeof(list_t),
	    FRAME_ATOMIC);
	if (!span->free) {
		free(span);
		return NULL;
	}

	/*
	 * Create a segment to represent the entire size of the span.
	 */
	seg = ra_segment_create(base);
	if (!seg) {
		free(span->free);
		free(span);
		return NULL;
	}
	seg->flags = RA_SEGMENT_FREE;

	/*
	 * The last segment will be used as a sentinel at the end of the
	 * segment list so that it is possible to calculate the size for
	 * all other segments. It will not be placed in any free list or
	 * in the used segment hash and adjacent segments will not be
	 * coalesced with it.
	 */
	lastseg = ra_segment_create(base + size);
	if (!lastseg) {
		ra_segment_destroy(seg);
		free(span->free);
		free(span);
		return NULL;
	}

	link_initialize(&span->span_link);
	list_initialize(&span->segments);

	hash_table_create(&span->used, 0, 0, &used_ops);

	for (i = 0; i <= span->max_order; i++)
		list_initialize(&span->free[i]);

	/* Insert the first segment into the list of segments. */
	list_append(&seg->segment_link, &span->segments);
	/* Insert the last segment into the list of segments. */
	list_append(&lastseg->segment_link, &span->segments);

	/* Insert the first segment into the respective free list. */
	list_append(&seg->fl_link, &span->free[span->max_order]);

	return span;
}

/** Create an empty arena. */
ra_arena_t *ra_arena_create(void)
{
	ra_arena_t *arena;

	arena = (ra_arena_t *) malloc(sizeof(ra_arena_t), FRAME_ATOMIC);
	if (!arena)
		return NULL;

	irq_spinlock_initialize(&arena->lock, "arena_lock");
	list_initialize(&arena->spans);

	return arena;
}

/** Add a span to arena. */
bool ra_span_add(ra_arena_t *arena, uintptr_t base, size_t size)
{
	ra_span_t *span;

	span = ra_span_create(base, size);
	if (!span)
		return false;

	/* TODO: check for overlaps */
	irq_spinlock_lock(&arena->lock, true);
	list_append(&span->span_link, &arena->spans);
	irq_spinlock_unlock(&arena->lock, true);
	return true;
}

static bool
ra_span_alloc(ra_span_t *span, size_t size, size_t align, uintptr_t *base)
{
	/*
	 * We need to add at most align - 1 to the requested size to be able
	 * to compensate for the worst-case misaligned free segment.
	 */
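	/*
	 * For example (hypothetical numbers): a request for size = 0x100 with
	 * align = 0x100 gives needed = 0x1ff, hence order = 9, so the search
	 * starts at free[9]. Any segment on that list is at least 0x200 units
	 * long and therefore always contains a 0x100-aligned range of 0x100
	 * units, regardless of how its base is aligned.
	 */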
	size_t needed = size + align - 1;
	size_t order = ispwr2(needed) ? fnzb(needed) : fnzb(needed) + 1;
	ra_segment_t *pred = NULL;
	ra_segment_t *succ = NULL;

	/*
	 * Find the free list of the smallest order which can satisfy this
	 * request.
	 */
	for (; order <= span->max_order; order++) {
		ra_segment_t *seg;
		uintptr_t newbase;

		if (list_empty(&span->free[order]))
			continue;

		/* Take the first segment from the free list. */
		seg = list_get_instance(list_first(&span->free[order]),
		    ra_segment_t, fl_link);

		assert(seg->flags & RA_SEGMENT_FREE);

		/*
		 * See if we need to allocate new segments for the chopped-off
		 * parts of this segment.
		 */
		if (!IS_ALIGNED(seg->base, align)) {
			pred = ra_segment_create(seg->base);
			if (!pred) {
				/*
				 * Fail as we are unable to split the segment.
				 */
				break;
			}
			pred->flags |= RA_SEGMENT_FREE;
		}
		newbase = ALIGN_UP(seg->base, align);
		if (newbase + size != seg->base + ra_segment_size_get(seg)) {
			assert(newbase + (size - 1) < seg->base +
			    (ra_segment_size_get(seg) - 1));
			succ = ra_segment_create(newbase + size);
			if (!succ) {
				if (pred)
					ra_segment_destroy(pred);
				/*
				 * Fail as we are unable to split the segment.
				 */
				break;
			}
			succ->flags |= RA_SEGMENT_FREE;
		}

		/* Put unneeded parts back. */
		if (pred) {
			size_t pred_order;

			list_insert_before(&pred->segment_link,
			    &seg->segment_link);
			pred_order = fnzb(ra_segment_size_get(pred));
			list_append(&pred->fl_link, &span->free[pred_order]);
		}
		if (succ) {
			size_t succ_order;

			list_insert_after(&succ->segment_link,
			    &seg->segment_link);
			succ_order = fnzb(ra_segment_size_get(succ));
			list_append(&succ->fl_link, &span->free[succ_order]);
		}

		/* Now remove the found segment from the free list. */
		list_remove(&seg->fl_link);
		seg->base = newbase;
		seg->flags &= ~RA_SEGMENT_FREE;

		/* Hash-in the segment into the used hash. */
		hash_table_insert(&span->used, &seg->uh_link);

		*base = newbase;
		return true;
	}

	return false;
}

static void ra_span_free(ra_span_t *span, size_t base, size_t size)
{
	sysarg_t key = base;
	ht_link_t *link;
	ra_segment_t *seg;
	ra_segment_t *pred;
	ra_segment_t *succ;
	size_t order;

	/*
	 * Locate the segment in the used hash table.
	 */
	link = hash_table_find(&span->used, &key);
	if (!link) {
		panic("Freeing segment which is not known to be used (base=%"
		    PRIxn ", size=%" PRIdn ").", base, size);
	}
	seg = hash_table_get_inst(link, ra_segment_t, uh_link);

	/*
	 * Hash out the segment.
	 */
	hash_table_remove_item(&span->used, link);

	assert(!(seg->flags & RA_SEGMENT_FREE));
	assert(seg->base == base);
	assert(ra_segment_size_get(seg) == size);

	/*
	 * Check whether the segment can be coalesced with its left neighbor.
	 */
	if (list_first(&span->segments) != &seg->segment_link) {
		pred = list_get_instance(seg->segment_link.prev,
		    ra_segment_t, segment_link);

		assert(pred->base < seg->base);

		if (pred->flags & RA_SEGMENT_FREE) {
			/*
			 * The segment can be coalesced with its predecessor.
			 * Remove the predecessor from the free and segment
			 * lists, rebase the segment and throw the predecessor
			 * away.
			 */
			list_remove(&pred->fl_link);
			list_remove(&pred->segment_link);
			seg->base = pred->base;
			ra_segment_destroy(pred);
		}
	}

	/*
	 * Check whether the segment can be coalesced with its right neighbor.
	 */
	succ = list_get_instance(seg->segment_link.next, ra_segment_t,
	    segment_link);
	assert(succ->base > seg->base);
	if (succ->flags & RA_SEGMENT_FREE) {
		/*
		 * The segment can be coalesced with its successor.
		 * Remove the successor from the free and segment lists
		 * and throw it away.
		 */
		list_remove(&succ->fl_link);
		list_remove(&succ->segment_link);
		ra_segment_destroy(succ);
	}

	/* Put the segment on the appropriate free list. */
	seg->flags |= RA_SEGMENT_FREE;
	order = fnzb(ra_segment_size_get(seg));
	list_append(&seg->fl_link, &span->free[order]);
}

/** Allocate resources from arena. */
bool
ra_alloc(ra_arena_t *arena, size_t size, size_t alignment, uintptr_t *base)
{
	bool success = false;

	assert(size >= 1);
	assert(alignment >= 1);
	assert(ispwr2(alignment));

	irq_spinlock_lock(&arena->lock, true);
	list_foreach(arena->spans, span_link, ra_span_t, span) {
		success = ra_span_alloc(span, size, alignment, base);
		if (success)
			break;
	}
	irq_spinlock_unlock(&arena->lock, true);

	return success;
}

/** Return resources to arena. */
void ra_free(ra_arena_t *arena, uintptr_t base, size_t size)
{
	irq_spinlock_lock(&arena->lock, true);
	list_foreach(arena->spans, span_link, ra_span_t, span) {
		if (iswithin(span->base, span->size, base, size)) {
			ra_span_free(span, base, size);
			irq_spinlock_unlock(&arena->lock, true);
			return;
		}
	}
	irq_spinlock_unlock(&arena->lock, true);

	panic("Freeing to wrong arena (base=%" PRIxn ", size=%" PRIdn ").",
	    base, size);
}

void ra_init(void)
{
	ra_segment_cache = slab_cache_create("ra_segment_t",
	    sizeof(ra_segment_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
}

/** @}
 */