source: mainline/uspace/lib/c/generic/malloc.c@ 48bd6f4

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 48bd6f4 was 3e6a98c5, checked in by Jiri Svoboda <jiri@…>, 13 years ago

Standards-compliant boolean type.

  • Property mode set to 100644
File size: 25.0 KB
RevLine 
[6db6fd1]1/*
2 * Copyright (c) 2009 Martin Decky
3 * Copyright (c) 2009 Petr Tuma
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup libc
31 * @{
32 */
33/** @file
34 */
35
36#include <malloc.h>
[3e6a98c5]37#include <stdbool.h>
[6db6fd1]38#include <as.h>
39#include <align.h>
40#include <macros.h>
41#include <assert.h>
42#include <errno.h>
43#include <bitops.h>
44#include <mem.h>
[3292623]45#include <futex.h>
[d161715]46#include <stdlib.h>
[6db6fd1]47#include <adt/gcdlcm.h>
[47b7006]48#include "private/malloc.h"
[6db6fd1]49
/** Magic used in heap headers. */
#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)

/** Magic used in heap footers. */
#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)

/** Magic used in heap descriptor. */
#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)

/** Allocation alignment.
 *
 * This also covers the alignment of fields
 * in the heap header and footer.
 *
 */
#define BASE_ALIGN 16

/** Heap shrink granularity
 *
 * Try not to pump and stress the heap too much
 * by shrinking and enlarging it too often.
 * A heap area won't be shrunk if the released
 * free block is smaller than this constant.
 *
 */
#define SHRINK_GRANULARITY (64 * PAGE_SIZE)

/** Overhead of each heap block (header plus footer, in bytes). */
#define STRUCT_OVERHEAD \
	(sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))

/** Overhead of each area (area descriptor rounded up to BASE_ALIGN). */
#define AREA_OVERHEAD(size) \
	(ALIGN_UP(size + sizeof(heap_area_t), BASE_ALIGN))

/** Calculate real size of a heap block.
 *
 * Add header and footer size.
 *
 */
#define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)

/** Calculate net size of a heap block.
 *
 * Subtract header and footer size.
 *
 */
#define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)

/** Get first block in heap area.
 *
 * The first block header starts at the first BASE_ALIGN
 * boundary past the area descriptor.
 *
 */
#define AREA_FIRST_BLOCK_HEAD(area) \
	(ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))

/** Get last block in heap area.
 *
 * The last block footer ends exactly at the area end.
 *
 */
#define AREA_LAST_BLOCK_FOOT(area) \
	(((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))

/** Get header of the last block in heap area (via its footer). */
#define AREA_LAST_BLOCK_HEAD(area) \
	((uintptr_t) BLOCK_HEAD(((heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area))))

/** Get header in heap block.
 *
 * Walk backwards from the footer using the size it records.
 *
 */
#define BLOCK_HEAD(foot) \
	((heap_block_head_t *) \
	    (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))

/** Get footer in heap block.
 *
 * Walk forwards from the header using the size it records.
 *
 */
#define BLOCK_FOOT(head) \
	((heap_block_foot_t *) \
	    (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)))
/** Heap area.
 *
 * The memory managed by the heap allocator is divided into
 * multiple discontinuous heaps. Each heap is represented
 * by a separate address space area which has this structure
 * at its very beginning.
 *
 */
typedef struct heap_area {
	/** Start of the heap area (including this structure)
	 *
	 * Aligned on page boundary.
	 *
	 */
	void *start;

	/** End of the heap area (aligned on page boundary) */
	void *end;

	/** Previous heap area */
	struct heap_area *prev;

	/** Next heap area */
	struct heap_area *next;

	/** A magic value (HEAP_AREA_MAGIC) to detect corruption */
	uint32_t magic;
} heap_area_t;

/** Header of a heap block
 *
 */
typedef struct {
	/* Size of the block (including header and footer) */
	size_t size;

	/* Indication of a free block */
	bool free;

	/** Heap area this block belongs to */
	heap_area_t *area;

	/* A magic value to detect overwrite of heap header */
	uint32_t magic;
} heap_block_head_t;

/** Footer of a heap block
 *
 * Mirrors the size stored in the header so that neighbouring
 * blocks can be traversed backwards.
 *
 */
typedef struct {
	/* Size of the block (including header and footer) */
	size_t size;

	/* A magic value to detect overwrite of heap footer */
	uint32_t magic;
} heap_block_foot_t;

/** First heap area */
static heap_area_t *first_heap_area = NULL;

/** Last heap area */
static heap_area_t *last_heap_area = NULL;

/** Next heap block to examine (next fit algorithm) */
static heap_block_head_t *next_fit = NULL;

/** Futex for thread-safe heap manipulation */
static futex_t malloc_futex = FUTEX_INITIALIZER;

#ifndef NDEBUG

/* Assertion that releases the heap futex before aborting so that
 * the failure report cannot deadlock on the lock we hold. */
#define malloc_assert(expr) \
	do { \
		if (!(expr)) {\
			futex_up(&malloc_futex); \
			assert_abort(#expr, __FILE__, __LINE__); \
		} \
	} while (0)

#else /* NDEBUG */

#define malloc_assert(expr)

#endif /* NDEBUG */
[6db6fd1]213/** Initialize a heap block
214 *
[0b37882]215 * Fill in the structures related to a heap block.
[3292623]216 * Should be called only inside the critical section.
[6db6fd1]217 *
218 * @param addr Address of the block.
219 * @param size Size of the block including the header and the footer.
220 * @param free Indication of a free block.
[0b37882]221 * @param area Heap area the block belongs to.
[6db6fd1]222 *
223 */
[0b37882]224static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
[6db6fd1]225{
226 /* Calculate the position of the header and the footer */
227 heap_block_head_t *head = (heap_block_head_t *) addr;
228
229 head->size = size;
230 head->free = free;
[0b37882]231 head->area = area;
[6db6fd1]232 head->magic = HEAP_BLOCK_HEAD_MAGIC;
233
[0b37882]234 heap_block_foot_t *foot = BLOCK_FOOT(head);
235
[6db6fd1]236 foot->size = size;
237 foot->magic = HEAP_BLOCK_FOOT_MAGIC;
238}
239
240/** Check a heap block
241 *
242 * Verifies that the structures related to a heap block still contain
243 * the magic constants. This helps detect heap corruption early on.
[3292623]244 * Should be called only inside the critical section.
[6db6fd1]245 *
246 * @param addr Address of the block.
247 *
248 */
249static void block_check(void *addr)
250{
251 heap_block_head_t *head = (heap_block_head_t *) addr;
252
[13f2461]253 malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);
[6db6fd1]254
[0b37882]255 heap_block_foot_t *foot = BLOCK_FOOT(head);
[6db6fd1]256
[13f2461]257 malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
258 malloc_assert(head->size == foot->size);
[6db6fd1]259}
260
[0b37882]261/** Check a heap area structure
[013a5d7]262 *
263 * Should be called only inside the critical section.
[3292623]264 *
[0b37882]265 * @param addr Address of the heap area.
[3292623]266 *
[0b37882]267 */
268static void area_check(void *addr)
269{
270 heap_area_t *area = (heap_area_t *) addr;
271
[13f2461]272 malloc_assert(area->magic == HEAP_AREA_MAGIC);
273 malloc_assert(addr == area->start);
274 malloc_assert(area->start < area->end);
275 malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
276 malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
[0b37882]277}
278
279/** Create new heap area
280 *
[013a5d7]281 * Should be called only inside the critical section.
282 *
283 * @param size Size of the area.
[3292623]284 *
285 */
[0b37882]286static bool area_create(size_t size)
[6db6fd1]287{
[fbcdeb8]288 /* Align the heap area size on page boundary */
[0b37882]289 size_t asize = ALIGN_UP(size, PAGE_SIZE);
[faba839]290 void *astart = as_area_create(AS_AREA_ANY, asize,
[9727b92]291 AS_AREA_WRITE | AS_AREA_READ | AS_AREA_CACHEABLE);
[faba839]292 if (astart == AS_MAP_FAILED)
[e70edd1]293 return false;
[6db6fd1]294
[0b37882]295 heap_area_t *area = (heap_area_t *) astart;
296
297 area->start = astart;
[013a5d7]298 area->end = (void *) ((uintptr_t) astart + asize);
299 area->prev = NULL;
[0b37882]300 area->next = NULL;
301 area->magic = HEAP_AREA_MAGIC;
302
[013a5d7]303 void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
[0b37882]304 size_t bsize = (size_t) (area->end - block);
305
306 block_init(block, bsize, true, area);
307
308 if (last_heap_area == NULL) {
309 first_heap_area = area;
310 last_heap_area = area;
311 } else {
[013a5d7]312 area->prev = last_heap_area;
[0b37882]313 last_heap_area->next = area;
314 last_heap_area = area;
315 }
316
317 return true;
318}
319
/** Try to enlarge a heap area
 *
 * Should be called only inside the critical section.
 *
 * @param area Heap area to grow.
 * @param size Gross size to grow (bytes).
 *
 * @return True if successful.
 *
 */
static bool area_grow(heap_area_t *area, size_t size)
{
	if (size == 0)
		return true;

	area_check(area);

	/* New heap area size (current extent plus the requested growth) */
	size_t gross_size = (size_t) (area->end - area->start) + size;
	size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
	void *end = (void *) ((uintptr_t) area->start + asize);

	/* Check for overflow */
	if (end < area->start)
		return false;

	/* Resize the address space area */
	int ret = as_area_resize(area->start, asize, 0);
	if (ret != EOK)
		return false;

	heap_block_head_t *last_head =
	    (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);

	if (last_head->free) {
		/* Add the new space to the last block. */
		size_t net_size = (size_t) (end - area->end) + last_head->size;
		malloc_assert(net_size > 0);
		block_init(last_head, net_size, true, area);
	} else {
		/* Add new free block covering just the new space */
		size_t net_size = (size_t) (end - area->end);
		if (net_size > 0)
			block_init(area->end, net_size, true, area);
	}

	/* Update heap area parameters */
	area->end = end;

	return true;
}
371
/** Try to shrink heap
 *
 * If the last block of the given area is free, either destroy
 * the whole area (when the free block is the only block) or give
 * whole pages back to the system (when at least SHRINK_GRANULARITY
 * bytes would be released).
 * Should be called only inside the critical section.
 * In all cases the next fit pointer is reset.
 *
 * @param area Last modified heap area.
 *
 */
static void heap_shrink(heap_area_t *area)
{
	area_check(area);

	heap_block_foot_t *last_foot =
	    (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
	heap_block_head_t *last_head = BLOCK_HEAD(last_foot);

	block_check((void *) last_head);
	malloc_assert(last_head->area == area);

	if (last_head->free) {
		/*
		 * The last block of the heap area is
		 * unused. The area might be potentially
		 * shrunk.
		 */

		heap_block_head_t *first_head =
		    (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);

		block_check((void *) first_head);
		malloc_assert(first_head->area == area);

		/* Only whole pages can be returned to the system */
		size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);

		if (first_head == last_head) {
			/*
			 * The entire heap area consists of a single
			 * free heap block. This means we can get rid
			 * of it entirely.
			 */

			heap_area_t *prev = area->prev;
			heap_area_t *next = area->next;

			/* Unlink the area from the doubly-linked area list */
			if (prev != NULL) {
				area_check(prev);
				prev->next = next;
			} else
				first_heap_area = next;

			if (next != NULL) {
				area_check(next);
				next->prev = prev;
			} else
				last_heap_area = prev;

			as_area_destroy(area->start);
		} else if (shrink_size >= SHRINK_GRANULARITY) {
			/*
			 * Make sure that we always shrink the area
			 * by a multiple of page size and update
			 * the block layout accordingly.
			 */

			size_t asize = (size_t) (area->end - area->start) - shrink_size;
			void *end = (void *) ((uintptr_t) area->start + asize);

			/* Resize the address space area */
			int ret = as_area_resize(area->start, asize, 0);
			if (ret != EOK)
				abort();

			/* Update heap area parameters */
			area->end = end;

			/* Bytes of the old free block still left in the area */
			size_t excess = ((size_t) area->end) - ((size_t) last_head);

			if (excess > 0) {
				if (excess >= STRUCT_OVERHEAD) {
					/*
					 * The previous block cannot be free and there
					 * is enough free space left in the area to
					 * create a new free block.
					 */
					block_init((void *) last_head, excess, true, area);
				} else {
					/*
					 * The excess is small. Therefore just enlarge
					 * the previous block.
					 */
					heap_block_foot_t *prev_foot = (heap_block_foot_t *)
					    (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
					heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);

					block_check((void *) prev_head);

					block_init(prev_head, prev_head->size + excess,
					    prev_head->free, area);
				}
			}
		}
	}

	/* Invalidate the next fit hint; the block layout has changed */
	next_fit = NULL;
}
476
477/** Initialize the heap allocator
478 *
[47b7006]479 * Create initial heap memory area. This routine is
480 * only called from libc initialization, thus we do not
481 * take any locks.
[6db6fd1]482 *
483 */
[47b7006]484void __malloc_init(void)
[6db6fd1]485{
[0b37882]486 if (!area_create(PAGE_SIZE))
[47b7006]487 abort();
[6db6fd1]488}
489
[3292623]490/** Split heap block and mark it as used.
491 *
492 * Should be called only inside the critical section.
493 *
494 * @param cur Heap block to split.
495 * @param size Number of bytes to split and mark from the beginning
496 * of the block.
497 *
498 */
[6db6fd1]499static void split_mark(heap_block_head_t *cur, const size_t size)
500{
[13f2461]501 malloc_assert(cur->size >= size);
[6db6fd1]502
503 /* See if we should split the block. */
504 size_t split_limit = GROSS_SIZE(size);
505
506 if (cur->size > split_limit) {
507 /* Block big enough -> split. */
508 void *next = ((void *) cur) + size;
[0b37882]509 block_init(next, cur->size - size, true, cur->area);
510 block_init(cur, size, false, cur->area);
[6db6fd1]511 } else {
512 /* Block too small -> use as is. */
513 cur->free = false;
514 }
515}
516
/** Allocate memory from heap area starting from given block
 *
 * Should be called only inside the critical section.
 * As a side effect this function also sets the next fit
 * pointer on successful allocation.
 *
 * @param area Heap area where to allocate from.
 * @param first_block Starting heap block.
 * @param final_block Heap block where to finish the search
 * (may be NULL).
 * @param real_size Gross number of bytes to allocate.
 * @param falign Physical alignment of the block.
 *
 * @return Address of the allocated block or NULL on not enough memory.
 *
 */
static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
    heap_block_head_t *final_block, size_t real_size, size_t falign)
{
	area_check((void *) area);
	malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) first_block < area->end);

	/* Walk the blocks of the area front to back */
	for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
	    cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
		block_check(cur);

		/* Finish searching on the final block */
		if ((final_block != NULL) && (cur == final_block))
			break;

		/* Try to find a block that is free and large enough. */
		if ((cur->free) && (cur->size >= real_size)) {
			/*
			 * We have found a suitable block.
			 * Check for alignment properties.
			 */
			void *addr = (void *)
			    ((uintptr_t) cur + sizeof(heap_block_head_t));
			void *aligned = (void *)
			    ALIGN_UP((uintptr_t) addr, falign);

			if (addr == aligned) {
				/* Exact block start including alignment. */
				split_mark(cur, real_size);

				next_fit = cur;
				return addr;
			} else {
				/* Block start has to be aligned */
				size_t excess = (size_t) (aligned - addr);

				if (cur->size >= real_size + excess) {
					/*
					 * The current block is large enough to fit
					 * data in (including alignment).
					 */
					if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
						/*
						 * There is a block before the current block.
						 * This previous block can be enlarged to
						 * compensate for the alignment excess.
						 */
						heap_block_foot_t *prev_foot = (heap_block_foot_t *)
						    ((void *) cur - sizeof(heap_block_foot_t));

						heap_block_head_t *prev_head = (heap_block_head_t *)
						    ((void *) cur - prev_foot->size);

						block_check(prev_head);

						size_t reduced_size = cur->size - excess;
						heap_block_head_t *next_head = ((void *) cur) + excess;

						if ((!prev_head->free) &&
						    (excess >= STRUCT_OVERHEAD)) {
							/*
							 * The previous block is not free and there
							 * is enough free space left to fill in
							 * a new free block between the previous
							 * and current block.
							 */
							block_init(cur, excess, true, area);
						} else {
							/*
							 * The previous block is free (thus there
							 * is no need to induce additional
							 * fragmentation to the heap) or the
							 * excess is small. Therefore just enlarge
							 * the previous block.
							 */
							block_init(prev_head, prev_head->size + excess,
							    prev_head->free, area);
						}

						block_init(next_head, reduced_size, true, area);
						split_mark(next_head, real_size);

						next_fit = next_head;
						return aligned;
					} else {
						/*
						 * The current block is the first block
						 * in the heap area. We have to make sure
						 * that the alignment excess is large enough
						 * to fit a new free block just before the
						 * current block.
						 */
						while (excess < STRUCT_OVERHEAD) {
							aligned += falign;
							excess += falign;
						}

						/* Check for current block size again */
						if (cur->size >= real_size + excess) {
							size_t reduced_size = cur->size - excess;
							cur = (heap_block_head_t *)
							    (AREA_FIRST_BLOCK_HEAD(area) + excess);

							block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
							    excess, true, area);
							block_init(cur, reduced_size, true, area);
							split_mark(cur, real_size);

							next_fit = cur;
							return aligned;
						}
					}
				}
			}
		}
	}

	return NULL;
}
652
/** Try to enlarge any of the heap areas.
 *
 * If successful, allocate block of the given size in the area.
 * Should be called only inside the critical section.
 *
 * @param size Gross size of item to allocate (bytes).
 * @param align Memory address alignment.
 *
 * @return Allocated block.
 * @return NULL on failure.
 *
 */
static void *heap_grow_and_alloc(size_t size, size_t align)
{
	if (size == 0)
		return NULL;

	/* First try to enlarge some existing area */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {

		/* Grow by size + align so alignment excess always fits */
		if (area_grow(area, size + align)) {
			heap_block_head_t *first =
			    (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);

			void *addr =
			    malloc_area(area, first, NULL, size, align);
			malloc_assert(addr != NULL);
			return addr;
		}
	}

	/* Eventually try to create a new area */
	if (area_create(AREA_OVERHEAD(size + align))) {
		heap_block_head_t *first =
		    (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(last_heap_area);

		void *addr =
		    malloc_area(last_heap_area, first, NULL, size, align);
		malloc_assert(addr != NULL);
		return addr;
	}

	return NULL;
}
698
/** Allocate a memory block
 *
 * Should be called only inside the critical section.
 *
 * @param size The size of the block to allocate.
 * @param align Memory address alignment.
 *
 * @return Address of the allocated block or NULL on not enough memory.
 *
 */
static void *malloc_internal(const size_t size, const size_t align)
{
	malloc_assert(first_heap_area != NULL);

	if (align == 0)
		return NULL;

	/* Effective alignment must satisfy both caller and BASE_ALIGN */
	size_t falign = lcm(align, BASE_ALIGN);

	/* Check for integer overflow. */
	if (falign < align)
		return NULL;

	/*
	 * The size of the allocated block needs to be naturally
	 * aligned, because the footer structure also needs to reside
	 * on a naturally aligned address in order to avoid unaligned
	 * memory accesses.
	 */
	size_t gross_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));

	/* Try the next fit approach */
	heap_block_head_t *split = next_fit;

	if (split != NULL) {
		void *addr = malloc_area(split->area, split, NULL, gross_size,
		    falign);

		if (addr != NULL)
			return addr;
	}

	/* Search the entire heap (stopping at the already-tried split block) */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {
		heap_block_head_t *first = (heap_block_head_t *)
		    AREA_FIRST_BLOCK_HEAD(area);

		void *addr = malloc_area(area, first, split, gross_size,
		    falign);

		if (addr != NULL)
			return addr;
	}

	/* Finally, try to grow heap space and allocate in the new area. */
	return heap_grow_and_alloc(gross_size, falign);
}
757
/** Allocate memory by number of elements
 *
 * The returned memory is zero-initialized.
 *
 * @param nmemb Number of members to allocate.
 * @param size Size of one member in bytes.
 *
 * @return Allocated memory or NULL.
 *
 */
void *calloc(const size_t nmemb, const size_t size)
{
	/*
	 * Guard against overflow in nmemb * size (CERT MEM07-C).
	 * A wrapped product would silently allocate (and zero) a
	 * much smaller block than the caller asked for.
	 */
	if ((size != 0) && (nmemb > ((size_t) -1) / size))
		return NULL;

	size_t total = nmemb * size;

	void *block = malloc(total);
	if (block == NULL)
		return NULL;

	memset(block, 0, total);
	return block;
}
777
[3292623]778/** Allocate memory
779 *
780 * @param size Number of bytes to allocate.
781 *
782 * @return Allocated memory or NULL.
783 *
784 */
[6db6fd1]785void *malloc(const size_t size)
786{
[3292623]787 futex_down(&malloc_futex);
788 void *block = malloc_internal(size, BASE_ALIGN);
789 futex_up(&malloc_futex);
790
791 return block;
[6db6fd1]792}
793
[3292623]794/** Allocate memory with specified alignment
795 *
796 * @param align Alignment in byes.
797 * @param size Number of bytes to allocate.
798 *
799 * @return Allocated memory or NULL.
800 *
801 */
[6db6fd1]802void *memalign(const size_t align, const size_t size)
803{
804 if (align == 0)
805 return NULL;
806
807 size_t palign =
808 1 << (fnzb(max(sizeof(void *), align) - 1) + 1);
809
[3292623]810 futex_down(&malloc_futex);
811 void *block = malloc_internal(size, palign);
812 futex_up(&malloc_futex);
813
814 return block;
[6db6fd1]815}
816
/** Reallocate memory block
 *
 * @param addr Already allocated memory or NULL.
 * @param size New size of the memory block.
 *
 * @return Reallocated memory or NULL.
 *
 */
void *realloc(const void *addr, const size_t size)
{
	if (addr == NULL)
		return malloc(size);

	futex_down(&malloc_futex);

	/* Calculate the position of the header. */
	heap_block_head_t *head =
	    (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

	block_check(head);
	malloc_assert(!head->free);

	heap_area_t *area = head->area;

	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);

	void *ptr = NULL;
	bool reloc = false;
	size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
	size_t orig_size = head->size;

	if (orig_size > real_size) {
		/* Shrink */
		if (orig_size - real_size >= STRUCT_OVERHEAD) {
			/*
			 * Split the original block to a full block
			 * and a trailing free block.
			 */
			block_init((void *) head, real_size, false, area);
			block_init((void *) head + real_size,
			    orig_size - real_size, true, area);
			heap_shrink(area);
		}

		ptr = ((void *) head) + sizeof(heap_block_head_t);
	} else {
		/*
		 * Look at the next block. If it is free and the size is
		 * sufficient then merge the two. Otherwise just allocate
		 * a new block, copy the original data into it and
		 * free the original block.
		 */
		heap_block_head_t *next_head =
		    (heap_block_head_t *) (((void *) head) + head->size);

		if (((void *) next_head < area->end) &&
		    (head->size + next_head->size >= real_size) &&
		    (next_head->free)) {
			block_check(next_head);
			block_init(head, head->size + next_head->size, false, area);
			split_mark(head, real_size);

			ptr = ((void *) head) + sizeof(heap_block_head_t);
			next_fit = NULL;
		} else
			reloc = true;
	}

	futex_up(&malloc_futex);

	/* Relocation must happen outside the critical section,
	   since malloc() and free() take the futex themselves. */
	if (reloc) {
		ptr = malloc(size);
		if (ptr != NULL) {
			memcpy(ptr, addr, NET_SIZE(orig_size));
			free(addr);
		}
	}

	return ptr;
}
899
/** Free a memory block
 *
 * Mark the block free, coalesce it with free neighbours and
 * try to shrink the containing heap area.
 *
 * @param addr The address of the block.
 *
 */
void free(const void *addr)
{
	if (addr == NULL)
		return;

	futex_down(&malloc_futex);

	/* Calculate the position of the header. */
	heap_block_head_t *head
	    = (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

	block_check(head);
	malloc_assert(!head->free);

	heap_area_t *area = head->area;

	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);

	/* Mark the block itself as free. */
	head->free = true;

	/* Look at the next block. If it is free, merge the two. */
	heap_block_head_t *next_head
	    = (heap_block_head_t *) (((void *) head) + head->size);

	if ((void *) next_head < area->end) {
		block_check(next_head);
		if (next_head->free)
			block_init(head, head->size + next_head->size, true, area);
	}

	/* Look at the previous block. If it is free, merge the two. */
	if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
		heap_block_foot_t *prev_foot =
		    (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));

		heap_block_head_t *prev_head =
		    (heap_block_head_t *) (((void *) head) - prev_foot->size);

		block_check(prev_head);

		if (prev_head->free)
			block_init(prev_head, prev_head->size + head->size, true,
			    area);
	}

	/* Give unused pages back to the system if worthwhile */
	heap_shrink(area);

	futex_up(&malloc_futex);
}
957
/** Check consistency of the entire heap
 *
 * Walk all heap areas and all blocks within them, verifying
 * area invariants and block magic values.
 *
 * @return NULL if the heap is consistent.
 * @return (void *) -1 if the heap has not been initialized.
 * @return Address of the first inconsistent structure otherwise.
 *
 */
void *heap_check(void)
{
	futex_down(&malloc_futex);

	if (first_heap_area == NULL) {
		futex_up(&malloc_futex);
		return (void *) -1;
	}

	/* Walk all heap areas */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {

		/* Check heap area consistency */
		if ((area->magic != HEAP_AREA_MAGIC) ||
		    ((void *) area != area->start) ||
		    (area->start >= area->end) ||
		    (((uintptr_t) area->start % PAGE_SIZE) != 0) ||
		    (((uintptr_t) area->end % PAGE_SIZE) != 0)) {
			futex_up(&malloc_futex);
			return (void *) area;
		}

		/* Walk all heap blocks */
		for (heap_block_head_t *head = (heap_block_head_t *)
		    AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end;
		    head = (heap_block_head_t *) (((void *) head) + head->size)) {

			/* Check heap block consistency */
			if (head->magic != HEAP_BLOCK_HEAD_MAGIC) {
				futex_up(&malloc_futex);
				return (void *) head;
			}

			heap_block_foot_t *foot = BLOCK_FOOT(head);

			if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) ||
			    (head->size != foot->size)) {
				futex_up(&malloc_futex);
				return (void *) foot;
			}
		}
	}

	futex_up(&malloc_futex);

	return NULL;
}
1006
[6db6fd1]1007/** @}
1008 */
Note: See TracBrowser for help on using the repository browser.