source: mainline/uspace/lib/c/generic/malloc.c@ 34efa8a

Last change on this file since 34efa8a was e06e2716, checked in by Jakub Jermar <jakub@…>, 14 years ago

Passing stringified LINE does not conserve space very well,
pass it as integer again.

/*
 * Copyright (c) 2009 Martin Decky
 * Copyright (c) 2009 Petr Tuma
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libc
 * @{
 */
/** @file
 */

#include <malloc.h>
#include <bool.h>
#include <as.h>
#include <align.h>
#include <macros.h>
#include <assert.h>
#include <errno.h>
#include <bitops.h>
#include <mem.h>
#include <futex.h>
#include <stdlib.h>
#include <adt/gcdlcm.h>
#include "private/malloc.h"

/** Magic used in heap headers. */
#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)

/** Magic used in heap footers. */
#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)

/** Magic used in heap descriptor. */
#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)

/** Allocation alignment.
 *
 * This also covers the alignment of fields
 * in the heap header and footer.
 *
 */
#define BASE_ALIGN 16

/** Heap shrink granularity
 *
 * Try not to pump and stress the heap too much
 * by shrinking and enlarging it too often.
 * A heap area won't be shrunk if the released
 * free block is smaller than this constant.
 *
 */
#define SHRINK_GRANULARITY (64 * PAGE_SIZE)

/** Overhead of each heap block. */
#define STRUCT_OVERHEAD \
	(sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))

/** Calculate real size of a heap block.
 *
 * Add header and footer size.
 *
 */
#define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)

/** Calculate net size of a heap block.
 *
 * Subtract header and footer size.
 *
 */
#define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)
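
/*
 * For example, a request whose payload was rounded up to 32 bytes
 * occupies GROSS_SIZE(32) == 32 + STRUCT_OVERHEAD bytes in its heap
 * area (header and footer included), and NET_SIZE() applied to that
 * gross size yields the 32 usable bytes again.
 */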

/** Get first block in heap area.
 *
 */
#define AREA_FIRST_BLOCK_HEAD(area) \
	(ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))

/** Get last block in heap area.
 *
 */
#define AREA_LAST_BLOCK_FOOT(area) \
	(((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))

/** Get header in heap block.
 *
 */
#define BLOCK_HEAD(foot) \
	((heap_block_head_t *) \
	    (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))

/** Get footer in heap block.
 *
 */
#define BLOCK_FOOT(head) \
	((heap_block_foot_t *) \
	    (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)))

/** Heap area.
 *
 * The memory managed by the heap allocator is divided into
 * multiple discontinuous heaps. Each heap is represented
 * by a separate address space area which has this structure
 * at its very beginning.
 *
 */
typedef struct heap_area {
	/** Start of the heap area (including this structure)
	 *
	 * Aligned on page boundary.
	 *
	 */
	void *start;

	/** End of the heap area (aligned on page boundary) */
	void *end;

	/** Previous heap area */
	struct heap_area *prev;

	/** Next heap area */
	struct heap_area *next;

	/** A magic value */
	uint32_t magic;
} heap_area_t;

/** Header of a heap block
 *
 */
typedef struct {
	/* Size of the block (including header and footer) */
	size_t size;

	/* Indication of a free block */
	bool free;

	/** Heap area this block belongs to */
	heap_area_t *area;

	/* A magic value to detect overwrite of heap header */
	uint32_t magic;
} heap_block_head_t;

/** Footer of a heap block
 *
 */
typedef struct {
	/* Size of the block (including header and footer) */
	size_t size;

	/* A magic value to detect overwrite of heap footer */
	uint32_t magic;
} heap_block_foot_t;
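
/*
 * The resulting layout of a single heap area, as implied by the macros
 * and structures above, is:
 *
 *   [ heap_area_t | padding up to BASE_ALIGN | block | block | ... | block ]
 *   ^ area->start                                                          ^ area->end
 *
 * where each block is:
 *
 *   [ heap_block_head_t | payload (NET_SIZE bytes) | heap_block_foot_t ]
 *
 * Both head->size and foot->size hold the gross size of the block, so the
 * allocator can walk forward (head + head->size) and backward (via the
 * footer located just before a header) without any explicit free list.
 */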

/** First heap area */
static heap_area_t *first_heap_area = NULL;

/** Last heap area */
static heap_area_t *last_heap_area = NULL;

/** Next heap block to examine (next fit algorithm) */
static heap_block_head_t *next = NULL;

/** Futex for thread-safe heap manipulation */
static futex_t malloc_futex = FUTEX_INITIALIZER;

#ifndef NDEBUG

#define malloc_assert(expr) \
	do { \
		if (!(expr)) {\
			futex_up(&malloc_futex); \
			assert_abort(#expr, __FILE__, __LINE__); \
		} \
	} while (0)

#else /* NDEBUG */

#define malloc_assert(expr)

#endif /* NDEBUG */

/** Initialize a heap block
 *
 * Fill in the structures related to a heap block.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 * @param size Size of the block including the header and the footer.
 * @param free Indication of a free block.
 * @param area Heap area the block belongs to.
 *
 */
static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
{
	/* Calculate the position of the header and the footer */
	heap_block_head_t *head = (heap_block_head_t *) addr;

	head->size = size;
	head->free = free;
	head->area = area;
	head->magic = HEAP_BLOCK_HEAD_MAGIC;

	heap_block_foot_t *foot = BLOCK_FOOT(head);

	foot->size = size;
	foot->magic = HEAP_BLOCK_FOOT_MAGIC;
}

/** Check a heap block
 *
 * Verifies that the structures related to a heap block still contain
 * the magic constants. This helps detect heap corruption early on.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 *
 */
static void block_check(void *addr)
{
	heap_block_head_t *head = (heap_block_head_t *) addr;

	malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);

	heap_block_foot_t *foot = BLOCK_FOOT(head);

	malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
	malloc_assert(head->size == foot->size);
}

/** Check a heap area structure
 *
 * Should be called only inside the critical section.
 *
 * @param addr Address of the heap area.
 *
 */
static void area_check(void *addr)
{
	heap_area_t *area = (heap_area_t *) addr;

	malloc_assert(area->magic == HEAP_AREA_MAGIC);
	malloc_assert(addr == area->start);
	malloc_assert(area->start < area->end);
	malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
	malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
}

/** Create new heap area
 *
 * Should be called only inside the critical section.
 *
 * @param size Size of the area.
 *
 */
static bool area_create(size_t size)
{
	void *start = as_get_mappable_page(size);
	if (start == NULL)
		return false;

	/* Align the heap area on page boundary */
	void *astart = (void *) ALIGN_UP((uintptr_t) start, PAGE_SIZE);
	size_t asize = ALIGN_UP(size, PAGE_SIZE);

	astart = as_area_create(astart, asize, AS_AREA_WRITE | AS_AREA_READ);
	if (astart == (void *) -1)
		return false;

	heap_area_t *area = (heap_area_t *) astart;

	area->start = astart;
	area->end = (void *) ((uintptr_t) astart + asize);
	area->prev = NULL;
	area->next = NULL;
	area->magic = HEAP_AREA_MAGIC;

	void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
	size_t bsize = (size_t) (area->end - block);

	block_init(block, bsize, true, area);

	if (last_heap_area == NULL) {
		first_heap_area = area;
		last_heap_area = area;
	} else {
		area->prev = last_heap_area;
		last_heap_area->next = area;
		last_heap_area = area;
	}

	return true;
}

/** Try to enlarge a heap area
 *
 * Should be called only inside the critical section.
 *
 * @param area Heap area to grow.
 * @param size Gross size to grow (bytes).
 *
 * @return True if successful.
 *
 */
static bool area_grow(heap_area_t *area, size_t size)
{
	if (size == 0)
		return true;

	area_check(area);

	/* New heap area size */
	size_t gross_size = (size_t) (area->end - area->start) + size;
	size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
	void *end = (void *) ((uintptr_t) area->start + asize);

	/* Check for overflow */
	if (end < area->start)
		return false;

	/* Resize the address space area */
	int ret = as_area_resize(area->start, asize, 0);
	if (ret != EOK)
		return false;

	/* Add new free block */
	size_t net_size = (size_t) (end - area->end);
	if (net_size > 0)
		block_init(area->end, net_size, true, area);

	/* Update heap area parameters */
	area->end = end;

	return true;
}

/** Try to enlarge any of the heap areas
 *
 * Should be called only inside the critical section.
 *
 * @param size Gross size of item to allocate (bytes).
 *
 */
static bool heap_grow(size_t size)
{
	if (size == 0)
		return true;

	/* First try to enlarge some existing area */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {
		if (area_grow(area, size))
			return true;
	}

	/* Failing that, try to create a new area */
	return area_create(AREA_FIRST_BLOCK_HEAD(size));
}

/** Try to shrink heap
 *
 * Should be called only inside the critical section.
 * In all cases the next pointer is reset.
 *
 * @param area Last modified heap area.
 *
 */
static void heap_shrink(heap_area_t *area)
{
	area_check(area);

	heap_block_foot_t *last_foot =
	    (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
	heap_block_head_t *last_head = BLOCK_HEAD(last_foot);

	block_check((void *) last_head);
	malloc_assert(last_head->area == area);

	if (last_head->free) {
		/*
		 * The last block of the heap area is
		 * unused. The area might be potentially
		 * shrunk.
		 */

		heap_block_head_t *first_head =
		    (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);

		block_check((void *) first_head);
		malloc_assert(first_head->area == area);

		size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);

		if (first_head == last_head) {
			/*
			 * The entire heap area consists of a single
			 * free heap block. This means we can get rid
			 * of it entirely.
			 */

			heap_area_t *prev = area->prev;
			heap_area_t *next = area->next;

			if (prev != NULL) {
				area_check(prev);
				prev->next = next;
			} else
				first_heap_area = next;

			if (next != NULL) {
				area_check(next);
				next->prev = prev;
			} else
				last_heap_area = prev;

			as_area_destroy(area->start);
		} else if (shrink_size >= SHRINK_GRANULARITY) {
			/*
			 * Make sure that we always shrink the area
			 * by a multiple of page size and update
			 * the block layout accordingly.
			 */

			size_t asize = (size_t) (area->end - area->start) - shrink_size;
			void *end = (void *) ((uintptr_t) area->start + asize);

			/* Resize the address space area */
			int ret = as_area_resize(area->start, asize, 0);
			if (ret != EOK)
				abort();

			/* Update heap area parameters */
			area->end = end;

			/* Update block layout */
			void *last = (void *) last_head;
			size_t excess = (size_t) (area->end - last);

			if (excess > 0) {
				if (excess >= STRUCT_OVERHEAD) {
					/*
					 * The previous block cannot be free and there
					 * is enough free space left in the area to
					 * create a new free block.
					 */
					block_init(last, excess, true, area);
				} else {
					/*
					 * The excess is small. Therefore just enlarge
					 * the previous block.
					 */
					heap_block_foot_t *prev_foot = (heap_block_foot_t *)
					    (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
					heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);

					block_check((void *) prev_head);

					block_init(prev_head, prev_head->size + excess,
					    prev_head->free, area);
				}
			}
		}
	}

	next = NULL;
}

/** Initialize the heap allocator
 *
 * Create initial heap memory area. This routine is
 * only called from libc initialization, thus we do not
 * take any locks.
 *
 */
void __malloc_init(void)
{
	if (!area_create(PAGE_SIZE))
		abort();
}

/** Split heap block and mark it as used.
 *
 * Should be called only inside the critical section.
 *
 * @param cur Heap block to split.
 * @param size Number of bytes to split and mark from the beginning
 *             of the block.
 *
 */
static void split_mark(heap_block_head_t *cur, const size_t size)
{
	malloc_assert(cur->size >= size);

	/* See if we should split the block. */
	size_t split_limit = GROSS_SIZE(size);

	if (cur->size > split_limit) {
		/* Block big enough -> split. */
		void *next = ((void *) cur) + size;
		block_init(next, cur->size - size, true, cur->area);
		block_init(cur, size, false, cur->area);
	} else {
		/* Block too small -> use as is. */
		cur->free = false;
	}
}
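
/*
 * Note on the split threshold used by split_mark() above: a free block
 * is split only when its size strictly exceeds GROSS_SIZE(size), i.e.
 * when the tail left over after carving out the requested bytes can
 * still hold its own header, footer and a non-empty payload. Otherwise
 * the whole block is handed out as is.
 */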

/** Allocate memory from heap area starting from given block
 *
 * Should be called only inside the critical section.
 * As a side effect this function also sets the next
 * pointer on successful allocation.
 *
 * @param area Heap area where to allocate from.
 * @param first_block Starting heap block.
 * @param final_block Heap block where to finish the search
 *                    (may be NULL).
 * @param real_size Gross number of bytes to allocate.
 * @param falign Physical alignment of the block.
 *
 * @return Address of the allocated block or NULL if there is
 *         not enough memory.
 *
 */
static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
    heap_block_head_t *final_block, size_t real_size, size_t falign)
{
	area_check((void *) area);
	malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) first_block < area->end);

	for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
	    cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
		block_check(cur);

		/* Finish searching on the final block */
		if ((final_block != NULL) && (cur == final_block))
			break;

		/* Try to find a block that is free and large enough. */
		if ((cur->free) && (cur->size >= real_size)) {
			/*
			 * We have found a suitable block.
			 * Check for alignment properties.
			 */
			void *addr = (void *)
			    ((uintptr_t) cur + sizeof(heap_block_head_t));
			void *aligned = (void *)
			    ALIGN_UP((uintptr_t) addr, falign);

			if (addr == aligned) {
				/* Exact block start including alignment. */
				split_mark(cur, real_size);

				next = cur;
				return addr;
			} else {
				/* Block start has to be aligned */
				size_t excess = (size_t) (aligned - addr);

				if (cur->size >= real_size + excess) {
					/*
					 * The current block is large enough to fit
					 * data in (including alignment).
					 */
					if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
						/*
						 * There is a block before the current block.
						 * This previous block can be enlarged to
						 * compensate for the alignment excess.
						 */
						heap_block_foot_t *prev_foot = (heap_block_foot_t *)
						    ((void *) cur - sizeof(heap_block_foot_t));

						heap_block_head_t *prev_head = (heap_block_head_t *)
						    ((void *) cur - prev_foot->size);

						block_check(prev_head);

						size_t reduced_size = cur->size - excess;
						heap_block_head_t *next_head = ((void *) cur) + excess;

						if ((!prev_head->free) &&
						    (excess >= STRUCT_OVERHEAD)) {
							/*
							 * The previous block is not free and there
							 * is enough free space left to fill in
							 * a new free block between the previous
							 * and current block.
							 */
							block_init(cur, excess, true, area);
						} else {
							/*
							 * The previous block is free (thus there
							 * is no need to induce additional
							 * fragmentation to the heap) or the
							 * excess is small. Therefore just enlarge
							 * the previous block.
							 */
							block_init(prev_head, prev_head->size + excess,
							    prev_head->free, area);
						}

						block_init(next_head, reduced_size, true, area);
						split_mark(next_head, real_size);

						next = next_head;
						return aligned;
					} else {
						/*
						 * The current block is the first block
						 * in the heap area. We have to make sure
						 * that the alignment excess is large enough
						 * to fit a new free block just before the
						 * current block.
						 */
						while (excess < STRUCT_OVERHEAD) {
							aligned += falign;
							excess += falign;
						}

						/* Check for current block size again */
						if (cur->size >= real_size + excess) {
							size_t reduced_size = cur->size - excess;
							cur = (heap_block_head_t *)
							    (AREA_FIRST_BLOCK_HEAD(area) + excess);

							block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
							    excess, true, area);
							block_init(cur, reduced_size, true, area);
							split_mark(cur, real_size);

							next = cur;
							return aligned;
						}
					}
				}
			}
		}
	}

	return NULL;
}

/** Allocate a memory block
 *
 * Should be called only inside the critical section.
 *
 * @param size The size of the block to allocate.
 * @param align Memory address alignment.
 *
 * @return Address of the allocated block or NULL if there is
 *         not enough memory.
 *
 */
static void *malloc_internal(const size_t size, const size_t align)
{
	malloc_assert(first_heap_area != NULL);

	if (align == 0)
		return NULL;

	size_t falign = lcm(align, BASE_ALIGN);
	size_t real_size = GROSS_SIZE(ALIGN_UP(size, falign));

	bool retry = false;
	heap_block_head_t *split;

loop:

	/* Try the next fit approach */
	split = next;

	if (split != NULL) {
		void *addr = malloc_area(split->area, split, NULL, real_size,
		    falign);

		if (addr != NULL)
			return addr;
	}

	/* Search the entire heap */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {
		heap_block_head_t *first = (heap_block_head_t *)
		    AREA_FIRST_BLOCK_HEAD(area);

		void *addr = malloc_area(area, first, split, real_size,
		    falign);

		if (addr != NULL)
			return addr;
	}

	if (!retry) {
		/* Try to grow the heap space */
		if (heap_grow(real_size)) {
			retry = true;
			goto loop;
		}
	}

	return NULL;
}

/** Allocate memory by number of elements
 *
 * @param nmemb Number of members to allocate.
 * @param size Size of one member in bytes.
 *
 * @return Allocated memory or NULL.
 *
 */
void *calloc(const size_t nmemb, const size_t size)
{
	void *block = malloc(nmemb * size);
	if (block == NULL)
		return NULL;

	memset(block, 0, nmemb * size);
	return block;
}

/** Allocate memory
 *
 * @param size Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *malloc(const size_t size)
{
	futex_down(&malloc_futex);
	void *block = malloc_internal(size, BASE_ALIGN);
	futex_up(&malloc_futex);

	return block;
}

/** Allocate memory with specified alignment
 *
 * @param align Alignment in bytes.
 * @param size Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *memalign(const size_t align, const size_t size)
{
	if (align == 0)
		return NULL;

	size_t palign =
	    1 << (fnzb(max(sizeof(void *), align) - 1) + 1);

	futex_down(&malloc_futex);
	void *block = malloc_internal(size, palign);
	futex_up(&malloc_futex);

	return block;
}
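
/*
 * Illustrative usage sketch (compiled out, not part of the allocator):
 * how a client would exercise the entry points defined in this file.
 * Note that memalign() rounds the requested alignment up to a power of
 * two no smaller than sizeof(void *), and malloc() always returns
 * BASE_ALIGN-aligned memory.
 */
#if 0
static void malloc_usage_example(void)
{
	/* Plain allocation. */
	char *buf = malloc(64);

	/* Zero-initialized array of elements. */
	int *nums = calloc(16, sizeof(int));

	/* Allocation with a stricter (power of two) alignment. */
	void *aligned = memalign(64, 256);

	if (buf != NULL)
		free(buf);
	if (nums != NULL)
		free(nums);
	if (aligned != NULL)
		free(aligned);
}
#endif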

/** Reallocate memory block
 *
 * @param addr Already allocated memory or NULL.
 * @param size New size of the memory block.
 *
 * @return Reallocated memory or NULL.
 *
 */
void *realloc(const void *addr, const size_t size)
{
	if (addr == NULL)
		return malloc(size);

	futex_down(&malloc_futex);

	/* Calculate the position of the header. */
	heap_block_head_t *head =
	    (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

	block_check(head);
	malloc_assert(!head->free);

	heap_area_t *area = head->area;

	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);

	void *ptr = NULL;
	bool reloc = false;
	size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
	size_t orig_size = head->size;

	if (orig_size > real_size) {
		/* Shrink */
		if (orig_size - real_size >= STRUCT_OVERHEAD) {
			/*
			 * Split the original block to a full block
			 * and a trailing free block.
			 */
			block_init((void *) head, real_size, false, area);
			block_init((void *) head + real_size,
			    orig_size - real_size, true, area);
			heap_shrink(area);
		}

		ptr = ((void *) head) + sizeof(heap_block_head_t);
	} else {
		/*
		 * Look at the next block. If it is free and the size is
		 * sufficient then merge the two. Otherwise just allocate
		 * a new block, copy the original data into it and
		 * free the original block.
		 */
		heap_block_head_t *next_head =
		    (heap_block_head_t *) (((void *) head) + head->size);

		if (((void *) next_head < area->end) &&
		    (head->size + next_head->size >= real_size) &&
		    (next_head->free)) {
			block_check(next_head);
			block_init(head, head->size + next_head->size, false, area);
			split_mark(head, real_size);

			ptr = ((void *) head) + sizeof(heap_block_head_t);
			next = NULL;
		} else
			reloc = true;
	}

	futex_up(&malloc_futex);

	if (reloc) {
		ptr = malloc(size);
		if (ptr != NULL) {
			memcpy(ptr, addr, NET_SIZE(orig_size));
			free(addr);
		}
	}

	return ptr;
}
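
/*
 * Illustrative sketch (compiled out): the usual safe-realloc pattern as
 * seen from a caller. Assigning the result to a temporary first avoids
 * losing the original block when realloc() returns NULL, because in
 * that case the old allocation above is left untouched.
 */
#if 0
static void realloc_usage_example(void)
{
	size_t count = 16;
	int *data = calloc(count, sizeof(int));
	if (data == NULL)
		return;

	int *grown = realloc(data, 2 * count * sizeof(int));
	if (grown != NULL)
		data = grown;  /* success: adopt the (possibly moved) block */

	/* On failure data still points to the original, valid block. */
	free(data);
}
#endif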

/** Free a memory block
 *
 * @param addr The address of the block.
 *
 */
void free(const void *addr)
{
	futex_down(&malloc_futex);

	/* Calculate the position of the header. */
	heap_block_head_t *head
	    = (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

	block_check(head);
	malloc_assert(!head->free);

	heap_area_t *area = head->area;

	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);

	/* Mark the block itself as free. */
	head->free = true;

	/* Look at the next block. If it is free, merge the two. */
	heap_block_head_t *next_head
	    = (heap_block_head_t *) (((void *) head) + head->size);

	if ((void *) next_head < area->end) {
		block_check(next_head);
		if (next_head->free)
			block_init(head, head->size + next_head->size, true, area);
	}

	/* Look at the previous block. If it is free, merge the two. */
	if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
		heap_block_foot_t *prev_foot =
		    (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));

		heap_block_head_t *prev_head =
		    (heap_block_head_t *) (((void *) head) - prev_foot->size);

		block_check(prev_head);

		if (prev_head->free)
			block_init(prev_head, prev_head->size + head->size, true,
			    area);
	}

	heap_shrink(area);

	futex_up(&malloc_futex);
}
void *heap_check(void)
{
	futex_down(&malloc_futex);

	if (first_heap_area == NULL) {
		futex_up(&malloc_futex);
		return (void *) -1;
	}

	/* Walk all heap areas */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {

		/* Check heap area consistency */
		if ((area->magic != HEAP_AREA_MAGIC) ||
		    ((void *) area != area->start) ||
		    (area->start >= area->end) ||
		    (((uintptr_t) area->start % PAGE_SIZE) != 0) ||
		    (((uintptr_t) area->end % PAGE_SIZE) != 0)) {
			futex_up(&malloc_futex);
			return (void *) area;
		}

		/* Walk all heap blocks */
		for (heap_block_head_t *head = (heap_block_head_t *)
		    AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end;
		    head = (heap_block_head_t *) (((void *) head) + head->size)) {

			/* Check heap block consistency */
			if (head->magic != HEAP_BLOCK_HEAD_MAGIC) {
				futex_up(&malloc_futex);
				return (void *) head;
			}

			heap_block_foot_t *foot = BLOCK_FOOT(head);

			if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) ||
			    (head->size != foot->size)) {
				futex_up(&malloc_futex);
				return (void *) foot;
			}
		}
	}

	futex_up(&malloc_futex);

	return NULL;
}
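
/*
 * Illustrative sketch (compiled out): how a debugging aid or test
 * harness might use heap_check(). NULL means the heap is consistent,
 * (void *) -1 means no heap area exists yet, and any other value is
 * the address of the first corrupted structure.
 */
#if 0
static void heap_check_example(void)
{
	void *bad = heap_check();
	if ((bad != NULL) && (bad != (void *) -1))
		abort();
}
#endif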

/** @}
 */