source: mainline/uspace/lib/c/generic/malloc.c@ 8895d05

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 8895d05 was 3019612, checked in by Martin Decky <martin@…>, 14 years ago

free(NULL) should be a no-op by convention

  • Property mode set to 100644
File size: 23.8 KB
RevLine 
[6db6fd1]1/*
2 * Copyright (c) 2009 Martin Decky
3 * Copyright (c) 2009 Petr Tuma
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup libc
31 * @{
32 */
33/** @file
34 */
35
36#include <malloc.h>
37#include <bool.h>
38#include <as.h>
39#include <align.h>
40#include <macros.h>
41#include <assert.h>
42#include <errno.h>
43#include <bitops.h>
44#include <mem.h>
[3292623]45#include <futex.h>
[d161715]46#include <stdlib.h>
[6db6fd1]47#include <adt/gcdlcm.h>
[47b7006]48#include "private/malloc.h"
[6db6fd1]49
/** Magic used in heap headers. */
#define HEAP_BLOCK_HEAD_MAGIC  UINT32_C(0xBEEF0101)

/** Magic used in heap footers. */
#define HEAP_BLOCK_FOOT_MAGIC  UINT32_C(0xBEEF0202)

/** Magic used in heap descriptor. */
#define HEAP_AREA_MAGIC  UINT32_C(0xBEEFCAFE)

/** Allocation alignment.
 *
 * This also covers the alignment of fields
 * in the heap header and footer.
 *
 */
#define BASE_ALIGN  16

/** Heap shrink granularity
 *
 * Try not to pump and stress the heap too much
 * by shrinking and enlarging it too often.
 * A heap area won't be shrunk if the released
 * free block is smaller than this constant.
 *
 */
#define SHRINK_GRANULARITY  (64 * PAGE_SIZE)

/** Overhead of each heap block. */
#define STRUCT_OVERHEAD \
	(sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))

/** Overhead of each area.
 *
 * The argument is parenthesized so the expansion stays correct
 * for expression arguments (e.g. AREA_OVERHEAD(a + b)).
 *
 */
#define AREA_OVERHEAD(size) \
	(ALIGN_UP((size) + sizeof(heap_area_t), BASE_ALIGN))

/** Calculate real size of a heap block.
 *
 * Add header and footer size.
 *
 */
#define GROSS_SIZE(size)  ((size) + STRUCT_OVERHEAD)

/** Calculate net size of a heap block.
 *
 * Subtract header and footer size.
 *
 */
#define NET_SIZE(size)  ((size) - STRUCT_OVERHEAD)

/** Get first block in heap area.
 *
 */
#define AREA_FIRST_BLOCK_HEAD(area) \
	(ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))

/** Get last block in heap area.
 *
 */
#define AREA_LAST_BLOCK_FOOT(area) \
	(((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))

/** Get header in heap block.
 *
 */
#define BLOCK_HEAD(foot) \
	((heap_block_head_t *) \
	    (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))

/** Get footer in heap block.
 *
 */
#define BLOCK_FOOT(head) \
	((heap_block_foot_t *) \
	    (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)))
/** Heap area.
 *
 * The memory managed by the heap allocator is divided into
 * multiple discontinuous heaps. Each heap is represented
 * by a separate address space area which has this structure
 * at its very beginning.
 *
 */
typedef struct heap_area {
	/** Start of the heap area (including this structure)
	 *
	 * Aligned on page boundary.
	 *
	 */
	void *start;
	
	/** End of the heap area (aligned on page boundary) */
	void *end;
	
	/** Previous heap area (NULL for the first area) */
	struct heap_area *prev;
	
	/** Next heap area (NULL for the last area) */
	struct heap_area *next;
	
	/** A magic value (HEAP_AREA_MAGIC) used to detect corruption */
	uint32_t magic;
} heap_area_t;

/** Header of a heap block
 *
 */
typedef struct {
	/* Size of the block (including header and footer) */
	size_t size;
	
	/* Indication of a free block */
	bool free;
	
	/** Heap area this block belongs to */
	heap_area_t *area;
	
	/* A magic value to detect overwrite of heap header */
	uint32_t magic;
} heap_block_head_t;

/** Footer of a heap block
 *
 * Mirrors the size stored in the header so that the previous
 * block's header can be located when walking backwards.
 *
 */
typedef struct {
	/* Size of the block (including header and footer) */
	size_t size;
	
	/* A magic value to detect overwrite of heap footer */
	uint32_t magic;
} heap_block_foot_t;

/** First heap area */
static heap_area_t *first_heap_area = NULL;

/** Last heap area */
static heap_area_t *last_heap_area = NULL;

/** Next heap block to examine (next fit algorithm) */
static heap_block_head_t *next_fit = NULL;

/** Futex for thread-safe heap manipulation */
static futex_t malloc_futex = FUTEX_INITIALIZER;
[6db6fd1]193
#ifndef NDEBUG

/** Assertion for use inside the heap critical section.
 *
 * Unlike a plain assert(), this releases the heap futex before
 * reporting the failure, so the process does not abort while
 * still holding malloc_futex.
 */
#define malloc_assert(expr) \
	do { \
		if (!(expr)) {\
			futex_up(&malloc_futex); \
			assert_abort(#expr, __FILE__, __LINE__); \
		} \
	} while (0)

#else /* NDEBUG */

#define malloc_assert(expr)

#endif /* NDEBUG */
[6db6fd1]210/** Initialize a heap block
211 *
[0b37882]212 * Fill in the structures related to a heap block.
[3292623]213 * Should be called only inside the critical section.
[6db6fd1]214 *
215 * @param addr Address of the block.
216 * @param size Size of the block including the header and the footer.
217 * @param free Indication of a free block.
[0b37882]218 * @param area Heap area the block belongs to.
[6db6fd1]219 *
220 */
[0b37882]221static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
[6db6fd1]222{
223 /* Calculate the position of the header and the footer */
224 heap_block_head_t *head = (heap_block_head_t *) addr;
225
226 head->size = size;
227 head->free = free;
[0b37882]228 head->area = area;
[6db6fd1]229 head->magic = HEAP_BLOCK_HEAD_MAGIC;
230
[0b37882]231 heap_block_foot_t *foot = BLOCK_FOOT(head);
232
[6db6fd1]233 foot->size = size;
234 foot->magic = HEAP_BLOCK_FOOT_MAGIC;
235}
236
237/** Check a heap block
238 *
239 * Verifies that the structures related to a heap block still contain
240 * the magic constants. This helps detect heap corruption early on.
[3292623]241 * Should be called only inside the critical section.
[6db6fd1]242 *
243 * @param addr Address of the block.
244 *
245 */
246static void block_check(void *addr)
247{
248 heap_block_head_t *head = (heap_block_head_t *) addr;
249
[13f2461]250 malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);
[6db6fd1]251
[0b37882]252 heap_block_foot_t *foot = BLOCK_FOOT(head);
[6db6fd1]253
[13f2461]254 malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
255 malloc_assert(head->size == foot->size);
[6db6fd1]256}
257
[0b37882]258/** Check a heap area structure
[013a5d7]259 *
260 * Should be called only inside the critical section.
[3292623]261 *
[0b37882]262 * @param addr Address of the heap area.
[3292623]263 *
[0b37882]264 */
265static void area_check(void *addr)
266{
267 heap_area_t *area = (heap_area_t *) addr;
268
[13f2461]269 malloc_assert(area->magic == HEAP_AREA_MAGIC);
270 malloc_assert(addr == area->start);
271 malloc_assert(area->start < area->end);
272 malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
273 malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
[0b37882]274}
275
/** Create new heap area
 *
 * Allocate a new address space area, place the heap_area_t
 * descriptor at its start and cover the remainder with a single
 * free block. Should be called only inside the critical section.
 *
 * @param size Size of the area.
 *
 * @return True on success, false if the address space area
 *         could not be obtained or created.
 *
 */
static bool area_create(size_t size)
{
	void *start = as_get_mappable_page(size);
	if (start == NULL)
		return false;
	
	/* Align the heap area on page boundary */
	void *astart = (void *) ALIGN_UP((uintptr_t) start, PAGE_SIZE);
	size_t asize = ALIGN_UP(size, PAGE_SIZE);
	
	astart = as_area_create(astart, asize, AS_AREA_WRITE | AS_AREA_READ);
	if (astart == (void *) -1)
		return false;
	
	/* The descriptor lives at the very beginning of the new area */
	heap_area_t *area = (heap_area_t *) astart;
	
	area->start = astart;
	area->end = (void *) ((uintptr_t) astart + asize);
	area->prev = NULL;
	area->next = NULL;
	area->magic = HEAP_AREA_MAGIC;
	
	/* Everything after the descriptor becomes one big free block */
	void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
	size_t bsize = (size_t) (area->end - block);
	
	block_init(block, bsize, true, area);
	
	/* Append the area to the doubly-linked list of heap areas */
	if (last_heap_area == NULL) {
		first_heap_area = area;
		last_heap_area = area;
	} else {
		area->prev = last_heap_area;
		last_heap_area->next = area;
		last_heap_area = area;
	}
	
	return true;
}
321
/** Try to enlarge a heap area
 *
 * Resize the underlying address space area and turn the newly
 * gained space into a free block at the end of the heap area.
 * Should be called only inside the critical section.
 *
 * @param area Heap area to grow.
 * @param size Gross size to grow (bytes).
 *
 * @return True if successful.
 *
 */
static bool area_grow(heap_area_t *area, size_t size)
{
	if (size == 0)
		return true;
	
	area_check(area);
	
	/* New heap area size (rounded up to whole pages) */
	size_t gross_size = (size_t) (area->end - area->start) + size;
	size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
	void *end = (void *) ((uintptr_t) area->start + asize);
	
	/* Check for overflow */
	if (end < area->start)
		return false;
	
	/* Resize the address space area */
	int ret = as_area_resize(area->start, asize, 0);
	if (ret != EOK)
		return false;
	
	/* Add new free block covering the gained space */
	size_t net_size = (size_t) (end - area->end);
	if (net_size > 0)
		block_init(area->end, net_size, true, area);
	
	/* Update heap area parameters */
	area->end = end;
	
	return true;
}
363
364/** Try to enlarge any of the heap areas
[013a5d7]365 *
366 * Should be called only inside the critical section.
[0b37882]367 *
368 * @param size Gross size of item to allocate (bytes).
369 *
370 */
371static bool heap_grow(size_t size)
372{
373 if (size == 0)
[6db6fd1]374 return true;
[0b37882]375
376 /* First try to enlarge some existing area */
[013a5d7]377 for (heap_area_t *area = first_heap_area; area != NULL;
378 area = area->next) {
[0b37882]379 if (area_grow(area, size))
380 return true;
[6db6fd1]381 }
382
[0b37882]383 /* Eventually try to create a new area */
[207533f]384 return area_create(AREA_OVERHEAD(size));
[6db6fd1]385}
386
/** Try to shrink heap
 *
 * If the last block of the given area is free, either destroy the
 * whole area (when the free block is the only block) or return a
 * page-aligned chunk of it back to the system (when at least
 * SHRINK_GRANULARITY bytes can be released).
 * Should be called only inside the critical section.
 * In all cases the next fit pointer is reset.
 *
 * @param area Last modified heap area.
 *
 */
static void heap_shrink(heap_area_t *area)
{
	area_check(area);
	
	heap_block_foot_t *last_foot =
	    (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
	heap_block_head_t *last_head = BLOCK_HEAD(last_foot);
	
	block_check((void *) last_head);
	malloc_assert(last_head->area == area);
	
	if (last_head->free) {
		/*
		 * The last block of the heap area is
		 * unused. The area might be potentially
		 * shrunk.
		 */
		
		heap_block_head_t *first_head =
		    (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);
		
		block_check((void *) first_head);
		malloc_assert(first_head->area == area);
		
		/* Only whole pages can be returned to the system */
		size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);
		
		if (first_head == last_head) {
			/*
			 * The entire heap area consists of a single
			 * free heap block. This means we can get rid
			 * of it entirely.
			 */
			
			heap_area_t *prev = area->prev;
			heap_area_t *next = area->next;
			
			/* Unlink the area from the doubly-linked list */
			if (prev != NULL) {
				area_check(prev);
				prev->next = next;
			} else
				first_heap_area = next;
			
			if (next != NULL) {
				area_check(next);
				next->prev = prev;
			} else
				last_heap_area = prev;
			
			as_area_destroy(area->start);
		} else if (shrink_size >= SHRINK_GRANULARITY) {
			/*
			 * Make sure that we always shrink the area
			 * by a multiple of page size and update
			 * the block layout accordingly.
			 */
			
			size_t asize = (size_t) (area->end - area->start) - shrink_size;
			void *end = (void *) ((uintptr_t) area->start + asize);
			
			/* Resize the address space area */
			int ret = as_area_resize(area->start, asize, 0);
			if (ret != EOK)
				abort();
			
			/* Update heap area parameters */
			area->end = end;
			/* Bytes left between the last block header and the new end */
			size_t excess = ((size_t) area->end) - ((size_t) last_head);
			
			if (excess > 0) {
				if (excess >= STRUCT_OVERHEAD) {
					/*
					 * The previous block cannot be free and there
					 * is enough free space left in the area to
					 * create a new free block.
					 */
					block_init((void *) last_head, excess, true, area);
				} else {
					/*
					 * The excess is small. Therefore just enlarge
					 * the previous block.
					 */
					heap_block_foot_t *prev_foot = (heap_block_foot_t *)
					    (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
					heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);
					
					block_check((void *) prev_head);
					
					block_init(prev_head, prev_head->size + excess,
					    prev_head->free, area);
				}
			}
		}
	}
	
	/* The next fit hint may now point into released memory */
	next_fit = NULL;
}
491
492/** Initialize the heap allocator
493 *
[47b7006]494 * Create initial heap memory area. This routine is
495 * only called from libc initialization, thus we do not
496 * take any locks.
[6db6fd1]497 *
498 */
[47b7006]499void __malloc_init(void)
[6db6fd1]500{
[0b37882]501 if (!area_create(PAGE_SIZE))
[47b7006]502 abort();
[6db6fd1]503}
504
[3292623]505/** Split heap block and mark it as used.
506 *
507 * Should be called only inside the critical section.
508 *
509 * @param cur Heap block to split.
510 * @param size Number of bytes to split and mark from the beginning
511 * of the block.
512 *
513 */
[6db6fd1]514static void split_mark(heap_block_head_t *cur, const size_t size)
515{
[13f2461]516 malloc_assert(cur->size >= size);
[6db6fd1]517
518 /* See if we should split the block. */
519 size_t split_limit = GROSS_SIZE(size);
520
521 if (cur->size > split_limit) {
522 /* Block big enough -> split. */
523 void *next = ((void *) cur) + size;
[0b37882]524 block_init(next, cur->size - size, true, cur->area);
525 block_init(cur, size, false, cur->area);
[6db6fd1]526 } else {
527 /* Block too small -> use as is. */
528 cur->free = false;
529 }
530}
531
/** Allocate memory from heap area starting from given block
 *
 * Walk the blocks of one heap area (next fit) looking for a free
 * block large enough to hold real_size bytes at the requested
 * alignment, splitting blocks and compensating for alignment
 * excess as needed.
 * Should be called only inside the critical section.
 * As a side effect this function also sets the next fit
 * pointer on successful allocation.
 *
 * @param area        Heap area where to allocate from.
 * @param first_block Starting heap block.
 * @param final_block Heap block where to finish the search
 *                    (may be NULL).
 * @param real_size   Gross number of bytes to allocate.
 * @param falign      Physical alignment of the block.
 *
 * @return Address of the allocated block or NULL on not enough memory.
 *
 */
static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
    heap_block_head_t *final_block, size_t real_size, size_t falign)
{
	area_check((void *) area);
	malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) first_block < area->end);
	
	/* Walk the blocks sequentially; each header stores the stride */
	for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
	    cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
		block_check(cur);
		
		/* Finish searching on the final block */
		if ((final_block != NULL) && (cur == final_block))
			break;
		
		/* Try to find a block that is free and large enough. */
		if ((cur->free) && (cur->size >= real_size)) {
			/*
			 * We have found a suitable block.
			 * Check for alignment properties.
			 */
			void *addr = (void *)
			    ((uintptr_t) cur + sizeof(heap_block_head_t));
			void *aligned = (void *)
			    ALIGN_UP((uintptr_t) addr, falign);
			
			if (addr == aligned) {
				/* Exact block start including alignment. */
				split_mark(cur, real_size);
				
				next_fit = cur;
				return addr;
			} else {
				/* Block start has to be aligned */
				size_t excess = (size_t) (aligned - addr);
				
				if (cur->size >= real_size + excess) {
					/*
					 * The current block is large enough to fit
					 * data in (including alignment).
					 */
					if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
						/*
						 * There is a block before the current block.
						 * This previous block can be enlarged to
						 * compensate for the alignment excess.
						 */
						heap_block_foot_t *prev_foot = (heap_block_foot_t *)
						    ((void *) cur - sizeof(heap_block_foot_t));
						
						heap_block_head_t *prev_head = (heap_block_head_t *)
						    ((void *) cur - prev_foot->size);
						
						block_check(prev_head);
						
						size_t reduced_size = cur->size - excess;
						heap_block_head_t *next_head = ((void *) cur) + excess;
						
						if ((!prev_head->free) &&
						    (excess >= STRUCT_OVERHEAD)) {
							/*
							 * The previous block is not free and there
							 * is enough free space left to fill in
							 * a new free block between the previous
							 * and current block.
							 */
							block_init(cur, excess, true, area);
						} else {
							/*
							 * The previous block is free (thus there
							 * is no need to induce additional
							 * fragmentation to the heap) or the
							 * excess is small. Therefore just enlarge
							 * the previous block.
							 */
							block_init(prev_head, prev_head->size + excess,
							    prev_head->free, area);
						}
						
						block_init(next_head, reduced_size, true, area);
						split_mark(next_head, real_size);
						
						next_fit = next_head;
						return aligned;
					} else {
						/*
						 * The current block is the first block
						 * in the heap area. We have to make sure
						 * that the alignment excess is large enough
						 * to fit a new free block just before the
						 * current block.
						 */
						while (excess < STRUCT_OVERHEAD) {
							aligned += falign;
							excess += falign;
						}
						
						/* Check for current block size again */
						if (cur->size >= real_size + excess) {
							size_t reduced_size = cur->size - excess;
							cur = (heap_block_head_t *)
							    (AREA_FIRST_BLOCK_HEAD(area) + excess);
							
							block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
							    excess, true, area);
							block_init(cur, reduced_size, true, area);
							split_mark(cur, real_size);
							
							next_fit = cur;
							return aligned;
						}
					}
				}
			}
		}
	}
	
	return NULL;
}
667
/** Allocate a memory block
 *
 * Try the next fit hint first, then scan all heap areas; if both
 * fail, grow the heap once and retry the whole search.
 * Should be called only inside the critical section.
 *
 * @param size  The size of the block to allocate.
 * @param align Memory address alignment.
 *
 * @return Address of the allocated block or NULL on not enough memory.
 *
 */
static void *malloc_internal(const size_t size, const size_t align)
{
	malloc_assert(first_heap_area != NULL);
	
	if (align == 0)
		return NULL;
	
	/* Effective alignment must satisfy both the caller and BASE_ALIGN */
	size_t falign = lcm(align, BASE_ALIGN);
	size_t real_size = GROSS_SIZE(ALIGN_UP(size, falign));
	
	bool retry = false;
	heap_block_head_t *split;
	
loop:
	
	/* Try the next fit approach */
	split = next_fit;
	
	if (split != NULL) {
		void *addr = malloc_area(split->area, split, NULL, real_size,
		    falign);
		
		if (addr != NULL)
			return addr;
	}
	
	/* Search the entire heap (stopping at the already-searched hint) */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {
		heap_block_head_t *first = (heap_block_head_t *)
		    AREA_FIRST_BLOCK_HEAD(area);
		
		void *addr = malloc_area(area, first, split, real_size,
		    falign);
		
		if (addr != NULL)
			return addr;
	}
	
	if (!retry) {
		/* Try to grow the heap space (a single retry only) */
		if (heap_grow(real_size)) {
			retry = true;
			goto loop;
		}
	}
	
	return NULL;
}
727
/** Allocate memory by number of elements
 *
 * The returned memory is zero-initialized. The nmemb * size
 * computation is checked for overflow so that a huge request
 * cannot silently wrap around and hand out a block smaller
 * than requested (required by the C standard for calloc).
 *
 * @param nmemb Number of members to allocate.
 * @param size  Size of one member in bytes.
 *
 * @return Allocated memory or NULL.
 *
 */
void *calloc(const size_t nmemb, const size_t size)
{
	/* Guard against overflow of nmemb * size */
	if ((size != 0) && (nmemb > ((size_t) -1) / size))
		return NULL;
	
	size_t total = nmemb * size;
	
	void *block = malloc(total);
	if (block == NULL)
		return NULL;
	
	memset(block, 0, total);
	return block;
}
745
[3292623]746/** Allocate memory
747 *
748 * @param size Number of bytes to allocate.
749 *
750 * @return Allocated memory or NULL.
751 *
752 */
[6db6fd1]753void *malloc(const size_t size)
754{
[3292623]755 futex_down(&malloc_futex);
756 void *block = malloc_internal(size, BASE_ALIGN);
757 futex_up(&malloc_futex);
758
759 return block;
[6db6fd1]760}
761
[3292623]762/** Allocate memory with specified alignment
763 *
764 * @param align Alignment in byes.
765 * @param size Number of bytes to allocate.
766 *
767 * @return Allocated memory or NULL.
768 *
769 */
[6db6fd1]770void *memalign(const size_t align, const size_t size)
771{
772 if (align == 0)
773 return NULL;
774
775 size_t palign =
776 1 << (fnzb(max(sizeof(void *), align) - 1) + 1);
777
[3292623]778 futex_down(&malloc_futex);
779 void *block = malloc_internal(size, palign);
780 futex_up(&malloc_futex);
781
782 return block;
[6db6fd1]783}
784
/** Reallocate memory block
 *
 * Shrink the block in place, grow it by merging with a following
 * free block, or - when neither is possible - allocate a new block,
 * copy the data and free the original.
 *
 * @param addr Already allocated memory or NULL.
 * @param size New size of the memory block.
 *
 * @return Reallocated memory or NULL.
 *
 */
void *realloc(const void *addr, const size_t size)
{
	if (addr == NULL)
		return malloc(size);
	
	futex_down(&malloc_futex);
	
	/* Calculate the position of the header. */
	heap_block_head_t *head =
	    (heap_block_head_t *) (addr - sizeof(heap_block_head_t));
	
	block_check(head);
	malloc_assert(!head->free);
	
	heap_area_t *area = head->area;
	
	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);
	
	void *ptr = NULL;
	bool reloc = false;
	size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
	size_t orig_size = head->size;
	
	if (orig_size > real_size) {
		/* Shrink */
		if (orig_size - real_size >= STRUCT_OVERHEAD) {
			/*
			 * Split the original block to a full block
			 * and a trailing free block.
			 */
			block_init((void *) head, real_size, false, area);
			block_init((void *) head + real_size,
			    orig_size - real_size, true, area);
			heap_shrink(area);
		}
		
		ptr = ((void *) head) + sizeof(heap_block_head_t);
	} else {
		/*
		 * Look at the next block. If it is free and the size is
		 * sufficient then merge the two. Otherwise just allocate
		 * a new block, copy the original data into it and
		 * free the original block.
		 */
		heap_block_head_t *next_head =
		    (heap_block_head_t *) (((void *) head) + head->size);
		
		if (((void *) next_head < area->end) &&
		    (head->size + next_head->size >= real_size) &&
		    (next_head->free)) {
			block_check(next_head);
			block_init(head, head->size + next_head->size, false, area);
			split_mark(head, real_size);
			
			ptr = ((void *) head) + sizeof(heap_block_head_t);
			next_fit = NULL;
		} else
			reloc = true;
	}
	
	futex_up(&malloc_futex);
	
	/* Relocation happens outside the critical section because
	   malloc() and free() take the futex themselves. */
	if (reloc) {
		ptr = malloc(size);
		if (ptr != NULL) {
			memcpy(ptr, addr, NET_SIZE(orig_size));
			free(addr);
		}
	}
	
	return ptr;
}
867
/** Free a memory block
 *
 * Mark the block as free, coalesce it with adjacent free blocks
 * and then try to return unused pages of the heap area back to
 * the system. free(NULL) is a no-op.
 *
 * @param addr The address of the block.
 *
 */
void free(const void *addr)
{
	if (addr == NULL)
		return;
	
	futex_down(&malloc_futex);
	
	/* Calculate the position of the header. */
	heap_block_head_t *head
	    = (heap_block_head_t *) (addr - sizeof(heap_block_head_t));
	
	block_check(head);
	malloc_assert(!head->free);
	
	heap_area_t *area = head->area;
	
	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);
	
	/* Mark the block itself as free. */
	head->free = true;
	
	/* Look at the next block. If it is free, merge the two. */
	heap_block_head_t *next_head
	    = (heap_block_head_t *) (((void *) head) + head->size);
	
	if ((void *) next_head < area->end) {
		block_check(next_head);
		if (next_head->free)
			block_init(head, head->size + next_head->size, true, area);
	}
	
	/* Look at the previous block. If it is free, merge the two. */
	if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
		heap_block_foot_t *prev_foot =
		    (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));
		
		heap_block_head_t *prev_head =
		    (heap_block_head_t *) (((void *) head) - prev_foot->size);
		
		block_check(prev_head);
		
		if (prev_head->free)
			block_init(prev_head, prev_head->size + head->size, true,
			    area);
	}
	
	/* Possibly give pages back to the system */
	heap_shrink(area);
	
	futex_up(&malloc_futex);
}
925
/** Check consistency of the entire heap
 *
 * Walk every heap area and every block within it, verifying magic
 * values and matching header/footer sizes. Takes the heap futex
 * for the duration of the walk.
 *
 * @return NULL if the heap is consistent, (void *) -1 if no heap
 *         area exists yet, otherwise the address of the first
 *         corrupted structure found (area, block head or foot).
 *
 */
void *heap_check(void)
{
	futex_down(&malloc_futex);
	
	if (first_heap_area == NULL) {
		futex_up(&malloc_futex);
		return (void *) -1;
	}
	
	/* Walk all heap areas */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {
		
		/* Check heap area consistency */
		if ((area->magic != HEAP_AREA_MAGIC) ||
		    ((void *) area != area->start) ||
		    (area->start >= area->end) ||
		    (((uintptr_t) area->start % PAGE_SIZE) != 0) ||
		    (((uintptr_t) area->end % PAGE_SIZE) != 0)) {
			futex_up(&malloc_futex);
			return (void *) area;
		}
		
		/* Walk all heap blocks */
		for (heap_block_head_t *head = (heap_block_head_t *)
		    AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end;
		    head = (heap_block_head_t *) (((void *) head) + head->size)) {
			
			/* Check heap block consistency */
			if (head->magic != HEAP_BLOCK_HEAD_MAGIC) {
				futex_up(&malloc_futex);
				return (void *) head;
			}
			
			heap_block_foot_t *foot = BLOCK_FOOT(head);
			
			if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) ||
			    (head->size != foot->size)) {
				futex_up(&malloc_futex);
				return (void *) foot;
			}
		}
	}
	
	futex_up(&malloc_futex);
	
	return NULL;
}
974
[6db6fd1]975/** @}
976 */
Note: See TracBrowser for help on using the repository browser.