source: mainline/uspace/lib/c/generic/malloc.c@ 2902e1bb

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 2902e1bb was faba839, checked in by Martin Decky <martin@…>, 13 years ago

use symbolic values for address space constants

  • Property mode set to 100644
File size: 23.7 KB
RevLine 
[6db6fd1]1/*
2 * Copyright (c) 2009 Martin Decky
3 * Copyright (c) 2009 Petr Tuma
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup libc
31 * @{
32 */
33/** @file
34 */
35
36#include <malloc.h>
37#include <bool.h>
38#include <as.h>
39#include <align.h>
40#include <macros.h>
41#include <assert.h>
42#include <errno.h>
43#include <bitops.h>
44#include <mem.h>
[3292623]45#include <futex.h>
[d161715]46#include <stdlib.h>
[6db6fd1]47#include <adt/gcdlcm.h>
[47b7006]48#include "private/malloc.h"
[6db6fd1]49
/** Magic used in heap headers. */
#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)

/** Magic used in heap footers. */
#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)

/** Magic used in heap descriptor. */
#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)

/** Allocation alignment.
 *
 * This also covers the alignment of fields
 * in the heap header and footer.
 *
 */
#define BASE_ALIGN 16

/** Heap shrink granularity
 *
 * Try not to pump and stress the heap too much
 * by shrinking and enlarging it too often.
 * A heap area won't be shrunk if the released
 * free block is smaller than this constant.
 *
 */
#define SHRINK_GRANULARITY (64 * PAGE_SIZE)

/** Overhead of each heap block. */
#define STRUCT_OVERHEAD \
	(sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))

/** Overhead of each area.
 *
 * Note: the argument is parenthesized so that compound
 * expressions passed by callers expand correctly.
 *
 */
#define AREA_OVERHEAD(size) \
	(ALIGN_UP((size) + sizeof(heap_area_t), BASE_ALIGN))

/** Calculate real size of a heap block.
 *
 * Add header and footer size.
 *
 */
#define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)

/** Calculate net size of a heap block.
 *
 * Subtract header and footer size.
 *
 */
#define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)

/** Get first block in heap area.
 *
 * The first block header starts at the first BASE_ALIGN
 * boundary past the area descriptor.
 *
 */
#define AREA_FIRST_BLOCK_HEAD(area) \
	(ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))

/** Get last block in heap area.
 *
 * The last block footer ends exactly at the area end.
 *
 */
#define AREA_LAST_BLOCK_FOOT(area) \
	(((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))

/** Get header in heap block.
 *
 * Walk backwards from a footer using the size stored in it.
 *
 */
#define BLOCK_HEAD(foot) \
	((heap_block_head_t *) \
	    (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))

/** Get footer in heap block.
 *
 * Walk forwards from a header using the size stored in it.
 *
 */
#define BLOCK_FOOT(head) \
	((heap_block_foot_t *) \
	    (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)))
[0b37882]124
/** Heap area.
 *
 * The memory managed by the heap allocator is divided into
 * multiple discontinuous heaps. Each heap is represented
 * by a separate address space area which has this structure
 * at its very beginning.
 *
 * Areas are kept in a doubly-linked list anchored by
 * first_heap_area / last_heap_area below.
 *
 */
typedef struct heap_area {
	/** Start of the heap area (including this structure)
	 *
	 * Aligned on page boundary.
	 *
	 */
	void *start;

	/** End of the heap area (aligned on page boundary) */
	void *end;

	/** Previous heap area */
	struct heap_area *prev;

	/** Next heap area */
	struct heap_area *next;

	/** A magic value (HEAP_AREA_MAGIC) to detect descriptor corruption */
	uint32_t magic;
} heap_area_t;

/** Header of a heap block
 *
 * Placed immediately before the payload returned to the user.
 *
 */
typedef struct {
	/* Size of the block (including header and footer) */
	size_t size;

	/* Indication of a free block */
	bool free;

	/** Heap area this block belongs to */
	heap_area_t *area;

	/* A magic value to detect overwrite of heap header */
	uint32_t magic;
} heap_block_head_t;

/** Footer of a heap block
 *
 * Duplicates the size so the previous block can be found
 * when coalescing backwards (see BLOCK_HEAD()).
 *
 */
typedef struct {
	/* Size of the block (including header and footer) */
	size_t size;

	/* A magic value to detect overwrite of heap footer */
	uint32_t magic;
} heap_block_foot_t;

/** First heap area */
static heap_area_t *first_heap_area = NULL;

/** Last heap area */
static heap_area_t *last_heap_area = NULL;

/** Next heap block to examine (next fit algorithm) */
static heap_block_head_t *next_fit = NULL;

/** Futex for thread-safe heap manipulation */
static futex_t malloc_futex = FUTEX_INITIALIZER;
[6db6fd1]193
#ifndef NDEBUG

/*
 * Heap-internal assertion.
 *
 * Unlike a bare assert(), this releases the heap futex before
 * aborting: assert_abort() may itself trigger allocation (e.g.
 * while formatting output), which would deadlock on the
 * already-held malloc_futex.
 */
#define malloc_assert(expr) \
	do { \
		if (!(expr)) {\
			futex_up(&malloc_futex); \
			assert_abort(#expr, __FILE__, __LINE__); \
		} \
	} while (0)

#else /* NDEBUG */

#define malloc_assert(expr)

#endif /* NDEBUG */
209
[6db6fd1]210/** Initialize a heap block
211 *
[0b37882]212 * Fill in the structures related to a heap block.
[3292623]213 * Should be called only inside the critical section.
[6db6fd1]214 *
215 * @param addr Address of the block.
216 * @param size Size of the block including the header and the footer.
217 * @param free Indication of a free block.
[0b37882]218 * @param area Heap area the block belongs to.
[6db6fd1]219 *
220 */
[0b37882]221static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
[6db6fd1]222{
223 /* Calculate the position of the header and the footer */
224 heap_block_head_t *head = (heap_block_head_t *) addr;
225
226 head->size = size;
227 head->free = free;
[0b37882]228 head->area = area;
[6db6fd1]229 head->magic = HEAP_BLOCK_HEAD_MAGIC;
230
[0b37882]231 heap_block_foot_t *foot = BLOCK_FOOT(head);
232
[6db6fd1]233 foot->size = size;
234 foot->magic = HEAP_BLOCK_FOOT_MAGIC;
235}
236
237/** Check a heap block
238 *
239 * Verifies that the structures related to a heap block still contain
240 * the magic constants. This helps detect heap corruption early on.
[3292623]241 * Should be called only inside the critical section.
[6db6fd1]242 *
243 * @param addr Address of the block.
244 *
245 */
246static void block_check(void *addr)
247{
248 heap_block_head_t *head = (heap_block_head_t *) addr;
249
[13f2461]250 malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);
[6db6fd1]251
[0b37882]252 heap_block_foot_t *foot = BLOCK_FOOT(head);
[6db6fd1]253
[13f2461]254 malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
255 malloc_assert(head->size == foot->size);
[6db6fd1]256}
257
[0b37882]258/** Check a heap area structure
[013a5d7]259 *
260 * Should be called only inside the critical section.
[3292623]261 *
[0b37882]262 * @param addr Address of the heap area.
[3292623]263 *
[0b37882]264 */
265static void area_check(void *addr)
266{
267 heap_area_t *area = (heap_area_t *) addr;
268
[13f2461]269 malloc_assert(area->magic == HEAP_AREA_MAGIC);
270 malloc_assert(addr == area->start);
271 malloc_assert(area->start < area->end);
272 malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
273 malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
[0b37882]274}
275
276/** Create new heap area
277 *
[013a5d7]278 * Should be called only inside the critical section.
279 *
280 * @param size Size of the area.
[3292623]281 *
282 */
[0b37882]283static bool area_create(size_t size)
[6db6fd1]284{
[fbcdeb8]285 /* Align the heap area size on page boundary */
[0b37882]286 size_t asize = ALIGN_UP(size, PAGE_SIZE);
[faba839]287 void *astart = as_area_create(AS_AREA_ANY, asize,
[fbcdeb8]288 AS_AREA_WRITE | AS_AREA_READ);
[faba839]289 if (astart == AS_MAP_FAILED)
[e70edd1]290 return false;
[6db6fd1]291
[0b37882]292 heap_area_t *area = (heap_area_t *) astart;
293
294 area->start = astart;
[013a5d7]295 area->end = (void *) ((uintptr_t) astart + asize);
296 area->prev = NULL;
[0b37882]297 area->next = NULL;
298 area->magic = HEAP_AREA_MAGIC;
299
[013a5d7]300 void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
[0b37882]301 size_t bsize = (size_t) (area->end - block);
302
303 block_init(block, bsize, true, area);
304
305 if (last_heap_area == NULL) {
306 first_heap_area = area;
307 last_heap_area = area;
308 } else {
[013a5d7]309 area->prev = last_heap_area;
[0b37882]310 last_heap_area->next = area;
311 last_heap_area = area;
312 }
313
314 return true;
315}
316
317/** Try to enlarge a heap area
[013a5d7]318 *
319 * Should be called only inside the critical section.
[0b37882]320 *
321 * @param area Heap area to grow.
[013a5d7]322 * @param size Gross size to grow (bytes).
323 *
324 * @return True if successful.
[0b37882]325 *
326 */
327static bool area_grow(heap_area_t *area, size_t size)
328{
329 if (size == 0)
330 return true;
331
332 area_check(area);
333
334 /* New heap area size */
[013a5d7]335 size_t gross_size = (size_t) (area->end - area->start) + size;
336 size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
337 void *end = (void *) ((uintptr_t) area->start + asize);
[0b37882]338
339 /* Check for overflow */
340 if (end < area->start)
341 return false;
[6db6fd1]342
[0b37882]343 /* Resize the address space area */
344 int ret = as_area_resize(area->start, asize, 0);
345 if (ret != EOK)
[6db6fd1]346 return false;
347
[0b37882]348 /* Add new free block */
[013a5d7]349 size_t net_size = (size_t) (end - area->end);
350 if (net_size > 0)
351 block_init(area->end, net_size, true, area);
[6db6fd1]352
[0b37882]353 /* Update heap area parameters */
354 area->end = end;
355
356 return true;
357}
358
359/** Try to enlarge any of the heap areas
[013a5d7]360 *
361 * Should be called only inside the critical section.
[0b37882]362 *
363 * @param size Gross size of item to allocate (bytes).
364 *
365 */
366static bool heap_grow(size_t size)
367{
368 if (size == 0)
[6db6fd1]369 return true;
[0b37882]370
371 /* First try to enlarge some existing area */
[013a5d7]372 for (heap_area_t *area = first_heap_area; area != NULL;
373 area = area->next) {
[0b37882]374 if (area_grow(area, size))
375 return true;
[6db6fd1]376 }
377
[0b37882]378 /* Eventually try to create a new area */
[207533f]379 return area_create(AREA_OVERHEAD(size));
[6db6fd1]380}
381
/** Try to shrink heap
 *
 * If the last block of the given area is free, either destroy
 * the whole area (when it consists of that single free block)
 * or give whole pages back to the kernel once the free tail
 * reaches SHRINK_GRANULARITY.
 *
 * Should be called only inside the critical section.
 * In all cases the next pointer is reset.
 *
 * @param area Last modified heap area.
 *
 */
static void heap_shrink(heap_area_t *area)
{
	area_check(area);

	/* Locate the last block via the footer at the area end. */
	heap_block_foot_t *last_foot =
	    (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
	heap_block_head_t *last_head = BLOCK_HEAD(last_foot);

	block_check((void *) last_head);
	malloc_assert(last_head->area == area);

	if (last_head->free) {
		/*
		 * The last block of the heap area is
		 * unused. The area might be potentially
		 * shrunk.
		 */

		heap_block_head_t *first_head =
		    (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);

		block_check((void *) first_head);
		malloc_assert(first_head->area == area);

		/* Only whole pages can be returned to the kernel. */
		size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);

		if (first_head == last_head) {
			/*
			 * The entire heap area consists of a single
			 * free heap block. This means we can get rid
			 * of it entirely.
			 */

			heap_area_t *prev = area->prev;
			heap_area_t *next = area->next;

			/* Unlink the area from the doubly-linked list. */
			if (prev != NULL) {
				area_check(prev);
				prev->next = next;
			} else
				first_heap_area = next;

			if (next != NULL) {
				area_check(next);
				next->prev = prev;
			} else
				last_heap_area = prev;

			as_area_destroy(area->start);
		} else if (shrink_size >= SHRINK_GRANULARITY) {
			/*
			 * Make sure that we always shrink the area
			 * by a multiple of page size and update
			 * the block layout accordingly.
			 */

			size_t asize = (size_t) (area->end - area->start) - shrink_size;
			void *end = (void *) ((uintptr_t) area->start + asize);

			/* Resize the address space area */
			int ret = as_area_resize(area->start, asize, 0);
			if (ret != EOK)
				abort();

			/* Update heap area parameters */
			area->end = end;
			/*
			 * NOTE(review): pointers are cast via size_t here;
			 * uintptr_t would be the strictly correct type for
			 * pointer-to-integer conversion — confirm they are
			 * the same width on all supported targets.
			 */
			size_t excess = ((size_t) area->end) - ((size_t) last_head);

			if (excess > 0) {
				if (excess >= STRUCT_OVERHEAD) {
					/*
					 * The previous block cannot be free and there
					 * is enough free space left in the area to
					 * create a new free block.
					 */
					block_init((void *) last_head, excess, true, area);
				} else {
					/*
					 * The excess is small. Therefore just enlarge
					 * the previous block.
					 */
					heap_block_foot_t *prev_foot = (heap_block_foot_t *)
					    (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
					heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);

					block_check((void *) prev_head);

					block_init(prev_head, prev_head->size + excess,
					    prev_head->free, area);
				}
			}
		}
	}

	/* The next-fit hint may now point into freed memory; reset it. */
	next_fit = NULL;
}
486
487/** Initialize the heap allocator
488 *
[47b7006]489 * Create initial heap memory area. This routine is
490 * only called from libc initialization, thus we do not
491 * take any locks.
[6db6fd1]492 *
493 */
[47b7006]494void __malloc_init(void)
[6db6fd1]495{
[0b37882]496 if (!area_create(PAGE_SIZE))
[47b7006]497 abort();
[6db6fd1]498}
499
[3292623]500/** Split heap block and mark it as used.
501 *
502 * Should be called only inside the critical section.
503 *
504 * @param cur Heap block to split.
505 * @param size Number of bytes to split and mark from the beginning
506 * of the block.
507 *
508 */
[6db6fd1]509static void split_mark(heap_block_head_t *cur, const size_t size)
510{
[13f2461]511 malloc_assert(cur->size >= size);
[6db6fd1]512
513 /* See if we should split the block. */
514 size_t split_limit = GROSS_SIZE(size);
515
516 if (cur->size > split_limit) {
517 /* Block big enough -> split. */
518 void *next = ((void *) cur) + size;
[0b37882]519 block_init(next, cur->size - size, true, cur->area);
520 block_init(cur, size, false, cur->area);
[6db6fd1]521 } else {
522 /* Block too small -> use as is. */
523 cur->free = false;
524 }
525}
526
/** Allocate memory from heap area starting from given block
 *
 * Scan the area from first_block using the next-fit strategy,
 * honouring the requested physical alignment. When the aligned
 * start does not coincide with the block start, the alignment
 * excess is either turned into a new free block or merged into
 * the preceding block.
 *
 * Should be called only inside the critical section.
 * As a side effect this function also sets the next_fit
 * pointer on successful allocation.
 *
 * @param area Heap area where to allocate from.
 * @param first_block Starting heap block.
 * @param final_block Heap block where to finish the search
 *                    (may be NULL).
 * @param real_size Gross number of bytes to allocate.
 * @param falign Physical alignment of the block.
 *
 * @return Address of the allocated block or NULL on not enough memory.
 *
 */
static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
    heap_block_head_t *final_block, size_t real_size, size_t falign)
{
	area_check((void *) area);
	malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) first_block < area->end);

	/* Walk the blocks of the area in address order. */
	for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
	    cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
		block_check(cur);

		/* Finish searching on the final block */
		if ((final_block != NULL) && (cur == final_block))
			break;

		/* Try to find a block that is free and large enough. */
		if ((cur->free) && (cur->size >= real_size)) {
			/*
			 * We have found a suitable block.
			 * Check for alignment properties.
			 */
			void *addr = (void *)
			    ((uintptr_t) cur + sizeof(heap_block_head_t));
			void *aligned = (void *)
			    ALIGN_UP((uintptr_t) addr, falign);

			if (addr == aligned) {
				/* Exact block start including alignment. */
				split_mark(cur, real_size);

				next_fit = cur;
				return addr;
			} else {
				/* Block start has to be aligned */
				size_t excess = (size_t) (aligned - addr);

				if (cur->size >= real_size + excess) {
					/*
					 * The current block is large enough to fit
					 * data in (including alignment).
					 */
					if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
						/*
						 * There is a block before the current block.
						 * This previous block can be enlarged to
						 * compensate for the alignment excess.
						 */
						heap_block_foot_t *prev_foot = (heap_block_foot_t *)
						    ((void *) cur - sizeof(heap_block_foot_t));

						heap_block_head_t *prev_head = (heap_block_head_t *)
						    ((void *) cur - prev_foot->size);

						block_check(prev_head);

						size_t reduced_size = cur->size - excess;
						heap_block_head_t *next_head = ((void *) cur) + excess;

						if ((!prev_head->free) &&
						    (excess >= STRUCT_OVERHEAD)) {
							/*
							 * The previous block is not free and there
							 * is enough free space left to fill in
							 * a new free block between the previous
							 * and current block.
							 */
							block_init(cur, excess, true, area);
						} else {
							/*
							 * The previous block is free (thus there
							 * is no need to induce additional
							 * fragmentation to the heap) or the
							 * excess is small. Therefore just enlarge
							 * the previous block.
							 */
							block_init(prev_head, prev_head->size + excess,
							    prev_head->free, area);
						}

						block_init(next_head, reduced_size, true, area);
						split_mark(next_head, real_size);

						next_fit = next_head;
						return aligned;
					} else {
						/*
						 * The current block is the first block
						 * in the heap area. We have to make sure
						 * that the alignment excess is large enough
						 * to fit a new free block just before the
						 * current block.
						 */
						while (excess < STRUCT_OVERHEAD) {
							aligned += falign;
							excess += falign;
						}

						/* Check for current block size again */
						if (cur->size >= real_size + excess) {
							size_t reduced_size = cur->size - excess;
							cur = (heap_block_head_t *)
							    (AREA_FIRST_BLOCK_HEAD(area) + excess);

							block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
							    excess, true, area);
							block_init(cur, reduced_size, true, area);
							split_mark(cur, real_size);

							next_fit = cur;
							return aligned;
						}
					}
				}
			}
		}
	}

	return NULL;
}
662
/** Allocate a memory block
 *
 * First try the next-fit hint, then scan all heap areas.
 * If nothing fits, grow the heap once and retry the whole
 * search (the retry flag prevents an endless loop).
 *
 * Should be called only inside the critical section.
 *
 * @param size The size of the block to allocate.
 * @param align Memory address alignment.
 *
 * @return Address of the allocated block or NULL on not enough memory.
 *
 */
static void *malloc_internal(const size_t size, const size_t align)
{
	malloc_assert(first_heap_area != NULL);

	if (align == 0)
		return NULL;

	/* Effective alignment satisfies both the caller and BASE_ALIGN. */
	size_t falign = lcm(align, BASE_ALIGN);
	size_t real_size = GROSS_SIZE(ALIGN_UP(size, falign));

	bool retry = false;
	heap_block_head_t *split;

loop:

	/* Try the next fit approach */
	split = next_fit;

	if (split != NULL) {
		void *addr = malloc_area(split->area, split, NULL, real_size,
		    falign);

		if (addr != NULL)
			return addr;
	}

	/* Search the entire heap */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {
		heap_block_head_t *first = (heap_block_head_t *)
		    AREA_FIRST_BLOCK_HEAD(area);

		/* Stop at the next-fit block (already searched above). */
		void *addr = malloc_area(area, first, split, real_size,
		    falign);

		if (addr != NULL)
			return addr;
	}

	if (!retry) {
		/* Try to grow the heap space */
		if (heap_grow(real_size)) {
			retry = true;
			goto loop;
		}
	}

	return NULL;
}
722
/** Allocate memory by number of elements
 *
 * The returned memory is zero-initialized. NULL is returned
 * when the total size nmemb * size would overflow size_t
 * (which would otherwise cause a short allocation followed
 * by a massive out-of-bounds memset).
 *
 * @param nmemb Number of members to allocate.
 * @param size Size of one member in bytes.
 *
 * @return Allocated memory or NULL.
 *
 */
void *calloc(const size_t nmemb, const size_t size)
{
	/* Guard against overflow of nmemb * size. */
	if ((size != 0) && (nmemb > ((size_t) -1) / size))
		return NULL;

	size_t total = nmemb * size;

	void *block = malloc(total);
	if (block == NULL)
		return NULL;

	memset(block, 0, total);
	return block;
}
740
[3292623]741/** Allocate memory
742 *
743 * @param size Number of bytes to allocate.
744 *
745 * @return Allocated memory or NULL.
746 *
747 */
[6db6fd1]748void *malloc(const size_t size)
749{
[3292623]750 futex_down(&malloc_futex);
751 void *block = malloc_internal(size, BASE_ALIGN);
752 futex_up(&malloc_futex);
753
754 return block;
[6db6fd1]755}
756
[3292623]757/** Allocate memory with specified alignment
758 *
759 * @param align Alignment in byes.
760 * @param size Number of bytes to allocate.
761 *
762 * @return Allocated memory or NULL.
763 *
764 */
[6db6fd1]765void *memalign(const size_t align, const size_t size)
766{
767 if (align == 0)
768 return NULL;
769
770 size_t palign =
771 1 << (fnzb(max(sizeof(void *), align) - 1) + 1);
772
[3292623]773 futex_down(&malloc_futex);
774 void *block = malloc_internal(size, palign);
775 futex_up(&malloc_futex);
776
777 return block;
[6db6fd1]778}
779
/** Reallocate memory block
 *
 * Shrinking is done in place (splitting off a trailing free
 * block). Growing is done in place only when the immediately
 * following block is free and large enough; otherwise a new
 * block is allocated, the data copied and the old block freed.
 *
 * @param addr Already allocated memory or NULL.
 * @param size New size of the memory block.
 *
 * @return Reallocated memory or NULL.
 *
 */
void *realloc(const void *addr, const size_t size)
{
	if (addr == NULL)
		return malloc(size);

	futex_down(&malloc_futex);

	/* Calculate the position of the header. */
	heap_block_head_t *head =
	    (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

	block_check(head);
	malloc_assert(!head->free);

	heap_area_t *area = head->area;

	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);

	void *ptr = NULL;
	bool reloc = false;
	size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
	size_t orig_size = head->size;

	if (orig_size > real_size) {
		/* Shrink */
		if (orig_size - real_size >= STRUCT_OVERHEAD) {
			/*
			 * Split the original block to a full block
			 * and a trailing free block.
			 */
			block_init((void *) head, real_size, false, area);
			block_init((void *) head + real_size,
			    orig_size - real_size, true, area);
			heap_shrink(area);
		}

		ptr = ((void *) head) + sizeof(heap_block_head_t);
	} else {
		/*
		 * Look at the next block. If it is free and the size is
		 * sufficient then merge the two. Otherwise just allocate
		 * a new block, copy the original data into it and
		 * free the original block.
		 */
		heap_block_head_t *next_head =
		    (heap_block_head_t *) (((void *) head) + head->size);

		/*
		 * NOTE(review): next_head->size and next_head->free are
		 * read before block_check(next_head) runs — confirm this
		 * ordering is intentional (a corrupted size would be used
		 * in the comparison before being detected).
		 */
		if (((void *) next_head < area->end) &&
		    (head->size + next_head->size >= real_size) &&
		    (next_head->free)) {
			block_check(next_head);
			block_init(head, head->size + next_head->size, false, area);
			split_mark(head, real_size);

			ptr = ((void *) head) + sizeof(heap_block_head_t);
			next_fit = NULL;
		} else
			reloc = true;
	}

	futex_up(&malloc_futex);

	/* Relocation happens outside the lock: malloc/free lock again. */
	if (reloc) {
		ptr = malloc(size);
		if (ptr != NULL) {
			memcpy(ptr, addr, NET_SIZE(orig_size));
			free(addr);
		}
	}

	return ptr;
}
862
/** Free a memory block
 *
 * Mark the block free, coalesce it with its free neighbours
 * (forward first, then backward) and finally try to shrink
 * the owning heap area.
 *
 * @param addr The address of the block.
 *
 */
void free(const void *addr)
{
	if (addr == NULL)
		return;

	futex_down(&malloc_futex);

	/* Calculate the position of the header. */
	heap_block_head_t *head
	    = (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

	block_check(head);
	malloc_assert(!head->free);

	heap_area_t *area = head->area;

	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);

	/* Mark the block itself as free. */
	head->free = true;

	/* Look at the next block. If it is free, merge the two. */
	heap_block_head_t *next_head
	    = (heap_block_head_t *) (((void *) head) + head->size);

	if ((void *) next_head < area->end) {
		block_check(next_head);
		if (next_head->free)
			block_init(head, head->size + next_head->size, true, area);
	}

	/* Look at the previous block. If it is free, merge the two. */
	if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
		heap_block_foot_t *prev_foot =
		    (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));

		heap_block_head_t *prev_head =
		    (heap_block_head_t *) (((void *) head) - prev_foot->size);

		block_check(prev_head);

		/*
		 * head->size was already updated by the forward merge
		 * above, so this single merge can absorb both blocks.
		 */
		if (prev_head->free)
			block_init(prev_head, prev_head->size + head->size, true,
			    area);
	}

	heap_shrink(area);

	futex_up(&malloc_futex);
}
920
/** Check consistency of the whole heap
 *
 * Walk every heap area and every block under the lock and
 * verify all magic values, alignment and size invariants.
 *
 * @return NULL if the heap is consistent,
 *         (void *) -1 if the heap has not been initialized,
 *         otherwise the address of the first inconsistent
 *         structure encountered.
 *
 */
void *heap_check(void)
{
	futex_down(&malloc_futex);

	if (first_heap_area == NULL) {
		futex_up(&malloc_futex);
		return (void *) -1;
	}

	/* Walk all heap areas */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {

		/* Check heap area consistency */
		if ((area->magic != HEAP_AREA_MAGIC) ||
		    ((void *) area != area->start) ||
		    (area->start >= area->end) ||
		    (((uintptr_t) area->start % PAGE_SIZE) != 0) ||
		    (((uintptr_t) area->end % PAGE_SIZE) != 0)) {
			futex_up(&malloc_futex);
			return (void *) area;
		}

		/* Walk all heap blocks */
		for (heap_block_head_t *head = (heap_block_head_t *)
		    AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end;
		    head = (heap_block_head_t *) (((void *) head) + head->size)) {

			/* Check heap block consistency */
			if (head->magic != HEAP_BLOCK_HEAD_MAGIC) {
				futex_up(&malloc_futex);
				return (void *) head;
			}

			heap_block_foot_t *foot = BLOCK_FOOT(head);

			if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) ||
			    (head->size != foot->size)) {
				futex_up(&malloc_futex);
				return (void *) foot;
			}
		}
	}

	futex_up(&malloc_futex);

	return NULL;
}
969
[6db6fd1]970/** @}
971 */
Note: See TracBrowser for help on using the repository browser.