source: mainline/uspace/lib/c/generic/malloc.c@ 9a3b469

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 9a3b469 was 9a3b469, checked in by Adam Hraska <adam.hraska+hos@…>, 13 years ago

malloc avoids using futexes during initialization of the main thread of a program.

  • Property mode set to 100644
File size: 25.4 KB
Line 
1/*
2 * Copyright (c) 2009 Martin Decky
3 * Copyright (c) 2009 Petr Tuma
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup libc
31 * @{
32 */
33/** @file
34 */
35
#include <malloc.h>
#include <bool.h>
#include <as.h>
#include <align.h>
#include <macros.h>
#include <assert.h>
#include <errno.h>
#include <bitops.h>
#include <mem.h>
#include <futex.h>
#include <stdint.h>
#include <stdlib.h>
#include <adt/gcdlcm.h>
#include "private/malloc.h"
#include "private/thread.h"
50
51/** Magic used in heap headers. */
52#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)
53
54/** Magic used in heap footers. */
55#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)
56
57/** Magic used in heap descriptor. */
58#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)
59
60/** Allocation alignment.
61 *
62 * This also covers the alignment of fields
63 * in the heap header and footer.
64 *
65 */
66#define BASE_ALIGN 16
67
68/** Heap shrink granularity
69 *
 * Try not to pump and stress the heap too much
 * by shrinking and enlarging it too often.
 * A heap area won't be shrunk if the released
 * free block is smaller than this constant.
74 *
75 */
76#define SHRINK_GRANULARITY (64 * PAGE_SIZE)
77
78/** Overhead of each heap block. */
79#define STRUCT_OVERHEAD \
80 (sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))
81
82/** Overhead of each area. */
83#define AREA_OVERHEAD(size) \
84 (ALIGN_UP(size + sizeof(heap_area_t), BASE_ALIGN))
85
86/** Calculate real size of a heap block.
87 *
88 * Add header and footer size.
89 *
90 */
91#define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)
92
93/** Calculate net size of a heap block.
94 *
95 * Subtract header and footer size.
96 *
97 */
98#define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)
99
100/** Get first block in heap area.
101 *
102 */
103#define AREA_FIRST_BLOCK_HEAD(area) \
104 (ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))
105
106/** Get last block in heap area.
107 *
108 */
109#define AREA_LAST_BLOCK_FOOT(area) \
110 (((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))
111
112#define AREA_LAST_BLOCK_HEAD(area) \
113 ((uintptr_t) BLOCK_HEAD(((heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area))))
114
115/** Get header in heap block.
116 *
117 */
118#define BLOCK_HEAD(foot) \
119 ((heap_block_head_t *) \
120 (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))
121
122/** Get footer in heap block.
123 *
124 */
125#define BLOCK_FOOT(head) \
126 ((heap_block_foot_t *) \
127 (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)))
128
/** Heap area.
 *
 * The memory managed by the heap allocator is divided into
 * multiple discontinuous heaps. Each heap is represented
 * by a separate address space area which has this structure
 * at its very beginning.
 *
 */
typedef struct heap_area {
	/** Start of the heap area (including this structure)
	 *
	 * Aligned on page boundary.
	 *
	 */
	void *start;

	/** End of the heap area (aligned on page boundary) */
	void *end;

	/** Previous heap area */
	struct heap_area *prev;

	/** Next heap area */
	struct heap_area *next;

	/** A magic value (HEAP_AREA_MAGIC) used to detect corruption */
	uint32_t magic;
} heap_area_t;
157
/** Header of a heap block
 *
 * Placed immediately before the payload handed out to the caller.
 *
 */
typedef struct {
	/** Size of the block (including header and footer) */
	size_t size;

	/** Indication of a free block */
	bool free;

	/** Heap area this block belongs to */
	heap_area_t *area;

	/** A magic value to detect overwrite of heap header */
	uint32_t magic;
} heap_block_head_t;
174
/** Footer of a heap block
 *
 * Mirrors the block size so the allocator can walk backwards
 * to the previous block's header (see BLOCK_HEAD()).
 *
 */
typedef struct {
	/** Size of the block (including header and footer) */
	size_t size;

	/** A magic value to detect overwrite of heap footer */
	uint32_t magic;
} heap_block_foot_t;
185
/** First heap area (head of the doubly-linked list of areas) */
static heap_area_t *first_heap_area = NULL;

/** Last heap area (new areas are appended here) */
static heap_area_t *last_heap_area = NULL;

/** Next heap block to examine (next fit algorithm) */
static heap_block_head_t *next_fit = NULL;

/** Futex for thread-safe heap manipulation */
static futex_t malloc_futex = FUTEX_INITIALIZER;
197
#ifndef NDEBUG

/*
 * Heap-internal assertion. On failure the malloc futex is released
 * before aborting, presumably so the abort path does not deadlock
 * on the heap lock it already holds — NOTE(review): this assumes
 * malloc_assert is only ever used while holding malloc_futex.
 */
#define malloc_assert(expr) \
	do { \
		if (!(expr)) {\
			futex_up(&malloc_futex); \
			assert_abort(#expr, __FILE__, __LINE__); \
		} \
	} while (0)

#else /* NDEBUG */

#define malloc_assert(expr)

#endif /* NDEBUG */
213
214/** Initialize a heap block
215 *
216 * Fill in the structures related to a heap block.
217 * Should be called only inside the critical section.
218 *
219 * @param addr Address of the block.
220 * @param size Size of the block including the header and the footer.
221 * @param free Indication of a free block.
222 * @param area Heap area the block belongs to.
223 *
224 */
225static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
226{
227 /* Calculate the position of the header and the footer */
228 heap_block_head_t *head = (heap_block_head_t *) addr;
229
230 head->size = size;
231 head->free = free;
232 head->area = area;
233 head->magic = HEAP_BLOCK_HEAD_MAGIC;
234
235 heap_block_foot_t *foot = BLOCK_FOOT(head);
236
237 foot->size = size;
238 foot->magic = HEAP_BLOCK_FOOT_MAGIC;
239}
240
/** Check a heap block
 *
 * Verifies that the structures related to a heap block still contain
 * the magic constants. This helps detect heap corruption early on.
 * Should be called only inside the critical section.
 *
 * @param addr Address of the block.
 *
 */
static void block_check(void *addr)
{
	heap_block_head_t *head = (heap_block_head_t *) addr;

	/* Header magic catches overruns from the preceding block. */
	malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);

	heap_block_foot_t *foot = BLOCK_FOOT(head);

	/* Footer magic and the mirrored size catch overruns from
	   within this block's payload. */
	malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
	malloc_assert(head->size == foot->size);
}
261
/** Check a heap area structure
 *
 * Verifies the area descriptor's magic and its structural
 * invariants (self-referencing start pointer, non-empty span,
 * page-aligned boundaries).
 * Should be called only inside the critical section.
 *
 * @param addr Address of the heap area.
 *
 */
static void area_check(void *addr)
{
	heap_area_t *area = (heap_area_t *) addr;

	malloc_assert(area->magic == HEAP_AREA_MAGIC);
	/* The descriptor sits at the very beginning of its own area. */
	malloc_assert(addr == area->start);
	malloc_assert(area->start < area->end);
	malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
	malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
}
279
280/** Create new heap area
281 *
282 * Should be called only inside the critical section.
283 *
284 * @param size Size of the area.
285 *
286 */
287static bool area_create(size_t size)
288{
289 /* Align the heap area size on page boundary */
290 size_t asize = ALIGN_UP(size, PAGE_SIZE);
291 void *astart = as_area_create(AS_AREA_ANY, asize,
292 AS_AREA_WRITE | AS_AREA_READ | AS_AREA_CACHEABLE);
293 if (astart == AS_MAP_FAILED)
294 return false;
295
296 heap_area_t *area = (heap_area_t *) astart;
297
298 area->start = astart;
299 area->end = (void *) ((uintptr_t) astart + asize);
300 area->prev = NULL;
301 area->next = NULL;
302 area->magic = HEAP_AREA_MAGIC;
303
304 void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
305 size_t bsize = (size_t) (area->end - block);
306
307 block_init(block, bsize, true, area);
308
309 if (last_heap_area == NULL) {
310 first_heap_area = area;
311 last_heap_area = area;
312 } else {
313 area->prev = last_heap_area;
314 last_heap_area->next = area;
315 last_heap_area = area;
316 }
317
318 return true;
319}
320
/** Try to enlarge a heap area
 *
 * Should be called only inside the critical section.
 *
 * @param area Heap area to grow.
 * @param size Gross size to grow (bytes).
 *
 * @return True if successful.
 *
 */
static bool area_grow(heap_area_t *area, size_t size)
{
	if (size == 0)
		return true;

	area_check(area);

	/* New heap area size, rounded up to whole pages */
	size_t gross_size = (size_t) (area->end - area->start) + size;
	size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
	void *end = (void *) ((uintptr_t) area->start + asize);

	/* Check for overflow */
	if (end < area->start)
		return false;

	/* Resize the address space area */
	int ret = as_area_resize(area->start, asize, 0);
	if (ret != EOK)
		return false;

	heap_block_head_t *last_head =
	    (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);

	if (last_head->free) {
		/* Coalesce: add the new space to the trailing free block. */
		size_t net_size = (size_t) (end - area->end) + last_head->size;
		malloc_assert(net_size > 0);
		block_init(last_head, net_size, true, area);
	} else {
		/* Last block is in use: the new space becomes a fresh
		   free block (only if at least one byte was gained). */
		size_t net_size = (size_t) (end - area->end);
		if (net_size > 0)
			block_init(area->end, net_size, true, area);
	}

	/* Update heap area parameters */
	area->end = end;

	return true;
}
372
/** Try to shrink heap
 *
 * Releases memory back to the system when the last block of an
 * area is free: either the whole area is destroyed (if it contains
 * a single free block) or the area is trimmed by whole pages once
 * the free tail exceeds SHRINK_GRANULARITY.
 *
 * Should be called only inside the critical section.
 * In all cases the next pointer is reset.
 *
 * @param area Last modified heap area.
 *
 */
static void heap_shrink(heap_area_t *area)
{
	area_check(area);

	heap_block_foot_t *last_foot =
	    (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
	heap_block_head_t *last_head = BLOCK_HEAD(last_foot);

	block_check((void *) last_head);
	malloc_assert(last_head->area == area);

	if (last_head->free) {
		/*
		 * The last block of the heap area is
		 * unused. The area might be potentially
		 * shrunk.
		 */

		heap_block_head_t *first_head =
		    (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);

		block_check((void *) first_head);
		malloc_assert(first_head->area == area);

		/* Only whole pages can be returned to the system. */
		size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);

		if (first_head == last_head) {
			/*
			 * The entire heap area consists of a single
			 * free heap block. This means we can get rid
			 * of it entirely.
			 */

			/* Unlink the area from the doubly-linked list. */
			heap_area_t *prev = area->prev;
			heap_area_t *next = area->next;

			if (prev != NULL) {
				area_check(prev);
				prev->next = next;
			} else
				first_heap_area = next;

			if (next != NULL) {
				area_check(next);
				next->prev = prev;
			} else
				last_heap_area = prev;

			as_area_destroy(area->start);
		} else if (shrink_size >= SHRINK_GRANULARITY) {
			/*
			 * Make sure that we always shrink the area
			 * by a multiple of page size and update
			 * the block layout accordingly.
			 */

			size_t asize = (size_t) (area->end - area->start) - shrink_size;
			void *end = (void *) ((uintptr_t) area->start + asize);

			/* Resize the address space area */
			int ret = as_area_resize(area->start, asize, 0);
			if (ret != EOK)
				abort();

			/* Update heap area parameters */
			area->end = end;
			/* NOTE(review): (size_t) casts of pointers — uintptr_t
			   would be the portable spelling; same value here. */
			size_t excess = ((size_t) area->end) - ((size_t) last_head);

			if (excess > 0) {
				if (excess >= STRUCT_OVERHEAD) {
					/*
					 * The previous block cannot be free and there
					 * is enough free space left in the area to
					 * create a new free block.
					 */
					block_init((void *) last_head, excess, true, area);
				} else {
					/*
					 * The excess is small. Therefore just enlarge
					 * the previous block.
					 */
					heap_block_foot_t *prev_foot = (heap_block_foot_t *)
					    (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
					heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);

					block_check((void *) prev_head);

					block_init(prev_head, prev_head->size + excess,
					    prev_head->free, area);
				}
			}
		}
	}

	/* The next-fit hint may now point into freed memory. */
	next_fit = NULL;
}
477
478/** Initialize the heap allocator
479 *
480 * Create initial heap memory area. This routine is
481 * only called from libc initialization, thus we do not
482 * take any locks.
483 *
484 */
485void __malloc_init(void)
486{
487 if (!area_create(PAGE_SIZE))
488 abort();
489}
490
491/** Split heap block and mark it as used.
492 *
493 * Should be called only inside the critical section.
494 *
495 * @param cur Heap block to split.
496 * @param size Number of bytes to split and mark from the beginning
497 * of the block.
498 *
499 */
500static void split_mark(heap_block_head_t *cur, const size_t size)
501{
502 malloc_assert(cur->size >= size);
503
504 /* See if we should split the block. */
505 size_t split_limit = GROSS_SIZE(size);
506
507 if (cur->size > split_limit) {
508 /* Block big enough -> split. */
509 void *next = ((void *) cur) + size;
510 block_init(next, cur->size - size, true, cur->area);
511 block_init(cur, size, false, cur->area);
512 } else {
513 /* Block too small -> use as is. */
514 cur->free = false;
515 }
516}
517
/** Allocate memory from heap area starting from given block
 *
 * Should be called only inside the critical section.
 * As a side effect this function also sets the current
 * pointer on successful allocation.
 *
 * @param area        Heap area where to allocate from.
 * @param first_block Starting heap block.
 * @param final_block Heap block where to finish the search
 *                    (may be NULL).
 * @param real_size   Gross number of bytes to allocate.
 * @param falign      Physical alignment of the block.
 *
 * @return Address of the allocated block or NULL on not enough memory.
 *
 */
static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
    heap_block_head_t *final_block, size_t real_size, size_t falign)
{
	area_check((void *) area);
	malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) first_block < area->end);

	/* Walk the area's blocks in address order. */
	for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
	    cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
		block_check(cur);

		/* Finish searching on the final block */
		if ((final_block != NULL) && (cur == final_block))
			break;

		/* Try to find a block that is free and large enough. */
		if ((cur->free) && (cur->size >= real_size)) {
			/*
			 * We have found a suitable block.
			 * Check for alignment properties.
			 */
			void *addr = (void *)
			    ((uintptr_t) cur + sizeof(heap_block_head_t));
			void *aligned = (void *)
			    ALIGN_UP((uintptr_t) addr, falign);

			if (addr == aligned) {
				/* Exact block start including alignment. */
				split_mark(cur, real_size);

				next_fit = cur;
				return addr;
			} else {
				/* Block start has to be aligned */
				size_t excess = (size_t) (aligned - addr);

				if (cur->size >= real_size + excess) {
					/*
					 * The current block is large enough to fit
					 * data in (including alignment).
					 */
					if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
						/*
						 * There is a block before the current block.
						 * This previous block can be enlarged to
						 * compensate for the alignment excess.
						 */
						heap_block_foot_t *prev_foot = (heap_block_foot_t *)
						    ((void *) cur - sizeof(heap_block_foot_t));

						heap_block_head_t *prev_head = (heap_block_head_t *)
						    ((void *) cur - prev_foot->size);

						block_check(prev_head);

						size_t reduced_size = cur->size - excess;
						heap_block_head_t *next_head = ((void *) cur) + excess;

						if ((!prev_head->free) &&
						    (excess >= STRUCT_OVERHEAD)) {
							/*
							 * The previous block is not free and there
							 * is enough free space left to fill in
							 * a new free block between the previous
							 * and current block.
							 */
							block_init(cur, excess, true, area);
						} else {
							/*
							 * The previous block is free (thus there
							 * is no need to induce additional
							 * fragmentation to the heap) or the
							 * excess is small. Therefore just enlarge
							 * the previous block.
							 */
							block_init(prev_head, prev_head->size + excess,
							    prev_head->free, area);
						}

						/* The aligned allocation starts at next_head. */
						block_init(next_head, reduced_size, true, area);
						split_mark(next_head, real_size);

						next_fit = next_head;
						return aligned;
					} else {
						/*
						 * The current block is the first block
						 * in the heap area. We have to make sure
						 * that the alignment excess is large enough
						 * to fit a new free block just before the
						 * current block.
						 */
						while (excess < STRUCT_OVERHEAD) {
							aligned += falign;
							excess += falign;
						}

						/* Check for current block size again */
						if (cur->size >= real_size + excess) {
							size_t reduced_size = cur->size - excess;
							cur = (heap_block_head_t *)
							    (AREA_FIRST_BLOCK_HEAD(area) + excess);

							/* Leading padding becomes a free block. */
							block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
							    excess, true, area);
							block_init(cur, reduced_size, true, area);
							split_mark(cur, real_size);

							next_fit = cur;
							return aligned;
						}
					}
				}
			}
		}
	}

	return NULL;
}
653
654/** Try to enlarge any of the heap areas.
655 *
656 * If successful, allocate block of the given size in the area.
657 * Should be called only inside the critical section.
658 *
659 * @param size Gross size of item to allocate (bytes).
660 * @param align Memory address alignment.
661 *
662 * @return Allocated block.
663 * @return NULL on failure.
664 *
665 */
666static void *heap_grow_and_alloc(size_t size, size_t align)
667{
668 if (size == 0)
669 return NULL;
670
671 /* First try to enlarge some existing area */
672 for (heap_area_t *area = first_heap_area; area != NULL;
673 area = area->next) {
674
675 if (area_grow(area, size + align)) {
676 heap_block_head_t *first =
677 (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);
678
679 void *addr =
680 malloc_area(area, first, NULL, size, align);
681 malloc_assert(addr != NULL);
682 return addr;
683 }
684 }
685
686 /* Eventually try to create a new area */
687 if (area_create(AREA_OVERHEAD(size + align))) {
688 heap_block_head_t *first =
689 (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(last_heap_area);
690
691 void *addr =
692 malloc_area(last_heap_area, first, NULL, size, align);
693 malloc_assert(addr != NULL);
694 return addr;
695 }
696
697 return NULL;
698}
699
700/** Allocate a memory block
701 *
702 * Should be called only inside the critical section.
703 *
704 * @param size The size of the block to allocate.
705 * @param align Memory address alignment.
706 *
707 * @return Address of the allocated block or NULL on not enough memory.
708 *
709 */
710static void *malloc_internal(const size_t size, const size_t align)
711{
712 malloc_assert(first_heap_area != NULL);
713
714 if (align == 0)
715 return NULL;
716
717 size_t falign = lcm(align, BASE_ALIGN);
718
719 /* Check for integer overflow. */
720 if (falign < align)
721 return NULL;
722
723 /*
724 * The size of the allocated block needs to be naturally
725 * aligned, because the footer structure also needs to reside
726 * on a naturally aligned address in order to avoid unaligned
727 * memory accesses.
728 */
729 size_t gross_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
730
731 /* Try the next fit approach */
732 heap_block_head_t *split = next_fit;
733
734 if (split != NULL) {
735 void *addr = malloc_area(split->area, split, NULL, gross_size,
736 falign);
737
738 if (addr != NULL)
739 return addr;
740 }
741
742 /* Search the entire heap */
743 for (heap_area_t *area = first_heap_area; area != NULL;
744 area = area->next) {
745 heap_block_head_t *first = (heap_block_head_t *)
746 AREA_FIRST_BLOCK_HEAD(area);
747
748 void *addr = malloc_area(area, first, split, gross_size,
749 falign);
750
751 if (addr != NULL)
752 return addr;
753 }
754
755 /* Finally, try to grow heap space and allocate in the new area. */
756 return heap_grow_and_alloc(gross_size, falign);
757}
758
/** Allocate memory by number of elements
 *
 * The total size is checked for multiplication overflow
 * (CERT MEM07-C); on overflow NULL is returned instead of
 * silently allocating a truncated buffer.
 *
 * @param nmemb Number of members to allocate.
 * @param size  Size of one member in bytes.
 *
 * @return Allocated zero-filled memory or NULL.
 *
 */
void *calloc(const size_t nmemb, const size_t size)
{
	/* Reject requests whose total byte count would wrap around. */
	if ((size > 0) && (nmemb > SIZE_MAX / size))
		return NULL;

	void *block = malloc(nmemb * size);
	if (block == NULL)
		return NULL;

	memset(block, 0, nmemb * size);
	return block;
}
778
779/** Allocate memory
780 *
781 * @param size Number of bytes to allocate.
782 *
783 * @return Allocated memory or NULL.
784 *
785 */
786void *malloc(const size_t size)
787{
788 /* Do not use futexes for allocations during main thread initialization. */
789 if (0 == atomic_get(&_created_thread_cnt)) {
790 return malloc_internal(size, BASE_ALIGN);
791 } else {
792 futex_down(&malloc_futex);
793 void *block = malloc_internal(size, BASE_ALIGN);
794 futex_up(&malloc_futex);
795 return block;
796 }
797}
798
799/** Allocate memory with specified alignment
800 *
801 * @param align Alignment in byes.
802 * @param size Number of bytes to allocate.
803 *
804 * @return Allocated memory or NULL.
805 *
806 */
807void *memalign(const size_t align, const size_t size)
808{
809 if (align == 0)
810 return NULL;
811
812 size_t palign =
813 1 << (fnzb(max(sizeof(void *), align) - 1) + 1);
814
815 /* Do not use futexes for allocations during main thread initialization. */
816 if (0 == atomic_get(&_created_thread_cnt)) {
817 return malloc_internal(size, palign);
818 } else {
819 futex_down(&malloc_futex);
820 void *block = malloc_internal(size, palign);
821 futex_up(&malloc_futex);
822
823 return block;
824 }
825}
826
/** Reallocate memory block
 *
 * Shrinks in place when possible, grows in place by merging with a
 * free successor block, and otherwise falls back to allocate-copy-free.
 *
 * NOTE(review): unlike malloc()/memalign(), this path always takes
 * the futex — presumably realloc is never reached before the first
 * thread is created; confirm against libc startup order.
 *
 * @param addr Already allocated memory or NULL.
 * @param size New size of the memory block.
 *
 * @return Reallocated memory or NULL.
 *
 */
void *realloc(const void *addr, const size_t size)
{
	if (addr == NULL)
		return malloc(size);

	futex_down(&malloc_futex);

	/* Calculate the position of the header. */
	heap_block_head_t *head =
	    (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

	block_check(head);
	malloc_assert(!head->free);

	heap_area_t *area = head->area;

	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);

	void *ptr = NULL;
	bool reloc = false;
	/* Gross size the resized block must provide (naturally aligned). */
	size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
	size_t orig_size = head->size;

	if (orig_size > real_size) {
		/* Shrink */
		if (orig_size - real_size >= STRUCT_OVERHEAD) {
			/*
			 * Split the original block to a full block
			 * and a trailing free block.
			 */
			block_init((void *) head, real_size, false, area);
			block_init((void *) head + real_size,
			    orig_size - real_size, true, area);
			heap_shrink(area);
		}
		/* Otherwise the surplus is too small to split off:
		   keep the block as is. */

		ptr = ((void *) head) + sizeof(heap_block_head_t);
	} else {
		/*
		 * Look at the next block. If it is free and the size is
		 * sufficient then merge the two. Otherwise just allocate
		 * a new block, copy the original data into it and
		 * free the original block.
		 */
		heap_block_head_t *next_head =
		    (heap_block_head_t *) (((void *) head) + head->size);

		if (((void *) next_head < area->end) &&
		    (head->size + next_head->size >= real_size) &&
		    (next_head->free)) {
			block_check(next_head);
			/* Merge, then give back any tail beyond real_size. */
			block_init(head, head->size + next_head->size, false, area);
			split_mark(head, real_size);

			ptr = ((void *) head) + sizeof(heap_block_head_t);
			/* The merged successor may have been the next-fit hint. */
			next_fit = NULL;
		} else
			reloc = true;
	}

	futex_up(&malloc_futex);

	/* Relocation happens outside the lock: malloc/free lock themselves. */
	if (reloc) {
		ptr = malloc(size);
		if (ptr != NULL) {
			memcpy(ptr, addr, NET_SIZE(orig_size));
			free(addr);
		}
	}

	return ptr;
}
909
/** Free a memory block
 *
 * Marks the block free, coalesces it with free neighbors on both
 * sides and then attempts to shrink the containing heap area.
 *
 * NOTE(review): unlike malloc()/memalign(), this path always takes
 * the futex — presumably nothing is freed before the first thread
 * is created; confirm against libc startup order.
 *
 * @param addr The address of the block.
 *
 */
void free(const void *addr)
{
	if (addr == NULL)
		return;

	futex_down(&malloc_futex);

	/* Calculate the position of the header. */
	heap_block_head_t *head
	    = (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

	block_check(head);
	/* Freeing an already-free block indicates double free / corruption. */
	malloc_assert(!head->free);

	heap_area_t *area = head->area;

	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);

	/* Mark the block itself as free. */
	head->free = true;

	/* Look at the next block. If it is free, merge the two. */
	heap_block_head_t *next_head
	    = (heap_block_head_t *) (((void *) head) + head->size);

	if ((void *) next_head < area->end) {
		block_check(next_head);
		if (next_head->free)
			block_init(head, head->size + next_head->size, true, area);
	}

	/* Look at the previous block. If it is free, merge the two. */
	if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
		heap_block_foot_t *prev_foot =
		    (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));

		heap_block_head_t *prev_head =
		    (heap_block_head_t *) (((void *) head) - prev_foot->size);

		block_check(prev_head);

		if (prev_head->free)
			block_init(prev_head, prev_head->size + head->size, true,
			    area);
	}

	/* Return now-free pages to the system if worthwhile. */
	heap_shrink(area);

	futex_up(&malloc_futex);
}
967
968void *heap_check(void)
969{
970 futex_down(&malloc_futex);
971
972 if (first_heap_area == NULL) {
973 futex_up(&malloc_futex);
974 return (void *) -1;
975 }
976
977 /* Walk all heap areas */
978 for (heap_area_t *area = first_heap_area; area != NULL;
979 area = area->next) {
980
981 /* Check heap area consistency */
982 if ((area->magic != HEAP_AREA_MAGIC) ||
983 ((void *) area != area->start) ||
984 (area->start >= area->end) ||
985 (((uintptr_t) area->start % PAGE_SIZE) != 0) ||
986 (((uintptr_t) area->end % PAGE_SIZE) != 0)) {
987 futex_up(&malloc_futex);
988 return (void *) area;
989 }
990
991 /* Walk all heap blocks */
992 for (heap_block_head_t *head = (heap_block_head_t *)
993 AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end;
994 head = (heap_block_head_t *) (((void *) head) + head->size)) {
995
996 /* Check heap block consistency */
997 if (head->magic != HEAP_BLOCK_HEAD_MAGIC) {
998 futex_up(&malloc_futex);
999 return (void *) head;
1000 }
1001
1002 heap_block_foot_t *foot = BLOCK_FOOT(head);
1003
1004 if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) ||
1005 (head->size != foot->size)) {
1006 futex_up(&malloc_futex);
1007 return (void *) foot;
1008 }
1009 }
1010 }
1011
1012 futex_up(&malloc_futex);
1013
1014 return NULL;
1015}
1016
1017/** @}
1018 */
Note: See TracBrowser for help on using the repository browser.