source: mainline/uspace/lib/c/generic/malloc.c@ b078a42

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since b078a42 was b078a42, checked in by Martin Decky <martin@…>, 13 years ago

it is imperative that the _size_ of the allocated block is at least BASE_ALIGN-aligned
(because the tail structure needs to be placed on a naturally aligned address, otherwise we hit unaligned memory access on platforms which are less forgiving)

  • Property mode set to 100644
File size: 24.7 KB
Line 
1/*
2 * Copyright (c) 2009 Martin Decky
3 * Copyright (c) 2009 Petr Tuma
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup libc
31 * @{
32 */
33/** @file
34 */
35
36#include <malloc.h>
37#include <bool.h>
38#include <as.h>
39#include <align.h>
40#include <macros.h>
41#include <assert.h>
42#include <errno.h>
43#include <bitops.h>
44#include <mem.h>
45#include <futex.h>
46#include <stdlib.h>
47#include <adt/gcdlcm.h>
48#include "private/malloc.h"
49
50/** Magic used in heap headers. */
51#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)
52
53/** Magic used in heap footers. */
54#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)
55
56/** Magic used in heap descriptor. */
57#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)
58
59/** Allocation alignment.
60 *
61 * This also covers the alignment of fields
62 * in the heap header and footer.
63 *
64 */
65#define BASE_ALIGN 16
66
67/** Heap shrink granularity
68 *
 69 * Try not to pump and stress the heap too much
70 * by shrinking and enlarging it too often.
 71 * A heap area won't be shrunk if the released
 72 * free block is smaller than this constant.
73 *
74 */
75#define SHRINK_GRANULARITY (64 * PAGE_SIZE)
76
77/** Overhead of each heap block. */
78#define STRUCT_OVERHEAD \
79 (sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))
80
81/** Overhead of each area. */
82#define AREA_OVERHEAD(size) \
83 (ALIGN_UP(size + sizeof(heap_area_t), BASE_ALIGN))
84
85/** Calculate real size of a heap block.
86 *
87 * Add header and footer size.
88 *
89 */
90#define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)
91
92/** Calculate net size of a heap block.
93 *
94 * Subtract header and footer size.
95 *
96 */
97#define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)
98
99/** Get first block in heap area.
100 *
101 */
102#define AREA_FIRST_BLOCK_HEAD(area) \
103 (ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))
104
105/** Get last block in heap area.
106 *
107 */
108#define AREA_LAST_BLOCK_FOOT(area) \
109 (((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))
110
111#define AREA_LAST_BLOCK_HEAD(area) \
112 ((uintptr_t) BLOCK_HEAD(((heap_block_foot_t *)AREA_LAST_BLOCK_FOOT(area))))
113
114/** Get header in heap block.
115 *
116 */
117#define BLOCK_HEAD(foot) \
118 ((heap_block_head_t *) \
119 (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))
120
121/** Get footer in heap block.
122 *
123 */
124#define BLOCK_FOOT(head) \
125 ((heap_block_foot_t *) \
126 (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)))
127
128/** Heap area.
129 *
130 * The memory managed by the heap allocator is divided into
131 * multiple discontinuous heaps. Each heap is represented
132 * by a separate address space area which has this structure
133 * at its very beginning.
134 *
135 */
136typedef struct heap_area {
137 /** Start of the heap area (including this structure)
138 *
139 * Aligned on page boundary.
140 *
141 */
142 void *start;
143
144 /** End of the heap area (aligned on page boundary) */
145 void *end;
146
147 /** Previous heap area */
148 struct heap_area *prev;
149
150 /** Next heap area */
151 struct heap_area *next;
152
153 /** A magic value */
154 uint32_t magic;
155} heap_area_t;
156
157/** Header of a heap block
158 *
159 */
160typedef struct {
161 /* Size of the block (including header and footer) */
162 size_t size;
163
164 /* Indication of a free block */
165 bool free;
166
167 /** Heap area this block belongs to */
168 heap_area_t *area;
169
170 /* A magic value to detect overwrite of heap header */
171 uint32_t magic;
172} heap_block_head_t;
173
174/** Footer of a heap block
175 *
176 */
177typedef struct {
178 /* Size of the block (including header and footer) */
179 size_t size;
180
181 /* A magic value to detect overwrite of heap footer */
182 uint32_t magic;
183} heap_block_foot_t;
184
185/** First heap area */
186static heap_area_t *first_heap_area = NULL;
187
188/** Last heap area */
189static heap_area_t *last_heap_area = NULL;
190
191/** Next heap block to examine (next fit algorithm) */
192static heap_block_head_t *next_fit = NULL;
193
194/** Futex for thread-safe heap manipulation */
195static futex_t malloc_futex = FUTEX_INITIALIZER;
196
197#ifndef NDEBUG
198
199#define malloc_assert(expr) \
200 do { \
201 if (!(expr)) {\
202 futex_up(&malloc_futex); \
203 assert_abort(#expr, __FILE__, __LINE__); \
204 } \
205 } while (0)
206
207#else /* NDEBUG */
208
209#define malloc_assert(expr)
210
211#endif /* NDEBUG */
212
213/** Initialize a heap block
214 *
215 * Fill in the structures related to a heap block.
216 * Should be called only inside the critical section.
217 *
218 * @param addr Address of the block.
219 * @param size Size of the block including the header and the footer.
220 * @param free Indication of a free block.
221 * @param area Heap area the block belongs to.
222 *
223 */
224static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
225{
226 /* Calculate the position of the header and the footer */
227 heap_block_head_t *head = (heap_block_head_t *) addr;
228
229 head->size = size;
230 head->free = free;
231 head->area = area;
232 head->magic = HEAP_BLOCK_HEAD_MAGIC;
233
234 heap_block_foot_t *foot = BLOCK_FOOT(head);
235
236 foot->size = size;
237 foot->magic = HEAP_BLOCK_FOOT_MAGIC;
238}
239
240/** Check a heap block
241 *
242 * Verifies that the structures related to a heap block still contain
243 * the magic constants. This helps detect heap corruption early on.
244 * Should be called only inside the critical section.
245 *
246 * @param addr Address of the block.
247 *
248 */
249static void block_check(void *addr)
250{
251 heap_block_head_t *head = (heap_block_head_t *) addr;
252
253 malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);
254
255 heap_block_foot_t *foot = BLOCK_FOOT(head);
256
257 malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
258 malloc_assert(head->size == foot->size);
259}
260
261/** Check a heap area structure
262 *
263 * Should be called only inside the critical section.
264 *
265 * @param addr Address of the heap area.
266 *
267 */
268static void area_check(void *addr)
269{
270 heap_area_t *area = (heap_area_t *) addr;
271
272 malloc_assert(area->magic == HEAP_AREA_MAGIC);
273 malloc_assert(addr == area->start);
274 malloc_assert(area->start < area->end);
275 malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
276 malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
277}
278
279/** Create new heap area
280 *
281 * Should be called only inside the critical section.
282 *
283 * @param size Size of the area.
284 *
285 */
286static bool area_create(size_t size)
287{
288 /* Align the heap area size on page boundary */
289 size_t asize = ALIGN_UP(size, PAGE_SIZE);
290 void *astart = as_area_create(AS_AREA_ANY, asize,
291 AS_AREA_WRITE | AS_AREA_READ);
292 if (astart == AS_MAP_FAILED)
293 return false;
294
295 heap_area_t *area = (heap_area_t *) astart;
296
297 area->start = astart;
298 area->end = (void *) ((uintptr_t) astart + asize);
299 area->prev = NULL;
300 area->next = NULL;
301 area->magic = HEAP_AREA_MAGIC;
302
303 void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
304 size_t bsize = (size_t) (area->end - block);
305
306 block_init(block, bsize, true, area);
307
308 if (last_heap_area == NULL) {
309 first_heap_area = area;
310 last_heap_area = area;
311 } else {
312 area->prev = last_heap_area;
313 last_heap_area->next = area;
314 last_heap_area = area;
315 }
316
317 return true;
318}
319
320/** Try to enlarge a heap area
321 *
322 * Should be called only inside the critical section.
323 *
324 * @param area Heap area to grow.
325 * @param size Gross size to grow (bytes).
326 *
327 * @return True if successful.
328 *
329 */
330static bool area_grow(heap_area_t *area, size_t size)
331{
332 if (size == 0)
333 return true;
334
335 area_check(area);
336
337 /* New heap area size */
338 size_t gross_size = (size_t) (area->end - area->start) + size;
339 size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
340 void *end = (void *) ((uintptr_t) area->start + asize);
341
342 /* Check for overflow */
343 if (end < area->start)
344 return false;
345
346 /* Resize the address space area */
347 int ret = as_area_resize(area->start, asize, 0);
348 if (ret != EOK)
349 return false;
350
351 heap_block_head_t *last_head = (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);
352
353 if (last_head->free) {
354 /* Add the new space to the last block. */
355 size_t net_size = (size_t) (end - area->end) + last_head->size;
356 malloc_assert(net_size > 0);
357 block_init(last_head, net_size, true, area);
358 } else {
359 /* Add new free block */
360 size_t net_size = (size_t) (end - area->end);
361 if (net_size > 0)
362 block_init(area->end, net_size, true, area);
363 }
364
365 /* Update heap area parameters */
366 area->end = end;
367
368 return true;
369}
370
371/** Try to shrink heap
372 *
373 * Should be called only inside the critical section.
374 * In all cases the next pointer is reset.
375 *
376 * @param area Last modified heap area.
377 *
378 */
379static void heap_shrink(heap_area_t *area)
380{
381 area_check(area);
382
383 heap_block_foot_t *last_foot =
384 (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
385 heap_block_head_t *last_head = BLOCK_HEAD(last_foot);
386
387 block_check((void *) last_head);
388 malloc_assert(last_head->area == area);
389
390 if (last_head->free) {
391 /*
392 * The last block of the heap area is
393 * unused. The area might be potentially
394 * shrunk.
395 */
396
397 heap_block_head_t *first_head =
398 (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);
399
400 block_check((void *) first_head);
401 malloc_assert(first_head->area == area);
402
403 size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);
404
405 if (first_head == last_head) {
406 /*
407 * The entire heap area consists of a single
408 * free heap block. This means we can get rid
409 * of it entirely.
410 */
411
412 heap_area_t *prev = area->prev;
413 heap_area_t *next = area->next;
414
415 if (prev != NULL) {
416 area_check(prev);
417 prev->next = next;
418 } else
419 first_heap_area = next;
420
421 if (next != NULL) {
422 area_check(next);
423 next->prev = prev;
424 } else
425 last_heap_area = prev;
426
427 as_area_destroy(area->start);
428 } else if (shrink_size >= SHRINK_GRANULARITY) {
429 /*
430 * Make sure that we always shrink the area
431 * by a multiple of page size and update
432 * the block layout accordingly.
433 */
434
435 size_t asize = (size_t) (area->end - area->start) - shrink_size;
436 void *end = (void *) ((uintptr_t) area->start + asize);
437
438 /* Resize the address space area */
439 int ret = as_area_resize(area->start, asize, 0);
440 if (ret != EOK)
441 abort();
442
443 /* Update heap area parameters */
444 area->end = end;
445 size_t excess = ((size_t) area->end) - ((size_t) last_head);
446
447 if (excess > 0) {
448 if (excess >= STRUCT_OVERHEAD) {
449 /*
450 * The previous block cannot be free and there
451 * is enough free space left in the area to
452 * create a new free block.
453 */
454 block_init((void *) last_head, excess, true, area);
455 } else {
456 /*
457 * The excess is small. Therefore just enlarge
458 * the previous block.
459 */
460 heap_block_foot_t *prev_foot = (heap_block_foot_t *)
461 (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
462 heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);
463
464 block_check((void *) prev_head);
465
466 block_init(prev_head, prev_head->size + excess,
467 prev_head->free, area);
468 }
469 }
470 }
471 }
472
473 next_fit = NULL;
474}
475
476/** Initialize the heap allocator
477 *
478 * Create initial heap memory area. This routine is
479 * only called from libc initialization, thus we do not
480 * take any locks.
481 *
482 */
483void __malloc_init(void)
484{
485 if (!area_create(PAGE_SIZE))
486 abort();
487}
488
489/** Split heap block and mark it as used.
490 *
491 * Should be called only inside the critical section.
492 *
493 * @param cur Heap block to split.
494 * @param size Number of bytes to split and mark from the beginning
495 * of the block.
496 *
497 */
498static void split_mark(heap_block_head_t *cur, const size_t size)
499{
500 malloc_assert(cur->size >= size);
501
502 /* See if we should split the block. */
503 size_t split_limit = GROSS_SIZE(size);
504
505 if (cur->size > split_limit) {
506 /* Block big enough -> split. */
507 void *next = ((void *) cur) + size;
508 block_init(next, cur->size - size, true, cur->area);
509 block_init(cur, size, false, cur->area);
510 } else {
511 /* Block too small -> use as is. */
512 cur->free = false;
513 }
514}
515
516/** Allocate memory from heap area starting from given block
517 *
518 * Should be called only inside the critical section.
519 * As a side effect this function also sets the current
520 * pointer on successful allocation.
521 *
522 * @param area Heap area where to allocate from.
523 * @param first_block Starting heap block.
524 * @param final_block Heap block where to finish the search
525 * (may be NULL).
526 * @param real_size Gross number of bytes to allocate.
527 * @param falign Physical alignment of the block.
528 *
529 * @return Address of the allocated block or NULL on not enough memory.
530 *
531 */
532static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
533 heap_block_head_t *final_block, size_t real_size, size_t falign)
534{
535 area_check((void *) area);
536 malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
537 malloc_assert((void *) first_block < area->end);
538
539 for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
540 cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
541 block_check(cur);
542
543 /* Finish searching on the final block */
544 if ((final_block != NULL) && (cur == final_block))
545 break;
546
547 /* Try to find a block that is free and large enough. */
548 if ((cur->free) && (cur->size >= real_size)) {
549 /*
550 * We have found a suitable block.
551 * Check for alignment properties.
552 */
553 void *addr = (void *)
554 ((uintptr_t) cur + sizeof(heap_block_head_t));
555 void *aligned = (void *)
556 ALIGN_UP((uintptr_t) addr, falign);
557
558 if (addr == aligned) {
559 /* Exact block start including alignment. */
560 split_mark(cur, real_size);
561
562 next_fit = cur;
563 return addr;
564 } else {
565 /* Block start has to be aligned */
566 size_t excess = (size_t) (aligned - addr);
567
568 if (cur->size >= real_size + excess) {
569 /*
570 * The current block is large enough to fit
571 * data in (including alignment).
572 */
573 if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
574 /*
575 * There is a block before the current block.
576 * This previous block can be enlarged to
577 * compensate for the alignment excess.
578 */
579 heap_block_foot_t *prev_foot = (heap_block_foot_t *)
580 ((void *) cur - sizeof(heap_block_foot_t));
581
582 heap_block_head_t *prev_head = (heap_block_head_t *)
583 ((void *) cur - prev_foot->size);
584
585 block_check(prev_head);
586
587 size_t reduced_size = cur->size - excess;
588 heap_block_head_t *next_head = ((void *) cur) + excess;
589
590 if ((!prev_head->free) &&
591 (excess >= STRUCT_OVERHEAD)) {
592 /*
593 * The previous block is not free and there
594 * is enough free space left to fill in
595 * a new free block between the previous
596 * and current block.
597 */
598 block_init(cur, excess, true, area);
599 } else {
600 /*
601 * The previous block is free (thus there
602 * is no need to induce additional
603 * fragmentation to the heap) or the
604 * excess is small. Therefore just enlarge
605 * the previous block.
606 */
607 block_init(prev_head, prev_head->size + excess,
608 prev_head->free, area);
609 }
610
611 block_init(next_head, reduced_size, true, area);
612 split_mark(next_head, real_size);
613
614 next_fit = next_head;
615 return aligned;
616 } else {
617 /*
618 * The current block is the first block
619 * in the heap area. We have to make sure
620 * that the alignment excess is large enough
621 * to fit a new free block just before the
622 * current block.
623 */
624 while (excess < STRUCT_OVERHEAD) {
625 aligned += falign;
626 excess += falign;
627 }
628
629 /* Check for current block size again */
630 if (cur->size >= real_size + excess) {
631 size_t reduced_size = cur->size - excess;
632 cur = (heap_block_head_t *)
633 (AREA_FIRST_BLOCK_HEAD(area) + excess);
634
635 block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
636 excess, true, area);
637 block_init(cur, reduced_size, true, area);
638 split_mark(cur, real_size);
639
640 next_fit = cur;
641 return aligned;
642 }
643 }
644 }
645 }
646 }
647 }
648
649 return NULL;
650}
651
652/** Try to enlarge any of the heap areas.
653 * If successful, allocate block of the given size in the area.
654 *
655 * Should be called only inside the critical section.
656 *
657 * @param size Gross size of item to allocate (bytes).
658 * @param align Memory address alignment.
659 *
660 */
661static void *heap_grow_and_alloc(size_t size, size_t align)
662{
663 if (size == 0)
664 return NULL;
665
666 /* First try to enlarge some existing area */
667 for (heap_area_t *area = first_heap_area; area != NULL;
668 area = area->next) {
669
670 if (area_grow(area, size + align)) {
671 heap_block_head_t *first = (heap_block_head_t *) AREA_LAST_BLOCK_HEAD(area);
672
673 void *addr = malloc_area(area, first, NULL, size, align);
674 malloc_assert(addr != NULL);
675 return addr;
676 }
677 }
678
679 /* Eventually try to create a new area */
680 if (area_create(AREA_OVERHEAD(size + align))) {
681 heap_block_head_t *first = (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(last_heap_area);
682
683 void *addr = malloc_area(last_heap_area, first, NULL, size, align);
684 malloc_assert(addr != NULL);
685 return addr;
686 }
687
688 return NULL;
689}
690
691/** Allocate a memory block
692 *
693 * Should be called only inside the critical section.
694 *
695 * @param size The size of the block to allocate.
696 * @param align Memory address alignment.
697 *
698 * @return Address of the allocated block or NULL on not enough memory.
699 *
700 */
701static void *malloc_internal(const size_t size, const size_t align)
702{
703 malloc_assert(first_heap_area != NULL);
704
705 if (align == 0)
706 return NULL;
707
708 size_t falign = lcm(align, BASE_ALIGN);
709
710 /* Check for integer overflow. */
711 if (falign < align)
712 return NULL;
713
714 size_t gross_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
715
716 heap_block_head_t *split;
717
718 /* Try the next fit approach */
719 split = next_fit;
720
721 if (split != NULL) {
722 void *addr = malloc_area(split->area, split, NULL, gross_size,
723 falign);
724
725 if (addr != NULL)
726 return addr;
727 }
728
729 /* Search the entire heap */
730 for (heap_area_t *area = first_heap_area; area != NULL;
731 area = area->next) {
732 heap_block_head_t *first = (heap_block_head_t *)
733 AREA_FIRST_BLOCK_HEAD(area);
734
735 void *addr = malloc_area(area, first, split, gross_size,
736 falign);
737
738 if (addr != NULL)
739 return addr;
740 }
741
742 /* Finally, try to grow heap space and allocate in the new area. */
743 return heap_grow_and_alloc(gross_size, falign);
744}
745
746/** Allocate memory by number of elements
747 *
748 * @param nmemb Number of members to allocate.
749 * @param size Size of one member in bytes.
750 *
751 * @return Allocated memory or NULL.
752 *
753 */
754void *calloc(const size_t nmemb, const size_t size)
755{
756 void *block = malloc(nmemb * size);
757 if (block == NULL)
758 return NULL;
759
760 memset(block, 0, nmemb * size);
761 return block;
762}
763
764/** Allocate memory
765 *
766 * @param size Number of bytes to allocate.
767 *
768 * @return Allocated memory or NULL.
769 *
770 */
771void *malloc(const size_t size)
772{
773 futex_down(&malloc_futex);
774 void *block = malloc_internal(size, BASE_ALIGN);
775 futex_up(&malloc_futex);
776
777 return block;
778}
779
780/** Allocate memory with specified alignment
781 *
782 * @param align Alignment in byes.
783 * @param size Number of bytes to allocate.
784 *
785 * @return Allocated memory or NULL.
786 *
787 */
788void *memalign(const size_t align, const size_t size)
789{
790 if (align == 0)
791 return NULL;
792
793 size_t palign =
794 1 << (fnzb(max(sizeof(void *), align) - 1) + 1);
795
796 futex_down(&malloc_futex);
797 void *block = malloc_internal(size, palign);
798 futex_up(&malloc_futex);
799
800 return block;
801}
802
803/** Reallocate memory block
804 *
805 * @param addr Already allocated memory or NULL.
806 * @param size New size of the memory block.
807 *
808 * @return Reallocated memory or NULL.
809 *
810 */
811void *realloc(const void *addr, const size_t size)
812{
813 if (addr == NULL)
814 return malloc(size);
815
816 futex_down(&malloc_futex);
817
818 /* Calculate the position of the header. */
819 heap_block_head_t *head =
820 (heap_block_head_t *) (addr - sizeof(heap_block_head_t));
821
822 block_check(head);
823 malloc_assert(!head->free);
824
825 heap_area_t *area = head->area;
826
827 area_check(area);
828 malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
829 malloc_assert((void *) head < area->end);
830
831 void *ptr = NULL;
832 bool reloc = false;
833 size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
834 size_t orig_size = head->size;
835
836 if (orig_size > real_size) {
837 /* Shrink */
838 if (orig_size - real_size >= STRUCT_OVERHEAD) {
839 /*
840 * Split the original block to a full block
841 * and a trailing free block.
842 */
843 block_init((void *) head, real_size, false, area);
844 block_init((void *) head + real_size,
845 orig_size - real_size, true, area);
846 heap_shrink(area);
847 }
848
849 ptr = ((void *) head) + sizeof(heap_block_head_t);
850 } else {
851 /*
852 * Look at the next block. If it is free and the size is
853 * sufficient then merge the two. Otherwise just allocate
854 * a new block, copy the original data into it and
855 * free the original block.
856 */
857 heap_block_head_t *next_head =
858 (heap_block_head_t *) (((void *) head) + head->size);
859
860 if (((void *) next_head < area->end) &&
861 (head->size + next_head->size >= real_size) &&
862 (next_head->free)) {
863 block_check(next_head);
864 block_init(head, head->size + next_head->size, false, area);
865 split_mark(head, real_size);
866
867 ptr = ((void *) head) + sizeof(heap_block_head_t);
868 next_fit = NULL;
869 } else
870 reloc = true;
871 }
872
873 futex_up(&malloc_futex);
874
875 if (reloc) {
876 ptr = malloc(size);
877 if (ptr != NULL) {
878 memcpy(ptr, addr, NET_SIZE(orig_size));
879 free(addr);
880 }
881 }
882
883 return ptr;
884}
885
886/** Free a memory block
887 *
888 * @param addr The address of the block.
889 *
890 */
891void free(const void *addr)
892{
893 if (addr == NULL)
894 return;
895
896 futex_down(&malloc_futex);
897
898 /* Calculate the position of the header. */
899 heap_block_head_t *head
900 = (heap_block_head_t *) (addr - sizeof(heap_block_head_t));
901
902 block_check(head);
903 malloc_assert(!head->free);
904
905 heap_area_t *area = head->area;
906
907 area_check(area);
908 malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
909 malloc_assert((void *) head < area->end);
910
911 /* Mark the block itself as free. */
912 head->free = true;
913
914 /* Look at the next block. If it is free, merge the two. */
915 heap_block_head_t *next_head
916 = (heap_block_head_t *) (((void *) head) + head->size);
917
918 if ((void *) next_head < area->end) {
919 block_check(next_head);
920 if (next_head->free)
921 block_init(head, head->size + next_head->size, true, area);
922 }
923
924 /* Look at the previous block. If it is free, merge the two. */
925 if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
926 heap_block_foot_t *prev_foot =
927 (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));
928
929 heap_block_head_t *prev_head =
930 (heap_block_head_t *) (((void *) head) - prev_foot->size);
931
932 block_check(prev_head);
933
934 if (prev_head->free)
935 block_init(prev_head, prev_head->size + head->size, true,
936 area);
937 }
938
939 heap_shrink(area);
940
941 futex_up(&malloc_futex);
942}
943
944void *heap_check(void)
945{
946 futex_down(&malloc_futex);
947
948 if (first_heap_area == NULL) {
949 futex_up(&malloc_futex);
950 return (void *) -1;
951 }
952
953 /* Walk all heap areas */
954 for (heap_area_t *area = first_heap_area; area != NULL;
955 area = area->next) {
956
957 /* Check heap area consistency */
958 if ((area->magic != HEAP_AREA_MAGIC) ||
959 ((void *) area != area->start) ||
960 (area->start >= area->end) ||
961 (((uintptr_t) area->start % PAGE_SIZE) != 0) ||
962 (((uintptr_t) area->end % PAGE_SIZE) != 0)) {
963 futex_up(&malloc_futex);
964 return (void *) area;
965 }
966
967 /* Walk all heap blocks */
968 for (heap_block_head_t *head = (heap_block_head_t *)
969 AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end;
970 head = (heap_block_head_t *) (((void *) head) + head->size)) {
971
972 /* Check heap block consistency */
973 if (head->magic != HEAP_BLOCK_HEAD_MAGIC) {
974 futex_up(&malloc_futex);
975 return (void *) head;
976 }
977
978 heap_block_foot_t *foot = BLOCK_FOOT(head);
979
980 if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) ||
981 (head->size != foot->size)) {
982 futex_up(&malloc_futex);
983 return (void *) foot;
984 }
985 }
986 }
987
988 futex_up(&malloc_futex);
989
990 return NULL;
991}
992
993/** @}
994 */
Note: See TracBrowser for help on using the repository browser.