source: mainline/uspace/lib/c/generic/malloc.c@ 13f2461

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 13f2461 was 13f2461, checked in by Jakub Jermar <jakub@…>, 14 years ago

Merge the functionality of assert_abort() and assert_static_abort() into
assert_abort().

Print the unexpanded message to klog first. Then try to print the
message and the stack trace to standard output using printf. Use the
same unabbreviated message wording for both klog and stdout. If a
nested or a parallel assert is detected, send the message only to klog
and then abort.

Re-introduce malloc_assert() to up the malloc_futex. This appears to be
inevitable if we want to print to stdout. To prevent undesired macro
expansion, call assert_abort() from malloc_assert() directly.

  • Property mode set to 100644
File size: 23.7 KB
Line 
1/*
2 * Copyright (c) 2009 Martin Decky
3 * Copyright (c) 2009 Petr Tuma
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup libc
31 * @{
32 */
33/** @file
34 */
35
36#include <malloc.h>
37#include <bool.h>
38#include <as.h>
39#include <align.h>
40#include <macros.h>
41#include <assert.h>
42#include <errno.h>
43#include <bitops.h>
44#include <mem.h>
45#include <futex.h>
46#include <stdlib.h>
47#include <adt/gcdlcm.h>
48#include "private/malloc.h"
49
/** Magic used in heap headers. */
#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)

/** Magic used in heap footers. */
#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)

/** Magic used in heap descriptor. */
#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)

/** Allocation alignment.
 *
 * This also covers the alignment of fields
 * in the heap header and footer.
 *
 */
#define BASE_ALIGN 16

/** Heap shrink granularity
 *
 * Try not to pump and stress the heap too much
 * by shrinking and enlarging it too often.
 * A heap area won't be shrunk if the released
 * free block is smaller than this constant.
 *
 */
#define SHRINK_GRANULARITY (64 * PAGE_SIZE)

/** Overhead of each heap block. */
#define STRUCT_OVERHEAD \
	(sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))

/** Calculate real size of a heap block.
 *
 * Add header and footer size.
 *
 */
#define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)

/** Calculate net size of a heap block.
 *
 * Subtract header and footer size.
 *
 */
#define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)

/** Get first block in heap area.
 *
 * Yields the aligned address of the first block header,
 * i.e. the first usable byte past the heap_area_t descriptor.
 *
 */
#define AREA_FIRST_BLOCK_HEAD(area) \
	(ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))

/** Get last block in heap area.
 *
 * Yields the address of the footer of the very last block,
 * located just before the area end.
 *
 */
#define AREA_LAST_BLOCK_FOOT(area) \
	(((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))

/** Get header in heap block.
 *
 * Computes the header address from a footer pointer using
 * the block size stored in the footer.
 *
 */
#define BLOCK_HEAD(foot) \
	((heap_block_head_t *) \
	    (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))

/** Get footer in heap block.
 *
 * Computes the footer address from a header pointer using
 * the block size stored in the header.
 *
 */
#define BLOCK_FOOT(head) \
	((heap_block_foot_t *) \
	    (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)))
/** Heap area.
 *
 * The memory managed by the heap allocator is divided into
 * multiple discontinuous heaps. Each heap is represented
 * by a separate address space area which has this structure
 * at its very beginning.
 *
 */
typedef struct heap_area {
	/** Start of the heap area (including this structure)
	 *
	 * Aligned on page boundary.
	 *
	 */
	void *start;

	/** End of the heap area (aligned on page boundary) */
	void *end;

	/** Previous heap area */
	struct heap_area *prev;

	/** Next heap area */
	struct heap_area *next;

	/** A magic value (HEAP_AREA_MAGIC) used to detect corruption */
	uint32_t magic;
} heap_area_t;
149
/** Header of a heap block
 *
 * Placed immediately before the payload of every block.
 *
 */
typedef struct {
	/** Size of the block (including header and footer) */
	size_t size;

	/** Indication of a free block */
	bool free;

	/** Heap area this block belongs to */
	heap_area_t *area;

	/** A magic value (HEAP_BLOCK_HEAD_MAGIC) to detect overwrite of heap header */
	uint32_t magic;
} heap_block_head_t;
166
/** Footer of a heap block
 *
 * Placed at the very end of every block; mirrors the size stored
 * in the header so the block can also be traversed backwards.
 *
 */
typedef struct {
	/** Size of the block (including header and footer) */
	size_t size;

	/** A magic value (HEAP_BLOCK_FOOT_MAGIC) to detect overwrite of heap footer */
	uint32_t magic;
} heap_block_foot_t;
177
/** First heap area */
static heap_area_t *first_heap_area = NULL;

/** Last heap area */
static heap_area_t *last_heap_area = NULL;

/** Next heap block to examine (next fit algorithm)
 *
 * Reset to NULL whenever the block layout may have changed
 * (see heap_shrink() and realloc()).
 */
static heap_block_head_t *next = NULL;

/** Futex for thread-safe heap manipulation */
static futex_t malloc_futex = FUTEX_INITIALIZER;
189
#ifndef NDEBUG

/*
 * Heap-specific assert. Before aborting, release malloc_futex so that
 * the failure reporting path (which may print via stdout and thus
 * allocate) does not deadlock on the heap lock. assert_abort() is
 * called directly to prevent undesired macro expansion.
 */
#define malloc_assert(expr) \
	do { \
		if (!(expr)) {\
			futex_up(&malloc_futex); \
			assert_abort(#expr, __FILE__, STR2(__LINE__)); \
		} \
	} while (0)

#else /* NDEBUG */

#define malloc_assert(expr)

#endif /* NDEBUG */
205
206/** Initialize a heap block
207 *
208 * Fill in the structures related to a heap block.
209 * Should be called only inside the critical section.
210 *
211 * @param addr Address of the block.
212 * @param size Size of the block including the header and the footer.
213 * @param free Indication of a free block.
214 * @param area Heap area the block belongs to.
215 *
216 */
217static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
218{
219 /* Calculate the position of the header and the footer */
220 heap_block_head_t *head = (heap_block_head_t *) addr;
221
222 head->size = size;
223 head->free = free;
224 head->area = area;
225 head->magic = HEAP_BLOCK_HEAD_MAGIC;
226
227 heap_block_foot_t *foot = BLOCK_FOOT(head);
228
229 foot->size = size;
230 foot->magic = HEAP_BLOCK_FOOT_MAGIC;
231}
232
233/** Check a heap block
234 *
235 * Verifies that the structures related to a heap block still contain
236 * the magic constants. This helps detect heap corruption early on.
237 * Should be called only inside the critical section.
238 *
239 * @param addr Address of the block.
240 *
241 */
242static void block_check(void *addr)
243{
244 heap_block_head_t *head = (heap_block_head_t *) addr;
245
246 malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);
247
248 heap_block_foot_t *foot = BLOCK_FOOT(head);
249
250 malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
251 malloc_assert(head->size == foot->size);
252}
253
254/** Check a heap area structure
255 *
256 * Should be called only inside the critical section.
257 *
258 * @param addr Address of the heap area.
259 *
260 */
261static void area_check(void *addr)
262{
263 heap_area_t *area = (heap_area_t *) addr;
264
265 malloc_assert(area->magic == HEAP_AREA_MAGIC);
266 malloc_assert(addr == area->start);
267 malloc_assert(area->start < area->end);
268 malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
269 malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
270}
271
/** Create new heap area
 *
 * Map a fresh address space area, initialize its heap descriptor,
 * cover the remainder with a single free block and link the area
 * to the end of the global area list.
 * Should be called only inside the critical section.
 *
 * @param size Size of the area.
 *
 * @return True on success, false if the address space area
 *         could not be obtained or created.
 *
 */
static bool area_create(size_t size)
{
	void *start = as_get_mappable_page(size);
	if (start == NULL)
		return false;

	/* Align the heap area on page boundary */
	void *astart = (void *) ALIGN_UP((uintptr_t) start, PAGE_SIZE);
	size_t asize = ALIGN_UP(size, PAGE_SIZE);

	/* as_area_create() returns (void *) -1 on failure */
	astart = as_area_create(astart, asize, AS_AREA_WRITE | AS_AREA_READ);
	if (astart == (void *) -1)
		return false;

	/* The heap descriptor lives at the very beginning of the area. */
	heap_area_t *area = (heap_area_t *) astart;

	area->start = astart;
	area->end = (void *) ((uintptr_t) astart + asize);
	area->prev = NULL;
	area->next = NULL;
	area->magic = HEAP_AREA_MAGIC;

	/* Everything past the descriptor becomes one big free block. */
	void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
	size_t bsize = (size_t) (area->end - block);

	block_init(block, bsize, true, area);

	/* Append the new area to the doubly-linked list of areas. */
	if (last_heap_area == NULL) {
		first_heap_area = area;
		last_heap_area = area;
	} else {
		area->prev = last_heap_area;
		last_heap_area->next = area;
		last_heap_area = area;
	}

	return true;
}
317
/** Try to enlarge a heap area
 *
 * Resize the backing address space area and turn the newly
 * gained space into a free block at the end of the area.
 * Should be called only inside the critical section.
 *
 * @param area Heap area to grow.
 * @param size Gross size to grow (bytes).
 *
 * @return True if successful.
 *
 */
static bool area_grow(heap_area_t *area, size_t size)
{
	if (size == 0)
		return true;

	area_check(area);

	/* New heap area size */
	size_t gross_size = (size_t) (area->end - area->start) + size;
	size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
	void *end = (void *) ((uintptr_t) area->start + asize);

	/* Check for overflow */
	if (end < area->start)
		return false;

	/* Resize the address space area */
	int ret = as_area_resize(area->start, asize, 0);
	if (ret != EOK)
		return false;

	/*
	 * Add new free block covering the gained space
	 * (page alignment may have yielded no net gain).
	 */
	size_t net_size = (size_t) (end - area->end);
	if (net_size > 0)
		block_init(area->end, net_size, true, area);

	/* Update heap area parameters */
	area->end = end;

	return true;
}
359
360/** Try to enlarge any of the heap areas
361 *
362 * Should be called only inside the critical section.
363 *
364 * @param size Gross size of item to allocate (bytes).
365 *
366 */
367static bool heap_grow(size_t size)
368{
369 if (size == 0)
370 return true;
371
372 /* First try to enlarge some existing area */
373 for (heap_area_t *area = first_heap_area; area != NULL;
374 area = area->next) {
375 if (area_grow(area, size))
376 return true;
377 }
378
379 /* Eventually try to create a new area */
380 return area_create(AREA_FIRST_BLOCK_HEAD(size));
381}
382
/** Try to shrink heap
 *
 * If the last block of the given area is free, either destroy the
 * whole area (when it consists of that single free block) or return
 * whole pages at its end to the system once the reclaimable space
 * reaches SHRINK_GRANULARITY.
 * Should be called only inside the critical section.
 * In all cases the next pointer is reset.
 *
 * @param area Last modified heap area.
 *
 */
static void heap_shrink(heap_area_t *area)
{
	area_check(area);

	heap_block_foot_t *last_foot =
	    (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
	heap_block_head_t *last_head = BLOCK_HEAD(last_foot);

	block_check((void *) last_head);
	malloc_assert(last_head->area == area);

	if (last_head->free) {
		/*
		 * The last block of the heap area is
		 * unused. The area might be potentially
		 * shrunk.
		 */

		heap_block_head_t *first_head =
		    (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);

		block_check((void *) first_head);
		malloc_assert(first_head->area == area);

		/* Only whole pages can be returned to the system. */
		size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);

		if (first_head == last_head) {
			/*
			 * The entire heap area consists of a single
			 * free heap block. This means we can get rid
			 * of it entirely.
			 */

			/* NB: these locals shadow the file-level 'next' in this scope. */
			heap_area_t *prev = area->prev;
			heap_area_t *next = area->next;

			/* Unlink the area from the global area list. */
			if (prev != NULL) {
				area_check(prev);
				prev->next = next;
			} else
				first_heap_area = next;

			if (next != NULL) {
				area_check(next);
				next->prev = prev;
			} else
				last_heap_area = prev;

			as_area_destroy(area->start);
		} else if (shrink_size >= SHRINK_GRANULARITY) {
			/*
			 * Make sure that we always shrink the area
			 * by a multiple of page size and update
			 * the block layout accordingly.
			 */

			size_t asize = (size_t) (area->end - area->start) - shrink_size;
			void *end = (void *) ((uintptr_t) area->start + asize);

			/* Resize the address space area */
			int ret = as_area_resize(area->start, asize, 0);
			if (ret != EOK)
				abort();

			/* Update heap area parameters */
			area->end = end;

			/* Update block layout */
			void *last = (void *) last_head;
			size_t excess = (size_t) (area->end - last);

			if (excess > 0) {
				if (excess >= STRUCT_OVERHEAD) {
					/*
					 * The previous block cannot be free and there
					 * is enough free space left in the area to
					 * create a new free block.
					 */
					block_init(last, excess, true, area);
				} else {
					/*
					 * The excess is small. Therefore just enlarge
					 * the previous block.
					 */
					heap_block_foot_t *prev_foot = (heap_block_foot_t *)
					    (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
					heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);

					block_check((void *) prev_head);

					block_init(prev_head, prev_head->size + excess,
					    prev_head->free, area);
				}
			}
		}
	}

	/* Reset the next-fit roving pointer (the file-level 'next'). */
	next = NULL;
}
490
491/** Initialize the heap allocator
492 *
493 * Create initial heap memory area. This routine is
494 * only called from libc initialization, thus we do not
495 * take any locks.
496 *
497 */
498void __malloc_init(void)
499{
500 if (!area_create(PAGE_SIZE))
501 abort();
502}
503
504/** Split heap block and mark it as used.
505 *
506 * Should be called only inside the critical section.
507 *
508 * @param cur Heap block to split.
509 * @param size Number of bytes to split and mark from the beginning
510 * of the block.
511 *
512 */
513static void split_mark(heap_block_head_t *cur, const size_t size)
514{
515 malloc_assert(cur->size >= size);
516
517 /* See if we should split the block. */
518 size_t split_limit = GROSS_SIZE(size);
519
520 if (cur->size > split_limit) {
521 /* Block big enough -> split. */
522 void *next = ((void *) cur) + size;
523 block_init(next, cur->size - size, true, cur->area);
524 block_init(cur, size, false, cur->area);
525 } else {
526 /* Block too small -> use as is. */
527 cur->free = false;
528 }
529}
530
/** Allocate memory from heap area starting from given block
 *
 * Walk the blocks of one heap area looking for a free block large
 * enough for the request (next-fit). Handles the extra bookkeeping
 * needed when the payload must be aligned beyond the block start.
 * Should be called only inside the critical section.
 * As a side effect this function also sets the file-level next-fit
 * pointer ('next') on successful allocation.
 *
 * @param area Heap area where to allocate from.
 * @param first_block Starting heap block.
 * @param final_block Heap block where to finish the search
 * (may be NULL).
 * @param real_size Gross number of bytes to allocate.
 * @param falign Physical alignment of the block.
 *
 * @return Address of the allocated block or NULL on not enough memory.
 *
 */
static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
    heap_block_head_t *final_block, size_t real_size, size_t falign)
{
	area_check((void *) area);
	malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) first_block < area->end);

	for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
	    cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
		block_check(cur);

		/* Finish searching on the final block */
		if ((final_block != NULL) && (cur == final_block))
			break;

		/* Try to find a block that is free and large enough. */
		if ((cur->free) && (cur->size >= real_size)) {
			/*
			 * We have found a suitable block.
			 * Check for alignment properties.
			 */
			void *addr = (void *)
			    ((uintptr_t) cur + sizeof(heap_block_head_t));
			void *aligned = (void *)
			    ALIGN_UP((uintptr_t) addr, falign);

			if (addr == aligned) {
				/* Exact block start including alignment. */
				split_mark(cur, real_size);

				next = cur;
				return addr;
			} else {
				/* Block start has to be aligned */
				size_t excess = (size_t) (aligned - addr);

				if (cur->size >= real_size + excess) {
					/*
					 * The current block is large enough to fit
					 * data in (including alignment).
					 */
					if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
						/*
						 * There is a block before the current block.
						 * This previous block can be enlarged to
						 * compensate for the alignment excess.
						 */
						heap_block_foot_t *prev_foot = (heap_block_foot_t *)
						    ((void *) cur - sizeof(heap_block_foot_t));

						heap_block_head_t *prev_head = (heap_block_head_t *)
						    ((void *) cur - prev_foot->size);

						block_check(prev_head);

						size_t reduced_size = cur->size - excess;
						heap_block_head_t *next_head = ((void *) cur) + excess;

						if ((!prev_head->free) &&
						    (excess >= STRUCT_OVERHEAD)) {
							/*
							 * The previous block is not free and there
							 * is enough free space left to fill in
							 * a new free block between the previous
							 * and current block.
							 */
							block_init(cur, excess, true, area);
						} else {
							/*
							 * The previous block is free (thus there
							 * is no need to induce additional
							 * fragmentation to the heap) or the
							 * excess is small. Therefore just enlarge
							 * the previous block.
							 */
							block_init(prev_head, prev_head->size + excess,
							    prev_head->free, area);
						}

						block_init(next_head, reduced_size, true, area);
						split_mark(next_head, real_size);

						next = next_head;
						return aligned;
					} else {
						/*
						 * The current block is the first block
						 * in the heap area. We have to make sure
						 * that the alignment excess is large enough
						 * to fit a new free block just before the
						 * current block.
						 */
						while (excess < STRUCT_OVERHEAD) {
							aligned += falign;
							excess += falign;
						}

						/* Check for current block size again */
						if (cur->size >= real_size + excess) {
							size_t reduced_size = cur->size - excess;
							cur = (heap_block_head_t *)
							    (AREA_FIRST_BLOCK_HEAD(area) + excess);

							block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
							    excess, true, area);
							block_init(cur, reduced_size, true, area);
							split_mark(cur, real_size);

							next = cur;
							return aligned;
						}
					}
				}
			}
		}
	}

	return NULL;
}
666
/** Allocate a memory block
 *
 * First try the next-fit pointer, then scan all heap areas; if both
 * fail, grow the heap once and retry the whole search.
 * Should be called only inside the critical section.
 *
 * @param size The size of the block to allocate.
 * @param align Memory address alignment (must be non-zero).
 *
 * @return Address of the allocated block or NULL on not enough memory.
 *
 */
static void *malloc_internal(const size_t size, const size_t align)
{
	malloc_assert(first_heap_area != NULL);

	if (align == 0)
		return NULL;

	/* Effective alignment must satisfy both the caller and BASE_ALIGN. */
	size_t falign = lcm(align, BASE_ALIGN);
	size_t real_size = GROSS_SIZE(ALIGN_UP(size, falign));

	/* 'retry' ensures we grow the heap at most once per call. */
	bool retry = false;
	heap_block_head_t *split;

loop:

	/* Try the next fit approach */
	split = next;

	if (split != NULL) {
		void *addr = malloc_area(split->area, split, NULL, real_size,
		    falign);

		if (addr != NULL)
			return addr;
	}

	/*
	 * Search the entire heap
	 * (stopping at 'split', which was already covered above).
	 */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {
		heap_block_head_t *first = (heap_block_head_t *)
		    AREA_FIRST_BLOCK_HEAD(area);

		void *addr = malloc_area(area, first, split, real_size,
		    falign);

		if (addr != NULL)
			return addr;
	}

	if (!retry) {
		/* Try to grow the heap space */
		if (heap_grow(real_size)) {
			retry = true;
			goto loop;
		}
	}

	return NULL;
}
726
/** Allocate memory by number of elements
 *
 * The memory is zero-initialized.
 *
 * @param nmemb Number of members to allocate.
 * @param size Size of one member in bytes.
 *
 * @return Allocated memory or NULL (also when nmemb * size
 *         would overflow size_t).
 *
 */
void *calloc(const size_t nmemb, const size_t size)
{
	/*
	 * Guard against nmemb * size overflowing size_t, which would
	 * silently allocate (and zero) a buffer smaller than requested.
	 */
	if ((size != 0) && (nmemb > ((size_t) -1) / size))
		return NULL;

	void *block = malloc(nmemb * size);
	if (block == NULL)
		return NULL;

	memset(block, 0, nmemb * size);
	return block;
}
744
745/** Allocate memory
746 *
747 * @param size Number of bytes to allocate.
748 *
749 * @return Allocated memory or NULL.
750 *
751 */
752void *malloc(const size_t size)
753{
754 futex_down(&malloc_futex);
755 void *block = malloc_internal(size, BASE_ALIGN);
756 futex_up(&malloc_futex);
757
758 return block;
759}
760
761/** Allocate memory with specified alignment
762 *
763 * @param align Alignment in byes.
764 * @param size Number of bytes to allocate.
765 *
766 * @return Allocated memory or NULL.
767 *
768 */
769void *memalign(const size_t align, const size_t size)
770{
771 if (align == 0)
772 return NULL;
773
774 size_t palign =
775 1 << (fnzb(max(sizeof(void *), align) - 1) + 1);
776
777 futex_down(&malloc_futex);
778 void *block = malloc_internal(size, palign);
779 futex_up(&malloc_futex);
780
781 return block;
782}
783
/** Reallocate memory block
 *
 * Shrink in place when possible, otherwise try to merge with a free
 * successor block; as a last resort allocate a new block, copy the
 * data over and free the original.
 *
 * @param addr Already allocated memory or NULL.
 * @param size New size of the memory block.
 *
 * @return Reallocated memory or NULL.
 *
 */
void *realloc(const void *addr, const size_t size)
{
	if (addr == NULL)
		return malloc(size);

	futex_down(&malloc_futex);

	/* Calculate the position of the header. */
	heap_block_head_t *head =
	    (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

	block_check(head);
	malloc_assert(!head->free);

	heap_area_t *area = head->area;

	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);

	void *ptr = NULL;
	bool reloc = false;
	size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
	size_t orig_size = head->size;

	if (orig_size > real_size) {
		/* Shrink */
		if (orig_size - real_size >= STRUCT_OVERHEAD) {
			/*
			 * Split the original block to a full block
			 * and a trailing free block.
			 */
			block_init((void *) head, real_size, false, area);
			block_init((void *) head + real_size,
			    orig_size - real_size, true, area);
			heap_shrink(area);
		}

		ptr = ((void *) head) + sizeof(heap_block_head_t);
	} else {
		/*
		 * Look at the next block. If it is free and the size is
		 * sufficient then merge the two. Otherwise just allocate
		 * a new block, copy the original data into it and
		 * free the original block.
		 */
		heap_block_head_t *next_head =
		    (heap_block_head_t *) (((void *) head) + head->size);

		if (((void *) next_head < area->end) &&
		    (head->size + next_head->size >= real_size) &&
		    (next_head->free)) {
			block_check(next_head);
			block_init(head, head->size + next_head->size, false, area);
			split_mark(head, real_size);

			ptr = ((void *) head) + sizeof(heap_block_head_t);
			/* Block layout changed: invalidate the next-fit pointer. */
			next = NULL;
		} else
			reloc = true;
	}

	futex_up(&malloc_futex);

	/* Relocation is done outside the lock: malloc()/free() lock themselves. */
	if (reloc) {
		ptr = malloc(size);
		if (ptr != NULL) {
			memcpy(ptr, addr, NET_SIZE(orig_size));
			free(addr);
		}
	}

	return ptr;
}
866
867/** Free a memory block
868 *
869 * @param addr The address of the block.
870 *
871 */
872void free(const void *addr)
873{
874 futex_down(&malloc_futex);
875
876 /* Calculate the position of the header. */
877 heap_block_head_t *head
878 = (heap_block_head_t *) (addr - sizeof(heap_block_head_t));
879
880 block_check(head);
881 malloc_assert(!head->free);
882
883 heap_area_t *area = head->area;
884
885 area_check(area);
886 malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
887 malloc_assert((void *) head < area->end);
888
889 /* Mark the block itself as free. */
890 head->free = true;
891
892 /* Look at the next block. If it is free, merge the two. */
893 heap_block_head_t *next_head
894 = (heap_block_head_t *) (((void *) head) + head->size);
895
896 if ((void *) next_head < area->end) {
897 block_check(next_head);
898 if (next_head->free)
899 block_init(head, head->size + next_head->size, true, area);
900 }
901
902 /* Look at the previous block. If it is free, merge the two. */
903 if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
904 heap_block_foot_t *prev_foot =
905 (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));
906
907 heap_block_head_t *prev_head =
908 (heap_block_head_t *) (((void *) head) - prev_foot->size);
909
910 block_check(prev_head);
911
912 if (prev_head->free)
913 block_init(prev_head, prev_head->size + head->size, true,
914 area);
915 }
916
917 heap_shrink(area);
918
919 futex_up(&malloc_futex);
920}
921
/** Check the consistency of the entire heap
 *
 * Walk all heap areas and all blocks within them, verifying
 * magic values and size consistency.
 *
 * @return NULL if the heap is consistent, (void *) -1 if the heap
 *         has not been initialized yet, otherwise a pointer at or
 *         near the first inconsistent structure found.
 *
 */
void *heap_check(void)
{
	futex_down(&malloc_futex);

	if (first_heap_area == NULL) {
		futex_up(&malloc_futex);
		return (void *) -1;
	}

	/* Walk all heap areas */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {

		/* Check heap area consistency (mirrors area_check()) */
		if ((area->magic != HEAP_AREA_MAGIC) ||
		    ((void *) area != area->start) ||
		    (area->start >= area->end) ||
		    (((uintptr_t) area->start % PAGE_SIZE) != 0) ||
		    (((uintptr_t) area->end % PAGE_SIZE) != 0)) {
			futex_up(&malloc_futex);
			return (void *) area;
		}

		/* Walk all heap blocks */
		for (heap_block_head_t *head = (heap_block_head_t *)
		    AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end;
		    head = (heap_block_head_t *) (((void *) head) + head->size)) {

			/* Check heap block consistency (mirrors block_check()) */
			if (head->magic != HEAP_BLOCK_HEAD_MAGIC) {
				futex_up(&malloc_futex);
				return (void *) head;
			}

			heap_block_foot_t *foot = BLOCK_FOOT(head);

			if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) ||
			    (head->size != foot->size)) {
				futex_up(&malloc_futex);
				return (void *) foot;
			}
		}
	}

	futex_up(&malloc_futex);

	return NULL;
}
970
971/** @}
972 */
Note: See TracBrowser for help on using the repository browser.