source: mainline/uspace/lib/c/generic/malloc.c@ faba839

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since faba839 was faba839, checked in by Martin Decky <martin@…>, 13 years ago

use symbolic values for address space constants

  • Property mode set to 100644
File size: 23.7 KB
Line 
1/*
2 * Copyright (c) 2009 Martin Decky
3 * Copyright (c) 2009 Petr Tuma
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup libc
31 * @{
32 */
33/** @file
34 */
35
#include <malloc.h>
#include <bool.h>
#include <as.h>
#include <align.h>
#include <macros.h>
#include <assert.h>
#include <errno.h>
#include <bitops.h>
#include <mem.h>
#include <futex.h>
#include <stdint.h>
#include <stdlib.h>
#include <adt/gcdlcm.h>
#include "private/malloc.h"
49
/** Magic used in heap headers. */
#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)

/** Magic used in heap footers. */
#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)

/** Magic used in heap descriptor. */
#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)

/** Allocation alignment.
 *
 * This also covers the alignment of fields
 * in the heap header and footer.
 *
 */
#define BASE_ALIGN 16

/** Heap shrink granularity
 *
 * Try not to pump and stress the heap too much
 * by shrinking and enlarging it too often.
 * A heap area won't be shrunk if the released
 * free block is smaller than this constant.
 *
 */
#define SHRINK_GRANULARITY (64 * PAGE_SIZE)

/** Overhead of each heap block. */
#define STRUCT_OVERHEAD \
	(sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))

/** Overhead of each area.
 *
 * The argument is parenthesized so that expression
 * arguments expand correctly.
 *
 */
#define AREA_OVERHEAD(size) \
	(ALIGN_UP((size) + sizeof(heap_area_t), BASE_ALIGN))

/** Calculate real size of a heap block.
 *
 * Add header and footer size.
 *
 */
#define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)

/** Calculate net size of a heap block.
 *
 * Subtract header and footer size.
 *
 */
#define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)

/** Get first block in heap area.
 *
 */
#define AREA_FIRST_BLOCK_HEAD(area) \
	(ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))

/** Get last block in heap area.
 *
 */
#define AREA_LAST_BLOCK_FOOT(area) \
	(((uintptr_t) (area)->end) - sizeof(heap_block_foot_t))

/** Get header in heap block.
 *
 */
#define BLOCK_HEAD(foot) \
	((heap_block_head_t *) \
	    (((uintptr_t) (foot)) + sizeof(heap_block_foot_t) - (foot)->size))

/** Get footer in heap block.
 *
 */
#define BLOCK_FOOT(head) \
	((heap_block_foot_t *) \
	    (((uintptr_t) (head)) + (head)->size - sizeof(heap_block_foot_t)))
124
/** Heap area.
 *
 * The memory managed by the heap allocator is divided into
 * multiple discontinuous heaps. Each heap is represented
 * by a separate address space area which has this structure
 * at its very beginning.
 *
 */
typedef struct heap_area {
	/** Start of the heap area (including this structure)
	 *
	 * Aligned on page boundary.
	 *
	 */
	void *start;

	/** End of the heap area (aligned on page boundary) */
	void *end;

	/** Previous heap area */
	struct heap_area *prev;

	/** Next heap area */
	struct heap_area *next;

	/** A magic value (HEAP_AREA_MAGIC) used to detect corruption */
	uint32_t magic;
} heap_area_t;
153
/** Header of a heap block
 *
 */
typedef struct {
	/* Size of the block (including header and footer) */
	size_t size;

	/* Indication of a free block */
	bool free;

	/** Heap area this block belongs to */
	heap_area_t *area;

	/* A magic value to detect overwrite of heap header */
	uint32_t magic;
} heap_block_head_t;

/** Footer of a heap block
 *
 * The size is duplicated in the footer so that the header of
 * the previous block can be located when walking backwards
 * (see the BLOCK_HEAD() macro).
 *
 */
typedef struct {
	/* Size of the block (including header and footer) */
	size_t size;

	/* A magic value to detect overwrite of heap footer */
	uint32_t magic;
} heap_block_foot_t;
181
/** First heap area (head of the doubly-linked list of areas) */
static heap_area_t *first_heap_area = NULL;

/** Last heap area (tail of the doubly-linked list of areas) */
static heap_area_t *last_heap_area = NULL;

/** Next heap block to examine (next fit algorithm) */
static heap_block_head_t *next_fit = NULL;

/** Futex for thread-safe heap manipulation */
static futex_t malloc_futex = FUTEX_INITIALIZER;

#ifndef NDEBUG

/*
 * Assertion for use inside the critical section: the heap futex
 * is released before reporting the failure so that the abort
 * machinery cannot deadlock on the heap lock.
 */
#define malloc_assert(expr) \
	do { \
		if (!(expr)) {\
			futex_up(&malloc_futex); \
			assert_abort(#expr, __FILE__, __LINE__); \
		} \
	} while (0)

#else /* NDEBUG */

#define malloc_assert(expr)

#endif /* NDEBUG */
209
210/** Initialize a heap block
211 *
212 * Fill in the structures related to a heap block.
213 * Should be called only inside the critical section.
214 *
215 * @param addr Address of the block.
216 * @param size Size of the block including the header and the footer.
217 * @param free Indication of a free block.
218 * @param area Heap area the block belongs to.
219 *
220 */
221static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
222{
223 /* Calculate the position of the header and the footer */
224 heap_block_head_t *head = (heap_block_head_t *) addr;
225
226 head->size = size;
227 head->free = free;
228 head->area = area;
229 head->magic = HEAP_BLOCK_HEAD_MAGIC;
230
231 heap_block_foot_t *foot = BLOCK_FOOT(head);
232
233 foot->size = size;
234 foot->magic = HEAP_BLOCK_FOOT_MAGIC;
235}
236
237/** Check a heap block
238 *
239 * Verifies that the structures related to a heap block still contain
240 * the magic constants. This helps detect heap corruption early on.
241 * Should be called only inside the critical section.
242 *
243 * @param addr Address of the block.
244 *
245 */
246static void block_check(void *addr)
247{
248 heap_block_head_t *head = (heap_block_head_t *) addr;
249
250 malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);
251
252 heap_block_foot_t *foot = BLOCK_FOOT(head);
253
254 malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
255 malloc_assert(head->size == foot->size);
256}
257
258/** Check a heap area structure
259 *
260 * Should be called only inside the critical section.
261 *
262 * @param addr Address of the heap area.
263 *
264 */
265static void area_check(void *addr)
266{
267 heap_area_t *area = (heap_area_t *) addr;
268
269 malloc_assert(area->magic == HEAP_AREA_MAGIC);
270 malloc_assert(addr == area->start);
271 malloc_assert(area->start < area->end);
272 malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
273 malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
274}
275
276/** Create new heap area
277 *
278 * Should be called only inside the critical section.
279 *
280 * @param size Size of the area.
281 *
282 */
283static bool area_create(size_t size)
284{
285 /* Align the heap area size on page boundary */
286 size_t asize = ALIGN_UP(size, PAGE_SIZE);
287 void *astart = as_area_create(AS_AREA_ANY, asize,
288 AS_AREA_WRITE | AS_AREA_READ);
289 if (astart == AS_MAP_FAILED)
290 return false;
291
292 heap_area_t *area = (heap_area_t *) astart;
293
294 area->start = astart;
295 area->end = (void *) ((uintptr_t) astart + asize);
296 area->prev = NULL;
297 area->next = NULL;
298 area->magic = HEAP_AREA_MAGIC;
299
300 void *block = (void *) AREA_FIRST_BLOCK_HEAD(area);
301 size_t bsize = (size_t) (area->end - block);
302
303 block_init(block, bsize, true, area);
304
305 if (last_heap_area == NULL) {
306 first_heap_area = area;
307 last_heap_area = area;
308 } else {
309 area->prev = last_heap_area;
310 last_heap_area->next = area;
311 last_heap_area = area;
312 }
313
314 return true;
315}
316
317/** Try to enlarge a heap area
318 *
319 * Should be called only inside the critical section.
320 *
321 * @param area Heap area to grow.
322 * @param size Gross size to grow (bytes).
323 *
324 * @return True if successful.
325 *
326 */
327static bool area_grow(heap_area_t *area, size_t size)
328{
329 if (size == 0)
330 return true;
331
332 area_check(area);
333
334 /* New heap area size */
335 size_t gross_size = (size_t) (area->end - area->start) + size;
336 size_t asize = ALIGN_UP(gross_size, PAGE_SIZE);
337 void *end = (void *) ((uintptr_t) area->start + asize);
338
339 /* Check for overflow */
340 if (end < area->start)
341 return false;
342
343 /* Resize the address space area */
344 int ret = as_area_resize(area->start, asize, 0);
345 if (ret != EOK)
346 return false;
347
348 /* Add new free block */
349 size_t net_size = (size_t) (end - area->end);
350 if (net_size > 0)
351 block_init(area->end, net_size, true, area);
352
353 /* Update heap area parameters */
354 area->end = end;
355
356 return true;
357}
358
359/** Try to enlarge any of the heap areas
360 *
361 * Should be called only inside the critical section.
362 *
363 * @param size Gross size of item to allocate (bytes).
364 *
365 */
366static bool heap_grow(size_t size)
367{
368 if (size == 0)
369 return true;
370
371 /* First try to enlarge some existing area */
372 for (heap_area_t *area = first_heap_area; area != NULL;
373 area = area->next) {
374 if (area_grow(area, size))
375 return true;
376 }
377
378 /* Eventually try to create a new area */
379 return area_create(AREA_OVERHEAD(size));
380}
381
/** Try to shrink heap
 *
 * If the last block of the area is free, either destroy the
 * whole area (when it is a single free block) or return whole
 * pages of the trailing free block back to the system.
 * Should be called only inside the critical section.
 * In all cases the next fit pointer is reset.
 *
 * @param area Last modified heap area.
 *
 */
static void heap_shrink(heap_area_t *area)
{
	area_check(area);

	heap_block_foot_t *last_foot =
	    (heap_block_foot_t *) AREA_LAST_BLOCK_FOOT(area);
	heap_block_head_t *last_head = BLOCK_HEAD(last_foot);

	block_check((void *) last_head);
	malloc_assert(last_head->area == area);

	if (last_head->free) {
		/*
		 * The last block of the heap area is
		 * unused. The area might be potentially
		 * shrunk.
		 */

		heap_block_head_t *first_head =
		    (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area);

		block_check((void *) first_head);
		malloc_assert(first_head->area == area);

		/* Number of whole pages the free block spans. */
		size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);

		if (first_head == last_head) {
			/*
			 * The entire heap area consists of a single
			 * free heap block. This means we can get rid
			 * of it entirely.
			 */

			heap_area_t *prev = area->prev;
			heap_area_t *next = area->next;

			/* Unlink the area from the doubly-linked list. */
			if (prev != NULL) {
				area_check(prev);
				prev->next = next;
			} else
				first_heap_area = next;

			if (next != NULL) {
				area_check(next);
				next->prev = prev;
			} else
				last_heap_area = prev;

			as_area_destroy(area->start);
		} else if (shrink_size >= SHRINK_GRANULARITY) {
			/*
			 * Make sure that we always shrink the area
			 * by a multiple of page size and update
			 * the block layout accordingly.
			 */

			size_t asize = (size_t) (area->end - area->start) - shrink_size;
			void *end = (void *) ((uintptr_t) area->start + asize);

			/* Resize the address space area */
			int ret = as_area_resize(area->start, asize, 0);
			if (ret != EOK)
				abort();

			/* Update heap area parameters */
			area->end = end;

			/* Bytes of the former free block left below the new end. */
			size_t excess = ((size_t) area->end) - ((size_t) last_head);

			if (excess > 0) {
				if (excess >= STRUCT_OVERHEAD) {
					/*
					 * The previous block cannot be free and there
					 * is enough free space left in the area to
					 * create a new free block.
					 */
					block_init((void *) last_head, excess, true, area);
				} else {
					/*
					 * The excess is small. Therefore just enlarge
					 * the previous block.
					 */
					heap_block_foot_t *prev_foot = (heap_block_foot_t *)
					    (((uintptr_t) last_head) - sizeof(heap_block_foot_t));
					heap_block_head_t *prev_head = BLOCK_HEAD(prev_foot);

					block_check((void *) prev_head);

					block_init(prev_head, prev_head->size + excess,
					    prev_head->free, area);
				}
			}
		}
	}

	/* Invalidate the next fit hint in all cases. */
	next_fit = NULL;
}
486
487/** Initialize the heap allocator
488 *
489 * Create initial heap memory area. This routine is
490 * only called from libc initialization, thus we do not
491 * take any locks.
492 *
493 */
494void __malloc_init(void)
495{
496 if (!area_create(PAGE_SIZE))
497 abort();
498}
499
500/** Split heap block and mark it as used.
501 *
502 * Should be called only inside the critical section.
503 *
504 * @param cur Heap block to split.
505 * @param size Number of bytes to split and mark from the beginning
506 * of the block.
507 *
508 */
509static void split_mark(heap_block_head_t *cur, const size_t size)
510{
511 malloc_assert(cur->size >= size);
512
513 /* See if we should split the block. */
514 size_t split_limit = GROSS_SIZE(size);
515
516 if (cur->size > split_limit) {
517 /* Block big enough -> split. */
518 void *next = ((void *) cur) + size;
519 block_init(next, cur->size - size, true, cur->area);
520 block_init(cur, size, false, cur->area);
521 } else {
522 /* Block too small -> use as is. */
523 cur->free = false;
524 }
525}
526
/** Allocate memory from heap area starting from given block
 *
 * Walks the blocks of a single heap area (next fit) looking for
 * a free block large enough for the requested gross size and
 * alignment, splitting blocks and compensating for alignment
 * excess as needed.
 * Should be called only inside the critical section.
 * As a side effect this function also sets the current
 * pointer on successful allocation.
 *
 * @param area        Heap area where to allocate from.
 * @param first_block Starting heap block.
 * @param final_block Heap block where to finish the search
 *                    (may be NULL).
 * @param real_size   Gross number of bytes to allocate.
 * @param falign      Physical alignment of the block.
 *
 * @return Address of the allocated block or NULL on not enough memory.
 *
 */
static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
    heap_block_head_t *final_block, size_t real_size, size_t falign)
{
	area_check((void *) area);
	malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) first_block < area->end);

	for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
	    cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
		block_check(cur);

		/* Finish searching on the final block */
		if ((final_block != NULL) && (cur == final_block))
			break;

		/* Try to find a block that is free and large enough. */
		if ((cur->free) && (cur->size >= real_size)) {
			/*
			 * We have found a suitable block.
			 * Check for alignment properties.
			 */
			void *addr = (void *)
			    ((uintptr_t) cur + sizeof(heap_block_head_t));
			void *aligned = (void *)
			    ALIGN_UP((uintptr_t) addr, falign);

			if (addr == aligned) {
				/* Exact block start including alignment. */
				split_mark(cur, real_size);

				next_fit = cur;
				return addr;
			} else {
				/* Block start has to be aligned */
				size_t excess = (size_t) (aligned - addr);

				if (cur->size >= real_size + excess) {
					/*
					 * The current block is large enough to fit
					 * data in (including alignment).
					 */
					if ((void *) cur > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
						/*
						 * There is a block before the current block.
						 * This previous block can be enlarged to
						 * compensate for the alignment excess.
						 */
						heap_block_foot_t *prev_foot = (heap_block_foot_t *)
						    ((void *) cur - sizeof(heap_block_foot_t));

						heap_block_head_t *prev_head = (heap_block_head_t *)
						    ((void *) cur - prev_foot->size);

						block_check(prev_head);

						size_t reduced_size = cur->size - excess;
						heap_block_head_t *next_head = ((void *) cur) + excess;

						if ((!prev_head->free) &&
						    (excess >= STRUCT_OVERHEAD)) {
							/*
							 * The previous block is not free and there
							 * is enough free space left to fill in
							 * a new free block between the previous
							 * and current block.
							 */
							block_init(cur, excess, true, area);
						} else {
							/*
							 * The previous block is free (thus there
							 * is no need to induce additional
							 * fragmentation to the heap) or the
							 * excess is small. Therefore just enlarge
							 * the previous block.
							 */
							block_init(prev_head, prev_head->size + excess,
							    prev_head->free, area);
						}

						block_init(next_head, reduced_size, true, area);
						split_mark(next_head, real_size);

						next_fit = next_head;
						return aligned;
					} else {
						/*
						 * The current block is the first block
						 * in the heap area. We have to make sure
						 * that the alignment excess is large enough
						 * to fit a new free block just before the
						 * current block.
						 */
						while (excess < STRUCT_OVERHEAD) {
							aligned += falign;
							excess += falign;
						}

						/* Check for current block size again */
						if (cur->size >= real_size + excess) {
							size_t reduced_size = cur->size - excess;
							cur = (heap_block_head_t *)
							    (AREA_FIRST_BLOCK_HEAD(area) + excess);

							block_init((void *) AREA_FIRST_BLOCK_HEAD(area),
							    excess, true, area);
							block_init(cur, reduced_size, true, area);
							split_mark(cur, real_size);

							next_fit = cur;
							return aligned;
						}
					}
				}
			}
		}
	}

	return NULL;
}
662
/** Allocate a memory block
 *
 * First retries the last successful position (next fit), then
 * searches all heap areas, and finally attempts to grow the
 * heap and search once more.
 * Should be called only inside the critical section.
 *
 * @param size  The size of the block to allocate.
 * @param align Memory address alignment.
 *
 * @return Address of the allocated block or NULL on not enough memory.
 *
 */
static void *malloc_internal(const size_t size, const size_t align)
{
	malloc_assert(first_heap_area != NULL);

	if (align == 0)
		return NULL;

	/* Effective alignment and gross (header + payload + footer) size. */
	size_t falign = lcm(align, BASE_ALIGN);
	size_t real_size = GROSS_SIZE(ALIGN_UP(size, falign));

	bool retry = false;
	heap_block_head_t *split;

loop:

	/* Try the next fit approach */
	split = next_fit;

	if (split != NULL) {
		void *addr = malloc_area(split->area, split, NULL, real_size,
		    falign);

		if (addr != NULL)
			return addr;
	}

	/* Search the entire heap */
	for (heap_area_t *area = first_heap_area; area != NULL;
	    area = area->next) {
		heap_block_head_t *first = (heap_block_head_t *)
		    AREA_FIRST_BLOCK_HEAD(area);

		/* Stop at the next fit block (already examined above). */
		void *addr = malloc_area(area, first, split, real_size,
		    falign);

		if (addr != NULL)
			return addr;
	}

	if (!retry) {
		/* Try to grow the heap space */
		if (heap_grow(real_size)) {
			retry = true;
			goto loop;
		}
	}

	return NULL;
}
722
/** Allocate memory by number of elements
 *
 * The total size (nmemb * size) is checked for overflow
 * before the allocation takes place.
 *
 * @param nmemb Number of members to allocate.
 * @param size  Size of one member in bytes.
 *
 * @return Allocated (zero-filled) memory or NULL.
 *
 */
void *calloc(const size_t nmemb, const size_t size)
{
	/* Guard against size_t overflow of nmemb * size. */
	if ((size != 0) && (nmemb > (SIZE_MAX / size)))
		return NULL;

	size_t total = nmemb * size;

	void *block = malloc(total);
	if (block == NULL)
		return NULL;

	memset(block, 0, total);
	return block;
}
740
741/** Allocate memory
742 *
743 * @param size Number of bytes to allocate.
744 *
745 * @return Allocated memory or NULL.
746 *
747 */
748void *malloc(const size_t size)
749{
750 futex_down(&malloc_futex);
751 void *block = malloc_internal(size, BASE_ALIGN);
752 futex_up(&malloc_futex);
753
754 return block;
755}
756
/** Allocate memory with specified alignment
 *
 * @param align Alignment in bytes.
 * @param size Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *memalign(const size_t align, const size_t size)
{
	if (align == 0)
		return NULL;

	/*
	 * Round the alignment up to a power of two that is at least
	 * sizeof(void *).
	 * NOTE(review): assumes fnzb() yields the index of the most
	 * significant set bit (see <bitops.h>) -- confirm.
	 */
	size_t palign =
	    1 << (fnzb(max(sizeof(void *), align) - 1) + 1);

	futex_down(&malloc_futex);
	void *block = malloc_internal(size, palign);
	futex_up(&malloc_futex);

	return block;
}
779
/** Reallocate memory block
 *
 * Shrinks in place when possible, otherwise tries to merge with
 * a following free block, and as a last resort allocates a new
 * block, copies the payload and frees the original.
 *
 * @param addr Already allocated memory or NULL.
 * @param size New size of the memory block.
 *
 * @return Reallocated memory or NULL.
 *
 */
void *realloc(const void *addr, const size_t size)
{
	if (addr == NULL)
		return malloc(size);

	futex_down(&malloc_futex);

	/* Calculate the position of the header. */
	heap_block_head_t *head =
	    (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

	block_check(head);
	malloc_assert(!head->free);

	heap_area_t *area = head->area;

	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);

	void *ptr = NULL;
	bool reloc = false;
	size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
	size_t orig_size = head->size;

	if (orig_size > real_size) {
		/* Shrink */
		if (orig_size - real_size >= STRUCT_OVERHEAD) {
			/*
			 * Split the original block to a full block
			 * and a trailing free block.
			 */
			block_init((void *) head, real_size, false, area);
			block_init((void *) head + real_size,
			    orig_size - real_size, true, area);
			heap_shrink(area);
		}

		ptr = ((void *) head) + sizeof(heap_block_head_t);
	} else {
		/*
		 * Look at the next block. If it is free and the size is
		 * sufficient then merge the two. Otherwise just allocate
		 * a new block, copy the original data into it and
		 * free the original block.
		 */
		heap_block_head_t *next_head =
		    (heap_block_head_t *) (((void *) head) + head->size);

		if (((void *) next_head < area->end) &&
		    (head->size + next_head->size >= real_size) &&
		    (next_head->free)) {
			block_check(next_head);
			block_init(head, head->size + next_head->size, false, area);
			split_mark(head, real_size);

			ptr = ((void *) head) + sizeof(heap_block_head_t);
			next_fit = NULL;
		} else
			reloc = true;
	}

	futex_up(&malloc_futex);

	if (reloc) {
		/* Relocate: allocate anew, copy the payload, free the old block. */
		ptr = malloc(size);
		if (ptr != NULL) {
			memcpy(ptr, addr, NET_SIZE(orig_size));
			free(addr);
		}
	}

	return ptr;
}
862
/** Free a memory block
 *
 * Marks the block as free and coalesces it with adjacent free
 * blocks (first the following one, then the preceding one),
 * then tries to shrink the containing heap area.
 *
 * @param addr The address of the block.
 *
 */
void free(const void *addr)
{
	if (addr == NULL)
		return;

	futex_down(&malloc_futex);

	/* Calculate the position of the header. */
	heap_block_head_t *head
	    = (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

	block_check(head);
	malloc_assert(!head->free);

	heap_area_t *area = head->area;

	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);

	/* Mark the block itself as free. */
	head->free = true;

	/* Look at the next block. If it is free, merge the two. */
	heap_block_head_t *next_head
	    = (heap_block_head_t *) (((void *) head) + head->size);

	if ((void *) next_head < area->end) {
		block_check(next_head);
		if (next_head->free)
			block_init(head, head->size + next_head->size, true, area);
	}

	/* Look at the previous block. If it is free, merge the two. */
	if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
		/* The previous block's footer sits directly below our header. */
		heap_block_foot_t *prev_foot =
		    (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));

		heap_block_head_t *prev_head =
		    (heap_block_head_t *) (((void *) head) - prev_foot->size);

		block_check(prev_head);

		if (prev_head->free)
			block_init(prev_head, prev_head->size + head->size, true,
			    area);
	}

	/* Possibly return trailing free pages back to the system. */
	heap_shrink(area);

	futex_up(&malloc_futex);
}
920
921void *heap_check(void)
922{
923 futex_down(&malloc_futex);
924
925 if (first_heap_area == NULL) {
926 futex_up(&malloc_futex);
927 return (void *) -1;
928 }
929
930 /* Walk all heap areas */
931 for (heap_area_t *area = first_heap_area; area != NULL;
932 area = area->next) {
933
934 /* Check heap area consistency */
935 if ((area->magic != HEAP_AREA_MAGIC) ||
936 ((void *) area != area->start) ||
937 (area->start >= area->end) ||
938 (((uintptr_t) area->start % PAGE_SIZE) != 0) ||
939 (((uintptr_t) area->end % PAGE_SIZE) != 0)) {
940 futex_up(&malloc_futex);
941 return (void *) area;
942 }
943
944 /* Walk all heap blocks */
945 for (heap_block_head_t *head = (heap_block_head_t *)
946 AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end;
947 head = (heap_block_head_t *) (((void *) head) + head->size)) {
948
949 /* Check heap block consistency */
950 if (head->magic != HEAP_BLOCK_HEAD_MAGIC) {
951 futex_up(&malloc_futex);
952 return (void *) head;
953 }
954
955 heap_block_foot_t *foot = BLOCK_FOOT(head);
956
957 if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) ||
958 (head->size != foot->size)) {
959 futex_up(&malloc_futex);
960 return (void *) foot;
961 }
962 }
963 }
964
965 futex_up(&malloc_futex);
966
967 return NULL;
968}
969
970/** @}
971 */
Note: See TracBrowser for help on using the repository browser.